From da0943b56fbb79662c7b7378569837905083e2ee Mon Sep 17 00:00:00 2001 From: wenhuach21 Date: Mon, 12 Dec 2022 11:47:03 +0800 Subject: [PATCH 1/9] add pruning v2 Signed-off-by: wenhuach21 --- .../scripts/codeScan/pyspelling/inc_dict.txt | 11 + docs/source/_static/imgs/pruning/pruning.PNG | Bin 0 -> 25509 bytes .../_static/imgs/pruning/pruning_criteria.PNG | Bin 0 -> 29972 bytes .../_static/imgs/pruning/pruning_patterns.png | Bin 22446 -> 37938 bytes .../_static/imgs/pruning/pruning_schedule.PNG | Bin 0 -> 52290 bytes .../_static/imgs/pruning/regularization.PNG | Bin 0 -> 37001 bytes docs/source/pruning.md | 2 +- neural_compressor/conf/config.py | 4 +- neural_compressor/experimental/pruning.py | 9 +- .../experimental/pytorch_pruner/patterns.py | 574 --------- .../pytorch_pruner/prune_utils.py | 221 ---- .../experimental/pytorch_pruner/pruner.py | 347 ------ .../experimental/pytorch_pruner/pruning.py | 163 --- neural_compressor/pruner/README.md | 194 +++ .../pytorch_pruner => pruner}/__init__.py | 6 +- neural_compressor/pruner/criteria.py | 188 +++ .../pytorch_pruner => pruner}/logger.py | 4 +- neural_compressor/pruner/patterns.py | 1110 +++++++++++++++++ .../pruner_legacy}/__init__.py | 0 .../pruner_legacy}/gradient_sensitivity.py | 2 +- .../pruner_legacy}/group_lasso.py | 2 +- .../pruner_legacy}/magnitude.py | 2 +- .../pruner_legacy}/pattern_lock.py | 0 .../pruner_legacy}/pruner.py | 2 +- .../pruner_legacy}/util/block_mask.py | 0 neural_compressor/pruner/pruners.py | 565 +++++++++ neural_compressor/pruner/regs.py | 127 ++ .../scheduler.py => pruner/schedulers.py} | 85 +- neural_compressor/pruner/utils.py | 247 ++++ neural_compressor/pruning.py | 301 +++-- test/pruning/test_pruning.py | 155 +-- test/pruning/test_pruning_config.py | 80 ++ test/pruning/test_pruning_criteria.py | 87 ++ test/pruning/test_pruning_patterns.py | 83 ++ test/pruning/test_pruning_regs.py | 98 ++ test/pruning/test_pruning_schedulers.py | 81 ++ test/pruning/test_pruning_types.py | 87 ++ 
test/pruning/test_pytorch_pruning.py | 203 --- .../test_gradient_sensitivity.py | 0 .../test_pattern_lock.py | 0 test/pruning_v1/test_pruning.py | 132 ++ .../test_pruning_group_lasso.py | 0 .../test_pruning_pattern.py | 0 .../test_pruning_pure_yaml.py | 0 .../test_tensorflow_distributed_pruning.py | 0 .../test_tensorflow_pruning.py | 0 .../test_tensorflow_pruning_utility.py | 0 47 files changed, 3375 insertions(+), 1797 deletions(-) create mode 100644 docs/source/_static/imgs/pruning/pruning.PNG create mode 100644 docs/source/_static/imgs/pruning/pruning_criteria.PNG create mode 100644 docs/source/_static/imgs/pruning/pruning_schedule.PNG create mode 100644 docs/source/_static/imgs/pruning/regularization.PNG delete mode 100644 neural_compressor/experimental/pytorch_pruner/patterns.py delete mode 100644 neural_compressor/experimental/pytorch_pruner/prune_utils.py delete mode 100644 neural_compressor/experimental/pytorch_pruner/pruner.py delete mode 100644 neural_compressor/experimental/pytorch_pruner/pruning.py create mode 100644 neural_compressor/pruner/README.md rename neural_compressor/{experimental/pytorch_pruner => pruner}/__init__.py (87%) create mode 100644 neural_compressor/pruner/criteria.py rename neural_compressor/{experimental/pytorch_pruner => pruner}/logger.py (90%) create mode 100644 neural_compressor/pruner/patterns.py rename neural_compressor/{pruners => pruner/pruner_legacy}/__init__.py (100%) rename neural_compressor/{pruners => pruner/pruner_legacy}/gradient_sensitivity.py (99%) rename neural_compressor/{pruners => pruner/pruner_legacy}/group_lasso.py (98%) rename neural_compressor/{pruners => pruner/pruner_legacy}/magnitude.py (98%) rename neural_compressor/{pruners => pruner/pruner_legacy}/pattern_lock.py (100%) rename neural_compressor/{pruners => pruner/pruner_legacy}/pruner.py (98%) rename neural_compressor/{pruners => pruner/pruner_legacy}/util/block_mask.py (100%) create mode 100644 neural_compressor/pruner/pruners.py create mode 100644 
neural_compressor/pruner/regs.py rename neural_compressor/{experimental/pytorch_pruner/scheduler.py => pruner/schedulers.py} (64%) create mode 100644 neural_compressor/pruner/utils.py create mode 100644 test/pruning/test_pruning_config.py create mode 100644 test/pruning/test_pruning_criteria.py create mode 100644 test/pruning/test_pruning_patterns.py create mode 100644 test/pruning/test_pruning_regs.py create mode 100644 test/pruning/test_pruning_schedulers.py create mode 100644 test/pruning/test_pruning_types.py delete mode 100644 test/pruning/test_pytorch_pruning.py rename test/{pruning => pruning_v1}/test_gradient_sensitivity.py (100%) rename test/{pruning => pruning_v1}/test_pattern_lock.py (100%) create mode 100644 test/pruning_v1/test_pruning.py rename test/{pruning => pruning_v1}/test_pruning_group_lasso.py (100%) rename test/{pruning => pruning_v1}/test_pruning_pattern.py (100%) rename test/{pruning => pruning_v1}/test_pruning_pure_yaml.py (100%) rename test/{pruning => pruning_v1}/test_tensorflow_distributed_pruning.py (100%) rename test/{pruning => pruning_v1}/test_tensorflow_pruning.py (100%) rename test/{pruning => pruning_v1}/test_tensorflow_pruning_utility.py (100%) diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt b/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt index cecf227cbaf..3ade5e4de45 100644 --- a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt +++ b/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt @@ -150,6 +150,7 @@ berts bertsquad BertTokenizer bfloat +blockwise BFP BGR Bianchi @@ -327,6 +328,7 @@ convolutional Convolutional ConvPerStage ConvReLU +cooldown copt coreml CoreML @@ -741,6 +743,7 @@ horovodrun hostfile Hounsfield howpublished +hyp HqEgzS href html @@ -1179,6 +1182,7 @@ ngatang NGPUS ngram NHWC +ni NIC nifti niftis @@ -1240,8 +1244,11 @@ nvidia NVIDIA NVIDIA's nvme +nw Nx +NxM nyu +oc ok ol Omer @@ -1251,6 +1258,7 @@ oneapi oneAPI onednn oneDNN +oneshot onlinedocs onnx ONNX @@ 
-1885,6 +1893,7 @@ UI UID uint uk +ultralytics un uncomment uncompress @@ -1895,6 +1904,7 @@ unidecode uniq unittest unref +unscale unsqueeze unstack upenn @@ -2121,6 +2131,7 @@ tensorrt hardwares BenchmarkConf PruningConf +Pruning's DistillationConf grey ModelZoo diff --git a/docs/source/_static/imgs/pruning/pruning.PNG b/docs/source/_static/imgs/pruning/pruning.PNG new file mode 100644 index 0000000000000000000000000000000000000000..0c6c53295abff842b62da7be736fbf655ffff534 GIT binary patch literal 25509 zcmbrmWn9$T_s1(GDIqoFARr)(APgZTDXnxPQqnSXj4&Y5Ac8aq2rAtj14>E_N(e(E zJ#^RoqMY+P_kSPW2fieCtiATypS|{b5vHlGc#D9R;Kq#`x0IFSp4_;B8GYjh20z|S z;47kRTVB9_7;aA#Wp5Ps)2{%ZVB5&3$=tY67DISufdhPw@1kVjcH;)s-K&2X%1>Cg zZrrdGQrBh6S5|MaF$nysRv2D@|#dpsv`{Zkx>d{_#U=y`LHO zn_o6iE`f7NAd!Mw>e|MDGNX%HrL*oz<#^cl9BwY+%mS7lM_7F%|!OFAfhU z`sxd$%wz=gAy?m%W2S%*Tz&729ES_VpYMff&T#x~=)?c-hQ=O6gQd{X^rCYfn^PMe z=*g98hw(YxBE!Ys1DTrv2NjuhadB}o`@j2A;RB-4^P(l-I4Qpc5&^tVX{TLa$sNUW z%^7`x&S(a=)5A?qqZ%joM~@!KAT;a+t-s^-FNqnKbYJ~gCK3U=ZjkgDEn1tpMx<5*w4^p1*Zt`5P8)D<8(oxi z2HINd-Sp4Wu75)b9O*(CESX#^6pTc>_h|K$)VqbjD)NIs!-z4rnuphzRM8Pi4Luf|l>(w}x zv|viOi!pg0TFbDfyq6eiEZSdh%Zj~BpflYqr8B)$_4b%Mne0S&pDQae#QwFqT<>u> zC07vd(kZ&3L}>nGGCz9Jcz;Uq!j_kSp78hbP>$2~T(2VG??^Hc|8E1R!Fn4`L9~dY z7qohO@x2*iZvOl0<>1hJ$JaBSCh9gB%^*J6?03PzaIr!2GUD- z4XULIJ67KRZZMlyJhho;>Yn@ZbUXzMajuNJA_`sB|4kh) zm6Nx2(|ckCHL)aOoM=%C^P;GqwGRD0B6f?jzs48paWE z^{TZkrtzTAjl2E7QXTE`eWYF>Wu|lCD6N?!vI)kOx5)*lx(E{C4kf-7wsZM^humKc z^PF^5wESLuPrC{HU?s~LiRkBW`F?}$+jT@0YZKDIl_(M`3opTc2)`;!uizo!GEB;GbQlaMF!?+zmgQWr4ksY3(^~3n7HN znyWoPmiS0H{V~Kghl^_WNRbgd2-+XHfsP!H5gAdEI%bmczTQ9jqGk;qMGeQlQ|at; z&GrK(bK%X(xbuMhg62D88a{mZki~R9uqAoelpOQm@2@U!5ne5x7GLvzk#n7f|LRkx z>1O{TPdT#S-s5V=iOK$S@&7fp4<}9x#O$P?7k$afAs3&=cm3x!#&&iD8E?nP6E!A{XUVDv|RX*M*5!&XF4@KHFIj(A7LtbZ|Y;W zmhNM@==Uw05`i6`M|Z5*1Qr2SUTp+Xt~ry0Sue~XEtOK;=WqXuB}!yRCZJ1?>Q8)@ z9DIm+r`uTPwsh03>352~;pPuIuB8IK5)Y9{mq$R?pZ>Ug4cnTz0bp*#)zN?n{rw{q zC6X^O>BRzGu<1?YDWcDNbmd2@a2(6CTaO4?5v)9K{j5>rXHkTc4r#1g$I9&K6pg>d 
zhV0vW61d<4$8?M96$8YQ6$0-QJ0JzSwW?f8dS?Dr6V3j<|KCo(=oAEDLVqnxphVcD zeq41L>SrZ$-Ju^8i%vK(_C`OZ^u@1BeW(O0#@$S#!A^Tj4RJ|YhhH|XftjdIG@@>H zChoFtH8NrM7M(D5yh@7G!lS<~@xTDLS2oa+KtVwv2y?Gb<*>HIZN@31p_i^(}}ea+c`_X0xpxq;orf-Xonq_YPXjy`x8z zhkyOc5R?V{cXJeVi|JDl&_9))MKt?tW3jIzBuh^_ci#P7SKJM@4*k;3)1ky~wTZG9 zOHIqC=9wW_lJ-Nacu}`KB?ec!PSuh@Sh{S*O$Wc4%On;j`*oVY@)ygnD}P}pCKwoH zLwlGdA4$(|ti_{m?X;6W`@6)9m4rxcNF-e^XjIlNkPY=iYYuI}lj7PrG1h4-Qi)>W zRdfH=bDXD^8Wa+j>O?2vq}@c43>~b=TQyGx);ur@1@T5YqI;<~>>JZ1wiAI3)>+;K z&gla0w+5Bj##*DgDwST_jmnPOVYu4gTL87AC+wc7);7>_C*AT)9cAEJQsw-RTC-x^ z&u5HXi7aDQvdx%#Y{QVRjA+38vdGO%f+g{W&y#&whxJH zN0T|-t7PJ*wf)spW=ma%Q=!(@BPx|!DJG2K9@P39>ePNB?gRT9(s)v4F_3r{g#{(VQUOB)|9OTag*^52Q&bk@??4=zvDHAM`8Mc?< zL~c3Paofa+_U~tG)57az{6i_`8R9EQg zc`a}X+h-Fe0&$RHj4^G=OnCadSG4BAR2L8f8`wXe351^w`Er9IZrY7R(TgMw8WOY@ zg;LoX>fP^jx#i0sg26Uf4w6jNRliWcQnRo7ipN;CEa3jN=eW?IB4|5MO=Rc8->{kT z32v3sDuB}&1NOWUZ+A|1L?=*Enf`8;QG4+*A3o5?H2+4<47lm;FKp5Kjv()D@4cp<*D7cmgsdK#IoyZh)O24mxO8*ue|zBv*lY$j<@!c({X6H!Q)e-upeD z*_3{T(~G~SyKtA-82&%!QEVw+M%DQF{9%b>TAWia*7eaM1YR?Yrt9QTz<z%QRE2@m@PMwsz+JiUsd7PpC;<`{CVzyf|$;q zb0-yADo#M}=CavzcoWB=?W5r2QcDFg9lDxO^#G|Trp&j(i}m;TewDCmH=KtfLt`no z=0Mf;X-Wk*2T)P30TtyiR(glyxm2@l;%Vxop_UPge{TFH?42{S1!i&b6!h;;9k*=YBzTQmH5^3Pk*Ft9w2pP0IAE* z;Wc&`AhCY2vqtFSl%FZ+#;q1g%<$r4CGFMfJ|e=LeHp#|$QXA`M^vxrFiz?O3nv;! 
z_Jd%Y*lExmPh|J*q|IfHkNC<$-5+`Uq5J^zUx|~dmM&JsZhfeCtxy?&UwtH46SY{o zJZFa{%qMPWIqE6wi9B^RLl5XjnTj-A%ha|-`cw(;@`m;DUO=MT1hQDtUWsapzY-mv zzjVYp&cY@`9OaLGmt2NZ%K_X9t}YF0_gnR@l^oB~(>zuDpn4VFy4?SJX;2Av&vH1% zU9SYIdBRTjpK;v;E+-x&XuZ+>{Oef{y;OG*u|u+oxXyiTi(GR>vAz4N&AmBXxM_4V zY8>Bx?-z5fKV78}oK(I{E_&(0xa5|;!+ z&z0Z$O!wVX!!1+aH)|mT#9}YtEhM4=XSL!l7v4-X`PNL-x#dqZdRL@A^gS4Rf@2-o zsbBs)tl{97J!-Z+6ux^BS1PrIV-y^BSt@1SpSA+rt|^Zq<{`~y=i5`hn{}C&XG>0i zOq&8_ec}VI-}gzcx=kZ~QmhEbkC&3Li?cvZ;IatBZMILB@GtO%h`WAHjoS(}=7=DT zGs$wUSZJvs1!M$|pdl-n)f>s$t; zr1|8<;iTI@RsSmZVlQ6`XYti$Qs^7o=50mDo3jmP*o4?qydjORc6aJ)Y=^ST4o`G& z?-Yj2PRs4$AhMehiFG3|J5!|ZmI2BI7YuFi*dVyDDJt;Z{T-zf`>DvNrvKqqD;9cx z)XdrBbF!TL%h|RN%QZig$An{sRN-CBiAuYtE>7{Y4NKzem#zrr*Df8$@J%zkAt|}4 zLB_P?9~N21=Xvl!HNx$044#u@^tedGR90&l8!3?{hF4H9iPV|PdiU+Pu?Ei)y|}h) zi|Ut)J?3G7Y!ChKct~g&GIp`Fxh|!9-YnV1U>@oNL+3Ali?rC8R9)~ZrEYSbVFeC7 z)bJ!i@inT?kw{X@3Z+7LD&2{Trb3U8=40qe;DhRfG�ST+7U2)cb+ZQV}3$fq{1u z2aJI?aPmwdM<)xo8nriFUYw$VYc9U&uB69L9X;=PjJ`3RK=ZxhrA%j&BGXt6!bAbW zSkl*0XR{=h&p-j;>T@x4PPGU?uh|y~u3{ZC^SNRwo9dOAGMp*tHy`(w9Wj5F3MTE# zNvCQ)SuH5rmwX%#B^9%?n1olXn2YDaf9AB7b!vtc?r6$nJ4`n}Jc=B*xXFc@4mjOF zoLY-a`?>L>#d1O$G@df27>Uv%=YaEU9Y7;t2yK}X+-X3de`Blwz^fdmb&=Qm0{ zugqQTk%+qlT}Ilh-VBsCGb%TBna{N-bCWSpZ!$g(N8)1ds1iQfQ^cANWuBZP@;*J6Mxt3t9TiPs z6t`A7Lh&*_PDdVkK}yeFa+kDsR+0B)17lczs9eQw*R-kdQw$IID2kX(UQC~XfZi96 zG9)NE2?}?Oq;Ih7j2;os43-%ZIy91h-iU``>GhDS+R$fjND*$}<&id?pl7;rus$AN zutRGQ9LWAO`jKjb);p&z(7zFRX0bFRE) zF#~Q$e>rZtq3nDYC(%NfC0Qyf^OM(7^$2%WrVYa+eAdy(fcK4T{QuAdJz%B% z%55!v`DDJ%hLR#*Sm-jSMnFqxDIf>0T&Uesit#dTi`uLpKNfk4?M9fLF(CwTVmUHQ38y%;_#59Wd>cdP|O zg!7&9*FsJh-kosv0xIjGLp$XHc&s8}#j3e(E_@MiaNLEJaP!l9{U0Cj-}|1INB4^p zy|dU0PwY_Vw@t3X`t0Yy*l{0!)Wp-or8!K_)IvIPy%93IA~$rw9~enD*k}Kma`5)x zy>upTtVYVT{3($ zveltti`#yLSWco|Mr>R#d49vV!iCzC%0k+B3#a4K?j8~xc^>959?y>bu-lW8^6}0O zR$waSWJRZTRs7CP1CO+E?YQ-;9SmZrd&xq;ZM81ZW8dE@Y@hCevDJc25 zxGUNpco}}jjJI6QC=LJSp$0W$4RrFT_71u;czX+1V>B)Gd61@f`4QZY z$=VNGJyu8~+gbli8Ot~L?a2Jru|aV8d%$IK^WQLi{^XNMIV}5{qYSrl5hsW8ejnV8 
zOiZ5wiP!;hXm{j2WV&at)oUC&!JftZ=J?@JjWm@6-Bi|9GtDnZI_2lRgz1 zq{;sF2y1&@9FOp!g5;5U7oCde*eU7^u}|vLU$eGA*7N23PH>k(oXnHBeaU<2m9Y=E z+izj%teM9t;r7Y|TFNsCX>&wg?zYKM6^tnx9nBpr=fni*%GTR$_(WO|ZF)MknP+z5 zW5ru9zqYxp1_6EZdTWKeWhdN59vlwhU|1qShL%j=v$hQ^4svTKcAQ6c!Q(F`;ylo&Y z9KqkX)kcVL!r@-C9Bl`BI|kQRd7HeJT28?uOra^ES&n+;(fcS(O3CvF*q8*PFQUry zN0lF*K-W7;)&#B1TgcZVi$iT+fLu&2{2Sn+m_4iE=5D7zG5~ipU?EP?aPv33D0K`o z+&-l*Nq}W+O{{>w@oc9T(Kd$-on3pjZv5!i{|5Vqc<`^C!kv(Zxgv#ZG|Qgp2$@EbVb5~K}2~hF;xs^=UW-` zLd^F=l!zUC}0aP7p< ztcV354_zd9j)9u3yoziQTe!0HCapCNuy^vNFgyVcD6);Y+h{ZXg$3i69|%rhR)7G! zZBZ(KlhC7jmz&V=K@sKV;D<*=o(;pwplrEA@w*z|wNHVw*+>x@N~I?*;zM%NvLcVw zWNtRO4}?{@)StfAWP?eQK8fjlCPSNZVNS0J#2qbGgY!FIvtktpci&b`d8SMv)d?#! ztB7?&prsJtJpel)=CRxz`PiP{A3_-k(BFuDU!566p7Y8qL|PE*;XF0VB>@js<%`;U ze8#y^;^=wCH)M#pAD^FgqC>Wy_j(vqYJ2q9(86vU5)t0NXjMdUOn~L%M!>Q}nBJBA zgm$Y46>Li?=)G=a#Gax7>vkw9 zTvp34>9x~-b1KzNgDjWTP|6S&!#WNUaR&5-@ijC^`5Rva;eCRN_eE_A?5`>A5@}`l ztbQ*0=+5s!`^E!n28&z1Tj(H4~{ka!i^AzJ$mRsxYim{_PUPnWiJh>G!jAJxWKLLj{yw zI{mo_8B?}LY)MrY^Dyv!K%8%UMyd3URA(-wlJ(BRi=%m&0w*X9IqUj}n;pp7Ppi+92CmONE`32lj@F??UmPA6-}wMp#2zhRd1MX7}VmCZ--W zuAqybVHmOPxJg)rtzR0L=7R%iyq%kmt@(4J4$&=_7b8R85dO*7ax;{aO7$x| zN^^qM6A@y;V~b>esm@EdXkacZzjBK7op|LGT`?d!W!_DXq?e4jOBPQ0yT`{`TVMXGV2hc{tkl!@RUdR zid~d|0oHrdLi*iX0=Oe#!UIR+_op9|&uy3o(HWJ?;|?|z;^B>R({y{o)_w7O!#g_6qD&K0eF!~gRUc8R-j>#`&#j0<=$jWA4{P^B< z$2!PuK>alRZQ>`XEv+Z@$1FYnRq~N2a!btHMl8&ez^ku4!$WnTe-%R*VAb%!HYbV? 
zAqH#yhC8d~s5s&gYTy~Jv>1=mf&EVRS^h6C~e(I%B?Q>XU>tn9r}HG1_jIr*!~z7uIt9dLfa^iWkb=sK{=0o#z3 zvM9uR&0i$^2ZD`>p%4sY=VTYk!BGZ0pxCj_Zq&8(5Y?jm8i!O+y1?|BDh2 z_Y>S4Ie)ESVqyFhbAR9k1%vYor8#yUM|Rt*8;g`TgCp%-P_whMW%k4L8jL&G5f9_HK|;8b-izQNRs(^Oy$6C)@FNiEonNpSF}+ zy*GIu^1aSRi(-Z3&W>g^)oZx?1>a-B-a#2Zi&35)YBdG1F<%OZ^^L#}=G$NO!${wRc+%-N`M6tXz^gMX<7AyL%r~lC>IFz%Kv2tTt!Mr;XduU-4oYfD&;~&=^9{as z7h7};KL|?IW-1DU4P-nl@E)}5R#N0Ulaa)y z5OEmePsA0hH_`VAdeHY)QxWJay0(6ri|4xvpS$J}a61K5uQugLB#M8Jx!jdro=VaI0C{9f}wI0&kcjiHbn$n&fC7 zgnr^1$GQyvgYl7}?Y&=M+2GF&RMsDz#5nwbD_K^n0LEba*w#)mlj_)Lpn`to64@Z(_5B z+ zy7Dpwly>HRCL-l0@XO(749=kCfP8Mh{=~ra5l!xPuvP0*AxW!(N4=Tjz!@Cm$NbKs z2e@CVjkM(osa^RRKM;h(U%4cIx^u&|lKdt)`33S}pqB#9+e6YUQ%{}Jvxq)f%OHiT zD;+qnRLTd(-nDEp>SIK4s0WHz{v?NU`K9+<7(1o_5eZ)Bm_cCRoMH^4BZEn_)K~Li z62qimwm+t_26iPbsUQKqMvE5d%~SeEN~$1IDml_wJVMsXQ5~k0Io2o#rfH`}V_nz} ze71zK;v1m(n_$@4_DKHx!oDJ(g))JMX4V56^kki-_c!1a1`!=pD$7D=sUjJAi8 zc%n9iBBWNkb_b-uXI*87Ulyq?RpdHOC5{*35#&%t*c(JnW>qNzkl$-#-`s4=0=(Mh!Y=I%&<2(bCd# zNrN;--^jP%=RWv3?LKOfA!QpyKzJAPm$PhlKt2l#i{-SyX}Tey@elC%J5Q<}D^-%| zdqpvcFm8!)wpEg$OF4M13{uu|5aE;&q(xU&L8)a&JD(S7$h#UjIgI=8%5(>$E zc40h2yrV#6s|D}atM*TT_8I8AT8FBkKtkJ-zcr!Eg!>b{e|U#iOyHon$NI8S+?!oR zXn}Yn)7&rnl3M_grEYKffMyq0J}qoy77UNR{FLo$y=$D zkXoOC!>-7otwVc-I-5!ABEIuaAsc6i>;VfYq7>q4E-+kovk$xxKz*|tj71J%6!+i; zW%Y@9x?()L{YV;;x#rTu#&Y%i3&3Wu?9HEwSC1Ng3o%|Nk}eC~M<;6~mtWtD3fUy5 z)05bf#4u9yS5P0RsdKV)zjA*_^KVI^_n`f5K!FGyMWH;PsGHw49HQ6`t@%o{I_t5)vqsGXynEmRd z51>Hy*iTFW9xf9C<6nVO-R~&5&6M|`fBtN%+%&02@p9SR>}E8@kY{L7+o$ecqlQE|wWLM8BfF(}I{YwD8PU8o7-Fd*5dr0S<>%-y+IT(@8@v@v5 zPBizu@++>4&uW2Fr17_fNaIEF&idE=x)@aaX?(QUpV!v^SvKqh9>xe2o%Wpe|41`R zqQRkcY+Frypf=;Rx2#p;zqD#ur^@ z!|kPWY@0_cEa~#K8$k{Gzl0e_+F!y9n597Mh!{@HU(1(DanyW&gRUz3ZNYH%VJ9v_ z0XJG`?H76b|K-fixUap>b&Pr09glIp=>%{_3)mpCv+aj_nM-d@kD?p2v@DPXOWDz( z&B|@0L79tWw4ZH&fx$&(k;8YU;A+JI5B?@AwnlnAjatdW*#IuRVj}CUk-ox3twqZp zDOvESTl9`p0O{6?3SJDgw+Gmuw^R53Jo5r5oW2r^u;a*bjAluY;-5v2Ikp5M6=JNd zo_6KwF{tKCkD@Lu4P@tC(9?Y4sM?4`$=|&C2hSZh+U~~G-+PpEDjAQy|K}MLXw6Fr 
zEIj`O3vDClOMX43@8JJ8offqbQnI53V{kB@>3Y0Yt+e$2=F+H}o-|(1Y*G3I(^8kU zLV%0-U;C7PLjl^)z_)_uLR(Uy==)CG`wpDwYKFnPf_e>Z)L@6mI8y*0i}=TJUNSk3 zSrue5R_UB_IePy%Tm@{FBsF%h87+>sjT{#a;RNUC>K=Eg2_TK~q_ic&KWGK!Jh0LL znJ)F4x@5U3)GJFJbNqR*1-4ZW9E#H%Mf4)GcHLFPs&;ZlYq|i~xRw|C&3@ke)n#@H z^*WLrMa#h3eH1=@41+ioghbqT3XU#}I|ryZNAG`_Gw`t`{mu8(j)}#)?-@^mOn#XZpuz-SU;T)DFSD3n<+J6RN?vDH(B9LvNVJ1qZ8rCr6kS!3>E!cPRJh%{W( z$KhSvzxx=x+u7*7dj+x5Ixjq0w4B^YjA6Euo9VJ%hODRArB2p_GCDTKk?cIWhFE}L zNwW0J)kIon{Ltp%3~%@UtjDEWh{u*B1n@V2^%i-+^2j%>ae4Oi2RU{VlQnq{k^cqF z{!H&%v*4M&#>U?4DeBuW6^>o^?SGfzzr>thq|wxl7Hbq9y@>M^J!Jj+XORYHb?@3M zE^q)y>KTfvhFf7|;n4EMkG^nk}?$a-aWO@p&)z^DIm zhF43Q7Lk5`KhAKhCO-p@5tb@dIFUu8Av?Ji; z-K!WU#O(6bU`&rMU?%wC7_P^qe#T#X=&F(Q(<&9l!i03cl>yY(&xAphckvl{z$d1P zK#*HE9^qZ5=4u0Tg1-#|mdB&B@pqhaI!p*75t?e2w1f8E&v^51Q@|pcB}3>rJMo_D zudh(5s{rl7djL~6F5o=?lcQa!)fpWfS zM~RjU^5l)n*k|&Z20oM0{B4ibuS0hG-%tLso`2Win@I!p!KJC!QdpLNw@S{3YqM$Q z*}6N_8_eYI2)wv9TjhRIv!HAIY0WO$uF?NcPO?j5YX9>>jMf#2(H`nn29w%EXH4kz zxAd8KjW1`N3(2r7|2Y-Eg8}H$ZEdu~xsk`DUVD+{`9Dok&J3j0{_@{u_Z0fejxjg? 
zXTl{*Q$tTrrKFm^nYfHE3%{^~D-2%GmFiy_?SH#r=paqvviulr)?Zh2H z8Q#UE^G5Rdsi;UDA#Wd}P|%+!*LfyjKT>G!XFd$TqyKZ=7@+dt_3PJ|`Toag8|ch| z=DI7ww{vhXlF7{A`jl!9-7UNHKj_aGYdJr<>3Q+d4KDQj7Ol*mU`&3{d4-PhoTj|U z&Gh@8^kFBEQtOW^v+zFzclw{6JwOvmHGcjtI1H@Fyo=<4h7C%;?tCq#*mKG3K6@=J zBWvj2X#+Q*cQtvmD|*0*n%$$QCB)`3*#d*j)Vi_td0y_pH=-UOHGruz{#g{o3lK2I z&5W8tm($Kov+0qGMcmQaT>_%(L0Vp9rFtIkEJi(8%hLs%=DI}*fQX)ftVh^5O5XWC zD%`JBvlw``@P}XpG?eQ)t<~64e~H`b<)G(Dl*;pO-!Ferx{PeK->O zeiqolbGE88cQ~=#$zP6&}J0R^+C^nFJg!MhV(GYF`btSCLGJ)5UKwK@Su6*%hDED%YLzei_GEC`b_l{8b4iyLA-x778SQXDYyMF1xu6F32?lDXel9WJT;L{ z4Q?YLyOYzf!(e@pum8c-0g)yeE4&UnKl~+ddgfWe{m@ou?~ma69Rp<7%(ImNH0}%F zL9XbW9DG=-~Xeg&V z{VXA-^{?Wb1)l3HL(rj1Bn>Sm9do}xD|bu)<*6HZeI$L8G3@i96r%lSP5G;53nC0s zu0$R6A`4*@G5zce0&W?y=_|$P0XyEuc~~<%K$C6|#vlg$lz@EkkKzPzH#HGo&k2wK zpg?7YLgEX}VO^k<)faq7*fWEBdtmL`p$P2`@8-h z6SJ?A2vU`AhK!%-f^d)-UF`Dga=v)4xv2lH3RN{T~%C zxk~lFsg!odvp4keHWc2U51@kF&(m7WNfsLu31s1q{b8;4KdjD7U#sKqooUoc`X4zz z{@HTS_V3oozDT-n^_cXB$;wOICp3aPIDLPJVvs3+a2zpp!##hbc z^03(ca3;Xr*ctyif-GzNb#W~i>7-hUP`6qdq>O*w$G9f!0<^a?6xK{cADA%Wn0lw8 zqCXYg?9)NyiG*2m5v6q=8v`%B==PpHcZF*cUC8~7jY}l;@pgAvw^I2nWM9ln0Hs*7 zYhV2KVu%kMfguSTU8SD=E&gTgj$|+QD=iL>pT36Y2M%N$Wgq{i^vWq+m{H?T@dDsc z=V5<5JU?BE`%UA~Tm*dN5oUV?Z+(}E*K7#F3}B%$J8>eCdO~4QY$q&_r*We@$}Br4 zFV0Uss zn4M_|d+-;nfa4KsKIV$GLw_kKepz8NaL|Jze8tx7D7KLDE?F?p)6i`9hXWTuarOxgNDPaW!;ag*7c))`8`Wjf_Y8RhPA8_Tqt`V!uv$)j zmIMV(Z1iBhmJYvQDGX6brID}m^4eJR~4#|Qv^t4#beu(L7%5iC2a@M{IqanoWs zFlml93>FwbvLE2X3XN)t-4!mSr@UpGw>#)8ji>~IUjm2^;_?XYCI@?FsAO%R+vieQAcW3Dht6s!3pSD{I(FdP5|pC6X*qe zge5~HqB2a+kWn6E;`)seLEZ@vTx%mbDTEq&3t8jjM5s9fgv=X(+LOuo7^$fdFp0#L^xFDw+2(soZ!#BA4nWFMhWg{8R6jTy7WU!; z!pBWRVq1>islcp?{|nq=*=JpkfnFOJ9#2#ycuj+5MXb|NHfg_g5)mGP*Q`DxR;ty)1dcZuU0t=9V zC9qrioOG5nlboz13FN+IRJ3$qZ!Yet6HaWm5%LL{5)LQ2McaKLCcdoDf$d5!S^~&)6Hc*yL0kh z-Eb_7?S)0g%PCd{dWyHJ=ExhGVi17HB_1SrgkfQ9IeD7`FGwwszX3SA;aQkRS_<@J z-CF{n;R7fU%Y5?< zM@jRY)R)&#!B2Dok>RWwoeLpnFO6vcs8&UUXI+83vh>qSQyT_)Y~->zZ7zJC!)3j# 
zBZ>|l6hvAH9}gaJ4;!*_Jw$o>;>2Hi8`}Il+DZaNS~225d>@F%+z5h_%0UZMxm6Hl zk#;X|O@yGcp}R_Y>U=?5JMcGhSgcd_ibEG1fSTifxUc(XP=YRoN4U@2Y zG#RQ&mtUH)Gpu_eSIw#Z_Kzv^fwC1~FRS;RBwCL9oP%(3HoVwCTdaLdtNcV1HmwlM z@8pdPF+)D`q&q*JYSaOt=99aE_uy@F%A%)R=1i{>N1QqZl_VM+$>-BRs{PM%%0E0C zPIwhNcHRKkBTp(k$nQ?@Y@t5Pw14~Gr{ zg_@Z9JQN0%QXfC+P6; z@LvBDinK>-(r?G#0vSO4X8n#JtLUB-JDOXuM_@ALop<^nQpjgAX5w*QYtnDJ>umtr zB==`q<=$h$0*)mB9czix!`)?K*aO}HN$FG= z8<8U(Sc{ z(F5_0HS)INA1eS-?p%e6=ew+qp}xB)pAqh97&DZg4~fvoZWXw5hG!J`&}Ywf4fR}e zYqZcn&_hz(d23ms{Nkd zip_cqpdd(RW;^fFQnJo6!4gx7rar$ zp?dX@b5+c^hH@wsHq>AL#0@pe4dJEbG_QRy99hhO-tCmS5P&3xE6L9Y21PW+nl?~Z z@a{?Al_}3=W)DE9NvxxZ*(Z6IQh_HQfX?0*gdq1}OUL;2gRtej|8?gPpb((!kwE%0%>m zuY3hX)nL`?Nmi=KPaejNg3ddj>fH;OcSW3EJ!g7mTtNf_cC332bN6X(W-FfxV-FrE z>i$)?*;S=VVs;-^c;czo9&6(&bCL6=VyDRUn$U>pv;D2bm8O~K^FQ0}blRM1k~s|5 z;STy(7;pBP|%RasAck;*Nke}n5)t{W; zE_&E=3Dm`i;d>}G?4?AfNH8CHcOg^p(5tvn_x;di*lP2B9@$9SRt6HJLP+^B#H#hB zS)rZX%E!fbAN^OuSC|4nB;2ZChL$>9*A&;2a~Ky6=`X0?fL-in!fAqls(w&`1CYGy zHF-s*zD7lYYYCE>29j?<=-5L0aDnJI_Hj;YR0EHNq~}sB%537?sf-4&4ok}(;2G%J z(){*oNK8IXg61bzQYad?cE!8I%{zqye;q&q*bnzhU-rWpVC@(*y1`V57^zC7$#P!# z>A-L}eyuPW%9koJjR!NSiX{;NR7sZaYfRk^4>xQdpt3ns51*{E&@!xWCH=ALA|OMl zgOcL_CAE!~X?;|{XcW|RIOSVZ5pU7J;u1hLhslbJnAxJKNNmSoosh6*(4*N`d;%Hi zSos>pijjV6m3J5{AO909E1hhp&+59^NW{ZUh&0Y6whn=^{zS0d`BU6PuU&?sFY@H( zvp7U}rRV9y5cGR%+g`-%0H^?N#j9i#Lo-Q7t$fF?MnGE?QYuCnMA`;mvopsJ$F z;6Q*xEC6-eVU8l5!!MFe=+bLU20ez7lS;CyS&%&jf>XZ{P*Jun5qVb>Z$vEa2bf;*kYFV&oOQ{~=`9p^ zwJ&4^;D6Mo$AlHJAJdk8wsCfSfh*s8jJp;M+l2SZd=$_Q zg${%beP>&(cC?U-1dkPTG`MrboSPa!&Ng8qFYv+2!F%@08G~b$SL)AAK~r*^#|1U zkYX_Yy-5RA>uw1Co@ykm4C8xyZ zwve&Vx|T|YV?Kr8O-J%UL(SeN)NHC2TFj1Cks;wXe>f;BNcYWn?BZ6Cfu52&2IEB$3hkpid``}u&SC+oc-C~x)UlrmvS{puHUUxkhWJ$Y?hd?o`M zVOnn8&v18Hv7}po<=cLl>5`fGZd8pl{A?38pWUb)^M2*@`ir^dK~9V$EmxW8Kx3Ksp2>>S3NGW_RI7>*(+$vlvI_fFQy5+4kZB-j~GW*9{|YM z1~m3G+jrKHr~r8d~q#z!w-coHKDaD4^Jq%eV$u6 zB*mDA;99uB2xB8_ow%xd<~~Cvi<}e)aioM?BhuVV>M7rH%T9EIv42ptS^(+Qd$|qF z1yWzK;-!K);HI_~!#&O3AS;5u*IC+NN_|8o^B1hQDVErXQuv@ACBLG 
zs;+K!OvXbPivVk6QqIbp+yRyb{Fyvlnw-cZ#{otm4+Kjs3Y`}gQVDLAu_(JXo*-*m zmOw_50t5IrNcpPyQ-CiAs%LJ`cE`!~L<$F0l`|=owT9>x=en3qzCLzEUO53b?7ke`#}7O2L|1;lmkI122FDuoU^8 z|M681)vZ{sp1zaH*Ed`l>WcuuGAdGWx+U6)aaob3v4>ADvsjP)Oyc9xKJKCAXQEV$ zsQ;(7GmnR|>;HK6WwN9UiX=+KhzKD|*=1=_mS|+HVURI)%2H%BMGdkq(OpJkO=hB@ zhO8MGvV_o(B}pjJb1rn>_w)Sj*YEk~_xpQZbDeWt=UnG}zvuh;ye}nWh6J(+$i7Fx z3T*)NfvOehglH?N}T4ie!_`DxL7^J2-#90v}3Ft7uG(wy$zL|dsLSD%3Xt5-tX)TI`D zT3J_z;&i42a4X0Ww=j2>K_7F%$EBet0pPJH#DPV5XVaUyg1cWWG>kW;p5vmHD^l!n zwvLF=-Xb-znvcY%@9k3f2|(c=2<$fOeOf1dQtzJHHVhI4qn+wmrowO)!VPYiAR?OE zGO=}e!gM(JtW#^1_C$pOJi2gSi{$yV)bwE@zY-QToFYS%@CChj?>3tHMJDs98MlYn zP_XM#UM%Rw$!)|Ke+A|M=C%``q3!<(YniVT%b~DFWwjV603=^b@bf z`>LJwtsgjRAiw;kO|o5k+4Idx4U)=w(0uBhso96qmPf}p>P&u8e^rvy4N)>o3h*Nm zrRk^d=|qno1~U19rXV|sqQr8$o`gdflE<2oujE)!YDE2YK4L)WweeA}VCHtMX+ySr zi+k~fr|Ia1K2L4^8#ga~cBU9=ZAr%NHksVhkeI9`^Nou|v8<8!svcJy2=5(xtwkZ& zszp%d>1NKTw@xj``pE&_A8qrjmygFfo{WYw9;7Gn)84ptQ61lyzRyc!wM1G4{-AYSoT5 z*QwKIOP59hYL^|}TyA0=8P-C^9@SwLPhyX3_DR;BR4)B#3VOGsVH5UJ zj_y}AKCQ!2R_SJoMyGOFgnn&RtPJ5D32U2>NgVdo*!-gA!%aEbEhizK5!b|Hj_UZr zK9#$}0&a)P$Q;VeV$04ANQ`smD~8wR51p1e(-kg(8QoXYq&N@=)H6p(8bigTYw7Tr2&JYl$Q?M*~aV}(E4N5yRW2QOjfadX@Aa?D| zjWe9iPE%Ty6jRgGRYeb${t8rb@J z0l6o@$}daza1tA%$V*JQgC4;Dv@Hsz<+KR#3>eerEv~vWIQg;&u{MTqUpymoq+9>c zLHVvS??LS+@ln3Y8MKC3m3H5jPm_ixR~vb$;%v2EE3ZgWu`LI1=+>B&xow^eO8gH` zpR?>(70fM`>8i_Z3bNKv=Z4NNG*UPxkcYtuk`#B6b*6H*b_IoTDc#*wB$NT0 zjJ6SXJrRu+7fx^vIwi+OdrfLN5eQcu*zTL#GA97{C>7V&ocodIVY5Uiv6$L|8bG^& zlWG{eVUIcI?q=L^&FixBvrN!4!Q2^{jylR1C0Rp#Uw%hjzDCNn38X)0T4^d%*U6n` zCl0M~E}}09vU1DmwcOH7E0*u_7BW1K9_@Uaklhc?E~y$$z^x-03v%YGFWuQsK(MaK z0pHT4yr$4_kbCNh@R-5DX{fjS6H$=FhPE9eYsD#RH)^{hR2cfOqz1`jDsdKeDG$<5 zf1WIeiqaQCxES*kd_$7WTef9cLn)Mv}r{7g|!Nf%Okn1*J#B}@8LVO;Z# zVNcyO>GLBc!zgoevvN>e;#*0$?&9|Fs=y|G{4_OiJM)k9&$!PD6cJUjQZyM$|8YFF z0Qk0&2!8Vk$OkhT>aiI5u}FjKhm;EX-s3+ZWq~U?_eBXOt zH$Fz;>O*5jSbEzIZ_1V_jja}6r=Je)#I&!jF4jt-Qa1}^f*{SYTMCI|+cG*4{Owbh zbC9YtXLUlmQl0_f%W;zxDgDm4&N}p%xI}i^sS)9{z3G8RQCX_U{cLlMG}LH8|IhYniaH*Zp- 
zBFaqaH4!NWWPb?R^g{NhBvT+MzMF_E{Q2V zUUAg1z-etxzO=wnNlah>fs|^zP_LdbxVrFEubViQ@S?}<{V?n+ml-z23OQOkp(7X? zAfMZopEJ&;*W($NN78Am6@pJTa9%8)}{lMuO z9|N8pyI(I!=$A_x%yrkU`YV!eQLf9<^@)rmU1Oi25+a{v&!Z>?3Ai%A`O?wo3VyLH>XpN|MQ0zy5{PW5dY<>4B}bSw!dJXuu}sGXmh_W$G_369mLwik@Aq13T!gCkX@HC{&C-jx zt)a-R(RdSvb1m=}j>wJ~?IRQqAAAy&wE-$DnWM^20fI~5Rvx7Z~HcUaPR7xgLY0zX+gT zaRKd*4pTd_I*(4Npu=G zM)Qb<&Ay>nqdy8itvWKfNPd=!;@FCo(_hS{h7fVyf>>Oi)+rAh3USA zFeR03@}U+!$Ir=amFe8h^wsX>d1`k>a^{>nxPA=ex|}kpViiF2kGc^xhupk_E1|+; z_Y8HV%3EShcnF~$@%1=g!o3pA#i_0navxR}2QQ=mmFbQefyJ>^e^AzkKp-pxUXuB8 z)IS0YyVIvYwldK5(HUoiA87725`|F$~O)lJf7&^}dnaSMHA(8hFp&pP&n+h*G=ik8Gsd;pNWCL1qfT;~-q%(0P z$=YR^lNa+F=!P1Pb;t4|fVXHXC!^OU;k94)x{}mbWWrhA=pEa&zy1pjq3Jz!UFEY% zQBTos2>cpYr^zTH=Y^82s+?z2(HOdJu6~`>?8)Hw^m?%q0Cpj;4SyKRg41i)zn;;viGsAq$%A&~`)v3N zXAIXdK=<&gAfz+233?AlJoljjJmH=jm%TxU647Tr9TeK21cB*4GE${VBV6_5vk8#S zAbiYiSo|Y&UKUukF7`<~9P+Es4ERE&iGmjMYmK4GH~{W|rIe zB%@<_I-pX(qH0=b<)q_4>Rt)#xCZ`OQ7%$3OomFzv3j~={mhBNEdF9OEiG}+&kebK zK>2C5F7gCjhCqfr#+j^#odPp7L#WsO&T(3AxH4l1ERX}BBKBKOWA%Of0ywN5?J05A zbE<30T+G%)xkQZMF!nn)(!|Yya2*&e-1p6_M5Ly?ls( z!K`I{2(Bft0YQAo4_8`cJgphiUYW2YLyW;|85p_=8WcL$C@K(c2`An45Am7(lR)^uJN`MW?VFEle^HE8j{!LZjbcu?!$Y={9n zp1trq9$9`4a0s(_=vJ}-&B`dU-Fn^~BC38&^G0uIPP6Aa1|h^BD8)Bm*_N_~!)8L1 zB~k0Y{^kXSa&r=6?jY~%I@AIkK56SLPnFq!c4N}b4)PTns5u_oRokDXTcrSiu@G>C zdf2AM6*vbI)ard$la204>j7lGmDN&xslFQ-CCNcZEb z84*6K$C0{K_Ti5W!T3V9?Y0Y)KwnZU7KqE%pom~>LDb3HBc92MX)8CM={t^Bg@kXv zuXm;7>h0n_i-9)`QxU!;QGwSXT>Ro5&y_98HP!otcOG^IhEXaiQnRHPzh&H@voY*D zZX{Qq{1So0l{sS5qQFFs_wdsm8>a2>_S_Jbf(P{r;^GlWzy3)o+VqO1I9Jc$gO%3l zK?eCd~4X?WQFyXrNI+GXgiXW$c4boAVEmQC%YhROYu+?p@kmq}T zc|`?`P^H(7qgi8~=&IrCv@}FBkh5;I`d=Z!PW_?H`h0 zCX+ZEEz?5D%e2vTov4e$tTm)RX{xWjl5|3-tNLgVpDPFOHr_Z+LpSQ_-OZiE3v_JwpN z@XNgq1lA@+s!l|>+>HrHQG}l(4Jn{Qk=rI0_yUBD$dUEsD4r=5rM(L3eD&@rTe>CC z1qxt$z@!j&&4UEApe>!JclboaDWZMO>qF zqusEDXiwDs%~>ctWMQ0A2g9n_FHtGOI`_)VR(|m(fa^os;ZQm%iyHE%IkUc4{I4KJ zE&14+lb89OSscn;@r$H=@NxtEXO+D|MLRTmO?e2{eaxH+Uv#scqYD*urZhT&W}F?m 
zaD1bhp4CKqwQ3_jM)Bp1`R+MId!2;#yHMkCVPtwV?kW5C*BaP01ZPv8gRbaxWc%Kv zX!{*R4#n;4^g0hc`|>@vC9U4$Ve>Ec()8{3*LQE`3Y}&(Zx(@j@l{N|cyvn@pDez#*DxV7dl=YdWH4NfQ4 zrdz2(L*DQA%JN%ATv~r^Mu6qWG+A~|FTAwaau1|lAG=7rDyp}eD(c1}4d-vE>rrCI z!(V=JOeE;uVrps9SWl7*9=x*S<}uErI`)5C%3ALGelqvcEzJTEupsrY4kcd*4|>zu zhbhZ&E>+l6{JX)}h>tJ3X5w|8K`2&m>%w24y*DqEyLbF-Wj3$0Ri3LNdZA3OyMhCi zjzYwwH-vQP;=5I8g0KKrdqLyJGE~og|Ge2ujz2^%?KP*gPgW6HXDhz

4AtojkHw zg5UzW6A}5$0e}M?m7OF+Z9Lll!FsIbDugz)&79MA`?)y4iwT4o%IXol<(H^SF96qd zmkuD*Zv0tXC%m7JkB=i}yqW14@GEqob!{uSe%+0IgFd^Nlff(6U8)X(x(K1YG#`oT z$4@A8O&b73x%KPZ1$E8C|3PM?Ry(Xgbn&aSY68zrE-}!P=OK%n-A+q&07<||sKGna#GHYjxDf$sGCVcty_(|mz z0w0*v0AOee3XAY@FoQCFM3%f2hWglVk>LF=;^n8(Gu0r{dL`oP z1-Z+AcOR|s-q()%^u!{-`8LX(pl!!6_D%)yhX|ky2!&T`jmVY_T!`?yG(9!%XCzmj zE0lErz}F$dGxW%g00M`P4sHmiOH^ytv@SV)xhwn1stLg33*qEs z5bVAHqv)F?YgybG&5CWH6k-11v_Sb`+Bm%E^31+Ya}VRXUu2^G_~}wp1$4sdPwtSxQAz{-#+Dw3&@Zph^n4S5m4|WRM4^-nL{-l~O16b@jkn!V zbK0mA5aOQq+@!8k>AU<__W#wToGx#$!8=d9;-2@AN>}&IT~~}!JYdqauupHAdicRT z0!FxlC5 zF4bnLPH9Ip`OISG-)-f{s{Ol<(@Prr`!0gCB7TlY4uAsWC+PbH6*Ui(AiyNhm&P_3 zDT8Y1zkQ)V*$s#Gy34S6aqsIh{%+KvKWEB9DBuUoKuJUOsQ~aT1XW-mIdNG`OpIr) zbxqg`@^e4O+U&WLV3Aq44T#}f&AS06MpJN8H9W=NomC0wNrw(w-hnplB`#Tdk vk2yA$7yke80f+yyFG2kG3vFVq9t=M7*2PP5Km~kSW0Q%2g?{ldWaPg9C8vB4 literal 0 HcmV?d00001 diff --git a/docs/source/_static/imgs/pruning/pruning_criteria.PNG b/docs/source/_static/imgs/pruning/pruning_criteria.PNG new file mode 100644 index 0000000000000000000000000000000000000000..a91fcbabb5fa17a7e1eb52d5cee57f14100ac6c8 GIT binary patch literal 29972 zcmdSBcTkgS)GvyHARQB7!JL6EO55RizgRy-1VZ zdnZEZEtC)l+&8%Q{?0k~&YZvRjEuvpdDgquv-8&CFhu@%QEgPJEt0L?NgPbb12AqxX{{4&-6gF zbuNr0<5Hh?*1az;CJy~(5B=!a$TzC5JyLpfQRUGiHj>wEV}xR&?{5p4_UiT= zOQFB#b$#^dnjO7@9NjSuY+K(apLf)x2vvJ-i3(YCIbD2ApD6B{ ztwn0SNI493z&~|<2tTbEFSCwjkoGP^d+GO%Mc`+BG2K1Xm?z`=w!A#72_&4q4IFFD zm%0;2Yp)B7#3M`w)g}W)vh8=Zh0Jh#gEHuk3{`V6rZr2_GNw%B=HTk99hvKSsI2)?t700MXduEq&Dsh%GlO3NNn8{vg>C+^Pgd4QcGDTCy%vQ7nd&Z<2N=o)E||) ztvm;0#+ZY&Z3}0y*qW*Bfu5rMWGp4ik%zs}urTwMC(#Wgb5}m_If?ftTJf?k#mPlO zA8cSW5)Up}2I#fPtWOHM6lfu0R9N(C5_Sjb-NvuDVwcG=jf&8?2pRmn$YRhjpFy1< z64&W=@@2v4U|mnh&$}(c6D>YECE8IIu4hBqJGSOmFd}eZ3h($VcsJF%4ol! 
zN9`?rUcV#){6nREk~^NUGbDo3U!c?()mlK~)9{v4$Z1b-Q1nn_P;^UGYDAOP{CWA1 ze6n8Shz9#?k#%C_L8IkTlSW-g3->3%4Wg)*cyp=Krvlg$OmBD89hIt{;tThiRM(o- zX3%Cz8B!fzB)&y>m%cSPi@YV@Rbt-Rl_Kr)TBsvqig%SP%zRNcx*;xYhflXCZOzPq zRbcjNl5=xemw56=O;L9xzoF^hL<=MJuNU<(lgPsOH87skDqUJa z!H_x^2N7i4k^ql%Z(~L1E}1Z;+iHfmyP(EZ9UMLZ9THKLhtTAq&$sk4RiHXW?E&I^hiQAeZ0=z&BHbO zU{vNr@MZ?zs$JQsAQ+&YB;z+xR!-&8lXD?}D=vFQv9`CmB0qzbK)WqMLAEx4^yQwA zh2vzo{nBDARDJ53Vk`4=c*Y)}$lwGLg+G7VqkD#Lb8WtDq*1H?uBckikTdWzT;e+u zc>7Z7z^@UAJ5JU0bU_5ONe0cf0p2TY1lo!!yzv2le5li7YPW2+<$B*F!)WIMe4u#* z;@yV)VNRZQeuMfd=PGuM^iFw5O;2)XoR$%6k3WGVC{Tc6z<#FC zFWk$c!#4hbZzEULU^KIi+_);~(^sDmdBU-UexJ#kV$1eokFY!~^AkU9UxB#y80wla zHBTqEiQ(H6XHm@~y)XDCk&ccP^NRX_WB9_Pc2WK|vE?ykg0Z)nMEu<*F5jlM%xie9 z>>lCR8N8HMkcRaN|JsgDNMm=RSohF(tz=Br*4#=8S}P-OE3zZ78T%ns*?0~8HIxqO zDpNKZ@6l`#J%`|jLwod|eNsa6!>SB2^Gq_FJHKm}!J@vI;_jzEDxLOA z36Bj1aGlCDoZQQT!PkEPFSlrc}_n3tbcGR-&3YD^ZfKT1aqDLbqRg4 zJZmDYuyr66!Dx?t561@AJKx3$XdkexbR^8jV(F29pSS|(>9u%i{CJ~l7mcY=d)YjD z-GZy($J6&J+VQTjknXa1)8~KRm<@Ts2c);`b!yhK4T7NR?r`IB)Do9?cMva|>Nx<7 z(*g{QsHpW8*KMIBh|KFkFSnr`RVJAZyLIQhUT&OTK5A?&wtHM9)ofy0Q=2we3$7PY z|D0JjI~0=+*UI&>`(x6J;o=E8f7APgoHzr0{jMNNrY^t}3)uqlcU~RU&AyyKLMlt! 
zx7lCbcjDc=V@O}OAXKOLWRKtAXM0j5R+;ZA6g2`JA?PrJ&^^$rJ^GpdF?(7hRBTnu8I=$`U=yz1qvp%jbGWOhfG| zvu72*;Zsy%B)>Ck-jNxwiMyf+ zB)B;s6nm)GoDwH=5eHBuq+XRs;%Tz?OkkDORI@-;HcKFk=%`ls>dj6(O zVN7C4n6%lqOX04lK6~`j@{LH8*l7m))$~+@T-(ut?&S9S^F;&_4uE(Y=bOHyWU{J7 zcSYz^p$E2ED6PmtmQ$tt3d9c-6rX>QZ#w+;_nn=V`);A7-f-riT_Tp+!_Ss42P}g{ zY+{7Spfh%R%l)4TN;U<^fOyGfA=9VKy0{W~WXwl7+y54k_VF|;7y3Z4$2U&pzXKa$ z*ptX2gzxW2X_PnsLLeN7My&pLnd@Td!XueSxG2Cg__0OX$VeG~Icj_xP$}%IC?D4@ z`sD0~nooD*DCBq6yw3}~xik>3b<6Ga3VO}OBl@qC_V>n>eBseq>M@go>H-n&h8Q4C zeQ9_7Pm7*qEvJk_6nd7Ul7-z+^FA_LBj_)(wN2E9x53hi>HmV#Cup{&nHYD!EkcI&2?r$XKU1{MTe^Qad$!P5bNL?7Rk&jDaZzSld^qY5Z-1(5)YO?#LN|`jt-s#7SG*L3{b@MS`s&LiRK{Oxke}&Yl=OKb8S&&SN!&Fl@SdZnkB9ki z%r}Boy#s73vaxJ->*s{;T6}V4S+ykL(i+Rs;fo`tOIaCPiv!vY;*5HK<@3j|u6mTQ z7sIg@G^^QT+7I!=xs@6>zWd)^&MWV5`XB><-fzK@lL%3y;ytlQr>qU}kOL@>&R{A4x6>UKZ&*ndK^9 zBB+50=hzK=DZ);`092d!KMKkgeFxHKTaNaMa&K}xiIC9SixV(SO3AEYfGPF^m*mUZ$AJwC0s!HqGi)9!2uxm1se2 z?R5MbaYs?J-;qlH+M|E%@`?%eh=eXU}nR>z1)E#Lc z`yqI`sS4*NaS-mnaaT&`KMQKhR!c-k(#$pyTvA0pM<18rd*_Ae{boB%7_^}0%e+us z#w}hzr+-LW;OjhH%(2X|e~8afsMukc?Y~Cb zKRYyJroCBXti4s2MA4+<8?N$Kb@-rl>q(#~ZHxrq>Rp(won^4$f+qH_A-W^H{%n(8n ze!2ZA^^H+`+5X-D+9`|fDrJ6S!+#X~!ke{xWKwhy`~x1-YmrdzWo(p93EsI0+|9jo zxNO_ay1+-%0%q3@f{wMr5T)B}1 zhQM4h(MQoYn;<96vk$N1^W-F7H-GF($U9{EH=BqwK;0#cbHv-dJDNRI@6DV~#4@0< zpmh*O#RrNE zY?IGM{gVRRQ$EA_7fQ=_9hfx>E1(2Gza$GPAZ^+yWXf1KcP&QVQIXiI`a+rFECh^-2Zd`&- zi-^($@neO(yXf26GK=;j;!A#G8bcN6>*fZ>icPf!|0dx;ERAmr;6bu%9qnK1#oV)R z0uf$cg}NhWGz_Oyqpz>e=nzG;pfdo-h>+v5H~6X%lTo>;52eb;{60DIR1_WBz5w06 zyK49x29Qav`kgo7Uimc$6gKde#9)qwB$~>jw~*8)1!~Q(|A`;_g|r-&(tL;9Hl{tg z!5XkiZ6jgw(BJLRT-A)d?7dfo;rsZnOTBeMye~7S=adZ!Ca78@x% z-x=MXduHs%?hwrL>=fT49w?1{lZ&oIlxY@X-$JZkul2sm%KxW=Agy@*d2=tNS0oW$ zh3r0AL4L@5J@h1bNDb-5UXWmix0eq}Y+;H>pgf*YZu#~};(MY&!#`CDnRkl50Z;H4 zd%d+N^epAjiw>y?+ShbcQL(j=Hg>SX167O|9=9*#cYL5W_3Z=hTP*Ipayjbbc)&?t z%0_Hgd<B-)vR!VaNa&&z>!9;3MSNKE2!HS#rWcYLr&(8K%4fzz;ItB;Xz}N>Pbfpr z*6EQf#`0{@Z})>hN4jdfG)w0|2BsXQy%r|}&mVW0D;f8|PV0u2aBFE_Y%e*lp6IvP 
zYs{3*OWfDvXNbGtQhlpl>OmqM8HV2ja3#B_Er6sk&;~FS^1K*#2cK4)$81F%N2F?X zx3pL%uU%?;Ocv99Z170r_JPSP8EaPViT18Mi(Zn{V1BB9PqZbUsEXLS5I;+$O*z%wN#nfwIrV(I;BUA{GGg^ zYDA+qjUzzX=isVc)Jc0jH1w`b$q#_}9iUK@(?va_U8OZWIR21oC+aCP^!jw8?fw$e zVOc_boo(dNC&>wBl*rZ&!(>r@R~)}Wyq7MdUj?qERmuP{509_+P1LJ$h|t6QaxV2c zWMT=Px!Lxn+sC(6)UH#;gTZSAnoUUgGZZ5d`ZE+gB|$z?qh_1O!?Lbz0r!^@AvPlL zvjDjzTTd#%vr<%BO8rUiv2Txuo#Ph7Wr#axN@^|(O8v_+)o(<3(|do_9B9p$q`gRs0&S^+CpqyILSguQgkA#m&cjZp*zh3^k}fZfoq^%>&$< zW8G0U&}bU8C;tp(-!VM{(r-^h9Dn*M*G^|C5S3cwqZ?w5u&9Xc&+Lj_i2DXfeA{}Z z7UC3vlSpxarBAP&cB|aiy2!x3eU+Z4#rwS}(v$HFsSPG;=wr6uMU;~z8k)unI&Q^R zh$c2n!v(~7(IjYv>-#`WJdmYgf$m9G57n=5`id4;V@8>L$Wn==Eof^~IoqE95`P8z zU*qgoxOo7X&XPx2v}~C95m3UBVsLAPl9a7fDVw=>sWYE^q7_F{$>8?}d!#WG5YlK) zwbkt25grAi_Ep_@Hs75Vy6~l!x!UfRI`y-)*txwPzh>Yua!R3-4*&95zc{p6XNlp) zpf18snB0y{m|HpMU}JK0Uw8yabh-c+UoFBpxhywX?OEt`hCh^4=^!HkQA_sAU6bP} ztbFo%4XA~6M?C=9^}gc2*meLo$8aOhqL|Fs=2;a;n~vsgpd{jgz>km4d3Hk~=FFH_ zBk%T;17!V1$OdEQ?t4yu%D`56`L8>DeXz6RSxHlcHxYB#-A;Z_TQ`r_9HKx&BPWR$ z>ioH(pO{T1BN%*TZwCSdX3Hz@k+ZPs(xLU89?7kTRhL7{XQyUZ1zraZ%Ktj^Z57DP zg2I$;-T_sZ#&5SlPKzHn=c&HKLr%8k+rT@~^(3HoB&qGkQwr24%g{=f zEU&|9Psu|oUS>&QW&V%!6ZAy(P-#r;B)8RJE>yEb{im))H2R@7txb#I0)2Z@@*D-g zJCKG_z72Yuk?wzOeJW44_}7u$&O%G_7Cok&D>V-2a!77{5}1o~!0oS%CITwxaU44N zAXleQG)FyI5RPpEOQj%H*CHZ^UhjPj}cA1LOZ;9_mui0kTrSM8TfO(Cr|L@ zRCs2dw*Q`juB+UBvMZ3BrpmA93w>B5DSq5C)g5sIV&%*QC%G(nbm1hj7d4c6gPPag z{xx8-RvfqPq)kj#L_}M&n(r{I#j>`KAxa_7&6z z`cFSjex0H06PY3CgyX2 zYgD%P_6y$00=?2~O!DNs9Jx=J3hN_dHc{e|_e^3A50Tu{LUpFRdS#|1tmUCRe@Ac!$zoL76?DOz4M7Q@&@G+JXt0VoC$1N+M%M$H=RN*Pwj8Jor`o&=+NC z+dBfX3{#|+qfRa`j?v;_7oSn%rsv2m^xLXlP2g_o)LAV(G@104;rtHO9N@6x#i%4W z=UD~*rN#_UBY#Ir8Kf}BC4Rl)<}Vqbce&=9BkEYQ6ErxN)hGaBncf+G=(-dtB>Lg5 zsu?(RuJa`m4}QWNP*lu&59m$vb8BnjZS5Gu7*qgZ!IC_V)hflmR0Y) z=eX8hmB5PEd0KsZ4%T<1r76t6Ci{^;lOxYaoohZ+?=vZ?T#pctjb{si?!Tiy?HJ?Y zwb~^M>jSvECW3f>$A;O|XIv-r(=Q-1%arMU@r(oUc}|)6I{WoKrp^UfAUp!Ofr(Yz zK){Vm;2pF$pONacm^^e_juax#6_!O{<`F6)R&xg%^abg))Q{fr$Q`T>=O+8igvZDi 
zUHJsY`;JbXrpb5~nI%Qyjim7;k}Eq6V`5gjT$x!=%_xonUE23)&Rs!f5t+TLuG~vp z;aST0x~)FGXV#qa?^;V1zA794db}dx$*xSV(f$7Y;ut-byg0qk!K98E2i(xP1HJj@ zgW^f$J!qBNcWZn5jR`8$0R_@zeRnLjx8k+hBZzUC3Q(h4HDhg>fCbQlpMt@3r6qg+^FZc-nsD3B`2Bn#bK8&Bb~JxC`O17;&eaZrw^ZQF zt#gW;s)JnCNmN3E<#&?Xp}Dsj2W%gd#VF5#j;E{ktMC7S6(H_@*lofsUovlTKDpV# zuiR2n@x7eSImW|xvE!yCwaqcPxd6oZDz@iGi{L32m2-dm@RqizUNIp23k#bPo=rE% zW4gbrD1!ZljtU@FZ-9Hw^?mp3=e`tqfm|BZ-Bj?kjzxZHJCAXCjV(d=jp*PK@GPB9 zVmX{%r(2Ly0dea}ZmJ7bF1jRo1T3Z*UY|~KYeg49;{4N842Gh|hMC*NnhPUtg4Hh4 zumuA~ebp+Ps&)G4tM)FcwKj#THc)P4@lJcp?sSpt36QoWzn`0~joAEdI0k)}mG}Tx z**#!|cfS6jK0WYOm@eXm=A`p|n-&3!=MC$JHZ3K*8O3>6pn)yqG3piP><*i!4J2V~ z{q{n>Kxw)igydSCH|nKPlj?4fUA1{uU}tcDZb#-qzcgoSIS?EY31P3)ve#pCn57K3dMmqC=)3c2#SAPrqA3d$k^H84@|Qj{kqex zO$`LMgRR-Ep%xlDBORnkG*~Q#o#L%2ht<#qDgHTFIi=w^X8sHIy7BfWBMDzr#~{ly zFI>cjmqvd3FEPV9Pa=Aurz_+aM9^0uqWgy6UDrgC*>%8Y_Pd1f76Cly)*A8 zz%zmpNLrN`0fI|C$!kN(h4I&Pc2PmOh)_< zPtKZ@WlE+_;6kIGFT_ge4k-0y73WD2CTAoUt;Y=Ivbq63SIr1QyZ{(|yJD0%Em}uY z2V(Rz+zy-tGL|I&)lD+x=8BCd1k6WTEA$J+K+s*@Z;37YH}6~t3pWpIm<|sHOXc4? 
zGj(n&(RU8zh;|N&<$JX{CGqMHHhMW5Fv2~+7DzMKk_;{>bFqq&a8%l}x9(k1K9;Su z;?%7yj1s8(AiU(apiN=wyPku=9d2!%EYkM2hB0+lx}qhJ>Qp{`;&1W$fg3BEbK9UWir#5QwKh!Z6mW&WSm)yy z#wu&rT|Jot{+u7a@%@@o_yVatHE$Lxka|=`-7%i%GE9I_Bow_;m_bl-I2?H#g`3IC~Qp;a_kw6QWj#mNG4Ic z3i}h2DCeu|DDcvSUlo122tHs+$kAQkTKMO~nRN|Y>7q~_$GxGKJi64}~C zYeq0;tlxk+y=1ClqY7l-dj+k11^HsCF1n}$gnXT@&PoyUq|$o1P%#N;>wv>*DC0O_ zrVNwhVRmhz)-3`~Z6yln#k|r$6XC5}GL6zY5w)06JLtlsU*bXp{3OHm_^tiH`uN~c z0h~kEn=)t#?wh9G|8y>inl`U32_R#j&WgOo%u(^6?G^PiGMz$?YnZ1n zJN0CV;h*>AGFXS?h|&x_i~g)e<7!t;A(gKhK}sTVr*Gfd#;$ov_j2r&3ip4N|4`-q zz}}R%1E^m7UTx%jgPTT+xKB2f}72z z;d1lQN5Je@0(W=v;QB09{oP#FEa>WQi}V3PQ7{~#NjtbbM~1u`H2|hF+2>hg|E`PR zaBeLr%IptsS`Z7Di_t}p{-yjj@|)83t!ph$nA;=5c$=CMLN_bdt+oyI`X_@@*qXCa zbL!T=zl@9Rgq0v9E-#tfGJZy-F;qO}^Ni21!Wbw8JSIKc4%-jg*6tgWyE(8hBViq8 z(puy_n!!JycV!6i?enK5n_%deE-jfd67_8;#MIX@ireA{Ys;%b3v^;1na&4t(6?N7 zn`%$xfS7b3jFk@og?@daF)ER&xMQ$4Xu$9RRyKirpsOCdVzj`W$fg*-9DY#;U+u9W z2sFdC5RGUl=I75nW-bmy`*_b&hrqW;ze%CPhhvGRS7^ zAwn2n9?&tOzE^w~f)&7*4v^81j;!mXls0x65x2scSa{naz_a@}><%i~C-8#-uk$L` z_D8ttPzUzrQR?W=5zHR3M#twD&shUPGv2|Jb%C+!$YSmoVF=Bz)ikATBCU0N{5u`M z!Zb5ExK)n(KH; zNy~X;YH)DtK~=yVnWsf0tc7U5cbY>6H>{T}Z@+jK>D#j_$EBX+Btsz|g=08tiqh&t z+^@F(c>ytq@^J10j{*5}h&;2sIMuu65~=H+lHp9pSgz}BQ9$M@NEk?7yvb!eHkn2u z$e2V@mDUbErnxrV7&W^8?hcJZiG9r5)IgYTfi|sltcM2Y1(Efe#`?J4Byp6K__9%P zU~za29Jgof3N%%F@@=C<0EEuR2}B0<9mHbT!%1huA<0wc*5ZnOU0Mh*U~8zOWZW~M zJfMs^8FSjd)A*b*Jw~o*Pm(@C4{Z)b{2>p?hb&9I#-c}h&tg`{mVVvV4L&m&S+oy? 
zA>t>h+-;FyqEH@tZPvNA#@^!2cYsJA2b*ht0Nows z2V9luE1Nrzb@?JQNTIPZo@DbuSojJB_lMxe0qS~!|w;jOe$w^6-{1ZSr3=oGls8L*tht5&BTSxmL8l36o+~C3 z&d&xy%uN<_b%hZv0i6CX=gLXB=>5BS()uHpjXb#^>s;FjZ@G1hUsLmvXF7|zL5g}Z zyQnohVItcs5CMZ|Hk`e^1KxTbAUnwTrkf8Odzxv|CuAEH&LUZ!$mekDKD2Hs-r-#e ziP9qgv+xiuK9kgG5-rs6Tc4MQQEtNd8Y4sJyXN9FK)hsX7p2)6KN~B~xysACa6vd8 zuj}yW3et;FXfzg&J(THp-?~|`15&d?>C~rkbw>pWaPy|~!2stk^y9Q_-h<2(N`LwO zM6+(z++1V85|C>YuK*TEXm!*ym)u@JvK-%1DeiJ)rT~h!WxxV2guSQvWF#Z`yD}PV zBt(SRw6B^Z58q{I1;BB`o+E%A1}w0n9!_D0d(nGDW@=1>my7n2u|7d1&j| z&V$Rd0Zd`$^T1g1UBmC-=CqgUqVa9Yv<0;6P6G{NGFy$$3mz~;u8;6Tu6Y4g|0W-O zS30R2wzu?Z4T#XmbG@MV%Ps3X8{{;cZi9f$A6BX+-L5S4*;~W_Esm18n{B$squJcW zdHT%qbesBs%`bUVaq>h6 z!u(`_VoH$h0`>H37nNHSpvq?Q=sp)_Ah+~Qw3u8-sFJhhEz_CcYjd0b_5LLd#IJ~b zwgN>j57CVslR147V*Yc19xa~Qe#Y2&qE*EaDIOorEWRM+-OIj8BF_p%lk*;D{(2KZ zbdNIk@|zV;&+2HB2v+Hd%JkO;2Y0WO!wRxDQ0l)lZvXcD<=G_qWRQ@L0kA#=G&aNn z!2W5y@s3EVa+sE|G$$tg#xpK&8jpnWEeROWRo?#uV=aiJ&{f?uyeW>j-ll{D^m;UF z(*6nL)LAoxBQht{V{2wZ%KeXf3APxhX(Y@!1QLA)zMZaPQ@%?W8{OUbe$h~>56aC z=>C#Bn*0xNbCaulH?MwVr!B*_l@ENpgxv7s29GKJG zk$#ubxxw7_u1(Oph91<`WDB@gVIU)SS{g)=vHm;x@bUFb*^j-8I2wT*KQl4)dVWKY zoTDj+LYGRIvfjLzS;%UaBHQoOda!QJ?!nsF2R{Rao(ek_kl5Niq0oRF#l!V+(}T@vrr!XKyX=LyPS{B&ZD(}0lQJ7B`A-8CeV-X=xRJ%g zQXTM<9NZ=WOtLx<-rt0H#&F{8Vs;Z21%BJ{N88bbuL(o0>I>#C;S3RwPnG<7C7GSb ze*v8Oj~Ev<3x?$UhH{%##(NhxaXhxof_j3%hRh0jG2e?-_boNy`ZqYq|0sTJPh2z> zjfit3?RofGeX zFm&g8Ws7ix-pY!Pp~4Y>*aF>p#=npD|7KgvsW#qgj)Zw9#Xrz>z_i-O*dHcXs=>!2 z;NyEaYPX_fSy{kB`G3MY ziD|MsHgaW}V&CAR%jd~X=jcw&ub0KSCMAOJTLp(xGfx$w zCpn&xh958qH|{042?DTAY*2*qEnJRjvNd1ru~+uh)PVs2>I5eY@%QDAfg#t5;yJ?e zaRGom&?BBLT59|PPEp``2gvBj**DFMq)L2=88!xSS^_>RS#=byRK2*^zjI3bA%PV| zm0-zPlkgW!%E2m5hi?js5YFj=iqI9n5QN7n`T&pwQuIb*@z9pRJcZ#bBnM1cN<{#v zZVTDY(TxbADy(g6?FbC%?Ujz)r;)^DIv}p5nz77q?8oAB&tVlJo@jk|mXmxLfj2DQcP%cmpDb5RFm$@}y?(u|J``8_XfsMSUi#$! 
z3J8^RNZn&C=_{Sdqd%Ru9C65+qd7P!)4G}H(zOSUf&FDjBSE;mr~!_k1V;}*4^U8T zyxuJxWj5OrXSTTcv0vYrE-X{4(QTvYf?E$*Z=XR5O%KJHyqsaC|RxA2wv8ZW@t|@%+ z*ERvK)A8nFzeevwI|dv#%I{0S7p+i+$`kbB0rU%o#m(@u_wbjOOnpP^SQhiT{E*Ez z8{dn$97=N(!GeAUQEDWeFuqT)g~oLs)Emnypa9>^f? zRkbK^sEf+OkiQn)d%IHUA_!ct$FXmfUYQyEr1VSE$VL8?yvC`P}0oW4x(s(^Mxed37p zO27_)?e*<8?PUOzs;6+;9qx4a$|_ygXCW{E6a0(1Gfud20ME1zwmbp=ovm=UK!3kt zFp^b$8a7eYCN=ruXg(t0FA*uXQk&r)f6<$Njf;a$IZgH6(e~SyOJu}U{~!+&Z{Tx9 zH$=#wXKVmxn)?83t_j$ZaKW>Dgq@%6L&j?JaT?-;Tw#&_Yqvt#Hj5o!Duo#aX`c`b zNN?Zrvmu8+uyf*4x0kv%X|=iPNjU5;BsFNGFbSZ=Ypv_KzNn0B&8jgm&?8 z=a?T-E-R6cJytna=2ODhwX=A!|D0ulTRp#ow(xx3U6{r!U3aqI!GB2M+48NVai3jp z0OxxSK>kp$mDWpNIEg1qkXsH|Zgv)P``9Vc%Z*%f(QiRCiM-&U9woH`H2r6p;?Vp~3W#Dd;Vt`W-Nfi7|9NQKPQ!+HpjV zXuKtk%G399BGDs)$Ay^uBC#Yd|L;!I4F2BsR}&4*0zvkWYVZlgDIPu=+TZ9mTeZE)t-;nurMoA?NaO-#MHADCmi9_M=<95vlGz zf5tmNmP6*K)1%M(-zUy|>|QEfd^q$y@yqRWjl418$Mttx!NlaOVq*(d-aL4Egr?{k?0xBQw9^E(ymczG2U6Kn_&K?sDivpGv$(3eWR4;KkrBY!s~ z=7FiQy7*N1_79$1PNc>*g!hXnxDdagK{sV_w%0cmPU2LA@uDVYJLk3eJ+Q+Tzk_N+ z|Jr_O8owIx17iD!lRVH1vm{P7*QOD+ae0iw^_(Q@O#m4_VpF=i7*4`N_D^;}R{6zc zmIfug)=2>W?u6ItRr$SxjGY~|PdOY!fO?)vUAS=`fW1fFOo)OwYv2A)b^$ndc zfeig-B=H=%3q9$%f(^hAM_71_&z_kjc&(0iL}@E?{Z&s>c7@Of1e|c$d&=bs=+ zUHSB)9A~2M_R9QH;b!U=>n26tFxThG{Sb>)>QcM5$#_v;)u-pluFZwTQ*8U^lzj`r zqR@_*3XmD@`{alpcphp9Xza88sfktiM|T29DP5xSc!6E1e{}~)KN}2C&osT-fs)BIWSrUxdmNxoKm`0ZSxYBM7ed{wY z$*~yNzHfc5HAAU4U1CXo0iB3s;(=TZmA02Bk1;=evduaG2r&Ux4ZwdHrKWhNTv~<5 z0SGV+0lN%HRXb02KIZI&;zdmTR`NPfKwSMVp+A6cGMJ{U566)_14#1X5)%`X%|1DM z>IwS(BBbd5Wc23Lmt9oWi&Fx+pfl392xSW5kNtywh~3_e-p6^L|Jg>X_rh3=*7$=U z(pXV2qcsTflEFaCYxhQ5Z=S#7Saj{Z##&ACv~#BtZRb2GqQXgT%y?cp{;2Q&BT9Y( zY)mFA9*E0=WCX@e3aeW!egSKL3K0w_Y}}1<0OU>UB@@?wxrxq~QPRWChEO;X_USCP z=Q?OU9iDi9%z;=l!RCOyG$?H&q*&oxL&irm28IU)qR693BDn$v&(k*rd>AKEneD;W zSc5-tGV%Uk8(BQ$Z1Xoz`^{0vJc+02<}W80e*DX7KCz+gubSti_ErYflgFg%8P`{I z^cC<66fpCfI|3Bv9F33k8v1e=hY81nimsot^sWB$9|`inXf6$pWT^-#p}v#h!w(6N 
zELidOrOx)=);Um=qop~9;uZc;SOu?VUq;NF)}MlWKjC;MboKKG0=u(H-t;f$`4`rZ?A8)ZUNXz%~K?Yn+f{*AM1Y{<1?p><)qm!i1PB6g<$wU_a^*T(VWaU`J+kmocGZiEZKY-4V zK|+Dm+>Z5mT?8u&a=%o+ZSTmo8$le~UEZ|S`a0%Gd3qR)F61js}6)o5Bj`4 ziXhXM{u%fWHo4=5@L)Qj7XV%x%!yb5qC%cRya$WkFd-%>;H@<)IIhL@0p(wzZnW22 zvHw!yAu>=R7-%|y-^3Iw7_@`j&760YkT6nljg-nU-sz&=8n-_`#$oHVvaK@flGd&1 z1s4Xr2B76A5bvOdo5AGG)PNkLOA;V*L{8Gs<)Qi35!z09e>gef`1umN@bM+5Gu^+@s{UiGQ4IF1UB-j z(g}^#rR(!XTZQMHV#WLvyNHR4&` z>lJ$TsQ|+FG?qjg1H+r4{&I78`>|LbVQ`3 z#T(t%qo47c`Xnks{g~Tx;if0{j*gy2 zgCn05Y99s(sEq){c=rb|Y^*Z&IWSJUFKRb7m!xDUca=$Wy_`lh^lg>xC?7Bg943IK zDX*vr--{wL%9!1}l_=@yaLa+%`;uB&6#>#+?HX}%`@-PJny9yLJSc2**%F*ZKUUl| zjbLC*`$Jdwu=N#XF!YH{C(yaC^&2?8JKHF^RWn$bvp@%st;4PI2)X=GLT64O9aE#1 zoqe9EOE`QPf3-OVfNIadwT|%P$)4xC#9^OW19otS^=GvVz)EF?EkL`cf*fetU*-qC zRH4!n=0v6zI!Q9c`Yjs{Y!W2-Q(Wn97TkZ55IFI;velEoMrW}?V@`QhN2liVnT0(BV ztS7!Bl|bU%Ap2ZhFp6cqv{%N}`{WNvvv$7z;?fC z^pj4Vi7CWl%P6v@ktbeZZ@d%UHqujjHhiPKf$B>YP7h{fB;S2~60t?{m^=;uD2K=o z>1Joj_jz2^bL^B`h^zIB+>#9r;`A4ZWav=#_|Zu7%jNhQ%3}psXzz1>PFz@d#quOf zx||BNqI%ImUt`wA{YkB`;G`nI+4ZgATpiCkSIH@=Z2!rGis2ZAg}^fsvg037Z{@Ci zSD3<~NZ{Kb>CLz8K2x;Cg@Mr{z@W0?bv!l1WJ18~=Y5n)*u;PxdFNJieKadVl~ zikH86_RFXuwGq2_uw(q5iIw>leKnVL_GoY*>gTAD&^go{84hxwdz!H~SfRAtjP#z& zNafHew^Zc_=wk>?ApyQV!Som0833lhzv9T(;k)GT^ z1%_M6Nj6Xn*yWb|oll${YyxKW-N7^qd1tbQTHlRJxm}QxHGLr<*SP)yJ0*Ud6G4fst;!S|>DFEb-9A|txOg@N#d`# z9=dw4JakIKXgSXK9lqi+d2It4*9cX3LPT{;0uKA+VwEPx6pwAPx2<>bnPdei7jnr2 zwy-?tyUt;pnLitZ+fIF}hEJC}nfH)L9o^0SOdzezt5p*`5&l)vKE5iwRp28^^4WrX zv@FV2^GbIuWzAdMp{VvT;CS(4Rb%jx6g(hu4bJMRs-D-`*M44)$NdT71i>=ZvW_mjA-oq}1z_^~)1Irw zW2_AlLWFrDxHDYu&4iq9`w-pLAE5a|0)0qjM9e6E`P+6gGJhw;{5%Z?8U-M61C$VG%73JZ^J}M`QZPO#@x|Rw@pek>&6Tek{l?V@E?u@0?Klk3 zOre5(A0hDn)vjLT$`P<=)=AwvtTP}Ena%**~j zWg!stQaXUow(1EB*Za)C>sFq3LDz;dMnzs1Xt~?h ztii=Awov>kJILHi(%Ebpk>zlG!Zc5_eKg$aH`s?hJPCZ9tqO0^yv^^HLe zBS(AhO8miD_L0fu?KiAKQU8kUmWu^)fxwPaO?MlJXwXA0X|NC;UrwV#CSF;+Wd{YJ z$r4l5igTS~DY)7b)I43;`?sZ(@;(7e0lH!`oUO#I&a8PG@_mYeKj%}uC#%XNOK!G; 
zot5;|$1rI_!$HDx*bTAkHSs0aI&}6}1GP13>&!jr%LvWN9E1@|5y1vW!Gt>DraD|* z&J?II5nNyx(&+91_yj<(8kt#~$xx#vb5;Ovc<2qxAl=vO0u67?G#!SvRK1uD?mxgp zc{#%wW{#m_mw*uN9D8O$!zK%ocH5tQr0eVrOX`w6+PXsOwi94-kj9pWEmd|#I zZ3Vsu3TTI@LG4)XKD(CtxzND&`~C)Z3&94NTbJ0xURB0k&OL{qtay<%ZS+jfdwbM8 zS@%)e+x8d^Ei3uGAO4g{4yclmueP1*$|bE|(g`)rZZP`SPt+?B1{{2>7a}041t+hm z#}~_iY{C4t?K`V|w^Vk?B^MI4@)nK57uGod7<2UCUTyjSmvylKzgRk}zn1P?4eX~j zoC(j$w+MRQ9TS8wp-$iT1qLmy3vWp-TO^6_aum9yVwl9Ko=eBGUAOZXLv9J7_(;_R zF7T3Z!C&608Fzn95-R1MwNU*0#OJq9$f3{3zS{!#B0+Ct_OW}D+hNSc+br!N3#QR< z`7se>x!ZT1MX5Q(3^1Fs0^uyFMv)2;+2&_k2G_oD-nC49KO#f|aS)7Y5lAJH?wB{d zxul0znK(54CX88c8{}82yZdvST3!S5%ZNg@MG_+hnxYna=+7YglN@0*u8}7^8$O_L zOC9KjJ$Vtp>c4Bk9ox&l2U*!1Ltb*OzweUx#7g9t^TqsS9Xb>#p|g82dJFhe&Mkzc zzZWXFes2;$v#@p(0ea`Jk{)Uc!GrVCrX_|hPgFUY|E$pj8~Np>nxe2*(LQZrXB{rS zDL6+ovAWNFpI5K&g42VF$#YNABV)8NROEB4YBbkV#)CANR^vB5zALvLj>uwcNzvm^nF>hlIfYRZjs{f{?W1fui2qx&Xr_+KHf@g>Q_Mg^2(%>rz!(eIMA_Hw6wOcxCZF88!v_nBP6| zuxoNjGXdx@q@UY%CV`?}3>WD*T_neCT+&grV0{vQYo!AT1)4HtD}yBbWHC8v*f@{A z^Q#fPp#F+LvmEh1gw#%7h>9m{Cix8GgbSOA zJggnSlY$P?$d^(Y|7j$dU?QDgp0o{~4$ctBJ9gwH&mKH|p6he7D*du+TBF@a9{(ZO zN;{*N*j(y3B1Z`n=7`?OdVLYq5pWWzO_lzE=zC=CrHQGmY8f$#R;-IA=+)ofBbc@{ z8x5RnmqOk0ThJgEJPM|cQECzqM87cwE}W#@Fl3~euKK3*{Ssgi6mFm?D{(essz6$P z?(?Z(5+n*;C40a4e&(tiA2CIBVE&W3NNgj;TDj37o7NYY&x7#j=7)Z}Di5^z4ZpNB z5A?GSQ{H9Sk4pP%UC3jDM9HgC0?~?a`JW7#gR%Xf=Xe5(QRnx%EC;W{^_FBDCc=xk zNMDfN@nlp3FJc##na4&GB);>XQR zB6Rez6e)22HGr)L`R#aZx@=Yw-xoRluH3eL1-VaH{Kdvz^iBDq4S;FwOu~jCI141> znj*w3j`vM>)+Q!~F6tK<`p!S18c!+ukTflbcsJ$4S%K!8x?n0a`8rqg$`l-RKhz|w zeo?=p`E_p(WozqOuIlzwxJS9$Oqk?Nxf(_E{>(q2z^9?YR4hP&GS(Szenws>#e-?S zhQD8%`UP#%fk2U{2!pXgplx=K?THldn9f(P%m03R?j9Fq1J3)z!+e&~gV18AHp?ZP z|3RTo{ee73-AD!c_uChw;Sb6gY3CWq@UxsB%UQGp9zwh|vU(ORG1+V>4(;Z%WC3C} zUp5!VVbr>2;G>o8Xuy7I9w??!W2J+ z^~PT5Pd-#Ux)^N?&I9 z-uM!SuyM7JP=bKQ)I~;-Ox@8fh8K8^QTbu^X8JkpmpeE6L>HXMZ}NFYMAOgbf0hE` z`(5rJZ901N?g;%$<2=}9V3G_*!X9bP#NQh)PTE-cMQMOn_k~X9G_Q2?sBdYTOI~%D zxcQufBJ|2*nB^tpJD^j{aV*5XXYu^0(tn+N=8B>FI;=hv8O+q%LwgKP_|^A_xFlTG 
zp3Q&y^5n-(Ur&;WnmIB~(XZ2Q$EZFKwxtX|4yKLlY*vgk0X$JhUgCWQj zRS#r!Gc++5>u*uGx;bWZR>KT8Ei{D8sQ? zp%0w6Ku*h}U^JaWb4kvwWy^j3*t20s2Wn;A?E_Xm6=1zkBY4u0_7)xa>b0RfBQYi6 zEvk`}BY$EF>JO$Ju}-!)$MF$Azt^Sza{% zDD5t?DpJYHx)m#WYAxgm*QSdV0(KH@%trWHu|H}?``I1K4siZb{0Po}SL(!IeTIe4 zs``MngCXN~MvLnzJ>fa6>TTjK6ydppHXjQQzbC@y-fUhjoH`R=Hacvr66^kI@MO%o z@J4^9D?F(o-$1rd@_RBaxGm4r07Kcw)ey{wvW!M42EEGx!_T2fc+ssiEaWzZrb91W zj1(K&h$0Dw+Wg^dJ7E#CYouZ*Ch3E|BOJ-LQD=~};ctC$n4x=E$h7xidCL!BqvhkH z1L>UtN;t`ive3ob&-OpYeKgPHRR|@1OE93ur?-4%QZ??nQ&&QOFlq7)3b*zVN=x8& zhLJG?W%V7F8L#X&#^Lwf37K*hDDwIa&V9x@4Cv6=U}|84 zc=!w4j;SN(V$)OI-jP0|9v~$x1N~m#@t}^?hA+ynY2LZ+m$m}9R7NT~Ut3d3Y%wWMzy>i3WOZjBnm z1K{sOeAk|f`#i(&D#%nG!4-Q&g#mOoM#i+xDR-*Qk@BH@BCvQdf+e*4tvAs&+OX3OKk-Ngw3r=c{wgTYX^?Ly-~d%h$0Ak-nl9 z5|ZWpQB-Tgg>Aku{jqMtVqE>Ldu7*c`vhjp_jN-u>~m}xljc^-S@M3-8wI=k7}r~D zbkn;I))cO`2aVQ+Gt6=SaeULY)Y$`PG@Wrm`Xg` zDS<-4)m5K*S({~Xj>I1d9J8@^`h3qC1W>Y1K9=0vsp5^BT}te!JX#WGbYwU}EdSiT zRzn3YG}b=ph6?+QpEtWMpOTm@d0#doE^_krJHCBdZqnO_3n7H{m(ouDw-JQ6Y^T=<6{cuM?E@v!@c1F2JojSo0d^>%Q#X|h^pmdW9E&?=ne+5&wq&*dI)VLC}qn#ZM zqBG2)aF?Xx?39=r6I1zJ)QWMd_A|Lj$M(4h->N`uQG&-3M4d@&@o5t_{&<6t zYazw&T$HeKIqLYT%489eu{a3Zs2Ga;LEG`2-{F%*vDy`2rITRrSgW+cXfsy9(JelU z0fC%D1$x-52)vAK4Ff^ct;8(d+nTppx|5}oh5haF%imY?CC0okY7r?UPf3TIaw4Ie z?As@bA*L$DU>Y%X^^3kjDq%ml5_)A?f)v>}E*}Ld5SD9os>b%q8fPk@k@w^A?-~uT z4arV5i7fV8)5S309MqrNUDgCf1y?D*St^8|Yh6&vEuM&2$Aow#-MOHB5GUP3#s4EX zvdkw22V5@}4=>#gBN+iYK#cGmwrhR1fHLg#%D9!x6 zTk7Be=FIZdsGB+b);Y&(`(&Q{TKVG225w!RyKT76Fpb*tCM%8KqK!y;^OgWthu?QZ z^`kf?oAuL{0wG(`9golb`xw;9)PS?KVmPo6ywZ1oS)%(iZj>Ku8F-bIVr-_vO}Ad4 zD}{*&`PqaGtf;`vRTf2c%Oj-D4CpaR@!HW&=Ce&~-(^}USgPAUcS+Oamc5Kdj6R@* zpiG)a6Znd<3lh2pKqDdD`QSC0yF7M5D>XYc%W*Fj&c#(B6 z);wcUATs#4P4H#EhL=t+F5GlH%+M5uu9ONBS(Ro54ohGeMwod^Ml^1a^-f7ber8}5ncEQR2;A z3g{L)%1#wKGD}h!zPYqAg|hyBT{a8!)hzGhhOD_{>(m4nxm}UR8QpH=cKG1zk06$| zt-^4mEU;l`jllkXtp_C8iP3tstR1dz{4H(cyRY8MQ1`eZA7jhh{CcX!`8u_O zU%gg0)c}GF$Q+=uL*-|Z>i0vR&5RcGm4C~y%WrtQ#ZXnUT!CbyoG<#MN=ISv8-dg* 
z0l`4O9=VgR)?+^36wDG*Xf(|Jqo2S}^??}U3&-_|9#PZXXz0WL7W*VayzY5G*t0#r zw=DYkg>`hGBHGI_d|0C13UqeNhl3$=t6AKVE1&vlXeRVTy$W0LQ zgp?=Dg9g2FmhCScr>hqVCXcd)1a)Y5`Q>z@-UX z?LI=)H+W&W+J|PH@|y!AG~l0?<usF%s`0b@P40vU(4RLH_79mUyWH7lMlXg-lO`x-VCo8^Mkfv0091J zp9K0vlL%(gcC>`MpJ#?hJhmB}=Li5UJjd~$ZCf@gHdZGYAXtV(xSBxb4~fpr1-;PZ zjl-3Y4^Q0shTuL0%i^eAi5W!wy~p?fde5=3F$kr5Do?Le4?c8(5Cf&7fGK#VyONN zXq+ihGr&~Z_{Axv?MHucSIxF)8 z44$oY;58SBjbb3lDxogB~N0zytl@pR1fTM^gxkckOF-p za+2%Lu!qAhEOmEkXrDqe*0XOJ!>TE(u;uLubAHF!*YJqP;P3p-k|*8R*{}fAvxu#~ zg^Gm#EYN4}s>P4Qs2-pS+do}0!e~Nm>4eotSZyho?Hl^`QUtmc`&DyoYS7v%b5k3y zwFQjU{Z;viK+?(w0;;UwdaN#}1>vI6GdriNVc(NAfz1_BJpZS+#@Wdj>dWi{*6m^d z(RS4VR|4m))RPf$4TX;mMxmVUzm#uG6h!#kQ${8YVtpRUD-&TA@4~K^xklb zJ*nx}N0*@Al%9B~B5{Mnk%fKw17+<@kFf1(41j>Y=S;RW%0}w{0M7GUg=5 znk4Qqn9^cn=|q8u2)ufR{GTxt1vEO+ypyaEf?_9Ix#FkHZOsfNr%zd;4fVaqX=GR8gcbzjeGGaY zxDlR7qSS!L6_$tA?s|b+S9R`bzG~1o<4>~8uW0}tFPPE;!KxE!w}q3F7(HC|5BZ~| zWut}GUSykd>C$XRm5^tcDk#t7`B8F&I^IT7M--4@--8Rmt8bJ>@u~vMLzsWVDK^7#at_OzHP1obuK0g4E%!~oJ)7el+5tl-<6Ey zBFn-fsxW9;9~vKD2`%jK2e5W)fe>)-OVrShNP4p)D~mbAN0^^daRHZdY|-l_RQ=2< z=FSqvM$t%9ZKn?9jrUu51>1^T437)3);P)9wnn1&=E;X-= zlf5=)j}n_GWfdQToL><6XLU=4?R4_T5Ydm@b)~m8a>RoL5A4;5RbvGl)LUEJF9HFW zxeCvPKT=dMvM5=_M@0!b{HApufJIrzdPrbiW^YzhK?bY&NBkOyYcz8p)X; zR&^n-Sb!0bZR^1u;F`?Grl7s=4;4BMjtz~05AL^Z=SXpQp+g4PR0j!plJ?b{zKo}&gaA)T2d^(d5Dh>%! 
zr6ds00uHtlNsi~LH|HB=u#d;KCj#v!2%BnQSD^iD|1w5QKu2hiU_d)o03d6A08N43 z1$&Ws%i5ZxyI6sz6wiu59rlkAqs ztjfj451;QJRcuA1hz;jW;>&eP=T$x_B_nxytlxk+#b)A5*)q$Q9j*Kn+iRB9xZ#*d zCkpSr%$5CB?O0lHTmhOS%o(k#0Jw>H5AQ7nqiVvxD;4E&_o|W}NWQWA{Y@^*VTnJCF$^>P!C;f#XAGuyJ0V zs;yldE=*knnJ@kn_=kQ2oy`(O12wbODTL$>O+z5oYUjyrj|uR}Dlm2Z0@y}&Cg|+K z{_dtr*l5vwI%ID6!@|9{m{Sc&J?a?5+_ck&H}B2WqQ-uXo|_SA|G>61!x@>`QU~CK z`);I}z5CTI8!G=mX5_UrGX&a0RLI-4D$LMLQ%}#jp`yt;^ZH1mm5c!+P()vnqTtP+>S2 z`Wfr!5P5u6j@mo!(e0d?fVYbYZnjaAFB09Z}bwR^>}>yQ@P*@yS&N~AXse&<|kq<{0pUukLAtB;#WUMnIS>Cmx4N7~cX zl+~)(JjGUI=h!OEQF`;l`B~=C#6`ga-*G2ysXe3NtE)#yxYTP9T zn#+ycwU1CcCCT}S@_e(knOAhziJJ^4j-nT{`n|?ibjMbBTb<*X+Z9QUJHC~8m$}m_ zT7uOYe9O1&BUWt@p{!BtPV#879m=w50k^`jRkbTE$wh7P!kuejFW<#YfedMDBJ$g% zQ6P=js`5`GUT+(&Z`D(|fyrk^5$_B#C*7xNY|Lw0tS5nwh98A=E`*`nZF!y6y((!$ z4l3KKknarb=P`ODC)g=C)R2HCd;0Jvg(mIlgm;^4R@kl7QaEDSB;;iy(8R`vzf~Ed z1rY?tRWz665NRv?2nN#;LFVnEeZkqWr*)Gbo(dMF^M%R(=@S`Nyyp*pM6rz4+*)Hd zk-qeC@Eteyk*f12Zmw-g*`Qa_y^X0>rUwYZ-aF`JC2Dq;pP)|M?qy4xx64~Tc?p(L z85BK*o;*ajp(CEUnmJ5;TnAQ)+&LKl~$-+&g zq6%_9_S(ye2%s*cXkN zHS6Vqql2T-q5CzgYB)W(oR#Liig!=3ZRYw@m)>|=AejZDY;7)}!kGuK;Q$pIWJKzm z+$XK4QJv1W2qifDMYYFyaX;fK;#SR3=Al(vcyyxyvs~L9arWJ*T(wA%+`Wgk1_#@v zp4ainGCQv?#bvJGYy9tAbAIl46-gmAVQ_EiYpday(-iH66nmzBgf;cc<+VZBl zc`0)o59T)B8-PXPl&EicICCs|e=)${cmSLB`HqergrbROT0TPkI^=0ayhO@MyV3eZ5ywdz^oBzGKbR=ZqEsm*DPNyacca7gh_@{ zMo$VJqo1qPIB)2je##(qHO$t!){=4vYytLu;~U3umB;xE9$!zAjc;&$^nT{f<=p>1 z>+T4seGsYKBwDR9tVtO)GJ((*J%!=FXdeP>6OQXrmFOc6PX}8|ArEezEvUDXFcyE&ADVw%~#6jukz8PuHIY^!`()={HMPtUV))q z2ULEWFso@^&a$!i_3(051!|k1UXNQv%vhFn1@Il_;Ie z2ET+tc{x=$jY1RQ&gz&ZqwUJFr|{PxQK#bRynLQKhy(5ib4PC>Znr9s>r;k8*+0+z hpJo4C6q-9BN^J^ZEu*n81({6GAQaW_6x=rS`yWT+hD-ne literal 0 HcmV?d00001 diff --git a/docs/source/_static/imgs/pruning/pruning_patterns.png b/docs/source/_static/imgs/pruning/pruning_patterns.png index 872c6cf8b3501f182a05da28ba3bc807e81771b3..30f5f8eb2dca8bac903c94de42bae5efd4476c45 100644 GIT binary patch literal 37938 
zcma%j2UwHc(yfK22#6pc2tibe^o~?fY0?CxNe5|CqXY~Pq9D?H??{*4L8?gaQUe6( zMF=%?62g6h-?`^I=RfD(|9Mb}5Z*m|X4b4Vdp7>h)fC9C(qFxB;R2bG;#18F7YJ|{ zE?m4tLInH|F%)G8{B!Z0ro!V3Mg6x|ffoc;vZ}HdE|i9nVoeEw*H@er_1;~$Kyx4e z=c1D4z3mGZB0H3x%D#MWw9$Y;GLAxLlHp(APvExhSvvaF(8N9ah-~`^PSy4ChhdYY z!KWuL%{$_}#jdUnUQDLEL2^au!>4=q?wJx?@)jexZW2OtCG5M%$i)ukYt&bIgo$m9 zq<7cdw<;|MdQ%pr1Xj|M%5n zKk{^cnSXo@_%3ljR@`6TiGR;;=7b}vzrES}|NZ99RyLeksx;$m2^TK0_uD2;Nc(C+ z372&e5xep01-hjyHS1+PUQ0c(zGpLyF!v(~OsPx^gmg$$h4-)Boo!$lIx#yDgvi1rP|^7rm6ba?wR zxv`JefX(e-<#ZhpL^l3AFSP2WD{XbGCM)moryqoWp7f=Y(>l%Jtd+$-$73;cGwt0f z@SIMN^zdeAsT#HccY)0+YOlT9`#rerHI_esC2aPJMi2)U)e7s@<2Gl}suYCEj_)7n zR&DtF)AlK$v0`ITHuoCaS3_fP+s)=+(P_FVkKMkx*KLNbzivuyf4|(E5h_iOTRO8_ zaXm0Uv!=JVM?4>$MIQL`REEAEk$y@OaT4krWlVeAQXGh#Ls+~z9kg`jfh|&;?`50? z7ZsdMEs5k{Yi8@Ajpf8=lfhAp_kI18D=`jd8V>qpOB37ghrq-4QzO~b58gVk{@Kj@ z;zoKqWL2T@CR8w%m{j{09oMweOvgS(jIM3u;V+}Bq4&v#%AC8r>!0qoR-!6x5ud4U z9nQevVjm+BVj&Ak9jevb8}vd){pkmkp_9IyUe4Zs9NG!`CA%_o?q!cfiquIqT+Q`( z8946yM1B=vpyPXy8V{s$^sUwKq9s|+MdZ_SLsl9Nr%4?e_QyQ+R{FTeX$pSP>K6;h zCuG_mhp|Be-}qIbR1-yfkWA3U@a~{JMFiUVZQW-#q8Y1K50CO-*D?<7l-<~n8PY)e z0-JOf?o6Dl{Et}s{MCLwROwTpel_{+Qf#fsTy}@IkG@gCfV5hw2bVh9vHSJsbLZ|( z+7=V)7(p$XJOp*tuhJNt8F&~z;2UD4=n1Xo@Mc%5Ff@MGCp^a)Pa%VKvX?(uG>3{5 z8hR7W=#5CvYhbSH`5k~(*^gV~{x!rqL0uyPJ01Dn`8oq?CcXMKEOGEY@8(`I-jKe| zP>-(Et{|nRZgzZ(wn&~~cfBD6JjS%Wb{-F^)+@(5py6!L7ew7G=qD0o{>UulSR%~9 z1}QnCxRA(Q8MM7--Po+eqYZPqN{aBClljgq7yd_%b%Iu3O7FNHZO@rZ*F#Nxc=g|^ zI!$C-d4EGNGSr1T#>ueZ7SOjJzRlG6vBPq{j_sFfze7XqH__asON@cAr0QsI0sr-y zjf6+=8Q7tZ@pGo?aNgq=AxhP@uGyuZ zp0VL_b$_qpe`sOz{lpNpDR$pzeTUjO>{GMOUly+t{lm}y#FS3ZM6TU@Xx$R)BClb+bj|i8 zExmb^?^}&V@UKzFGMQx~Y4<@kc<>c`v9;_ZxeLV!J&; zQrLNWwz)kH^BN(BgisOBD94heSTe56<{$6}0TCa7glBRtg|g$V8)W=vN_MK;SoB$~PS2Go)Z^DFqtl*K^w*KYpH^Av#RRf!#D1tJJn3f&t^ z<&W9fmI@=Qjus;1bb3G`!95@lY)|7%t4=w_fL1?-mChm~C@NZj6l9CY*C{4PsM+QL zTT_Aex#JFeAsk$Iy>#Y zWnZ8n-L2>P_drI;43U-pCu7yW`ZobcMzp&!K*0&_?{@dzDtI{bJ|ihA_U#MYgLB9? 
zeQzJL_axO|$I0$i!t1EP?tC3jD==~&Kz;Aa#NuTWcxg?^gcC0guM7vxCq5aFcAf(PSrP zygt$T9nH-3nE0|l**c>-7kj6}U#`{d?2e2ZwHw}B{#0yk!e5ChDL_EV68|?#+2!Z_ zn-IouDK4=C3Qo|~Mrd0(@IYIO940O1^BgTPsn5a`Mu)a9|9YBHpuK5hyf#<_pV13)i)(Y{?ogkNn9@FecR58%y~=;Hiy6a>uHeD7kw3^DF~rs(rGCL$|tA z-XlDWeV%B9VG7h~97FrcyX50JBp`8?1%}1?)jS3ah4%8_^6e1x9>r6`U!G z_MQWXG+M#+=eIq)JMcgt=EI6pOBTOLLx*WE0}&V-;#5#UZ}k~-)o-0=f1K+K%vGcT zD%gyOm7+|viVPMQXs|ngNUaG3*KC}O={u5b*6;R@-CC*=e>qqrd_?{nLnpuS;-i zKUEKusKXFc_JSCAkZzq(*(D%pJi~sKsJ=u| z;_4YsciVq^lAVgcT%BbJ3nN+M8xR{b|`u=Qn?iS54_H+@w>Cr z!$XfUDUnc-TfnZ3*d&y=!1{y{pFoeS>oh_IHePub1H=Q*J-qiptDp7GiNwD}0o!3K zsQ8jbXYl}%3cFFy_R0|mf#=06e=>GCveefD=u z{|om+->PHnXQVN$1M%-1DR65;9s2m6Pkn0g@!W)Rd*r&DIjzUr<*ahqSaDUVv7+K8 z+=z#C;@xjK}8 z(q}_=U;Sljm^&!{GF2ew?@X(RuYUqJi1Cb;wmoZx2eT+A!@dzJ zVg@;9^aZ`pky(^^W6Kgq!n|HI@TOAICci z5bytDNX|ArJT-pzbXL7zYQzoGdyHv0p@#XKGv3do!Z>LdE*b9*+?)PnQ^RaXu#B|Z zciS-gY9Q%j-~4YgqlP^#%qpo#^k`vA`lp-30(PQ!zmyL8$M1BQysa6R?`%mrYXbJh z`;{)LPT8CGq70IOc(|8-0|Gu+D2B-Xfrfvl!`+eY^IKI&2U61GZ@blmXB!fSmD4!f z!r#){ayD`UZ-H2`#p$5@`fVe~s8(|fxBe?fTSO5zpn19WX4294Gk1~P3qe#R1oq4Cz@+u(BhyOPyk5BVt zX_$CieEb#C6E>?2GTb&pbu}J-MN< z7hmY^vJU{0Q!ahJsv-_ka^&hG;J~xShl5&j{<7(x2)$`1@ zx(fg7aX|JKTekh#&l6iCnAJNwBYVDXHOAZ?A+rs)(LAj=mNR<*^s9iUFv@S3e*myD z@IUIb(6`hHNAAncQN zA8S?ajuk=4-u|kE6bicsm)q5CPR=UBy3_uDP{S^CF851H#z8QM`*vz$eLh6~TOs*K z&b2>^X%`8V(Vw@QXL z%==fJt1ltYcj5G!Ih_vySVT5YVM?8K42eL_?3+GM>1N?ckjnr_ZR@c3IOP{9_g%xhK^RDVm%O z?WR?Qy5B5ilMe?Wi*$+%ZW#c*fpczGocC`5@!#kgG3IcKf)qFUE{3pa_fWK=ot+mj zO6!k!TmL0d_Z%B!Gs4<;~7Z#=?(nJ6WsQ<6@ zhX0u=HdzuX5GMoo)cq)Pj2>-NmBn-~`Vr*2}G|LQMObB4pTJWFyN^qM`Cu9iM(y@e1N zV+So|)<@Nm|8bbaX3gE+1<*3rRov-JTWr=9`3d&B7FoS!A1ZCV4u`Uo85~8##s-Vj zlf=F>i+W9VTmUKu?SG@$o$9OpGUK%nTitNSlsd7aYWO=<^I)`wTDLwM-n_ZpK?M*Y zz6S4}ob?;s_*eO)LcXo>DLX;6c4PuiN;3}l9|4-F4bV(2|5-D2wa&P2HPCuehFyit z>BkbEEf?Jl%}^n>`L%Z&#Q9jEI{%#0=6O}C3>8fy9Fp}_MP5IRbU z>!HPP9h{T>4crGjh8~-pEhsCS_lR7{;L_&lW{&_Pg+&Afhv~`%g7XdQytinZevLG{ 
zx@cM%co+Y*uhnOYI^7@*yleh0_rsIBS#h)5b1kMzB?GCI>_;2wg-JCU!uKY~yPtT+5vZa&5CbW&LS>wFh|Da2&E!2FVYmW9 zpsnE>{*SK4q}}G!H*-e4Z6$D$U=On*pwCBg_r%()&^%uV8n z89?n`v5E9uQuvU5EnRHT1-6hRKZ+jWDz8! zghbyNU?s|}x?zt{Y3z*`WD^6LUz~zNcMW?o`~lZHO1=RBY|M}4>3L(|A!f~WnHY|1BWz~Fm5pDS6$@cw6g5OY1kXR^U9tQtv-Y~G?2#u+jkD>{ z=9Q+0R9M6S4++jf1*UMT!->l#EGA2t$!#c$X?{|;SRYMILnXM9>}uq&AQs|kjBM@n z#U=#&6FL90T-(0Y)0(<-f+{AibKN>f`jiamub(Y$5}*>_Y4UV2-FAmJ@KLXxt(mW& zW6#QyXC^{VGi<_mR;#c4Uvbs^nT(;n*5K``MRRPU6cu(U*2+uf6zerh(XUH~?YE*K zIy>Q|@o8_GXm?z1X}rCVUFvyyGCN*s-eB-F`Cg@M;^MbiFVY`5<#9C+52xOFzw3X< z_NSu_j(n(_IJsX#Y>c+O{MQEG1IikE6X4zRHgiwKNk-q_I(I#~Y`CBoIckIhlDjG; z?^gOvn!TFo&7naQcbp_w!;BLDG+jbKGE9*j==w?JP_Q##dWr(_N)%)w6KNZMIm0VHXm#tTiB9fc#Wse)J}e5nciaLayQ&WyHUMW5s);h%1-f&`W;^q3&4Ap_{sG$Vm)If*l{(PfKHY-L zRM{68d=sbv8vou>?gs?KN1QwahrUx}SUEFvK-{?7l?wx+Z9Ws&jAyIe zV(_Psr80}2Yp zo^om-Yi4k)A4KM3urp^EcuSi7zN^GI9pJ+|61 z&m>BCjPV9x(++!_W#LV$*@^Wo%`Gg$pW7NZi)w!C5>hStQh+u$M!8>!?yO95S5Mu* zu3O2VHh!t%2F{LU<`}Q!3##*OBa(m5`4<&EDQTww!hsAGBnC!!&x?ADTI7Og*&N(U zYIk)msX7x!&AyOf7gY-u1V2B?Y06R6K?@11*Of3P+uO_WSN$44uJly%NMxO= zQ7|Y+b=V{v1-3WX7JXw4avwwFe5|Rl67?7tA=~(JlM3F1UE;;qGEi33=$o?jz*s(o z(mJmWs-!rj8TGazhwX))VNFrt%xb+Lp-HP$Se2_Y=8N|v71K!Hn!oY)ONW;!B!3=u zcvDEz;l>Dao5)_q87`XHt#q;NQb>nCqOxaGN&*f(c(4o&?8)tQW5bUU#NP3$K*|A< zG8yTd#sZEk{Xn7xIohhWUwjlk> zQZ~R|+t|b070hS32R}s_`(i37McK`rJAsuQob_$Zo-6`IZ++_w-NWnKR86_o{%u7q zXw;X~aI<^cagt!)LWWg(+)}zUjrAkzT#RNM9YA_Zx#L7ryHfhsZS|4}@jt(}Z7& zbF~t3G>&(RG*<27*`d1R&eJnBVcLG0*-3)rkp@m#*f&AbGgqs{3~AFjS6q|J#^h`Z zh0L|;O2Yn!`+h|1UIOrAnKRGReMHOY1mb`{RWZ$9MDSS>X{gmU&{*~95K69zKgwjb zAl=$TR`X)d5*-^Sngad5eJtvFuHOBIF@jv#x7AMnrBnNHYrh25Mf(qKyNdy%gKPb5 zpBtt-9cfDLbg|vAPA~jfRqj{LV-%&vnY1f89|W@p{aU{+S6a2q0;J6qCEV}YB9-BZ zfYM*zCqF6+Z9>8`*^m(@ZnTO4?qOpHq?+c`!tJKLA!S(MYI~ziCWHzu^a|M@YjbT3 zTE0=AzO$BJ{5f=4KSvZ<=jyl(oYw7!ykQcO?^bH52)Rl^UR4JnIw+Off*-Yj4yuSw z>*DDy!y-W|+2mlx8|53|$AbNABrf{pi(hk6*6i(>3~FjW-ZD1{FxsoV$S~x=f)6ex z@K|)KPG2mTjeCLmG-d9^fsk+G2gEZDjIU{hpv>wkh$d@6mfPiwL(!4;; 
za9Ue;jZ97$0KD198TqvWeIOJ2nfX#{HWysRy|#v`NSZ_D!s6=aiW=gK6E~z{pgZ4# z169t#q$7OM{M59QsX%QzH4e0aey~0M5J+lt^X~VFErgM~{aqU09R63Av#=WSgdCe< zyAs7YPfHOCd`5cKKQ5@lB^)O zHJs*fvNQL5#ffw`51EyX9Z56hnSugf!z#9HIM4lj<9*t~pt_m`TZ01$*WWeIg{I-a zq0`SVPo@!3~hcA1nprr?NzRp<#6|3DExZe>)y?N(ZUFjnx4>*GV z!De%A<1~$Yn^fl{B2gu#hFH&Rreb)~UuHJ?DsqKQ=2*!Qb>V3s#9@6o(UDv&MREcI zp<$Y-Zgb#o9G8D`vcwCYKxTOU&RtZ!n{C<=eqZseJe>c+tj$uC0- z&dW-Aftc;E+-k*lXu-223(EX+uv(4?E82@^HH>cq8UBpcJH2?G=_)re=8g0_Z=5?v zU~f(XOo86pXMKEJ-YV631M{?p4TnDiBs-kzf8e?>KfB0lR4+C~anWV5Gklu&veElF zAJ3KVx=#XlO-|=&n|^O-P;9o51xxrB$OZ_F^KO`(d|OE&1xO;(X0`Poi(_&@E%OB= z<9$>G_%%mUcpF&}h<)}7KgO^pUNABjkOlybiAxGX9vI8?e)J)^`KN*2T>KcBXB&?MAOg8UZHpU3_A5!pB;Fs*`Fd;RYww4eG7@a=!S)6f z8XC%EFIRq6^X-2e&fI3pcDvkWID0zAIX885BQL+`s*&gM(lqbHm-1J?%3Ks(1(*kd zU`DCTk`wULll=zni7(_pi3l9d6nLi+{((#mU*!_6Z48|}W&{fKxTrEH_awan6{MWB zYU!KHVB4Y>UbMGbR>Us1^3@yeBGMv?d}b$yLWH_j2@eJuQ{xnVUY7ghsnc zvbbUbkz9g5-<3H#9&m5E_C#u3iR_xOF8EPHIyD2noc@UYuem zTH0_lTJCi4wV_`7pK{o+^u>CLfmUvTiFyMCc_FkZJJD!FJ9y+{;qQK3N<@8~YRB1O zo?=~#l*iz_GFF9rCM9=YGQ(ky?;-*OuFKZBY^=r7E+`vM?L+1`h-~YHjwGY5XinKL zkKg3fCcujocYTE?KaGKzh964XMd$!eDV8fSomMdN*maR0y&Sqg=vszyD~z(=Rq=Mr*Su%dZY7 z6Bi?Zsnq6j0*7>8Ys-RU9-T*Fm-1kZj;O^vQ`9O7qQQD@o-qDpFpMQdO$~d|Rp+70 zK#CL)^=TS znXP)s03w(V%@+je0vZuo>hg*J4+yK>WjO1EA}-W9RZMzPKq9wbWEd6f{=BMKM`@yl zSZi-TkDcK#&2F8l%h_d~cR+_}ftk)}C1fF%Tc1V}XxcOw%fCY}c5SaxN-!;4ifWn? z?m~X~Vi)EHh~pMvb472Tj9v5(Qr+iw6?7OrmzWzP`x$!Rz)Gi(cw! 
zY0?G__0gxLUraO)OqzLczW)TLCK2-mYk@;Wg9Ko0?Sco({`EhNN>~(iPMlSBV)hjx zoh`Kf=V8`*^8z-}9vef9BC)V+pp4fhAG_B@O53zK z%m?&0`ZjdLOH_%rGZk(HoO}}aDMBYyz!ONrP5;drMvdlXDEaaLUqV>_kgM^rFhZ#SiS#uW?dU=lQ`b z(n2SZPdWg>nORB8rsn)=mOJAN=&P@IB?bmZ9>x#(*|qD2-}Vk$l!KD-J60w ztC>FjAW3+3U91NSabc1 zj=#o-KKXobZ9-9jrWGf?Q0iFZvTQ~JI^Dwg^2?i; zfZ28_Gne?BJ-{%iU!XfSv38N07^4GKQ5p!%Sf`%ESvHTlh|t1WU>T?-BeQTE%9~LdbN?Yj(kpZ9IFq#K3%$nR_((!l;-> zGLeOq@es>)^!dr5(;0p)qEc|6bLj0#R;WNY8X@%-+V#QBX~wDl?~7y>aHPLLUP zJ#6iMt~JH^OlULuOZRwebY_iCR363+;KvR1FK=OHM> zjm&IzYKa0DI+_X739owS5@vWjK3=%=a-@SRr4Mm%B((~WCUtQTx7bpAxe;kYYYk|h zLe?|-M^tTkDS<=x0stx&z#3Tope>IT2hc~ED4!}@8zmi=Cpwlp#7mBOMFmf+l<_ZL zF>oqJgLx@zz{Mn5?TuWo{oSFt8O1r_!kZRZYvXKXf?CLPd_GBdfaQh%GYmM2WIR1INwN*K}%@Uh#7U8Qg_wjn@yCy8y zwXhJ2EeyDqdfktNMcWgHKC2MleoBF18M$u9lS$j~o13a6I1eVUaVfag7VTIjq?KgR zz~1+GEO1Ear!!c)QaAYkXt0>uf4d4mGC+fev4cd0dX+MdYRedEBiu~1o?A@yJRYlw z^{r?$x({72(q8+G0t1XEBlqrs|GG+FD~S5E+x&Kpdg^A=lI6?!`Ir?R#7gVB&xyVY)4q-}Xoh1*Lxz&~V@ zZ>MWweZLYk10-IkYXc6m@PLV8Ge|XHbyf0Bh^et^4%9|!_Kdg|ANqsvCSWzNH6 z8eJ{L0Be|y(RWzQ2^=h0O|wY5>@Z%^tn3P?8`JNF0kp;*gUTRt{`oS-p$1!f8|^Dg zg8UZpPTUMC={V-+TDJ^15(%3guql2YN_P1u0^fm2Xvikz8!|#Un&#Rt<3&p6I$xh4+LW&la{8!)VnXC zVJ zfO$EwI8jHVI1%Fy9-H4*+?t{;56)htv;(d>L|-$P}o z6*R8H^i4PaD!tex`(yOdY1s_GXy$BbEJ`c%*jk6$b&bDtws9thN>Ij-n|*2`X?noy zq+7cg%Lg3|fmop*KME$w1t!ct!}Cd|2&_d-kc>MI5XF3{VV;aE&n^`rc#9-x461hG zXUpUgu=$B10fJ&qvyb9L&(S!Im}%u&MY4z~(w8p^S<;RmM7I zS`D?j)kOvRFT9*BiVZ6abY4lVr4`jEBw!0KD>q%WzXf!g1)cSyUrrzv=8d$JYv~5Z zbt*s8w#5Ujk3-H-ttnZE#C_Qdb0(`gJ4*_pilyV_zbyFc<~}oc0q2%qs$IpM-Q3@N zLTes~PQTB9>wH`PrRU`fc|SxnBt+Ty$xu7>h4!Tz9ePmiM_QCM4|QV5EkiBpP&&Gd z-Pczl^YAY7rFE1Pw?)C}dPf(H3w3Aa92s8@c0`thzIzGLc=ObFFdv;>VL#u?NQ?4Q z2JYHGOB-Jj4@GZ`MUTXu#RhvX{EP_0~U?G97PNl z<>a$)9QZHuRZ_uCSOPxQ^q#OfTf-C7^PXO^KQJ&Xne+Kr=q_I4Jh$$(3^+1I#J^lDubw^l zuA9a)$*#vbt??2(V-V9ke;GhL?LzJtP8b>LwY?W42YR2OK(r=F>DVKpwL@!jB{sEG zCmN+6Y--6(G^X6&)N0EeXG>^)sgAuENFPc3;|q!N1JrY+wUTop2DGn! 
zY^)DB4qk|UIhOu0v7$^m#vbLOk)8f}N9ktVOtxWKa`h&3W=uM(t0}-HP>ewi&=@wY zhEbl|&BP7H#MfzS@Ac;ni1Dc8NGwT8 z6FYpH9((}re)!Xg_eT_A8 z>vCr#c*1OXg~=dllHUd4Ol|P!Ra)2N&cpVbk-8}tqC41VO1M)cX&a`Lsu##X(o;zB z1QuxyZ^D^cp5^>jte!6n)7?^fK%pbaWKhu6G$dr0w3O8=XTy|NGocX$4k%|a1I9{% z>Vx@Sdt~;vHlhD{1YLDFjE2@ox=ukr1f!-6P zNcb@rBMZ<<4S-Y9d~DQPwp1`ue3vv|&|9Mm_)T)~S^;m+WgvL7Q2<}2t1jwAh3177 zt$!{OnhEj-w>P9OuA*oT$3YF+)l7ZE0Gu*G@?WIt019XEXKSjrVc0LAE;MlO{QS;s zy}kC5@(8^!-QbRaOqE-yubG4kD^fnJgnm2%6v!d(g>U+a_u|S3IB0CD$-8E1>TX<eaRO-e~DT;i0TG!_F^u4ABJZ|d1^#&dX)dUL3?MIF%z6=jdTD-YNCoTTtIx&*;24YR$SPE@^x+Xm6 z&%1|`v(cV6OXcjJ3e+5C4@r-=&!! z!fDaK1rGP7Mqoq+rw=G6II{G=v1{)2rJ zRj1+jz@>|{2|QSNz5t?JuIBmE!|7?7JJWN&2%RRPetc|ypwMTB1eOsLgybMs)#a=3P$wAD08Nk{RgxRJT zuL=^F3=m9fFj;t7TgF%`&;^PqZWBX;VvAIEJH*0@daYD}DSVurQ&n z@C;IB1=tU{VvMfHM?uFFy&C)s%HFX!o7vF)ezt}5;7c3tNK+GXVW!P@n+bOl*oG3G z0;!vvX&9AlT3YNU7FfqF1x3H!LJaCo)3|nU^mj_A6@*+?#roIlf*3@HqWcg4XNapP zO3KV8SnbA+J;nQ2G%~V4C0~M01|00n~A!*C!h@ zS^*3cicT?+X)da>Rh%9*tbgylbhycA78Iz_DBaFdhy~3N5isG@AF+sqi7xpQu`|W; zEaU?hZbS`)CWkU5Spy7GgD|CbJxJIF|5_w?SpMUI>uzdX4a{Nsi%Gvl`Ke(fTzc)Fd&*RH>=o_eb+17EL26`{@&B|2%Y>qbfQJksLUd=G1-2{USNgS_|Ta;o!+ zv0oqwDrgfH6r@`2LhR-iADd`iECXCrydphFB4NJyW!6I#M>cp5ETZ<_g4<1l-eQV2 z3#{@gN%iE%ODZ^UJe zVk-KYG2#nGG*hS!MhTY@ro*AM%}76(nTPc}FX3iDgp8~yAPeT2Iv!r2VlD=Th8jV9 zMim7~cn|>Te_$*bLM>5kWn=rFz|} zNO!c4e2}JKK;`OcwAe67bsB&3Fxcyg@$qf)ChKag#v<-v&n>EBygxt1Ek6e{s zKxkF5!QWxRA7}X-eupihrWq~5UvFLwA?<#|_x5tst@L1ofNqEB?d$B6EPhYwo&q@7MADKJ>1CcWbm+ZtL;Im&1fHPs|$mD=&;H7E^GeVjyT z{!73llG>_*v^xz@ulBlU@%Mn-9*uT*$ohrzTlkY{7YFj&Vm~n!0*X2@)vUjPvUShq z*;F}04jtGsNq4H;)1o~%PRW`h5EgZ@NoBs?ZztWqiQ4XhQH_)7QEa8?H2%b)c)!K) z${~cO*O1BrxJOiZ^{&@-+V>7&t-7?V55l&|)H7OPWerygxMD!G4Zw}2z3%T-cs1nu z^pyG2b}%YXNt$sKa7fMy)0?2vW9T8#iczfq7eYv80}-Xf7&p>t93gg>N&Q(;p)SAs zoQcTSL`D&w8yHsMJGK=Z%l?)or&gMZIwCqcui|U`ZISI?IaOaoYyrOvV1n1%*+Qh( z7<}TOIYy0l47F;YJEu;6N+j4O4w!n1#861>J$EeLp$vva)`xSo2eRUBI1!sze&68N z74HyZR(yrSR{AVo(Ui1k|D}f;XqzSFt^X;JM^6$qc){Xq8D(dZ{t02H7c_FvvOOwu 
zIxORZ(Ygq!Vhdv@Uhz(|h`dxEvQiZOeNq>)usmR5roQMT%_>|{4R~+I6=ePCH1r)g z!HPk;jz^-<*UMuatQ`I9-tIXkqdLYL8-_CAdA#(WK4Sxyd)FsAS?QkRf$JYU$ddZi z=jah;l2j{@$01uQ0CFjs7n;6Tpj12LA$TQ$<}gmNx>38jDk#vs^;V$N7@|YTVEr!6 zZ}kxXa^uMtQBfd|ZP^mw`hpWfVAL34@&LM>M6HOqgt?9JC%~Tf4;T1N8=vYC4Y4xU zU5xN>tlOyCJ|npxDmG3#2b_F?JWKn$zTNt!=zdOr_?RA!@z_Qw;EX#ngs!B?jJk*UeBm5zBkm*g)tDSLqd<*a;4VEO4q+fyXZ zGyf+GAL=7p_4d-8Z?6Q_Z+NIYE@=OP*L^6#NTA6#Q7f<(3SR3L!*?VmN_VMqcH8Y$ zhk<&m^b;5r8EiBXRsx6WMvfp*tlS6wtFlyZekaPC2##a=glK=#;=DW0$C;Ho7I}Qa zsWX{8kLEn%YuuG@?LJO$`Dg>BDj^}kwx5;$MiKc5NGt>M;&URO?iO2)Cz}V=F#ru} zgClqCPKniYgx_u#Fc5iS+x`@$ds9hTmJUBA*oy(iuK+Cq0+`m>zP_va2yPEqO}t`I zpd{n>$4(kwca2xHdToG#QKa+Wne{j!`v7SAEnWDnKc!a*m#Ytq#5}1LkcgrR9Cll% zU|qvAN*l8&@7jv{tM}WaFj8NO5nX_;%_dgG%9W@oHG-4Tv(4Ufj)TnFL_>3r2iBAy z1(27GnJ(~LpnR4nV!s9&Ky=B=ri}2?r`6Py*H3g@Vp|Gq@e()w^Lrk_~iVqb{T@E`s>17>kf zqrU^!84s(1h$V9HMs5cqS7djUj}B zEMqzF$s{751vnfX#*2mD)Qbv_6j8V9UGTrf#A!yul|GUg=Jmz!Hzmo?rRQVJZIf(M z7cGp}!xfmWR4eS3OGaLzMo##iDl9>P-(iAd9IsH%I3I^ZEF7g%TKq_r%rdiBaHQdn zZe#Nh?k1w)vcEtt$CMhh5dEDUAGXK&GYoa7g9yj5Ok;`@Pa3cY2kV{wGQPERA&Qa0 z2Vgt^N2Z^Pc$C$`5T)Ip7PZ?zWcFJ2|Qs8{DaS3koK0@()>Q?45QP5_RzdAX46#&l4RA1g!LsSwmy)|gC4z4TX1^mj3U z`QvaBI$92At37p88%-Wd*|Y1UR*0VX10|Wx2mP#Vz_G7(nNd_oIyso8fqh9SJUvLi zU|H@QXL;K{Ixk&7m*^s18pgZUb7XJ^9sxXnIksdocXb70_BBbhXIyJdRpb&A{CSKKg%rAPC+g3O7AW2iIw zWD&SHK}u}V6PWX%?D+_r$dT6ZGZT8icaZU|6zGeB)8<%5tZq`@ft2crFkF)ueE?>E z(2tH-Nf+uCYmIl;=;j1Nd;A7E?rR4Ezr6=QRw~F{!-r|ywPl^}Up&=`+3|I@5O5)1 zD-l!muav{LMYGB9Q&Nz5Ci*_~Jnz}STf6wi6x#09n9`8_6gf=%iNB2aea0HaoJI27 zLjJb^Z9c4OfAYbJcds+BzC7Z-VmK4y%t704ED0;-3{dvIV~@aJ4AvFsq1{&FoV3-` zZnlm+e3`b#CiqLw-nbt=q!=U#E@RVr`N|Xz>plL)Yocr9FK(p*1V}>ipq;^S-TrH+ z$Q7Yh@TiFE=2QvpY=;#2dHryp{*D;NYa)kAXjR7`DL7FQRv#qB`D{8+lP(8fsC^;e zjB|@gA&<|1CiipsC(Sj^7AZTd&zKDL#y~^SJ5Pm8Z;stGRJv6v{)M~x4u1Ac%?%6= ztw*=|mcgV*@rz8lY{L@X3Uu8l$Gg0n3l($j@##JcWs;giuDxhO8Q91Myp2>4dZZzm zPvt60%cXlASM_$Od%P^-Go${~qwn|&YQoxzE2}xFoA!(!np70SLM2uo=|P|Qpm!wN ze=Xv5uo&#b*|GRy#wAz!dYbvGi1U%ZKy|%T=Ff%FT#|-t!=99OD`;0~S8uDcEm{%c 
znZZnB(21S~Sj0hoAH1gNBgXk8CQvi201&pDavV#AyOtedgRQBZf&1R#_y#|#;lnre zx8#5xoR3T`|Dq+Ci!E-7h{Mp9|%21#iF0RgE+Dc#b|;>-tifBSvi`}=u*oN@MGICQh%dFGt= zy!*bcwaIk{Q2(kI^eoM*+$Qo6a5Zom`nU$wq2{lj*R;lCQcUpI!@6mnHi!XLb^_6( zPPCS!W=J!U0E9mv8Vo|07kLVgripeUK1S)gekdz(r2k^#a`@wTcBbZNY+Rqh*|c-1 zzAk1nHZ_}{f^qVaDg3If5*~;!CM$+G71B;psRw!Z^KIJ;ZG|ycdTihknAf?Kgfo*T z%hN5swIW`eHya2lf`xf4A$;4ohH}e|H1W*>H{+{WoJT{7APPK^(hYfJo=oBhh>;Doppj0H3Z~gCRrAL}TTtwQ>B3jr zm5J3e#z@|Sm|%t@llL-lf&LXunyMgedBX_a>c$j#qs2#}w3R;`kdi@~3JCn@|+{tM-h!al0xOi4)b7L#RP1Y;t3%$zk` zTG&duzByVYnLWt2E77dFcb&#gs%*AjO3QKNBEw8J+ox1@6qE8`pqhr0GQIGPt3XkX zUFGgt3pV+Dk3%NqXVCaqeA^GdcB1t6!XR&z*XooM|(SIXXC)=X$pW zHORQ$3buh)4?37vLbY9aib-l@r+lT1yyk%Ro$z3dCkXD-gQW2cF->3P54#=acv56> za?S|Q#D8h?M_yJv*JG1N&N;~p2Mbx}(9iGpoenA9R#7|uB{El7wO+TgIIm{f>$J&G zA^eETcm%=bl-|GPdo^j9uXaz~I{4Y@HBJQAhh3mVf#7*yp_lgdy*ErH@)?fKeZASa zkYt5aBZEc=YCj9X2F@X8#+JTWkNP*M`<}EBpwxVAreE?d%4nbMJG$~zQx}2xCRr$m zxCLCgCFc`y%S`D&24gx@olT_sRdFTWZYM+A?ZW3n|>`p&dDABFKaC>O{;5=fE;t37S&QzSqh#h-<* z9%rbtspbIO?_7;L#!HzCNyQ2vTMwo+s(zKGJ4BCngOAB`H8s3)ZH~l0&;WQZ*BB5r zw{(*9o9H5JGyHg4g!x7*jA$I9Zr<9)ReA*q(nXc$vD^ydy(eK^Y|+xaXFfrb)$C6F z_-Z|q7u^U7$&VSt*0lRP*yls3r|o+K*rY$lH?rgpX70Jyu3(*|naCH+V0o%TqLlcc zcCeY5WS3`AjB1vsKYOScp&}|zEy~^wl>b-#2m1iot~zHLstZcRY*L6VWoX5z62K|b z`Lkm9k<9|A!^#C9e&765cT8|NGduGn)argJiE-5%z`!q8aCOqaZQ!9m-4A=jC!gp; z)#AG=b>&9-Nt!rgr?Q09TM#IAb7f#m`ydWnt zuzKkZng^!K4kz4k6Cc_SxEgiun;l&!tt6|4*$aF=)=*unrr-12QepC*_C}U{IzpR- zD~S~3r5m0vMeV1f7LJqyfc8sG)Kfh@+LmaOTXE4AXb8oKiR>p#UBE5tqxUVUu(Eq3 zAEyzh7k=d+My7Yz+l;zDH%kn7oMble7}50wkcB=q-!^l9wDK);j_xDz;YsQM?(OLt z=1W9LOe&mfQZ1zluv1KDTeP*XvWgX~@F|NDTQB~BGwm&G9RALg@O z`^-$(U7?J_EQ-2%p~ryg>NdP1{5JVC(SwJZ{+2>+4d#?=nlAOd>~>NlPM|B+sg`&W z6jSheH4BU+nEcv}W{tFPaOL;+9s%9q>Sb24u)RMeH8{low8;vuqH;dfx`?3f5RHt9sv^lRN*o@#;v;07F z&!KUcG}y7`2GytoI)!hGiK~kHeS9E*mC17Uu43*8Xl6NDFHE3r(G(z`?o!SL7(F}s zZ^{>QD;>4{!}=I^`y1a~wRK!;B5N4m*!A4fEpIoUU!CT2encF^7_VDtw^d5qaxQ>n 
zw#478$65@W#%++3$fOVE&)PSUQoTf1=+l?UE|X<)(kDwnpLTM@#-5lYYR|oGeH0CLMu*L$^Y!S3H61VFx#U*WzilU{Flm zJeC&-XEmSArIqG9ZQx*v8^DI8#aua>b2Gjw3~z*_#Aa3gD(GC3YWxA3>z@W*W%gjT ziXUJM9}Sw05b1Zjbp~$KLEZz3mRt{UdTyaRCHTG_s^93$Ov0VW#`E(j%Cxq7Btc!E z1YOn%{yb>!zBiTH zON3epyNU?D#3*hsSwGYkVo#bdi7S~cYk<&b>7?LkKX)sLTb~%0C42`AK&+tPM3d&0 z5*!6BMqP}G2}_n8ib{#jz|emWW3Iw8v=yfp-k(dZgYc1t&3V!l;_>v?@_BacLn=L3 zG|UlI+i3IZoV|h#Z5uJt(YJU=HPZE)7_h>Rcyq86OU8Tqf32SyV|kDCe;k9pVY4>wH!y@$7SSY2~XkJ1+pZT`1svq zl_3encxHAu+a`;crgt+byB;NGI_v<{AXAtK#l0r|g-%cDeN zI!Df=dyB!hq>0vsbwxZ3C@L402bTo|*Gw0202-7H)Ar4L%`wWvI~@6mP9bJ38%*3( zqoq*AyPB?0g+cl<&)rBKzuo$U`)>>V$7qgsTgH<69oT6B97A{QbCpe1&)Y7~Hs_QS zUl>6c7GP?4Opq3&4;>f>L80ncV?_%EwY}>X)zQ{RT3=ogn!ekTB=~f!-KU7_wf&>G z%Li?EFMlZpr%>GI%QlwHw7Z^d2FyabI890^bM<`q%s zVLB?k#r(Rmyw!F@+8{qV%agec*e{KYL&h2N;xiR3| z)XyXjQSY=Z0#d7e*68J1B?B8ZZj~#Y1{is^%yEF^LBkPuy%f?qMJ6`q4dbDmT zJ&Gf_nZ?g6tAtb|ZqrZaXLc$|W|C-nLfWPi9w~9su>FME2fx2Rg}E||d2t(zHboQn zTdjZT1M(l_a_=h)G}6z7>o`BV;KqoLMS{-@9FgbfQ!UnDG-0w3>$U#Tf_h!1YF7Ou z^^0$AWBuL4xXRmDmr%V6kH6syo}0AIXK6W|s5K5|eUAsVtv=k`-O59a9^8pf%A*b6 zf|0(E@y3q5ET>im@<%wLhu;aEoapbz<{Lv3=qK~bN1;IkCLoEmY}xvXfJL+G&D=Q& zu>U|&aS_i?N?eVC`ct)tC*1m&gEcztzwz(kI?-Ab9e9?q70+H~y(`?TjpY~*&JkiplkDFqKhqK?u({?dIG+14;{H?{zv{uo^`t^&v_C-+_aYv&`?}c%aan( zp8U*}E6UV51zLRILfEow+-YSwS;)a>ps8MhDV2(&%MumJ0r=+3n)4u6RB_;_TA=8Y zIsX}mRhs}g*?Yv)hcCv0r@shrG<(^FoSLpDx}4HhSBqWA~L zSIRpG3{pRAjPm$0U*M&=_5})Wg*TsYxSUcDC!e>$! 
z@O!JKs+m~bjS@cjm$WK)-4?Hv6732=#4iUbc zvD5d0gW6rKIE?krsQ0bHQ0JbkJM7P=J84h0UQ5Xi!pcl}*=Bc!{J`*Q-U7b)Bq&pC ztr$@Yy~qcx%~gK+AjR>tmL-q&(R>)!bs@_mV($LbcdQb=rS~7qw}vq~)n07)U?|#O zdey6BNML~}277j}bB75T83BrHRgae^9Y7JW-KmIP6~Bh4Jk@b9UDL^6`rE-2CTwm>0ax0a>Kyow>nS) zjYE{p??uE7T>LngE!F@TY`Sy8U*M!skWuoXT!qu#gg^WC4n>LHeN~b7c;~45mkQU7 zp@9A=fL9))K06`^cm4o6G7}b$Ccr%|u}pAYZyoD@QMh~G#)D;og?WluXW^Fz z$jBWNDK+3GA;mFj)D~Yse-LOU(95O*vwL?9Vj!EA0h&mau>0+LqM=Djv~8Gi;w1^h zRwE5nw}ABsU9-(xF}|?tY&n<;#En7fU3xaq(H7O1aeaE9mTfhf-69dDGk82FC5o2| zy0JM>O9d^|;Ju6&?_}#%@&XAe6nT$d^8nP-ceXZ~&5XM%9~KvYtjgxjhvm{?qs4aQ zp$Gurd0>d2e0Zb6jajlSZV}|^g&y^##j@p)>3ARr$ zR|1yo15lL*0Yv>n9N+6dQcG#QHlh&s9uIRitnu478o4e8P&LRRv4=lAH(i=&((E^9 znhZm(LSyW~BKsMPG7ihj03~O;U+uf|;EyH$*MUYN!=RTOqze(4OQQzi{1n)h$++MU z;KMvECE}o6e2CLe$;h?`^zZtb!xywmU3}&n(0Y``bil6|W}tJbitQ$XJ3m?5h?F&~S-Tb~EbGllpo+$0zaaMNBO`8Ey;Q(gp22swjA36@ zRbg{JbMdryD$G6?qypuS zFOz3D*u)B|VfkX5aDO6Fn7CiMeRhqNi;lj*S5e2M6vKE%Qsz>wXLSdQ`o|SJS+#m+ z-C1Sy5_wMREgIiIWpOv_s++fDZ@`B&tM+M**Jv7+#dMd-%C~ewA@T7I$>5juIGQlh z?`5nDptTgxng#9CEPR4J`P7IV(kcR#6B2(vqRgg@4orS47JoG4H!^?EY_;I)#5+TZ@6I*G6;H`YdUp7wk0iX00Q&_P?Tp+D=tW_mHg>4a?Z<>B|T8N#J z0N%;Wo;h1jMDm@lJkUsp87eeL|p3Vl25WcSsD!L{|QNVIe!{}IG}@U;DxUy_y)*y`y({L z1OIhJs@6rz+sGEWD4zf}kktN{ej(Jh* z7uPQxV{r)}V{vYa)G6xnTB36qwt8zaw>#H&9VHa&( zbDF3=_;h z@6Io4^eA>ElV7%C=yK{ZSsHRxMsG8T2N#l^MfK~tcjrWVB5GwerG{Hb=4A|LNz#DR zpsfGHXo;vk70lPz$W>k{pN`btrwX@yy~L>(a1p(I3eNFxc%W2cb7Vp8R3W|#DF<)u zs-W$UD_>(P426!wzs!sSNT-Xa;0;{GySCUh(q+iW<{Cn?9}ri;aYxjm1*hqP>;C3^ zjVRb^H_+XOPuR&G=G4Daz~)oLAByHV{!sxM!iK*0q~iB7{ir>5EoZ8Y*JqP~qN4-b zmnZ(iEp&j*v_*U>v#BjSPP8HgxINDV3N~`e`3^1@AOc+Oi7_)^maMSsGc00R(f*g; zqA@bslsbc&RC{uwC`+n~JKaZYdYjjTmh0<3tEm$)L1B&P(^$pxW4gqQy9Qv=(S@e` z)x$}4bENcJTO9Q`&Q_dFB-_0uA!FSurpU$;#Kc6gPZ5A;d9_s>OLY!2x8U_2-+r96 zc`eFcjqGQH#Q;I!*xu1apel2rH>}8io>k80l>8OFRSd!`EzULXwYJ~`lz^rB*>Has z%l4dF&~M;aDRO;dvjt#E?{D#GSG_A#?ZeYuS& 
z&j?%9=PfQ%Wp@@ME{K+k%vJC^9W0j}kj;)Pab&yL?A3q$1`}Or9Cnz6ebfhSS}RSqXnj)xvm5~BAU(H%*uYtQ4ml+jL^Jq@_3jLD|EZHKhm8`QME3ajx-=L+w9j&Nk~InskPQN-bksqg{72P> z!tVAbx3f6z5L!q~Ofcr@sM{KdU?>jq*;{Phq=*&1RdfnQeR&xP6t>Ll^fEbjXt|C9{6UfT2XF^xmEOfOwoZ-VpVfN zinx}pLCnEtfNSD5Bw>o`N5ises1(hp!*>|k!nerZ zbQlmB$z8D;@>j8mLhl#*v00mVsd)`76-B;12PhEdyf0GW1)_=q#|OP#lK1^BrGlxK zr+e5!(YT5q*Qa(m_{J&6gla%`(H)2S#q}2;P0~<%n7Dk{=~}w^8Vp=98o${!Mp|79 zU3~inPT?-0{(~^m!hu1f;z;U4;3PlDYTC5UPPK+r$Hn^o-syYpjW~55do}fRXV}k^ zW><-5*!1PpO>EiYL|HpBX*f2h)D>E?d;Se+u3L>0VCiMl`mp@hZog)K<*?PKUd?W| zfyd70wa0;`3l_SfU^2}a^j)LpX%sCj;rf%a$YoDHsq%u)$}-3;YB*)fS~uUrm@x$a z<=4QuLL0;Vzp#XWEx&k$UPOtL%X((rpe~cbqlseWpxla`pwG70Wh7EmB$7wEy~om@ z93awx{Jk2F!f*Ns2(%FA6PXCuW<7p!Q#d>{D1AG+JgY~#H~tj8;=0xCZo7KzI~M*~ z+Ddf7JC^YbWnpLrj2RXS@rZr>`l7p6EHxpil|zBZ0l&+7flzDhtw5z;3^P`jsm#(e zUw=u{0WW5P%r{uB85D@%?4h@8Pk(Rp07EW$B=Oj7I}7>jxFKRxO*exZAp{dK@yg7%!ggaM+PbK)R8m}e6Roj ze!jp?%Y2RTH;D`coPT!8-!K7%*~17phtGo|-b&t(dRYjWLnt5oEsOli4WntT3L8Ye z^?1$T`T%@Nsz&|^qvSLZ3gE?2JKuI{y;BE|O(2-jv$t51tTDvUM0xxNp$NnW@cQ~1 zvgs2%Di7a;SA;}+MQdrY>Qh)%EUhHCP+MdnB!OXn1FUX8z23wj1Sj2}G&asL6ItQK zeLnC$O)jj_lF_UZ;PbGTezv99Lr+bn4ExHa5}_WoDIAVWYkSD@QpH0+qeOv*aeray zkGM&?H2&7lkK{MWx!Yp7l43?Qp${iQjj;0FK+R8V)p-rnd?8O33in?RT+DS1 zJL23~AChE3eU3T&K1_euZ9*^Y3?#!IaGR_LLfj^%SAbBgDxofGBG&nD>CVR^%R@{jQ63L+|tUro`~*<*mOgrO!a7?F>FH0<+lw{_l> zEyH@=FFMYjtkX}vpx5<_d?$G2y7f@_+{-}7&*M)S)j_lpFWg{J{E zm*+Z|!-1z{PoAosM6K)Ay1H0=V6Y4~k6Whqm0`eGAXj9`^6)#gcbtD? 
z?D6V_56#!6Pe?!pPO$fXUV+CinI+jYpXplMjWGbfs-IO7K#%{UVrUpawk!=_V^+&P zzXG_3Bq_k+!#^lw$3xg9yO^C?8tq8%@Z>n#vM`?ifJJ_;HU?TGIzpN$AduqNb@jhg zE4YF!jh}Af=U(Xay>(wD0HS!&O8-~g)UaxDZr&Jb&3OtawZIz`ulH}M`3@`C{TQRt z4obTUe!p!=m{NGm?r<#%rLl`oW_h8eWdEso!2h5zLnJL{aj(#*2gE&ZYAD#_Yh_A~ zD7*MeV+;MMv4;SZTJ-jv0Ue?dFoT_wZ@XwR{`T#Xt-S!zf%b;ksGt=%2NmpqzQ(i>8b)({x!*JJ|RB*C{6{)w9e z0LfeZ6mjbuXCB^hhHLPWXX>|pi5SkG>IAeL5cWP6HtF{@4;ykE`1(#JJAU<%p-zoF zpVPI%HoH)-dqgOxK!oq_Vmw6Xp2vnt`#Ux{w7WQ9cZuj`S;wej^#M(Z%J%IhpTqCT zz|JTAVdrN??Z*jvjpjojkE?v6Q&1ZoJ{0Nnj!pGjDN+BGfcCQanOrvw3BgvGcfPfz z2@VV_b8&BM>!PAC`G1FBhoo8TM4I3mChmFcPPDwW0IWR0Nz?Y1DslM2|BL19gm{JW zy8eZqknIo-rjBDu;0Yhd`!nW@U|8L+Bac;bFY5`T<}IGJGSK~&|JIaskivOwJzd*8 zJkfD!TtJ*?kqB=hFF8E@qUx_xof7}!R6he$vHId%z;dxV|AF@! z`0&?MyzF=jfbYp$TS`8iml^-~=1$`{Kzj86^VRup^|f9w?H(qjYe5hGi(%wM7i0Mb z`-T*ey0ZV*WB);zK10Pllze1x-Y7(L!gDZ31;|g}PlsCpF`dzdxxW(THX23^fc!yb zj)i3^&LPXAi|s|`-x)`KZ!!s;DVsC>uV=!49q#&{PTUMYzB0Vl9>q>r`+C^qgcKv9 z4&4eMV~sWtu}Z;(%k+f5DNa`ZiQ?q<0~)?YhK1WOU!`Zh4TjEk`wBlPy2UhnvU^>mriWSn)aOMUx_|Pl09fJf zEJKms{{GJm6tSOo8{8azoLIfNlF##0@E0SBNv)C3f2^%{rv(tMq>V#_E2#m$><0BJYZ~&|-#A=O92P;>fn44A zd~i;=;GF7D&dGZ<^=YEevB9e8tSH*?g!N>|UvKKKTb;qpAkO7abpVmCjQ%8FnZ^ff z5=$?DFNXc6@}!i##*$bFFc8#YS$$PX6SW0zRF^xWO}6ni%O~`|1L70z*@L>oIH!!Q z?Kc}c-dnf!@&3~JKebo-jL@Z6*VrYM=O(pc+9qEjPTX|rKiu?&K`t)mQ|{{bhAp-) z`*XE_dU+xiP#${TF4P;4;t+?PNkC%8z6IAFh%GJ$Y?#*ew^%ryu%rBssGl&g%$NOQ zVrlz#trZ}A(|h~dxFv6|*J0&_euau}M4u12a(AOns8UR7cX$4UDh1M0Zot#=AZasA z>p!p6S(HVve64)*UDiqHsLnz6D!Ka))epFnirNDpR*COfIhMQ&Vn5~Grj9XsFI7&k z_5Lr?6zqQ_P5GYC-}U$W$JijSyx>}Q2d%^21t_+RZGIMf0L`Y|0B5y zWCz}wyL;}3zdJtd{R2?qq(k&w_euU;q5ZV&Ycg|STv-dTEx>FGnRd%nGsIzWhH#V?3F*gvk{ z8RoqT2Q(>eV41@T{n?sCvDUqTfXgk@?8KjdK<_W&JIxHKW7QrsQRQ&A$?v_FBKiGX z*OTY!2-xeb;^+@u{^>)3&Gq`F-}L@e1{UKYSrU zyjtdeSu)6E9L(neupS-mJOWA0oDtwHj@b1uBpT%EXa4j{f0D2q{UZqr2zU>G0E2*+ z#&y18&^gvDysu3+_1{+sNLvox6#%q&FB&g14<$9Q%*BuBR#3h$-muP88mmCJGW`rZ z3Xg9#{0kY&Z}*>w&NQK#7I?(ysLq7yVKwjH*5l9Jr4)4)lB3avf_p#!b`o|dZwovf 
z#?2#ciAYA6^$UifyTYP=Dx+{lJ1le5jpL~x%8=|`ayCJFyJ$l{mA7RH(o=doK};HV6Fk*k}%#~EbiK`Lkc}i0F4cr zJz`$#cut0MyltI}fT$v24vQdwK4Bf$rh|NdslV7@l7F(n2v%D$47(N~cL+j3r|)Dk8K22&Y#mmuF;6Tf z0|1REvs&K8_Fw!kCjnRJ>0I)?6MC4S6Lw8o|I2{V6xh*!2tumf7@9Y7)-~70KIWZo zsa<^GPk*s2p5p^4s|wL)h9fmaevB#?fZ8|@m5uY2|BJUJ>)H2po|s?6E%KOEz};d- zw<5ODWX-4g`m=&j03h2inhLOH31sb}599)joBY3{zbYPwxbWOf@I$|Tj{9fY2e|B{ zGya_K8Xs$e7bxRs<5w85b8m{URyKiOvlsMDF7y@Uciw$k`z%q^TxoSUQR^-;nNlqc z0#K6?p#J3@nTZ1T66^wvzSgz>&NlpU3jT(GzxHbv&1er+(fY$u*tgQKwB06FMUYtm zsT{G??4!v6q}dJnV37tH{)JVw!M|?uq=G6a^9|rVI7l zFk6hg%*D{%y%DJFjgqE!NvE3g|Vx-juLhtF1 zGoA5*N=45erokpk!(jW(9jKn=#?*BHxNP;Iy65={_J;eosIlQHJH}#dFyIipjRV}Y z$NtCk&qT19XvbxGS&AFdKgWrFW`V%ggcrg3UPQdnID)bxZ zh@kdANw?+gDMvDZ@X#Vor0!GKx`})(Lorcuq_3!f3#ImF3c(HS4Uh4oMTr8c_qXYO zr6LzM|E^xaTa^6xjE`wZU$pGXg?_2M%I|&nhZq%ZP7T@W`@brZGd!PZ_~?ZKlGs__4$d7(oUQ{Y}n%pkuzX^!tc zj@j5h6h}`P?s2~BrT?X9Y5zz-IDg@UQXaM0gA!!A#Bm@48F39!;qclt?9QX&c#gY# zrk!zv6(n~~SYcY1Q?`LD0yKqu)G?;!etw>-sq5;jzRo4UtuZM$%yiJG&xQv`!~cu+ z<)<>fdWnRNOpl9wcPfgsvHyKQ2m?Y}M4p5WaJPbd=s(El3nxQRD?55tG)5VwOs4Zx z{OOp(ED2Z8O#=I6fpxR?*el58#st5|J#4w1$pN^cLH3ExjUboZ%(Z4(^^0a)7{|~w zo|}DlG4Hb{)$)Jvzf4~Iq>t&cb2Sy+4o(xVNEv7NeU&R&)B=cHW)! 
zG$GPDGzoL*o`h|ju*IMylm!c5VjK9b`~Y(Fe&TA>ajfD5!$E#&aTs7rzY1kT5*%jU z7yyvBI(KZ3f!FKg<~Nal@xDNYbrcxZ>ym&o{#)Nz{6(q9*Z_#g5J|HSrg>xQsmBzm*^{4SW)dVzcf z=$XGkOopY_{Lx_IWCe(DPfAM567G$&fPVeFSK|pk zO-vJiDf2G}OX5!t8sutz639TgI0G;QQMWaqPm`@##Gp}Zrep>InKp|owR7s4pwUqE z&hwHH8sxV_$k$Fv51yCqoxkD96vP0&0sZadkGbmrFzl~?T}tvWn^64yy-W9eE`r&> zU!R0NF5vV3fB66MhDBfE8?{kc)vn#rt{cZJk8hXSL|HpeQW)w+vFW0RX-|$Z^Kncu zobvcE@pvO211fe4mY46HKMhXS=w39xc#16*7P;+_>G4t`wsrNEnswGC5vUKQj0if~ zDP!noM-0+1sKXZ2$S`dqM^CKLH83PN$0EmZ!;%Geh(+Z%4gP?yg>8d9p$!@YQ61<&AW@5A(w#ccVm%Tx4fs* zjn?p+iI<29?1{8;9L(_L8)gq?kfY1Ad_S2%G0dhWy0gnKxpyNF(I4BpNgTJC7A^S| zEy2i%tcw>v$MpdJm2{C<@O1TXwCD7!Ukv@bsj)ygbW0D?OCKFCJ6yZem^cdSpfg#H zR*&q^DV0X}6o{*QF&r8D1lberO z&^@kvmax$t@(4XWokT6P=Fx0_*On3`oFP9=(^?_yT-fiO8CiagV?joQr7`j?p6mfW z;s(bh>`1$wk3UutJ$W=K_Xyue9vR8iNjeo3S=m(AXyBATFd5){G-)nN?Jg4KomcA4S z@s20Mga$8`CD}@EgiK4J+0@vXx!p$|aHIB6#EoSog4~#{Z5}he?urf6A+@DH(54TF z3JThq-tr3l>A88<;*y;mw?+@d_AjNiUevh`*;X#9Q=jl3ryqj$pubN{eng0>=0 z8$&PYi%XObGnp+jqnbd!G;ERBK`xT0i`b*E!lol0!)Oyq;j>&%EbN=ofg0$3WRdN+ za9f?C8Np_6^m>hlKwU8fk(Q0_*)HH5rFssd^E)E#7`9Q?q3YpDLEvo2s_73R(^z$C z=|kX&{7WNiWy_94sG%Q9&ljWYC%Q@*Pgd(PGA=nU-)0)-4kNXf$Lfi_cOki-E$yOu zIQ{6fO_OvS`5B6bSA_zK>n=(jSv_dFTyEc(bf$IN9!IW$dzlfLR4%$4K^0yJ0g!tm?y@#$D(?$f?}f(3wqFMg>SUfQ?FU{+-&c{ONouwW5l%7 z;M{G?J**<@gf3E)lRq5cu@EGs^7%+Yd_fEbZNayE0JMkV*5*zp=B2lt|c}}2jvb1u)Rjy zRVFpm{wx2*sPfGrlY0b^ml5NWbv2a7XSt!=WU?CZk|+K<*OL*R!Y8uBEq$kQOZ{d- zim4{rX2y7enZ5TKb80R90oP?b+0kf+Aa2{^gOokCpwdNoXY1if1_L+V`|&!3yCfEg zDUNA-^+&9%1m?Xk$E%;C5Jb_)`678o6*)Sffik6Ih1xv`c)NKmEPCsX;UdA)I5a?(mRq@wOSdt@{4il z$$(4;9>Xf={FWypDLRb9u6GxVZU?u=PA>VfU@Erc9UZ%D{9+Z+VGdS9BRoZ42l{9c zHL~u;*;TnE;?|@tXijTEnM~6d_L1?P%`}8$S&n4EdXJ<|3ZIvcRF1A3!39@_>pkZ@ zlm=g3<1N@WHCw5o+VcWS#H_HnuGicZ$XXb@GXCC7i7hNRj7XG1B85r4{fmc+kb)cj4);qpXU z)Pqpb(PgE&Xvr~wl#vivVc;Q)>t>JfTxxo!P*TReFCCJ`k>=2!H|e1i-DWfc6&-C7)6RM%orRH-!iA%ZD=DV>Cz6Irf|m4m3lgzKXl#h)&d7*?jj|WL=6DVuXq%tW7%TWX9~mA9eBQryQA-FW<<` z38HN;&|JLzAPShy__Q{dXnq|}@CX0SinD=E>fCJ^;=}UZZoS*=Tw-Yit}BaY!UtTI 
zX|*sc9(M;tB-&y;o%D1HBN5@X1V3MY6Kk3Y2v$cP(2t2#4gu>blrV=%`aK1p2&1v! zHYvgcEo;1n;iiW+&{+(Pvde3uMPQTpfoo}N7RM=7y7F`TOz$rsuGI(4vPb`z*zQOo zDO`xD(x6ImFzsOPz+nUWh<>lmg`0^`^AN98QCYaxIb=K5e>v={TJNW47X1}?-8X^9 zW_pXRKh&qD=XKMFZ*BJ3F(yN?QJu?(1G1~vd(VlkSfQ=Y3pp*JAJrwB{G5 zPw42lMDp^QQZ-m6(&x;jao@JyndjTKy`!+%r$Z;7Ud|__J3d78lFe!;^l5L_y<-YB zZ-zzrvYhGnPNcQ&;I)H#`inak7<&VFFUR4`%iow#0!NV&1gm8Uxb{WIkeJaVfTjK; z)u8TLM3mCm>b>rgIdihtaE`7*MPdepH&10^%%1fIC`eae*w*)9#P$CxCBCtGBj+HZ>NGb$0a5*Fzc*C=CLo&2U-e~A$!`o{+PFGS)_tt;#FYWMrk5DBa{1zL@FwKV)t4<2IBGrAG>%E-j zLW1g}@@V#qvmSTd{WkX&FRs95+gzo5UNe%CG;1CaK94$5o`q$J^oCM_YarW>|dY9Q;?>@|Oe zB`=7_vOc==g=KzTYO4s)9Fjru4cq=fKiD+4ku6V(%)r&MK_%y=8BZ`QOCng+dktaJ zHbWZBX|w;zkY zr=@k6+L_zcov`a^XHvsC*tHHUxY0`}{ONw1V1o3>BlxU={z!bY{beNlOIIObd6=+9 zobND=knE&Mk}=6A`aj^Tz4JAAuHDm?-Z@XK z@<|7qsuE7e@OR=#kGnbToa5ANe4kt_i^~O^qA{QQQoEfcBf7V!W)wEd1A1}{_?q9# zs#T=$Ng+y792=>KSk1N2DT`04%_!`#?E}Sg#|zT|%E8g?RP}cbH0el3(XyO2=gY z&bFPa;@J>02bG!({f%9Uj4UdT5MGCe7Y7JoPZLHe^4W>t2wJm8R2S?;1CCg9nYl%t zxAs^iKUjQuBkdDiF4eX0o2~Z66s4&rDk_7b`36P0<+4fx8W_&qc~9qg=_~-UXvE4FR3FKSAzfnQv>yQ< z0bz!%Tykl^eOK^t!=IlhVN9F1q9=0w>hP7B0er&q5-m+{25WM;qR7JXl^dSqZ$Z|F z!BgHdHOIT3M8mY9k;&ZrBrD|e3Ehzl*@=l|*!2XFS*zivFh0qKX| m-vghb`hVkxgRbu$pJr7ROvQWNfjkBNgGnen$h~La{(k@yy%L51 literal 22446 zcmeFZ>0eUo|NmRNT~=1^cB!SQduL_kK$b&^!)}+Fnws;BrKyPmrGf(@pPi*UO|8r! 
zO(_*;#R*IWtW3?EMMOcMoCR@0P(k$2=X-9PJLmiX=li?C<63LoSnFDAab4Hz{dzy& z>)9PU%fkni4({5u>#)_Wn~uA7?ZNNbwcBU^KIxJ1L2JIW*&Xg^`S-5szEktkonM2l z+g{(bt2Rw`>)u}J{(*SFQaJnbx7*6`%<`^XZa1uMUU!ZH@tD!ii#%dzVh#WP z=i^r@$}3{tu=XLBqdI7+9SU_#if0h_|M~UAlP52pyG!JL|Hc2Wt=;?fJwyN2QuE}u zOMkrhYrk^Gt%uXPWY7n1z$Co5ghmhdRliISZ}ll_aI5A@7diR!oT!3UOnLgz6^r{{ zkP~nD#Mx-J;b}#SK56H6?b=^@nf&X|jXzGk^8DHU<$gwbgS&S9r{)%T_s@-cFNc5m z*?vA<_Va#rJu!Eb{n_qME>if}{-$81{j>c;>;IhK|NR+gKOf%^-R`Q$h~p$?4Sv2| z%|Q|_c`mC>DnpCD1rI(X{&61P_ZX308h5rVF1+ZKQa3J2O+~?0k@b~PH32^v4N>Tj z_x$R%eDk$`e)YxiUfUig-{#6B%O~TVGk?y^C(!^d4ze?)%$tmg;|CojL|%uBvf(}V zEYyo^#i4j|>Fz9rrzK$L;k_g^P7M9r+_$$8HYxba?Ok(>U4vUzJZJBzyA!nVnnR11 zgHHN>t?c;r#tw}4i{Jhl6aQWy2E~2~C8wkTs#@2#nuE~z^09%CYo`to$*Nxp?4J;O zhX=VJT^}Lw)4sO7%107G{Tq-H)v|ZQ(aIU8c38?;i2LG?SvKdJ(6|$}s z&XOUg$a7Kl&Pma#zwC>D=@Zq;5o6`@296IlyvIec%Uvv7SGM()qK%iE=fjSZ(#k#& zj~q^mcER~K8m>YzPCwo5O-$;qS|p&pRAHET-XS`%S^tm+fXP( zqf4kq!7SZOV_i?EOQ_xg@;fKyuBrxMyQY*%`Cb*1=otE)l)}5?eJgy|t+;*__iU<)@rt=ulpAKKUniL#JG-5RV@#5CVLoAybOVti~LdkP(wQ%^5hrINGg4)odpCZD^&AXT?|pvoIfq z+o#%3axIUY`crQ)3eR~AsUC*-H-R;RRDi~dMJ;VYD}X1wSGVDcUvOCfBrUos(8Tt` z(Y81JxpuK)B&(?=Ip5D}-1Gt`D~C@|R7OBTsCF$kkdnnW0wR3e^t4z^B4o-62vaixEte_XYS^{aC{!(K{bI3 z0uzYtHj+oKkqteej+&^Yg~rHcp@`(v5{>hh$sgxHo*RwXA%B$1FKx&(JRzeLFMZ`T zBSPfEx$8}{>BP1309|g0dL3NOvmeA>Hqsf(ZQ0WjInd}2Yn=PX##zR8nJhV$D_l{w z6;s_{jNC{e1++bS;O--Dt79!yDFmOEI;fk0Ydq~j{>wBB>+bzX1W@>mccrC~Z3|pG z{(Ax8=_^x}=sQG)ciEUHvX(mIvg+L0l_hmG2^G9C|n|grv2x<)@`*?1k^#cr) z{kGlf4f&w`H;ca5m1z1fAB<(-b0FH9WhV9F7MYQGg%xkqrLZ2##!iP~H9ezBTf76h zR*NITp+0$!7vz(TA15JBV)JTRw>JaCQ{mc3z7Qq;rVN~Cl^z@H&YsRi!PQOIw(hJm(i&Pi?*qOC6vZ-~!#21Y) z0{!t0Jt$lh@UW4PnhQiA;$C+6e97#ty$Li>7UV#nm17f*jK#&nyuP?cne*nb;|Ot; zXnyhzR}+q0v0#9NtK-y6eWf$_rQ~ym3{igT8M!6Gj=U=j;NY3_(C-(8uG*uH^_+BK zLgW#9Fu)G=xTJ`X^@^o;^q|qM2&4BxSJK8>roKrTikVJlAWdsX%;C&Id)`pl8Mqvo zEhw`U3rs(Wp8{F~aLjSr^p>x1r~P+4K;SWIl}k$;oS9EQSB4sj5Uo7*%h$lS3Ok-L zSv>l6YcQ(tL<4$W&pWR?0!|9{s>mNMR>#j0>LauL@*n3J{N;em@yizu?KIEgMujs8 
zk}bFXg$%FR3$AnLBZmKceQs~CmtI|9p&ury8iI);1jCqPw!qa0^jeXtvKOjnBB8S- zM0JT^=z0shXKc)@&Jem(JSE&H%I)x+f@Dq*Q-@8b8Q=TkID1?0W_5tAH6P$gk*oiu z%XyD0nwCV?ZPnFO-WUW{a<3EAf>p;W&L8UdH|c8GQc`JA+U3wnoJYu`u0@qYyv6O0 zseLi?YmWro|N2lWG6_FIcsZo+2Y9lyEFv!uSax_Y-O=|%Q3PU*wE>JIo35J?A@7XpUyzr0h&Z=bo&)O84+28u3+6R8h zrvFtk>gg@8WtQ2NpPseb4F%swE5&xIOqN|Y-~V*8cR1BQR^R6&JJolMe-GySyG`vj zsNTD4SwBZk?&#a2w3rN<84MulsDC1k=w)_&^j=JSndIEzw#xeg zGd{KMchObClm9U8U6$W`$TdkV?uYy!H4J@OKD&7Ck?i9vl-#o*nm~e+JOR{M& zz3#W6B*1N7w68m;nmcGcHpTPM=RY#xEe-6fu+k>bJ?P7~&7Aa!8&$(j9X_R_JST4p ztx2O)qK>?e;5^|{^}O+xCdbv=NhR65y{c@<^7X57-9_{~g7Z3RVr`o{WN8adl`#SM zCJO<$%?X8EjgdxyrCzQqCa3Dzqg9t7O; zUbEM42?ah08-rfRyba^tIm_ofsKj3E?KN|$EuDRE&v>bq-?obUt@3>8M#e-ly$>f| zpxHPWGQGYo-RMD=?E`fVCtT@c+(*#r%udjml4=rseH^+3Usx`ho%{&28AN^_1p1U| zjr!Gtb2ar_$R71^tsydrmLpsN{e+S6F5g#j_hAxv7V;o#nDS|@_cWk1>-&2N3xU&wT^hIIWDNxUu>y?cr*w&XZX*B z85d1?g9vv$92X?9LkYXe-?eXQPJ+lJ_PAcB^YEvPyt=#<(K{*1sV&sqaGk-G!O`uZTD$dy&g*x{|^kw5O$@R`LCxX z2#@*F>nZ#=rTCYJNntK&ikddkmAq*Qe;C(4_ZiAEsp&HZv=4l3KvhR8S83xm#v21# zG}IX;i;VfPk2<3G8U{>Y1?kLWz>$G10rtoy!r0ojyNa2gP%5A*4yUqu>K8wz+KUlG z^9*zQ&TDv?Gv`Qyo{_ezo+*3T?_6U~18bn`pEWG^;Bbq*X-SqoR<*%cW}PPBZ+^ecsBZNgz!SB<#1QerCbrESKlvz&3GaC(#x4Y0{hR4+69k9ZbLd-jI+Kt#q!U zyGk!mh&mk}QSELT_g93h=}Dh~`@P+cy&9uFz-!!8FD)pphohesS=Fo^6KtULd~E$?CnbB)}1wJ@%(MWaFLm8+eeI-`{UKbQ*(tYtqR zS>CcJ>3ebUhsq7X?Z_!nM;4H{Z|ws(+{tio7cKu9ro}WnTc%{u{nrclyi`-&Pskz1 z9%Z=`J{<6(uY=d=wA5it#0~kAG+Q&iWKdojqtT{rp7Ud5Fw6z3bgT^|c@|4`baQUm zY_-N-eRZ=Z*)`-M`VK2`kqUJ?4rJRb`G}4Ru?lK)9tZyMBazv?}k^D z1rw#uQ~fyqra$HzH+U09OFc=1yR(Yz^Y3#GnP)xDl+jYbKZq)emSlf7@whN^fwdXX zigR+GJv$?B<%@M(xq9dJNCe?sxdI8{iB5S{+-aPWyIqq-aN(aeH*DX_2Dy~Gq(#mI z#Btngbo(cpQg0sP|2;Ts@3aD``F*_U`)+efuvc%h!1xZQ@O)Ahe@K(^=B`T$kU8X` zO-{*8JQ#DjV&J7+w?L>I^LF?x83$=L`2ExT*XEf7I4-x(aad^V61A^a8Yblo!<$KK zXLrrUMgO?+eggH}=$D9JE3Z{g)ml!$2K#WOQYM50NTlFuX{PHK+R+qFQ4+UI$GKRRysD#?ffb3 z6``ooy@b)2ZyNdj)3F9^XUs&sg$w)qNa)n`XMdJeQuL(E-pt&Rs!g*yhF`lTcGW}$M(u2z)=pDZZ4e!{zwgF?dqJtB)cg-6cT@MTdC=4AQ_~t( 
z#_!19h_uejxLSvGZL1EgE?QEL4w>yq7y5zZ^9^k&^<(C2g$6~5@ZsIgS;b%(=vq@k zPRYchx{ccqrV%-Xa^wJN#eaMBzB9c;pU8C81FW5BBD%*G6aIZyAB0F7EB!1YfUu}QDIOZ(h?EDIc9+QaN**UlxUDfJ8>G(mXlH(bn7tx1$13# z65P6+W1e=v6souFOB^I9=N@L5U9BM%P9E2EUN7aH3r0?Jefd1D03HC~RkLyf420u=%9z3OXF#MgH-$ zWXN!CyR|jud)LeD@|z2_#bs5WpOPOuAol)PYy$8Dmd12u8Vphx`i{V2xnmaNv~;O;gKC#YB40&>^)lYH+7IW6Y#3cU_Li zSEJY3uj&n^(yy=Ch5R!7U<86EJqR(i8oj1E<_3%8t;N2u1FnIQ6<;dfZHL-dINIR?MaUfwIBaIf<+7A!FVB1YRc^N{d^;AEPLiKB>-K2lDQC z_*OH*X_*VtB@E$b!CC3NdmHjjx1oMXcd0L*r}~!O)fDs}vN71J)GjGn6n=q+C3sW? zh&5)OIY{5b=@IO^ELF7>I}Ak;6WiTsxG=4(-2$JjW-^)fmTE-Ky+)T;hUO`rz16;q zI!!M(N1_-@OO+ZSpA_Mz%X5O=qkBSVJZz9L%LM-#;oP@+|F9e-wN)DEjFL!`e$Z0% z%g)?Lo4hytVPQE+Tmq`e72gnlwrAE)*2js28(&|a2nN>F4%Jj}P5VcM2}+0sYHmw# z;?1c%tE6+~UL;U`ionIoJCs3+f6ruc89(0iqX6mj|Kw4C*wq?+j57Hl2?7uim&Y`< zLwX*f-)`o=HZa@1fvjyfdeJ%t7oXwB)bFyPxoB(KXH5p&u>HlRF5%~&2(c<6a;Cz@ zOu>c{oAnH?9@)&fi)*T5-gsL;`4wNv|KeY89Pgo%B{<>s+r82*$CeZZ?q7}ex@_zm zWE!u-5ijmYo?!DJ!_;uE6kALg351{d!=f}mGN@FP5VzAr~aAIuwCZXM|@)s z#wXq|FR7V7zE|aFvPTk}mbI&h0lJ`qxkDg;qR;ir(7pvoxHyqepE&hcT z?76aIjyc8FAs3iF=z^p(`qUr09abwCZVU@Of!=49hK-uOKHe8JRtn0J*axWk;|WdLg^H-bI}WWf5<%Tk-*qea z8o`FoZqS0hjjdS(Ib$4~D#JxiSFwzv$+S-uCr;b-#r}_l{;}}I|0ZvpF4uNmg8n+q z>jM4~{eWs?_>0?oZI8KRhV*jz$HsYKMYmD^^=V72(d|~Hp;HZqW=1UEMfN$#myBTD zufnuXzBm2n6@7C~wQ%*!y}vha`-}&z|ko>x&)%@_aXFCH27OrbB~u{Ua9>QNB+;LACQKLB&e?rU-QQe z+jj!YW)Wd@SG2}T_m2lB$j9>-=qVe?+S41y8$5Kg!!e6zwv|>R4)R{QAMCxbxM1towSw5zgJh!N1G%IoRq< z(K}zvLY9NIXC&1I?s>~)t$Q5Wyu(|vdL4H1m~%oCQ^65c*yvH7tDQ|7;?4+${&V9z zCkuSl$0GG*)A31Lldy3$I72*R+>__1LM*Dy0?&TB?4k=(_sjH?P@xyg)8fv%mT#EB z%&6BwiKlRp<-X0gdkrWGu>nC5`pJg;ZPt4xIdU#by8O^<_o~UtijKZuallw`RbX#s z7M(zD_G{@3#Ifc_t%B~xUmpy9?AkAif3L6XA}*eF3k|{Fixe6mmfnTPbfUO|MAx!k zt46aqcPd;LfM)o605ekP4&MeTw85#0>z)q4ZT|e|hQ|9e&Q@#DZ9t8)I6#6*kV9}A zZAmUPa_0+_$*gC#IZ3sCkOpJ`r>*DVl06 zH9}Y5$oZJ=#qpM)b)nFy@($Cf!aURnrl@aA_5Z61RIlvT7>W$(j+@6H16a=Ym3NW` zVg)q1K60hwmC24XqZ)><1Wg|Ea9CJSVR%I%c&zQ(yti*lww3hlk__5R(spQ#ZoDg) z*iXLiD|S zy=vRr16m@lt~SB?!!yZnfnUyj#iQ 
zr+IoHCDT(LDBO(-Gp*r6Yuz-#Edd96l1Tk#`1@WVhmpL+CaJ26`MCN{;p%R!Up{{I zR4pB09L9Mu(_a`_@s7DJ)EWRyc%qKwp1!^o^7Vxh@##)tQYW!JkjYWyhrWl-DoWS* zMUTd%l{<-sdnl>Z%P1pWf9FQsZe{+s@({J`wD)dIY}2XHQ? zhg&56Y&YDI#oDZ#?s|;)2(8o`vLk`N7tIyUtX29acFwksFBf1E=;v1rG|&E~zLcnq z6wwn@Flh@kmT|2t@3tHZKtX);DxKzko_6gW$!vIdS8Q2Q7I#nX;OmL|%ygk^MX(@- zVrO&%*81MZ`%smc7GdwvkW$VC^WpJ_6#uCN*yM<@9p`&rrRxJkx~H$UeDC4a_Ycp? z^Z!cUefAJXv3%$ccntNL&yx}oll}~2-*O^_YY5;7z&WLnCMURaBH|s)utSjTLWGs0 z`2GbJR(G<^+;W$^^32(pGW%dwLBRn#2SEGA#P-~AzJl?l)8&fh-nl>li9+}Wcs*Jk zhf0g&eu6tr`!Jxfs^Nn)S??ekyCWkt>d$+FvfaD0L7`&S?*Dh{a z^8WJe(OYN9@f*^?S)9Pha8KpW0i{5wdnZ)*k?HB#-<uK)#0@O04Y8T$e-^zxDOFt9D@r;db!5owc-(Y~e+#NLM4bO! zLAf^@@_x#0a%Vw z8+faB;&P^CWiBq(6YaZhK>{0A`_xu2+!mzsGV+@B%IWQ!$hmZG0%@z~6=~U-kx#Ds z8U)@mxL!&8G0Dp(!={VU6Fj#1hlM9Ak2xu$lFJYoA|xVb z2cObx(#R8>%x6^aSKoNza#O2ep_Rq`!z`v6Jx@aJM4{=lvxnBXzV7%6t~}7pv}TfW3~1UC^{~jGX8Z14k&puu z!TU$?JcxLre~FGGn#!@`D&q|61c=^z65+M*E9qgLI^I6VE`W5WhjGo`_qQ-)Nr;Sw zWY}OZZJ5*Zx{A8;lFn-4EvR_9S%Tx&bydo$oHd23;cmP#o<33{tY$VzGb2wsT&6f! 
z!8*^S&=t?65qRH52$G3YC~e5&;A_V>YyTubWr zVXw7tT-UEQ)sh2~Z6Ga;W(_Syjz|MDOR(<;7C{jO+UR+X7V928^)uD*e@Ko0uV?W1 z48Cv$TW#_|wb#E2InEh7zuesFHeKF#18-&xQT+TiqNcG*WGt+|0CDl|xgMZ)^u%e} zZEn8o%ok(*86xcYEM(bs*qpy*OzV$)ii5s8_~aItBKF{@G%MB4bw1Wfu4td$NYVk| zWVBxa0Gp2;%EvblQF3l)*h2wd3)K9v6&o*X3GD&My_!pB*uIPLgU2hjAMX9dZFY#k zou=`7SFk##Gi3~iw0|{NVgPy-`D?+d-}IIu*p-?h_Lm>!D~ElGHp$gdCw-q>OfS~X z)xUxnzWAq7io#ItwNspCMIQ?-^5P!cEe8!iIc6KnXkT^itO4%_gnT}EK@xg+w9{!n7l17 zNIs?9uz2BIX^J(R4@~YX5+=JH=MFCsmCBwcv@g21YHDLf`tDhSH33-=Y>vF(#wjIO zR#urknWSc^Wy-WgIl+~p*l+2H>I!&=L3dEc%1wdGGbIft2U0{CL5coY>EU&OGECed z$d|gNQC189Ad8DJnhEt<&d3|1E{Ojn*VFKZi*FgddErtwBHGvEyvXLSn352e`gV)< z5jSN7oD>i=i$ZpD5iAvVGl=&j6eKo%1ffcYaD6u-0Ph!6Vu=VtfJ!Z= zKergy2_m5a*M-u>u?SPV%b_L9U{2q6GbJMdQtb@!@9K1r&ghAqBfRy$L(j0gJ~Apj z%SQS3Fov)riP;=^cU_oj?0Sf}SnG6FKP3@ZK@CB`!`lcWh6^-k{#X{BMntL(&VdVl^%ddrmk(zJ?nWc)v<`FowY#KnA0ucqxF)> z{5-_IvhXLoKep{cR0m33DoM_G{iX9v)<+?c?;F)g1F7g+3~)Aoh24ka7*4r|y*w&M z%^UrhP+A|-RM&0#v4$1hANw6X`7J@aF^gBxXiu?I)J`^T?LPXBS~)rs)=Qvy<^N}x z)a2LbrgyMi@~Ec;J%DecT&Itnb;vz!8yHI-f>J}r#56|=_u5*DWIj6QWA9YQtjV3+ z;J4mm-uR5^ObxCEB4WsjQGJrxK-Mr1^{Sy}5UQejTI$(&MhkNVHGleTci@UH73{q5 z&68>1noC$bwmA1pWZAV6+<7Fm&-l^YX!}#dcDovkZbcXYHOxXo>4Rfm<2F-{VbqUQ z6+gdpDdpPXs&)%%reX!5c0o1~g%RhEJ?)n(?AJf8Q9*t$_1Qg0th!15`}O{R;6$Dt zzz#3#ZjR6WRxp#Gd6?lD67{xH9=H?OV0tBQIMrCr$Vke$G$fXo*uRK%k)%AQO9U1n z3kG1$85jF4aIjxwEaIMoEL^ax^7ye@+5^_4E)m1;sbF45HhfwUKkls3b8uNEkx8N5 zrvH$na1Ag~@xnXvYR{v)EJoH2CcW=EIElR?j~Rmn@ znWzF%L&JTbjAGfpub5xgRwMJ94o@u*LlQMqfc$8SR+^@Q#bUZC zt6?%Wxa=Mc7djdo^Z>_NEy9^4p}R!WDGQo8wu3XqVh036E{k3?F7Cxu<7&nrrxwbTDpFqefP zqyJUW?J|&Ea`vyUt}V><_=6g+=A1N|*~}h%Nk@cy%2bjyeWd9J>QDECoG9!x#`g+E zbyW>s1aw4TC#Z2E{FZsu=@IRxdrL;jqHgxW{}w+`(?Dj2^OX3GO$=&p*0@UT<4Lr zRMg#d%FDl=d9)OeIQVj();eg4Mfc)@9Ax&vU5!~uglE9o=3~<t#(;dTF*=j!*pfhFvCQ(c!@V~v^8eAe;5pW7xljWm|-R=zqYvl zK4>&goH#Ns97?UvepI*roP2v%PG3ZbS0Vm^xI~>{w#aw`YXv-({`*j#;ceVWDdM6! 
z(0M3`+Jkl&?iH&br)SDYaqC(tRW1Wb&Dzu=7e)^u@`Wb%Fme&xwokDHfsSN1E0QA6 z_j6v*xGu<*H`*aOI>=lBPY5zYS=@n~MEtH-EVmUh#P`9_rFdE}BW-7^S~3)(abYlp znCN@-uX}ee$fT3A4Gb1msAC0*Y*G-&55i%7~PXY)21 zfQnWSoZ0f#Ixt2uqqs1#{gu2-iKKMi$r)@tZ_U_^QLnJAWLLZlmTz zap)^OE{jj0?9NLO=z9&H)E4h);Z92f+L;uGMs|7bEBgP#KtE{@RZba`6qLSMvZ8Oy zEjR8n*NI72&y?Zfu7iH9^;R8r@I4{;Ui<#rha=@_N_{wmZenR4g_Z7(-QTxub{OG9 zTa7(Qj>2}>Cd!$w%qXHN##<-S^17n3{6Lrws?>L356q!C}ZgNn=W z4TF0Ot5%|BRyWxNjFNpdMbgeBXYbAiPo>y|pS8Y7B|?ax(4t;dr3uV z`if3FvB5aX!G{=DZa;`) zuzlVs^(93FAv%6Z@pemTJY8(;u7E$kUDEB1alF7eWf)ivY7aOnb_vYGUHlog?9%<; zOrQsvmZ_9Iw*W;18?34Q=&mWXalj=eFJcJBt|uFFzYV~UD?4(nGJo1Q$n2OcW;N$^ zH+0HeoHSUi*cdR5;!}?(*_bnKF8X41uT(pUvNGkdAIu(F?a^rTs@00k3tMS6`QX!V zZ()1RycX&mM*8gzCrwUNLMOxmK^Ho*suihDd3vm*=60QE;GNgQi1NveZKAQK6&IAG zzRiCxP7+TxzUO>bEbD0=GjjwkZe_t9&iAIco};0>NketUe|+mcEzL7Y=+khf^Iyq6 zIO$-qGD~7iUPo_c-<^JM>QSy{dMXR@vNOecd|Plgbn2-bO*iemOrw8>bL&cLA{U#x zZNF}>Bj=E0ghe;|to22diB;Env(Mtt(Tpci!=X*KlZt}2m6S)*VeIF=Nip6XDbB6l zjRmZB!Gm_1*NzSVL_LvJ#E&%~s`pjxZLmStPq%L^<)2Dj@ zlQO>+MmA4qvcay9a6H()iWYilZuG~GS~x(|uBwK9ORL#?~=+oj4#Uc~&a4`g;#mE!O5bDtj-K2a_K}T5H!FIw|x7Hb56f&G?AdMR`7(r4v zwPcJWDG(ghOpu8s2-y07@Q59+lJZ|vsX=8?ZXbcf zBG|`m1-GE}>UK7B4TeOM&;amS1yHame zWpMSz(xJrs-*{^4($EPu>x22Vsq~QztYOwHf5WAcSw8C70&e^9G!KJ_y$saDK_76e z^>N=t^jDFh1eLdkcwhH~t^2)5b(CST&M1vr-^C!og9KMje$?BNUvDZq;zOLx8@*~= z3;im1IS|~4$${V}`G4FjTDfxK5N|o+Zi}T<;;L=zLua1@-t*7LwajSR^p>jz9LWivtr?Mysakm?)j?mCBwX*nm zk*9dz=2Ui86hl8(pRpX(_+tf1)qo}~{rh9BlFR=b-nvk9E#YsgwGoP4-88mX`Z^O- z^3oPGVR`P2h7F$jh~8C$N@^*+4kj}~m(7pe7~h^7mr`HLGR6wVcCMN(UZMJxt|3vE zxf1^PESYpY$1s5zSFthf6W%6}#+xFdTlmdheh}VK8a04>e{x_C={AJvB_Gps$6D{d`Pq-aLpRgtjc#DyJiJ895J5v3Z=6g3?UT>rq^uY za*T)bXt1hnDUELH#kcC+cIfm6Z58?ztc;YpK?1raeOeY7lNQp)dEGK7&A$03WcTdl zO_)iqE}hIH#RX`qe2L~RJ`*Iw>*11a``fM*c<>>A(LZ_cPh6`}@86E$zZlke04*&d zKn0#AyyvE^CyJVbL8l^tcVdTc@(AP5s4_-fXBX!XX7Dn@5-edFuO0Zjc&}etd13tT zVW)wE<7UK3uHMtVODFWbguhOF2DWlW1VyfbjM6r*-5!?jaS#K_%agnpS9ymVAEpu; 
zFLqn;lI^v}sE6&rkw-#zmbLa)&K)y`+vS;3q)0aBTR|1!nqCdj95BM_}p!H-k*nf)<^}lx18Sxe)Xz(&9NvP?u~q7Lula^?L)el>|KybM8N9W(mnkbJe*>S>8)4ABJ!mC5VZD}T<%vYy z_7AST*W29BMWSG>vfNGVOiH`DpCe=bKb@PP54&x;^(W&MMW=E37Z^^2YA{B<&@cj;rf=^=i6`$Qw zytAViQdu>MV-3%~Z!tZpl8`!GAfS2IJ5)pwL&j|E^`$rvnMp)mVtE`jHjR?XsP9tq z1`<8y4(VSzy5Bgo-~TH!?*>EAUC&R}8~F)-%G2IPb~lnRv75mdWdu*Lbt@BYY-h1k z**Nhur`6jqBISBxYu$0QOpKreq68 z3dWJ~x#xH}qZZgAD_?No-B@@C73$5bDXFeZlun>}t10ZK>$QytY0U=vs?}uBP7(T~ z;puNyJgOiQf^&gXnABb08Ak|@Bo}IyrL`Z9F{%Tth)`{OH{Q3IV#$J=Juf47cD^Uq z_+@gr`;LRin5tn)2xYH%Q=}T--xn3Gfp;G-wDc5D%+}f0$uCt$6I&*pX;;_xy%(&F}MWs0b$|2exi&r zfeWqfT~jpgjr(;y?nS(d-2=D@XfQ^s4Dv*4e;}Gsb+d+6?_e7i%XsIqEUR;^2jiwk zivr^8ADzS(3GCfekH=meK7=jUc1=C0YvZ7}bFJFFe13X$)!$EH=OxD#aPY03aumn4CX3uLA@)-jjWy`o%E@WsG>%`fBa>nx+la>fVH_PYpl^V==vAMNP5 zp&heQ|KR9xLU+J;94EUhbUf|IwLnaT0FNo8}n|35HgqFL2V$*;-IJ%eHRBA&*#z6Uo`R>5*i)NPdZwbN8zA z$@Qjxe1xP@bAbc<8>;e@I3aE}o_n-=Ic&U$krWv|5ZF{?Bu`at^x6_ypB1zf@P=de zTTI$Q4Dl&@GW7^a-Oi*~;8s-bdGHlUZn7xF|58dtN=#8qS!vj$=!cai3#tE)D2IMdKmrdgNqI6EswGNuy*6y2Vk1wk+EOc{`%!w9 zOJ}X0s&#ofOVuU0dr5nl1o4(%wRNp4vqj(S(G0V(!riEj0e+*;je$TGUE zx;8jhRUI=6HP9rGog1t033-t+h@eLQS;e+)lL&|Te?v*2T1A-FSUC;0zWn(}DqvwF z_HO)-OQ9_RC>-YZb^F$az-5#D#`Q>KBI02LxFiqiNw}26)DXx2v)&#c?ofn{&PUVYrYjmN z89XoTsMg`}$WPn>;kAw-zc$xQg{oONiN`NVhW7hESP*G%5NkkHV>z(z^s3@KxX!{_ zWrzyS3q2tB@{nl~;fFR%`UC?QtK_D|@GqWKM&!t!6$!aVBE&f=c|r*LSm6!(2SDa~ zT!_vQ0u=PspFIVfDWy*zq-L7=)5P-fF7<1{V zpWGfqEUR9c$M6+W>;TVB@n3Ig;f-hXD@Mx2zhipdMjDxx9Z+5N&Z&%+7|^m}FH`DT zWK4j|QTcX^!>W8o#wHdq1*sb0eT(T$P3Sms*s^gss$|;?J!~au~a0Pzw zY%Gav|9jGAUf@C9`8*-ZcSTcLli=nqSP&#$n75Iq1PqAEbc}4@yJ*^Z@!bPZY6E3Z zCEBOPYh`iUuriNY3;$0#&64hC(NpEv>e<(^!?C|qeD~(RGnb++^184o<><5nNztYc z*T#CzfSsaH4Kxj2k4?$lvc@t{d(g?k^%`SD!ysgx9F#Vfh6)^`FJm1E3U}qe9D2NY>0?N&lY{kS^B(g@m`NSP9S<~^Me$Vz}t$`+a z%wl!aWjTNCr@ylmj+aa+ST*}~5kqB;10!5tt(rg@llv(h`DzNNx>7IZWYJ7Q4udQF zpF(wP6uTgDGp*oM$Bfg#FIS98TM_L?u$xuv(9<*KhHPCJnQPgT926(^*KHlp7pVt! 
z$47q)74}L=(g0}V6~1zM=dmX%DbbT~8`~_7p?!wdImF3hL=CR`G5-C&iyL=uV0302 zljfT*yJei2F~{n{MbmA(bN}-RgqYyl8#=rUzpDFCA9J*xBH?l{O7!0fkq@Ngq2rvi4#vI z_L8^5*e+eeoih;NEl2U6?(Xi{p6B*pq4uW5XO%W4Z5ZlIHZ-OF~z zJN524ZVjEWRd%#3AS+xmAiKpdTSaa=<*23gI2>tXB6$+K5i>9~$KDa6VLL+hwikoV zRs-mZf8GV%3r5PIt8CPqzjv`8TP%MTo|ZGW^EGYjUrFcAh8j=ScjI$Zm&^@a>&ZDa zNg;Y|;A&2!b@OqJE11omfF26Gov>bb3doj~IzDc#J8?ZjQJCq`LS6U+~FG3KV3YtLKn&S=4AKYCkb_R~_J>%=#$-uD{Qyke2d8$$9oKp>pN z)|9ePw@fAT_PLSO5fU+ZNjx;I=jG)ktYV9*X7+6FF{@iP-0onX@D)D{wx9mlHDqjv zFqUG7qHHMr$8xg?BfchngV}r&;o63l;CA$V)(@9#J{jDm?TCEmAAAw7DDF)7>aq8y z!A!_z;-H;Squ&ch#h_lt+e1f7cDAK2W@BVwPzcUJFUxITkCD43e^Ra3QsscN_l7L` z2OwL6G{1iyw%ywr9V=&gi*C2|!)z;&_w&Ft&?>9P(fDn~wKcso`r?JojoFD$T4*n7jCM8t7FGr^{Zflk0ix z%{-yp&JVNOG`P|Z5=%)%vzNrE`G}eIzmKWqDTw3jAi3D=q66>FsT~boT}#{^P^&W& zc5aVG9k_%Hdp-~qB-0~f54^x$x}NKilvsZ`G0v>PXT&moZSd{fEpYrr^u2%1`6%5H z-Tv2Trhz{x_<>|*nHd&znI9RAQANewAah&DnkE+gn$t9gU4!=>`wQ9qm>UjVt3gcX zHDvl)&NQ0}H^I$?Yq7U^?EKV;Tbt3V@Qeiq9ClqD{U#k`4<9h9U`?Aih$LGcW_(Fq9TPj;X6w%Sjw)M>Xq`8d z6h};f;JatK8g#Efo+o!WMvp(}X=P*&gip`*1<)^FNRzCFy-`qHGxo%UZT*(AbV|)R zVrR;K>d|ET8B(?1SYc`scYAf*hMP^>bXd9bP2ivsmWS7pb~LR!4FJHbL3!9&j6tG~ z+3aMfjH*6+L~tAD#j+?xtr`@Bu0EQkVP+a71a>Cp60hm+lbD3(a@V(ezO>|ZSNF@l zHoLOiX13Lqh4B*oqqgzST9dxE(_qkb=WiG{%Ia!Rm|*j17p)g__5vU`R;3CC3PS0a z_m|Lycy*R}EjSu=k{>eF8SJsi|GcUHjny3z%v_yacLq?e3uymWJ7@mY)VcL>?4Z>u z^ePrhF|>+|3Kv9_A&EGY0+JN5f`H&6S|$MrQpoIZZxIAi2_%3(lqn9yR0v{#5WQE4 zA(Q}t#4-gXA&`h^CJve3*!BJa?;r5~aDIH&S?jycv!Bo2=h@#CQ}9=;^u$L%WLq*l z0Q{(bM9N9?9}hwCMPBlA=mHtyW~Aa!WcnIer}?NkLQ!g{y0t zv0q}U1I(2|e|kDTr@L>;x2FJ~Yfh;H#aqCv@80j00uT1mt>+09KN;=~Zu8GM7t=Bl z6#R`glzE-}F^6%r&7MAz+CDAioG^m9^pz{#6CV( zAI7aOzuH`T$LNlDem)E}p z0w!mIRcSR}esElm6BIh^aKKeA+7b}^_Q0<{bq<95+y0rc#b57dm9nu(Gvg~_T_0XF zP4>xMW*~5-Jfo`359f6jW=y$OQ&V&KjAiewlvL_8&i1+00Z05Z1B6RV$FJGy8hSYe zYnB41sOfQIn2tC-_AQB=&Ibd@y~EV&La(f;_NBo8a#v4Y#2qZ_dRCHy!5GznKb#dg zeOc;p>V6)y()xiGd9^8+t54aVJ4X zw@gkC-j?WR2o}CI`0P*4H_`LkEla3eWSPu4Q+aFD|vW09AzzalSTpsDNs(zPGH?^nV7 
zx+__IESlgGqr3&8@dHl`Q8#|_*WA+nd-WNaNcGdeQbW4+9kn74hm)4lwgy({!mgsB z)M;~~Cn)CT0r-@kRqaQM&hZ&~tb&{GLu<&gPm;EwP!_#RaN$qJ1wl15dOdbWb*)4_ zC^jU;HJ&Q!K3?er4+vsY zR}FVOp9@{OO^}Bf)8EortBJkCJsz?tVkj_DSXtJ(y8G}t_|-^EhohE}E}Mk}Lih~T z-$}hBC_~!J%InFUb%r`W6I~A)s-H3IY;Wg8&o#cCa9#VDFpos8g^%|5D_)hmU#UO5 z?D>mO*RoV`aPE7G4eR0%h^E#>&N|Hz5t> zV?6YqWzD^5-Dz_VAX>&40$6F~&l5gglho6gnc@e)-$)pe_At-uG&0F@t(8;Zq%7H5 z+{mRd>!*&SK5NyX8g_2ce7{0-BTN~8tFk`0ve@H#!L57$;fVFnj7_gMeX;2W=N+4V zAR5SH4&!^@HHy|NA_&?DPLCaSH1Ho|u~QL^5Wur6&YOgxY33wYV|E+MT0e|`UqFox zwbOOf*B=Xu?E1YVj5-CxL3MG<>g!rJzT-zA%3IRba4)LCkc(G25%RGsEAGUq8YpNr zxB68k&XF>gIYNdXoD{qKK?&LIPW`e&5MT2b8SaXAai&GVG_mi*<~{;&Aew; z%M#oTt0viI`3zH@hrO7SV^e^U^GNJ~TaCXg=ecbe2Rc=2iObrEPfIc*5P`RTd}}U+ zV|UF8k7-(Z)4#Cc|Jv9;9CqHt`EBg*%o+;GP#@b$;YvwpK6mIP#1II^7&V`{qJzZ(iK6R@|6N1Y?^ep zT=`N$ox1tAN}&Xq@&skdsJ-_hlJu4B4v9Dl%hm=@sWm=p=2EE2W3Z?F_U3?+l9GjN zK@AlOe>OyGlCW-sn3*n1TFjJY73P{oO{NrKIy(Dfy^Jh20JKMcT8g|hp^>vr9(f3O z!UH|v;q|Kxc5^UK8Pw~l#&Lmk{6PE zNY?>O`Sc%vpC~HLHStIAmJq62yYKmn%wNq)HQ^tsnN1%1}Q3)WRt0v$K z;bn>XR=;hl5vvxSF^HJu77vonWUGG$RNWC6TJfR#Ep+3O^m~Zv2mpB0Dqv z1C`~YvKo(`Gl>)vGZmK{;m)2{%+B5H?@v)0{=`fL~Kml<` zqohurZKsEdl&CAHCB>+&a3(Rul9+45{Zz|Pp6fjabLmY}q1eDui`r#6!GaD$Qlpmu zZOuHoAD_xQrFGU9Y6|XW;20^rwuP5fb`4~Os@G3!eQLx1789geOx?@=WvE4XoZkjO zp~RFpK*o^+445r&3oyj7$43$E$&6&gy|ADH>}Ue!AZjzj7fc&eyH#X@iG%qmP8}tn zq{<>e+W5kYufW7I5Iv!7M_aSFuH2Og^VWmMm9Rkqi;6yhALwsFon=%$puFe#$MIXF zi5wTJ2H{b&(B#|gi|!ZFk|QP~iC*k>--78Z9Ha6=a0=XaLsNB_`>COco=Lv~YvVg* z6#}$KYp?F2kqd!KwItU`kq9Q_o&D9GzTQA_a=4jMb^}{5G+`*ev4t zKXvH?(O7Q4RZ|~&-~=DžEDrtKxGlZxu;Z+UbTOwkNP^6RKcbXqefqYgwD)m^;Z z4^aw#>Z)x0d40xc#y9u6YCI+G!~3b zWJ~Pg+MhqRN1aFeEFClF1bA=eTVL1CoXIm#wBXWL-7l-|ypa+x@ggbcG8$ zV3u}>j2LI=zF@)8L==B3UH%y1>F_5S=Prk_6aC^ zVE7ZB&XBmz?P@WGP@V%M zfAkR_Tf^=OleXo_fc`9D*Nqpe+;Pg_5)VGN63w#1YM1spX6X2?(P!_+?_c|Uzu&c7!p$tN`-<~8kMlT>i(nNcS<*{1mk0<5NadbLJtH6> zCL|y@^YQ`_c=9zi#uEH<#_5@?1VKsvO*HuCoVmE7H~~Rf#O0$G=fU?E?Vsp45fEIz zhyQyvy9NFaw9Drb(tL9UTw0{UU}{|S+%ZPsCk?VUg?iNPcOn>|M4vW!M%Wfu|FTY 
zijpwD_2&aIxeOkPKOdxL58Sx$=L3~z%OvOie89r$Omz0o2Qe{EU5O0pomn4wY`wIf z`SjYTE1a(N0Jq|IPd@G(B3GGwz4k}A5vme8yX3Mm63}Axn7tI&b17*romidJYl;}+ zH2`)&x7DTX*OvuEA{&16BFkqq=8t!$$;lW*Zmv!?2z+n!aQbN65n{AHQP=8!kx~M2 zdZ5$-y}@4S(sK#6M?rlkYbr>{sb5QR^ReUI?;l@CJ`@(dDfq00-*%$b-rGiY{Jx;Y zoFBak-=+rxc2k_3fWYGX=*3@O=F#jmlEd)t^Pb1N{~R$nJxLt9-t`p({QJ-c`Oh|u z$2;S=;_KAd&D$JgfkSRY&A1W0)*Qv;68pH>FWY_>xA}=FsV5l}bYZs=yhbToW010b zP`8$wCrESe(&?&{xCmRxapX=Y(chEKO{A|d>$@~kq(eF3^bIoGoi*9uMocbxWWTxh zBO=Hbr!&3c#-6*7HO>-E-!4T!K%vjR%tIi=%0_?s%aBV*G&<0VbgAKwfQU%6v%x3hgr!* zCJv9~a`G0h0OgvR_|Gm?q&B_Ph29p19?S(nIj6(X-s9Cam+3uM%f8o+_Ed(CAo(kc zGtuJuUx`1glBoN=Sp8nc7|!Qp^Uz^oyEVItGuI52T5WYWa8fzl_=X98$OzMRgE1LU z8Z4Yf3z}HHq;#rs7xFfkfMBNWj{`}G^qaVJ`pWWWD=~vVTzxS@@-wBM!*+Gof$dcU zf0fO6#7@5VQdk~6rWS$lb|_;Hh9=wQ8X4dy+B=R^3i`z!A7PM%vSbI94R3z-rjm4) z!dQhG8ok%LcJhyx(Cw0x4A@r%ufH8i;j}9)R3F{z%Zd<5N`xMwl&@fKF`XrNb@`98 zs;o}#c>jp7pbL%2^?U5J$m?bAKGJD45kft8%y%A9T}3{gwo}YKACO0XU$f{!u0(jK zIW(cV*n3`se9riUMbSK+J_rtJa4jbdOQV1H9Q)*9J3cFHqAltZI%Qh)>QC5cJIm&i;;Au$Y4<8f39S zzZT2!Xz%Cy-S3LJf01x{GmVJyl=U7CiB#FeiksfxEuj}A-(4M}q-`mGxYldzXn?)N zu^9c#nTQ?&<$yfWzY+MH=^9+xDC3|pixNRuA{XvwsPgLwbX0@uPK^=27V6tU{M z%iHlbUn@1rM8>Ea8tv`}_3*OjL*gFjT&1lzc!6@G$G=g0n;Q}4R#J3CVzpqc%~8nw4;a6!zea;mc?KoPe3InT z7P4YV0IE>UJ6Xr?f30|b%45oRX%90NcM%)GxCZg0p6v4ccWhQ877UaaSdn(w9exhd zA$6u$Mbp(F`t}ZEmGAwZkcC#n8#}?5{b;G_zZK_uvE}w8^Zm012$P-FvDw4BTb68w zqPS0@1O&IpN+?B^!EyYB^9a9*0-}>M8b9VbLc_>*$C;+Dt=M@y1S6>HXe<`l)L;AG=IbJ&G&YV>+wXc-*`}N;DSb7#aTs= zukbr+0#is8e(XN;nz$_>oWioWi0yUGyQO(&wei3{vTR*%^BF=3a?{?p0bkpsTDFmvEsHGX z5|ZAc^4u_VnZ;n{_q}LYz1olbd}i0d;SZJ05G0(2trTzG`r`BGhtXz}&Nu2#b%Xm2 znD^KJ8CWaAbE8S%dy}Wz3x$QgXaDn5mEeNNP58z)cVYa%Y=yCUE>A9NMXZh2bakc) zC-o!+fRi?-n%6JpIegmr^S+;znXus_wK@$SBqRGk`+pK{PLK=)?R{OEhqgGjntqR% zfS~BURB1e^1M4Mqot+$$9tY8$N3j>$_vQ~ex*w8IUjezeg?I{@O5Kz6r_3gr zLyVz+e=r~ZjqF|~; zr%sK`*V2bnTaR6(wF@y~^Zd2Q^0x&El~aXp(8ml)R`_v7c;;;8{2u(DSXGF~SIZ?D zud(B7%%_Jg{~Gh=pB(QWs0gNH#`uT|j6{UIEbzl*3gW;T+n_4HKT^A^Mi+$<*MGnf 
zuuLKhGJ(5T37syj7jzHZmW;_b$TMjAhvINSuCwb>HM2A?=fJWv{+Bfdm|l8#T(2$4 z7L`v{5Q-VTpCfx?F0-TkF8=i3Cx-j0kgVG+h@aT-CamCJ=Ji27o=C68mi_-RuebZh zZtE<~)4t5-(*z(r6x7k3QZIhR2N+|txDYpE2w7~N*{%`QaaV(=r`_dTk@`D466j$! z$`{2r>+gBz^WYgs@t+KY3({R|Im4hLJyKE(VbvgNX*t-@?TY=EslZfXbx{(C~uSzGByO z8r>b1nGNf_?TQ$#eQEYOO#R4FpjMhdj}Z$E<&iGuvl_WO0)j#R(%=mX&g!Ehc5>ph zx&`}LMVyLvj&hno>5JX}o(%G^xv@&98u;Op-xxmkud}1^@m*mo^HB$<;MXZsh-lbo z7UJ)x`l23G2{QWi2nlB*j8XT=Nlq8%IOV;MT{JF#%&oY=;Ya6?HqE~Tcrk5!78P=6 z01wGE$8&7wu}G#exjB=yz*8&EM>m{01AV$smpingw;|iok1JWoDp8d*%EmME`RNo!7gSo3+ojjMf#_=P5NA!(wi8 z=qe1KHc9-B+=((1S$DB!ZcCd@YEf9;=1yt+%KtZb7r&)36zNtKZ1USpHL7F(XQK16 zeFG#P)%OZZw$J_vE3P?)_Kr+dfQ*F1mQIHT5KHJLnqid{L})<-26_LVtcoxJ1?g-I z@&D_*kF~VykX04Raz@~??u;R`;sRV$;jj)Yi==*KLV^@H8xE{e<}+WSnLiUcP8K8l zPCeHM0|Wr-t2V*T|G}S99&vdwOAY=%PP$UcBh}r*8~@i?my2fV=uEtmh>2AHdpH0* zZ)Rs_H`9-BReEnk8y1_}l6p#EB;*m7SN@o!7J1V-5A@K706tGHT&+X9)D5mH1h>3< zoZ)-PRy9n`O6ft$iP&_yGw@g64*#weME2*SjX*J((Q<^`t?v8}`UeIEwgEgYlzsmL zAY}I62-y)xO4~-b{b42Uyzds=CV(&25X5=ZA7E_OpK_y_qVE4k!XrOkC}3cZz~zDr zS0b2)&+EK>X8Mh+Z^o!HpvwULnF7!x!jAJ#+>J`;Ado%LY1A`qwS5h8Bs1%OH&zny z>4ARj$B$-+@7ceLwtr8t%A&{4%bceomS%tEbk+G<3ob5FdYSE{wwJzff*CUVsACo8lD>7`UsX19r*#>A5f~kO zR*lb2pieS(0_3vHf%#hwci}1&6!pKRN|nrSbBVkkIofyWWO-%fvN>R+rLgSte_((` zE~tsA`mJL$QU8K&|4>rf=fxHCR-=8vxI*FNo+3afpn!h^O8<*7Emq?BcZ7t?FR^F# zOw{p0O&sy*wv`p&x}~wtaZfJ)lP3zh-reA7+aCxu{)Ytz{Fx{IBOW+nhVuhAj>ZJm zLvW>lds;4N(hapCvz|B%h6O|MnVdcVLqa9wwoXDmR8yKpb2Qsh^59=cH>j&;FaP*s zwEu;3mo(eLihLIan;o7PZ_Ar1(;`gT{4bXMWRA!wH~T>BP^L{)!m~M1=hO-+>(73; z(P@2(qt=!O-kkASeGGR`c%i z$=>}?Kjr5x=C+CpYL7JxVukMiiMOkkd-Ib;voTfw3k6?ARo%yCF@&d597S+;9O}_4 zn5x~-(2RY2Um@a7yL4#quW`u&R!OJ|wMxmzQ7wj|xA%^H4Px{Tr%0=)uNOAzm?T-G zIhcM4#mWCi|45*>+*@Aw>A`1}fRq1KZ2W6Z=HoDE{Of^E`CV+TZ-cF4QOiQBI)ALH zsYYT&8`8wx{SiMscv9`Nd>cek`bYR{2Cq%=hvxk$n={^PhtY3NclJH%ctD|_%;)K< zJyrBaJ;1^`?8&%g7FX2I#1tr_aE{=w|BEgDXVobcx$>P9eO!=yYR1R5kvZJ?)@&x% z5ievMa4NaU9hr>@F6S_(K6uz53Rokd%2k42!h@LK-Xuc=xz9$L*-~e-({uK}^1Rs0 
zTkQE%Z_M@Zv(Wl8{T9j`aFO|3G1L{@q)&fWT=V;a8D0@9i*GZ6>?#>=T?W#s7P*do zI1kHzK2y*Jk^t?}__Vjl)1p(-HudZG<9ohy>O**M-R2M>ZL_wQY5{S!m?q&y6p|$y z!Jl$iII48PXSaff$d)ZA7LAvCqISst8)`uc)Fz;Fj`~2sJ zk?!N zx;->yk5JiU)(Rs(S(#Q^O@{iSN_R><%(pjdS^ohyq@?S(P0jh5DVWu?!eNE?3go)S z(7y#C_iusl=Z~(>tgH)e)BA%eo5ZKQw?{-P9>^a(U>moWmu0RVWRp+juL@Gts1WNxQ%b75i{F@M% z^FzzwykUiLgllNt|M&{*!-M5ELx82IbCqy6nxpoGwp-8$Tf=p^zz#W}R*o|FlgBVf9Q;|?{2 z$0O$CAwBhxD3{1&|8G&7eY|8x)t{9i3%2a-Lo;lTP!#CsY*bQMv7}vZ zI1`#l?7+nkQ_VBrzEWKCMlqS+gY0(0&Oye^bO7An1*X;CHBhP?k^Y#;XHyha#%n3Trnl|WkC3neWl2b;e>O&_1#)06#cZO{E7 z@F??uV(X}$f->W4$|Hw6+KujIOQkiFKC+u(3?#&-dwsJrcFlqudx*kLV&A>GpRF6m zpZy20EzJl!U721>IyYL>B0z{0PgXJqSa*?$O4&CeMHZ$W@QI4rWsw^;_+<QGL9KIcRlWQYWpsRiiya{BQP$UwiRc}jeDn~=@0$w^ zrn~!QFok4(_Td)?SafX2nS#+Wldd;-IV?fQ&G!2YcUmMmNgQN|JPwD^KERWozcU$P&NUi?RW1(*t&oJXI#1@v0z6su{1 zzLIbx6ZdiH23GGH)_y$1fve1NH26XOOLL=}g01uV7{zeGl*F^cc>P)UaJ8b&`?mR2 zpOfvhAZEDUbZx4~`9&=Qj}Oh79(xg|dl3&KyQ!VG@95f{XVi)im~zi*W)gcmxPQxV zbRXX;Bd#$2X1mwYAI;r)X0xm|&&rUOqd$>{`CYK($Cjh5UK}6jRmz|!>Jwj#hAl4} zxLiUXY=M^Bc?O@o7v6na15jIy5q^zAet!j6EuEiT^S+t4;w#6wMNM+yO|iWiONTOy zbCh1)=*77^F*gL599!~}>|3A@2X440GN-!Vajw*3ShR3sKAf<68qAH6P61-u`h&vH0@3mf)~N>d7m?mMK8HPgSJh9 zuH6B)yhj)lt68UUZO%9reXr&Yhr}548fD$Ix-Bd?IFciFgT$FIaxwl|%ki4s=WtH% zT%}uyIh{O9@2rZ@AK;GP??BjdURg57D+n(85C7;ou!d(*a~fE`8*s-C%KAwLk$J%K zh)=O-1g?0I0jby3qc~)W!6tK=$o!~4-zKeWY>sB4$uNg4OPp7cac+%`xGy86`c7Tn znR6m(Jv7%eG}PeZO=@bFcMF6e0qgP!lL>i$T_hkdk{YK-^V*VgZyIp36p|gMSXX4= z=`9Yys{1>VEJ$kxnbm0sK|)4yEJp%APFUjU@{$sEV1vLkdwincfkRor-M-?tp(?Jm z)9UQC(=nWE(>E;6JPA(jmFH|4wpVJ3QFE&4y)o%wg&R`xWC-+TR+D`%vvq{c6)m=7 zH{_L9cX@-Bdt4YHbTHRJZAj9BaOR+iHG|cv{bfOKvOC{*xj?IeObn^9AsVgIgwyKP z{S=J2TR<$97@0O+H-Jew0Vmflh#uA*8j6?7Izr#|DmB#R5hW6JxAQe7J!b~nm{fHe z>m%k}%Kw?ucfa>LzrTU>OtaxPg}PYBYAr+Ws^%)M44AGGtJ>uFf+l?V{V2ai1AjZ{ zSDQH9hYse`6PRY|=)ZfGQ>)Ea-i@EFW>mY>y1+HR2gY}@Wj_cJPVNW)I6JVjHvV}U zr(Nza+ZM3d(ZATv&7S*s(rpcOO^NWw3BPGi%;twr7~!GiRB_|r0=W7?AteH`?<|24 zV5gSG4cOUs<)=o4V#+{feXR_)d@XBQeUOB%z~pv?yRA>4(HR@p-1U>7S96KQ1_$d* 
zsI5AJlm>*<-8QD^N=#g*y?0{|7CNQd=Q52G!+81Bj?@$o8xEx0@tUJxqw_e{P!#wE zwJ$;l6=r60VL{U}SJvr%-$17RkM>~7+3ug>nJMC<5(4qdOf?vp2=SVHL8#=iHwS>S z>4rLu8W;f;+|_|A{7zQ~+n}7jLXzGU>J01b8{m+~a2{NtyiLb8e&f3w!4UUOcq7nv z%TH{=0Hg%MeHXuTdyBZO>)V5>&1kyWyR|Lga?|vh>A*LUL+ii39t2=AqPrEpsxMjO z#>;#}Z8$LJ3Z7WQRFJLGu)G(RF0pdt-qW1wi)Nhk$TCeh@-|I;trfhMY*sU=a)>ZJ zZ&~=g^6u87W*QUA7^pW7AIg3Ic;;hmUZ9q~zWM|8)fKFEO@k7UqFtb)fscGoyng6n zkG8*!wiFK*kp4VaDJ3Qs*)#fR)OvQqEsScE^jX-0kym{_8THam7roc{YcLuSJ{jC@ zLxFD>HMu9dgBLaBummh@It!W}p`jWXh}!SyGA0fQ8|}eW}wzqKk8++v84bnou7=PRNtrs)*2BF95H#o zL6hHnE<0c@u|ry5L`Q$vu8Fr>zrGHC0;-mCNMF=N8dMFuQ$kV1>7irEs#C_mX50`y z+WZ_`>YW*{pP6iBrVGP9t+Bb#aB*Jg4AKrvSxms8jB<;Lx>u>jp^O?}03Q+l$ZuVz zynQ2fm1LElAYYvCMVr4p+_edjbjKNRjxb6|<#!$D8C^n}Hrp=0ULrA>fu-{NdumWfh@YF%G+3-1*g%$@-=7B z1$@;wWTU?t2sW7;A+NuB4Qz6laJ$rUxG>mdnFT>Yc7iXh^$xmWB||V6Z%cIDDDr&` zkoY^lWV(^li+I(Zti(2M_1rJx^h*Ug#do0+rJPV|&XM>XRfu}2Eu}_=%Ub(7leQ&H zt(XV&xpEd9_s72ZxCYP{@8mkNZsw_}clC@!qbr(6HypZDrdcCF@>il&Y_b?CrYG=d z^VSvY?fU7v6`kg;#;ddV7I+BpgH228+7%yM)l_+ddq+P~_VB zL>U3vqMX{b_Ag8zV|X|t!O1V$36wU2woHgc$F3+c~{*UpOrI_#37M-E|k>0 zQZx=+tnJcyG}3Q_H9jnJU4dmigTEg6g0Lch?M}MUG6%eo7q}_vBdlHiQqJU9$?ts* z;lPFT+IaB>HHdR^-X-{^t`>9c$;S1*Z`Ow!u20n<=w(cX+ogh8Xqst3^qV(lm4%|1 z3S_P-MlGLt$U-IXbxG*)=+ONBQX2X72igOV?BY0{qg<@n8AJ30IYM3!5+xez&Ct5CDmjg%eK+%_ni5Uy6zHe~rk& zbPuYpzLa)&!p<|FqxGv zF=>=dDYijsEmMi;73>4XKzHe-^qeX6+>9y2C#3%EH_h)JO}3Nu&m!E_O$s&i)fLpT zY*c__N|^>vVOXf4)wl8e8{3tcRzI9weeO8Fls-p>5>HdqZB#}M6w3=YC|vvC(YGoU z;c9aMskl*JvoVg&SVPOa8>v~l`=$7xm{^RSMb8#DD9aGwH^ygU!Rb&otsMIx z6duq_*u61R&*T|6tD;M`vzTnhuaudcb&={F=q*ltpF&kxH7N<-D-e`h+cgW;sfZU> zPUOD#z1l`^j|B^iJfBTn%JnjH*O;Z%j8P}qVJiETHUjy)2y>Sc^sWU&T>w~(o`hmD zZ0g5D5LtRd!*`Vc)ZP}2|JECI;FZLp1Z~e3>K4CfaijlUXlNhJ%X)`gw-lx@bLdQ` zl;UmBpEb&!3{;^v8U1`$ImLM>@S_sv;d7U_i&yalbg4J!g74bWe2KAQU3!)nR#}?p z00V))EJYp*L0!!Az6%Y-N`e93yXq^1Ryr2i{xB9@zQ;j!2@mh(UG2ip77{K@svh)< 
zjkXSvh$Tg-jSn}RMs1BxpYY!tG+Bs2)4hf}pY6=>Xy(e|XZX-{_4~&ZZU|}dZr=5aNXb!p^v$r)g9&S+_H%$ut)dVru#QQ}@(_K+2XNq~u%a0n$6|cr+BIy1=|ck9~k0;I)?J_XuLF{-mb{clS2yz6)oMRXKHij4&*g$ zE==-PkBO(kF!={HUsPy~m-^vhRv)b=Y8_^aSV3xsi5`9?7oF%Pe6CkzsiD0C*f{>x ztRyXZo(T{~-LsM81&!e|D}08bQ5v*&E}s%S2`yOCES}3WYkqp4BT8?dg8Fu;I)urL zl`u*&P{c41yoYymm|KWTdeY{dMwV@Z+M2Vmrg5|y#O|3{8 zR(Ors@?E~F8JAejU>;u5_YQ8&O!#0GK9`BMigPxS5>wh1D z^L4Qi)rH%JevA_~y+a|P^O}rcx^@(l6|WO6K%b%dKlwcV4h&gs-WF1(oOvm>nFTE{ zea$oKS-F{60-&rzZc9(me?}UvX^|2u&ZYL_0L4>r0GI*XrL{swqI6%U`f}qVU7r>zm9o%0{=htI{Wvj)smXp;!aI1!LMm_saWc1)K}}&r z<*&$Nl#_d$2@gO8p7oq){A{_Y2+&Y}eat+q*mpctGuSi6ZQ5JF>)B9TbiP_OEksI4 zaVT92LQpOS-&x!qbgHpv{Mku6E2P3sl&6$*RcFJfFG`76hU&c!-l)yObZjYUA(joX z$#qk53F#RwLfn5!WAj#X^edo}M9fMl7q)AF_lJLnF&PMgi~kbzG!gL?i#O0Ffa z02;^CggFy=eqIwEZKnU=5}v%Yx6gxYejv^&MCbxsSFHvf3$dLSQMSHIlG$Q8y@PqG(xfQacROHj^nZg zRo|2-cUg30veK*}u{tQsWa!$mdNl7c;Oa$@-}ENFpq37U=bEOZUpR;n(hL=)IU_`P z-5E>7bIve_BmkR2t$L}?^2c+wltT~xo|^( zl8Q4dZBLCFKFQRs%T7FR%fns@oAfps zP;ny5!t$abv67Fn8R6oS_Q~4VfK!R;Je*d;U46hG@q;H}Wz8K@`the*Z0!HMLR&i@*}(pnhgF zIGvN#w{@TMj91;}{pbWPN*L7j#Y{t6@aB0t8-D}rz-iBReN#zpzO1HmCb3u?ps`8- z_rFn4XIN5D3gtdmnD?Q4sR$OLuhsc14$;bcAd_WZY3CO6swYx2S1SfcLr71qZM=oA z|1tHNYzylr?XIkor_8ZmNQDm!O%Zku@R%f@89)R{(r#9|e8roWXJ&&K$kGg`T8i@o(Y#f`Uu4C z@Em#7qLG-(A+ommF{}oBp`oT?xN^Z<2OHpB-+GwG@9%i2f$VgiSv22Qk13`pIbu0K!|4uJEGQ*1Qt8vK%V?`xG{N8Y2q{6)GmymCcK0G3DMH=M-)l#^Rl z1#x;+p@a@tMvg^%iAQRDXE#}wD=*d59y;w{>ta!lQf3A z-$YNnet4H~yHH>t*Es6UScv2pYQ)VP%D$3vOP;*=`HXP@Pld&6Zu1VP9FBxtztVTl zxvBq7yv0bMeQ6?WLn8@ba!aAr?mG-%twNXaT<4=|RV$ z-TeOhUCrKym}!ESm+N8~h!0}*szO59BGyZPDX^T7M_ZBjGAE6Kl9-Y*R08yZ_2r;~ zgp*xS039&;PO?Pr7(|;ngA+53pAbXtS*Sfr0@6I+>Rn`>Us3Ni%Bu9d=k7Yr_ER#u zLE|HmxMN`%nzD@ds25o&!j zw+7at2TjGpRJJ1?r0NnAyqb#&0#(~gGqt|o~g(LFa6zR0x<2HPp8d>Ou&*JAnB!qV@7>ZFEw*u#gU z`c8iDQ=hJMP0S8Sa!OPk%S4;=G|gylJX#&iyWn=NSiz~^>gk}*kMNW;-B&x#@9QGz zyK0CMF>9UDle~zxFaeG!a3kpjv-h>KmADg7lGl=ylV(hOY6@JbhnRg$Fd8d2#oa5< z(xGkpN@qV-3MM`u$1J#vXhz`l)i9=T-w@o&Xwad{cB6Pzes*v=`R?MaL_KJM 
zisB6#@2k1f4qVE0*Bq`KoZ)(Zs94;srf@C|^ubX9O0VK6eum; z_yOEL{-)aWnUW2BoxzRCi9(lwDsj`S6ug_CSS%q1 zvIG55eH-+Z1vdGScD{$>3de+Ek$Pe6@xn`*s3qVW5FDsYAU?gtAu&QhDQkRrH@U?B zP;ozJ6aE?#CP0h`9wpDr(!h-hHj(f>Ty6KA4WI^uG8~idt<4;jxKm6fcHVfcWz3`& z^khhc##aT#KnqYUkzedE43qI~ozfR$U2$xg#^8-redL|nW2F2n2^gU{Q_Oga$$B3t zay+l6a=lc)-FH7P4P4ARf>>q32(slW9;xtCc~TuxQBQtnQq!*_(&7140I<0=!G+vl zWrgpa2q5GSK-Kq`pM_-m!qxilhS5!M>x{PqxzT)Rdptgct|T2?O-eMSy;BfW1gFQu zcrGTIxAlu3PAQZ^9|0#b8zX`^LzX#vje&q;UbEwCF%ca#F{#6q@;3?H$2pqwd`xW@ z0@g`l{LtsX-@9LF*1joz#cHJ`!@C_}>;g`UU+rIt30N5Je0R_%P8Tepa;{1VZ64)5 zpbhHDV5OS4atQ`2i)Y@N!w&c)qhE-tbbXc1ic^mV`j@Bj!yc>${t6@L1yQ-q;3V!V zV((b)hmYURk8i5JvQEvy{_6X5puB>Na#MRMJm?%G_G79OAii?pJRy z*zrYK?mN6Y`OC67twf!F$@n@>e{{=i)S-!(O>PVZm8@(q%NYxN2;uFSsz!#=x18Wg zKey)f{G5RGV6)$1hF6MMj)$}Kq5#nI%&VE=iVfzBg|-s(NVz%4uFlvIv-p`BS5cda^}FRCM>3LX#LA_r9pR*_?4#=Hit$^FniAft)7NJR02splxw%^)EKn4bod3Qx4PTJi8aH)wMFXs!9pvbDOo8G z;;-nH-&^6A8dBnnx1djuroDsaQ?q#sbUXf@D>jl+xPjAyrg(2$5_Q^RCnm3;1ANzK z;o&cNfPTU7jum9+Ztvka7547pp$};e=~@Rjs1GlKHF%YfzyXBc$xp{Qb1L0n9X>fe zgnxnxCRBe=6@|LM>b|ouDhe5m$K{iuf|T|QRx)1>RmWBq!Csf=%vElo%S;7S3W`vM zS9ximJu$4aJ1{s13`aptn*0yky8ADI!_{v1c0LnEe^}_8%{UER4nh2)$pm!yO#1e- z6ApP;_|E0fx{&*}Dev>oWH_;h?IlwwGG=k>j$sWhHoa?p@!qYgRcs?90iV zr)uPRz#G-Z6<^`t=GMf6bPskCrPZh$vWw_&KJ6$fOxNBo85v{93LU8$ho@+Fg7guj*7-G-n(K zCQl6}9{})?57>%_FWLqh4yAh8^pa9Jq72UZ$8CVU#jUOggpnzRfIHFZ-cdm88F#^{ zm~_Ggo@b>+DM9mhx1j1p)wx$FpcKgYzd_M6>;6~X$Z$7!)K^pr($4^0^gGwNDN@d`2snay38ng&zm__&Hjz_&_4rHTp zC(jV1du5LWs?h2SP}_JOj9b|vp|lb_Zr|--8&98s)r+{zscTn?fy zZQ9ZO2rLBlA;RPF3W$YC{2hAIq6m=#7X5145O9&7(6O-fz4Xl|$v&MO4?wD@0kwjx zs>z-Cv^H=n=5BRAHGH!v^VMJhqL4CX0!nAol`>WRfxEy+f`*Uc-}*5tcR(+jR5MmaiZA2aKL@3L z9jP({h9D&3K|8Dd;*7x|ZgdTc@xN>H*86ZLpchIMm!0jx5jFr9OLqB?J~LX98CcG2%Oh<3gCBwD<^qw+R~gaB9r<@aC>qQM&IG6%yt zJRjFxtdGVt&ER0$-8SSu2%;5F#kqbuM8@*$JQQhUT`2)ne`G!V+v zH!y*2aN8hT?1zu%MJO;n1oxC~Duo`b7@pQQI|~E4K!$HcX=|xTI3vp9)yY{z1$`G7 zp?h9JRJ-%hTe92G#JRViakQsZCw{#P9H#;IOB2?hHIUyi;hNyPya%G97Bq!aI5MhV 
zB{RAVp^>2QEGa{kq<&hCGbl-H!VF%tl+;fAT5ltX)r&CG2X=~u^jy4Z}W7HsO#qh(f*+;SW7}v8+U#A@jTe{TFNyK!ev5jY5b9G-C~uSyDSYH4}xt zsU}%B1fxyHoLcjU?Qh;;UwI!GJ~tyw^x zvHE2ak-E)vopN13TwuPQM6d@`TlJ?XpLRasgP ztnY{)#gF9=r0)ahb<&Sil1jkMK~>QTGe3@s5yFBqbsA-?PQX{7zj}V|PA%AdQn34! zj-Z6fmT#5FInC-u&-<^xJzE2Cp$0>s&a7krSdz5X0{9g^YRAdW{}f*;DfE1H1Ttgwl+h=<8?p`+#+pZuIV8*{Z?Eb7CI@Wf6gyKn;s;H;%-y( z^fy6o{BJ72WjL;D&j~hRlW%g{4!;&zav6Tj(F`2aq1C_boR%xjE$%VuA{EZ`C% zjqCmqoUbY`$gkNC1oV%CS0VHm?TYDS2;b<_Xz^`KVQFBq8Qk3r7N1E!?gONnHrXn^ zwPj$xY%=AosYt!$jFtkYvkz4GRJ+cp%P9^fStq&T{MuMYcgVg~m()gx`_7p8N{g%P zFV47-i{z8=_&%KfcWi&|Tg`WhfA z-~pk9A4<5!AXAm$sxD%aw^P(9qBa@LGwM{h%9$h;trtnEDceWhW0W?566cU!dp&|l zEr1@WQhDt{1O>J1y*DfCI@h|$4_DfpiEoQTGGGZX1so*Qlj3OK09AKpf1VE+U!jeq z%1*@amBG+hz*FmaWCu{HK`p5Izz1>D6E`v`@I}o;`V42l-FMZH$EyphX{mgmL0&e* z%HVCGY9>(pS=$s;v7s=bW$r8(J<0QMB3RFFLUJh~xU|o^;LDfMy`MaoNx0(Ew$8pr0#%Ae3N)){jN^;o#j9dMdHjv zYorFr_1y7AliaFHwr^AgQe>hB!U+^)8;sDp&eW72&ZbJ7YYTqiFLdxc55i!v=vK~$ zA_^YmziKnzBG9R6TXZshNNi~-ndr1@CR7Tg#AN*Z6m4ehYwcI4TVg%iVLoZV#JdV{ zLK>Y#@5v9zHWO{tz1kl{FzES$BAOAG-9<6Vt}-L*4DoYTOi-jeJf&<_Le%a@C0dK8 zDdwzPn4YCwBltCax;3hItNH>=LV&uGr?N#px&B+9@BF`{7UmsJSmZ-PN2W;klfOR) zS9}x3J$pO+K#Z?y(kXJfM{v-NdrtU_n%;>0bhKF2!U=`pb=6bGio}@4u*XFnF}_xO zjf$+&3URmFF{%xHrg^22Bp18IqaD_M&AeaEsNu_yl=Ydr#nTDCpMq2qUJU5P7ssEa zeCHLncK5+kU2RjS^TxI1zepP;z1i^<5s{*?eS%5k`IhJhxuI}1irf|`Q?fU@ zL^_dnSEEbEdjFC$5s^iHBz&s)Mcu!B12Vq}+q`J14GAyrOg19%*=JQ~3%u~oqN_Gu z#Pbtx2FHEbz*$!tyc^opLz%ARyvGt}wpi7Rv*cy{CW~gdO3=e7mPChns|YITp7k^b z);N~D_S>TkeiXf1+Ajxq-KD{_FOk}ev`p*o$gtfUxDxC-LwAX&U@dO`Mb0r>UkvAm zm<(=>Lx!DJNgSN}q?@RCyOXLFcaFDn-l)=4*V8u@=?l3J#Zz&qo-rs6fpwL|i~>z@ zFz<~d4Ml(U?qN!6@+$waGoO4R{QZb=w9K57?;grf(g=waT5l8iq2TeU3VnB*hEADc ziAO1T$Qq9^;NEnpt@PVed3;o;+(w)(>!vT=Y)71L^b2{;)Uw^T0w!5AMt3U6u3rhg zC~(~#iaQ(=Ime}}1Vzf)%g}o9Z?}=2R%^dsGeCEF)RO*`_)Kr@_UKp)&dol z#(H1!&aKe}Os{rsF0Q`E5WzhSjjV_0Zf2Hd#X}+rZcafc?Stx$d)>NENUAa>($1K15dWw%jejVbY+?m z9aWW(gsgZ78%uF#qOsueTzo`C2+uv|yT@;4CV4LL%o=^13HiZ375(}1_{2^X1^!S( 
zUAli^2fsTDjxDR>!u1aJt~H#vd)Pe@%-&vs~)g*+o+g6~im1B!&$J zf%${Y%pUiIgg>SVLdA^}$tDA6ExxYZ4)W&!gj$&jnKL!xmX_wj_UE;4coRBcVRIqW zZ}Sm)N5jdvzAgsN60~tvhR7pKXJ?@U@w=GtEChw{g2kyX!U8p7mW!oU-BqtnW3Rf8Qy1i=)#0o?&WvP%ZoXzZcDVB(JB@R zUE3}g?RqB0dT>VyYSfxD4BmL*Ce2S&u9;sO^!_Yb!_vB!m}RjF#bG}LXiN4Sw#1^N zP&%waGd%#@vop9AV)PrhN3AzN{v28qR@fe@oakQ(!3~QxkgRJ;$+~q}O4DkgAb9`k z9-sbHaP4F=ac}f9$y_kX>+cLVIyZ(?vx|hB!${d@3A?PdmMU^y*Pp?!>wJfM z;gW!Pzv|n1UiqWNF4HyCdsPF>u_z&Ghl_-j&sU%1L3yg!HT{XFW`uM(4m9DVKMSmq^utP07t43sT|ON0 zzx?-HH8h7Z)urx_qaIZu{jlY`*S)EfHv9pz=_qSIM;Q^BK${~U`r2r4z;jV{SD73K zXHA&%F(uz(j?P{Iz58mWR`WL&Gwt1c=bzUeUqseGa7NzcR=uFSK!%fo!WgySOgqJQ zflZD5nkTRay`V||vk+k`p8jTJid4k;XQS0stC0~d;aF545bg|8A7pdL$)ZY^G30!@ z(l^olg{$8JgN$}{4Sd)6O%Cnt$B%)Tfvq`ABX`-uFf;5g$7B1qf#guO#>2G=HZVge(6s*ff^3)26vf*@{|#A< z%f}AMen25!ZCjgMA~UhTx|}(}4UF0_5zJV-%x9aW_30q^0TyB6zREq^^SP|Hk1v+6 zKBv@aWmi9J69FYgZb(j%Up<<_KwqSzb5nw;p-+5Q<+frBImgpiiK*i#IaP$J_)~&- zPxpk)&}%gfZX3CJuJ=2S(bHMYCva|_9D0R$+vMZMCh31dcZs*(u0pr}*N+3Gw|D5W ze*ftAmD9?Xb)m6-SP`i?eOAG3($XR=y$7Eh6#q>)R5h2TiQ}>X^>_mD0h`+f`J8#b zb&YtT2+_dMt_-iT`xOg=ICPQPwWV+e74wEQQg^l%+4bR~QS(DgL7Ij;&e8G&C9eIv zZ8Sv&$P>ez6Vg9!GgzgXHM&!6dFD1`C}lpOkVNo=ZIRHAWO1Q~ty3AlHQss1Z^hEy{O!~6IB#@Dt{ggnzFF8_*sot?ia9R-n^h{tOlGsPiw%7Z-R@D5TjC_^_wh?OxdSGWiELl-B(<79fG z_)psn!n?$Ki5uVUMe*8x@xGYvm-w4}Xm6kUH%2uJ%>wMzWvI9yb<~TF+)+ldsOQ zd>j3+LV~qL((_6~zxW_M%i9;OTUlC~gT2Uod{+IJF=K3V`r1Zt44}zk1t|%MiF@Ba z+eztj=QmU+QxpWCVC(rKk9nUoRDs*}$>QBG&!kY@1t5OVua2X{+_@d^=8XbS<&OF4 z_!qId=e2IQ`F;uf;ms-R8Ci@RIKs-0klFUA^*il9-2*?8WB%2}Bjf52)eo7vUNTO$ z8%{OUmG#mU7Smxy@MZHY+nyAi=Sv$auBueLCvJZOLZU@J9++`-LomJU87iPFu22Tq zbA`%IDcUP7xwP9D#0~Dcv`mK95`_^z)j}{Kg$p)fM^VGNJh^^dODyqg%Yffj^0vAZ zPhRWm2u&ZWz=}Vr!nQl+-g`l3hiC2#z$7^u_N7Okc>jZYOj!f;iJ9^#CWkE&3IF+N zLCPUP{)+;0J;VYX!c)Oh*N#3!v@{)wi1CBzr00C7w#13n?}xpP;aEPkR64I`-tgYZ z`lRK1XDI+p5bf!{g116^$ioqp)WFjj(n9?k;;3}s(q>~Y1{vi~cSm?#YNw9dGXuUc zqmt3?5(3DDnc7KnichyFUcYbN@T!(B8+-v4)Km)&G~7|)_Ac<;C~MK>$)gR5HXf&zZ=-e2QkdEx 
z-}J52b?vz?xP^Gjumx&xD=Ng}e;=IFhg|HBVz@Svf<2|)Oj*kqVssUn$>5M(&t-(I|)vPS5)4Cd427eV_^&{0DP1;ivBnEXP5@#tdO0Y zi4Q)}pJ67DsE7RduP#58%K3$Nv-`_uCvSlhK1&M!ZR11=o|VZ?UqS@YT@`_Xl!&dl}-W-}Yjgrcq!xu~2XSeVyxQz(~?+H1|?3 zPID$8Jnb&QE~W)mCCkvyplgy(D1-nUiEx*2a5_Go@1bbOCCrIfbpk4-@^2JM5R1CIX+@%+gUWQT;TE_<3CNx@@g<> zR*BU?MYsK{v#8%(@HI+1JV*uIVs7)NM-x=@?IRRSw6b zoIL99_Kc;b8m>XgFmto{PTOmJr`1QKr@kT1wH~Tg>K2W(wh-Rs1{uP;jPlX8IfirJ zEhq~niE5_{9)7Ze$467ZihfULGS@SPty2i+c@@!IP?72`%R z=|x$@r@}E&`o{I%Z6IxxMkBd21^-LkSdwK^&|4suLh2>&W(l!0VKo<~3=1BEbW@5} z;mGgH&AB2M}s_v!SRX-x_n~Dqa`!jl;7DeycPazNUfO0cesH zb8+Q3I9F@W_xuOeDe-3~klmR#(6_J;&7#JB#QIpuQz9fR5Nq2hP_J*5>_)$TZfCxj zH61nJ7i_Xz8>Zj=&#oYj^`qc!d`Q}tsd~jo+lizK-RWrV3fuZ0V?94qe^eLVSG!#=n9=~e6kQ5_QMOPa zY1rWBdUA8CJk7CZU(9ce84iv_-Yd+2g}Pg=tV1ytLHh5nY8hWkyvjZE-#t!i=-h8- zz&~oYyPBl7t*?r;u%p`o(ns{#)3a_7n#FEI=c4#y=_wTfMe1%QCion93o?2XZc@KU zSk1lKk6j!19EK<=AbVu?X7M2QhD_PAlu(CVC+&2JUI=o0NcjVw;WPJh?saAo#cpkDj$u5<4`O8sD|x8Zl(LwcM^ndN-Ocn`zRa`RMWl`wyS3fUDwzU z?#S8JiO;tjn9f-p&!?77+b(5p{U_#lW4SijWKN-wDnnFkHmZf7FOjbpu*IuBJlK6q zQ$NIZd2fCzm35EY&9OBH23+V`IRJ)vI&&WsT6_DnXoM>V8GVZVadCg8JXqxaJu zqfoe>=)OW6$Y40Psan=gQFdUq9(@6r*uPPwRe$27ABi?#VX{KZKL1?|K{XXyOp*cq zQz0^K$KGn@Z0sihqzyUTFW)l1qT$+}Ao|0R+y|ZEIhW|5FFN?`Bhn+_QR=b~)Pg_z zKSt?`KQpCrY4g_engxmrozhq{@*GLMWJkjk+3)xx2%xOKd9H^C<`nvd?^WxhpjC7R zSMpKrzj>$Yk$DN&0~0$DQ~dh{MOZ&OTPxj)+Cu(|p=N)L%=4#lwu_~XD1C`Uk$?#} zL(Y<#z_rI={NcRG>zr)$l1n4cQrM2>d}+9Xhxv02@i3(Z*GBpus_H)@-Uwa&PS{n= zy_7$e5^w$6H$7h+AA{1kKLAu;xs-ZUA=FbGIW#k!i=Cm>%9r?50<#S9pwc)0e0tdO zdS}O12pOL%C>wyG!x(+jck~kIFKlq& zmZqy7KSZL8Y$l(19kN%N97Xx^DOV}oe&0}j6zWNsa%Vz&o#$pYcJN2WA#Bx=J5tTG zH8ka}XpOb{Owyx+0Cr1r_06B@8huK-Aw_iaXD5*}7(u3+&XBY!`f~ygQ!Fp_M)%80 zy?xh%vQ-P|vV7)F;Iy57m*=uwJX|X@Y+L&OGIslKSES0i(_X>QAfoznq^-1<7nGiP zeW%~9{wnq;4hJe0n9uhjqs~v5?ub$d->`-Nr0y!Mx^<&L@>>A9$UCbtT+X`HpicY; zBa3?!MPPs43n7Nx=|QTp_eC2cG>ynKb+uo>+P1E@PaR4l`96zJ)rD<@)kv1OIn{Qc8t zmo3Wlb0Tcf>wKTC z*ZAK@Nw=-Oa*OTOE7E1%e!3~1`t#f+1-ur%Ch-57wk4<`{8_HHI)i)urKZF^F4nuB 
z#J0;9&t6U^ZBT`OvYMRs?llr*+5By+NwhGUhmd5SCO%9xQ??!=-y%+=COSZ`U+^gT z?kh80&*RXAJ)~d>Ybf`h!&LBGTPB<&>%A$^<#nmI@p@!GLpFBUeT`hLrqqbJ^am%Z za(~5%8+!C?O#2}P z=26bVoVxKQi5~L8Q&kjQ_xqMB$ZeXTWtzBaFIqCy66T4SwHfG=nRy=B_WkVu#a^qQ z|D}-w3q@0wfh2{G@o2B)of#>R$S2-zF z7~0ACao5IJE(?Bf!XT;GFhvirlmBJ(H9M!8|AAKScOudA`&&xBPZ(&gq83b&vyK8ql1IPKQ}l=iAH$*L z#BiZXiaSO0J8VRhLW z`?zS5sLa(IoP-_z(Z#WD8ge5)b`Y@`{Cv`3ETOvDyEytKG_VkzUd6OCxe3)=9`i

tGw?c8_MI~@_?@=QAH7)~=HIjoaE$wn693nd@Aua%qr&&6 zJ`6sTJevP-UK`^Dk1WF@h&kIQV55IBm5SuNuKero2nd$b03lMPb86nEM27fadFqxr za+3TXJK%k)_D0y~;HAB-9yQ0?e*afkvte*pZKbmjI&ZS<8l2pIj^Wmyti%s1zy|98 z#&f71s@CdM4oQ#;fG%_{KdvfBFf|z4U9O?NG`Yd1t9x9#T?&}L|23#i6I;U12jh8#m&&?e^zt_*Td9ml<=Ro#j zf!Cgnval|xQVwZ*Gi*2LCANff0a?^5FD&qeq_o> z--B2q_{5Q2!{wvp;CspPV#WTp9Zzyh7795=;W_)kd_rV%ET|SVoZJX9k@h;*I|H<0 zYpRUXIKtEIw_r(Upc=E^Y9r0O7~{wM4wNYUu2c z+HWNCsiOmq&8;VFf8PIRD*A9fDk^f|aKC4`)`t9ArW+CxNkNelqQV202w4RrWKNBB z;T=*Aq%i2Ysh|msonT=f5>xRHr`6R7Oc-nC0y!Ux9%Z1;6;(5{@H{ZKxE-n*u0G6v zd&ut6e>XdF(V!mUyr#6Pp#=}m$g4zlU?;b zt7l|BRJ4E*i7!1<+%&+CJ#I}Y_dS`H}sWAO3w@238UpN6LcpmSs zm-K!aF>I~?$g^h>k`qI28@@V&ji1KX82e*gtfYTpzwl2Q6}dteJ5PC9u{P9T@;8#H znGKo4l(1*XQC8y?qEdpEq|AxLWEwuD{E{KwW8Fp0uzo9(Ao=5FLl}jJD97o(f6^HU z2s9O~Tkm_SWkaZ@c!OX!*er`6ry~85aNz@GBJ8P{I^Tty?g}rB=8>BMpq=x1fS1@; zFMFiJ0X=%0aK6mVKgD%j$6!1-kUS65pH)Lyl*vCdiBQ+9DmY3B|1X|0-rJiNlI<== zXIJfRwg`n&?$u~(T6s9H?*95Ts;_E$0`z#B$MNVg=UZFa+S-(ZkG>*V7%aB>*wJ+Ac{X3$t;@|?UvPCbwO>gT{B4Odv%H#L zpMHX+p5tQuZ&!e%`M4YWsi~`D^WhVYvV5f+a#@-c+{{BN zuFr4ZJebe;opLhz{fyTiSI~6OP{VB2M}0USUF$CNXbmH331~4$6_={3W0~%TBcd~D=SGMZ1tI8$#giyZ4KWUnRJNm&>H2=*U?>sUX z3(JYNxP6Jmq)+J3@rKYlOeSsHKf%g(Kr5h*?{igjLeOq$#(g^UH^;gl8HzmY6c1xv z_P>o0S39XJNlz8ArwSxc|1Vv+nVz{sxaeO~xn)_PR*|>Cp?VRyxnt!z;FS%*V()`8 zLfYeHg%Bg-9&Exj#XmHC0SCuuoG#0CzZJKZdaq?|C<@Z;PXm8nNyWL-oY-5k4`y0? 
z@ZSB~GyAU_S>w5w4NN^Ry;{Vo!NIX7)3b~{JhAU1Do>_Y;fX4(YH4<|e_<kI z0D{pd-6@y`}nm*3WF&OR8; z{7Jm!KA9Ef3;*rn59+j zC(VER2t_PcoK}+w95KZ7_%(-LM9Yu_|9b>_y~O-G&4lFgUAMmUhXr>Ji0a8<1DH~P z`4c1g_GYXS)Cff%Hi3+^H-c`f{)p^8OKF z^CvGeo50?@7NmX(aUOrt14*7X8BVJe!S3Ik4fOEh@BC+TTP)Yjk{L$!R7UU?DpfNt zi*qN=e-pL<1Z%x?0jm}V*OpmqY%HHb*+Luu#DDLL*#ygGms=SrvvI*nwQL&}n6}Z< z)1z^%&EkYOb>@uaM!Y3xh=Nf`?$ zDi`wq&U~qAP}s^n)klQ@yWJtl!v^myl4Xqwdn@cd0dGt+K0dPU>`a|;aH%l{qH~c2 z^Rq+yn@r!K|7h;oBC7!K!NsYai{xLr9(?;eW+%j_JC1-phc?g|;r)x~Z6UEmJN{wm`Nd8FI`-%Z_nU(?&562P z4Cig|2sXt=cwBC>Y#lm3HD2L6Y;)~&`QXab5{Dj-KYGfsR2<=_%fv&TLF$;762d=- zwglkAUPPhkjfV2)Q&n%{wX+f6T!El3k{U!}b~KzWe;+8)Ueul<(?g7RyIq&S;nq7+ z0f#&K->i_7iv{MUk4fi79ZVNos)YTM9vSFp@%?L{0)B>_Y0c=0#p1CZb+bhPi~ppK zug_yO2g6CfwE!VuK)qF@_1mrZ^3xv;U4--_ zv{egKl5MmU3016QTt*n=Y4NiXro2SI{7c0YFFh4Za5nL7bCC%pz&Yy~mG^J9xmLCE zdJ|y0Rd#zF1@NN(cu00YwqZk7z5QoAEk!l_!(E?|nh?7#a_no=(c68dr!SI*hnHEE zT&>}ibHm3w@+-D~MsaYMjxKRE6&<=%vgT8!TjO~n8`ZnEl&h09B|?I7V$J z)ydf_)McphyP%?KXIIx zT#Q_u8%3Ifo=B;HM;Gt$FF@sT2lxaRBgmd1#PnUy=k&APMFL14>qh4O^r@p599!IS zOQ4I~3V`4qq%B2KgI0X@dZO)#b8NyyU*MRq*RPs-r~ZWEk%ZKs^=7>D&+I@juY^j#phRO=%wIYbb zdh@HG?V*Bu+oQ*S#HY&zI@wi5tpx9X;Am?L#)B+5`MLECJ~!9h+P*Qf8g9X@eJV5%4vA5sbfZ0*?GXcv~^d zPnRWsltRQ-IgPJ3GL|rAApR$W^7l4=Go4*BLI45jWT^Jj>LjVYtQD6W{c7G4ENgmJnnx@F#J~in)H4`r$^SSa>GBq2 z;Xtd2hK%Q1tWu@|LP>*qt)eRaU?YP#3#h9bn@x>9)03;cED$;1hm~_RbQHz==cAB% zvWW*s5%L(I?hxY6lleg}BTdu9ld@ue;S?nppEBY5*Mj{1D^m1@{52A}UXM*S1=m8` zryG=VzdAn8748IG2al^@8&bLS>~ka3`pOlKA)olw91$>DIVWqGiKz)G-B;^6EqDMZmBd)tIx`F#rn@7?~giAbmv!^ zHV<{uACZgyWkIJ~eHC_QdG^14>cC@HE!^GeY+p$o&hzc3=oe-69K&ItgNFgY7w@AKlS?IRMxb8(vDX`4y=AUQ`z6^ill;A>b%A}ls<7_Gj z15{_AwO8vM&`#qjm*Z1tN&F@uCSu))m8fyM_oT$igdn#;RQdr6G5-G6c4|dK^rD$pt zsXpc8I~_v0#PhOtKgBpO3c_2ch-;hW%n;(WY~kYSJ44B<*nB!zMzC96mWzR={jdY> zg5-WX(DKLo9Mi2bZxVvfbw8oj0HkV&1lB8120I9caz|}S*rv0Qz^}1jM_gJK?;D^k z_D0u{ZkzmPg2d71NJ%f!8-^UiL8t^-5wE%$I_cP9PHH?Aqam zd-{{nM=i(24?D`pF+zR%r>@HL|(g*5W2^q8e*t< zjyg;-@~bkO258QJZ=E8PwJkZw8{_ub{Z0NOd+cXM9K 
zIvq|$4khl@VMgm2@4KQ0)l&jBQ9sgVwY<6QvSsc$@@xB#qu@qJT%(Y1(4u|7E;W$k z$rrYXJOeb&Y9`c*r-*sGfe^$!#o)i=%lj1x(N&n5J*2B~eT1mb3%XwuEY3AcW|o4m zc-xIhZ>@bk3Yi$x5AbdXKfws&9)d>*zoYx2)%3!b$mFsWHzCu*)<%7^noKGcl4C@Ny1JiDu(p4QFSHt^BmmGf1sXjiyQnrE{-G>9)6D zVs}fOoRe1By$$^QHG z?@5%T$AAMY1C34=lLZj_O?tmqTrSuGQ=1=s8~5s~mIJyNGRrq3&XvzxzoV2e$3m+P zWZdw+pN|Ldqr-X6DY0laMy&$=>{W~54wc^{pM=-xs5GLr()<~}SI>vJ*h)lUt&6_eE9PLXQ*FF zf%$w|ItyN4SGu5a);)LsY7;RYhc_~uO$4~|p(;eVhPqOiu9?$3ZJ?#gAQb&sw2`&y zGqmf!-~{<~nvykgJ*{hv**a$dRQLF1h`<>DlKoX`UX$K;_Biy@K{usX*zM&O)*6jN z?ij#4(e{;E=<3>6omHV##5{aXTD7OeTC1YdX^5|`l@b{6z(vbx^~)G!mN8rfBDBvI z-A06rqk-Oc%kaTpT#ud|MxTg9W6lm62H|Cd0myk+ zkFjJf?RDfJ*@gWsuwx#ef{Nr?z&u~M(b1TlaQi&8z*uIwU^gG0^d&G1$L7oUz{@z6 z00R$)fm6W18Ib0hHZOQJ0md^I(rmA*n27kFT3vqulll( zI=n%1&ElO-YengHY(O$sdYEpbsSSvBQi=NW8{|qN;Q#MPmJ~HiB{PieF zX*8QJ;p4&R$AMy>e+5stWU`t=u1|8F>!L_10t+Kj&Q|NbMULdQ`5|7wXitD`NQHle zCZ5XtmP!UZ+S!hfe zZ3&|aI+PPRKjb(&e0`!}4}N2iyxgdual%kMDEwsqUBVYhBnjX(^b+1{&|Y+eCPuB3 zmsquiZmC+2BSq5(CMN4FC$nqdq0gL=Z1A;|z%q1r61YImVOJR1U;0L3O;tiW`M_&2 zlXbU|;m5QBdXdekQDT8^{wTI$m!PrW*Hj7W(p_x@>10Fo;RhYgR5P-YbT|COSVH3S5tP}3?Vzqr_d0ZGm~5_e_8*v8MTJXhSQM% za)l1Fl_Q#0JZ0IbD}vblx(b2~AkO@8KL;&m=>YYcp0q-yeC$=iY7*Hfr=u=u>P5+x zxuouyq^o@uw>V?1av>qXiEz;q&pdeHkAMv@4@jJ{X!J4n&pUN=i2wpZUvWE|?Ws-P zK2ut=PQaR@M!b@EV6HjzH`F(1Hy9+2m}D zLg;nN%M5olPRx)j@k||m0>IzLyvIGxOWrlyuiD`lZG=G)vyHk?-MT9Zr~i zW*u)Q{vyH%b$M|bR{uu+>d40ZW4PNO#nCK6QXsL62GE}<2QA>X)iclb1c??4R7zF2_c+6#7EP$cLWhq~?4CAp-1skJ}qJnKlc z)yufJzJiD*`zb*JV` zM{qdrteaOLT&VrQNu=V5OvuG4@hei}y97V9D@DmX)mSc0(z`f5P|NuF>@7%mrO4Y3 z3+L58%4L`lbI%u?Rw7Qjr21L3G$?BVW?Hxg4i%b`jK^i6Df~*Fpj9r0#??<{u*jbw zxpC%^Mwb9RQ=#g)@xO(0!C4D|1JMTUMmfWQi_v5XDN?7Py;<&%`!<3RL&Rkm$av5i zrOy2z)pUX9a!BRC`dRdO28Dt$fQgt7go8?Uckj%ZDshp)hh?S%@3p+}s7)1`GD7Vj z>r+c%Rw=Jt&QnCp6;~v{749IKI9pT=J=yvtw|8s`uXxc})ILhA=^(5(nmyJ{2eI8g z)=|4BjFu^N^il+Ox}$@WMTK+*yjJMky1KtAioMr(-T2CqlQ0j5vC+3Dg@Q~-CKZl+fv=BJQY_*SbpigWn! 
zNU-2396hg@+KZ?3K%RUi%}ETJ^wOMot0NN!ZX}wD=&PtspmafA!jHGh8ei4{9(t_fIeJBp+wP z!IOT>cl--9bE9H7QMd#1oHwK63@u zywQ!3n*wL{Q>LJY@M6sRaNHEv+(U)@J_adlq?mY8%Nf(X(?R>Fz!CI_x@%VR#r&J~ z2Isl93>Du{5tl4BuCEcIp-QGTZ2JeQ8x-op&AbQEQF)h{=M`O z9v(U3?APv5S!O~BP-*om*`VUWcd|k1vtCuVfL{ZJE7x7U=K__`-QP6|)q?{XRYo#d5U<36>ZX>^{OYOw^Xsb+g) z%Mks`a77~VS>)S>5n7tgmb^rwo-`^({{uDMuRWovy3E;SeG{Bb5B+8C4+`OE;GHA& zX^g{qr;#fq#xd$fl!+Mt;InL%xl-cB++?EhmBQZV_`bJKD_Id0jw;;Yzd;-32ZB(&|oz&Z658`aGJ(J$;FD<3^|COt(&v z*aVahQxIJ#Sxqd^XREO)s^x{SF|Rg*bR&uiT4yVtp!Gm(ZM9fTePCvoX273He>se* zwQ)cQ=X2cnlXEi$$i0mLHL1)+TjJfYmf^8xekYg&-6}8(_787#n5<77@&%T@5Uc3%#55y@H^B1ONn59|ytdxGtI$mU7gb(lLPxAD*Ux?(c5xx~`g7*7|38ql?xdIBG}?BaS<<)kts0WjYh?tJ;;@>3bIALDm~_;9{l z!QeC=A2RzJPk5C9ILI>Gk|g+S`ff7M`L>nb8N@nsU(PnVb0_fum*wB4Bu>od3ij1k z^3&O%(VI$;C{?ffUGVk5p-Ouz^MyXP<>d9nw&D-k*R zKaMmch;uoQ0Rr?DZ3;a=GA|;FjZ^fr&Ge0oWhQeB9&O3m>^=YAH?NmYH)P9{sFrT) zdlV0YWDzOi$NVR6&~2Q40zJ_WA%^nvF1oM^9X@HUQ36XO*V+RkkWk}RwjZvSQl>I4 zlF6H|*?8;bzx!~zbnL=r-v%7n=ia70{A5@YVZ`bA*A~(gp|4q_d2PsHwxGLPDz^K6 z=Y8fIy$^`NbT>C=uUB!N-;62XDipRZM_TEvle~V&+0jM?M!-}e6+P2V3^&|> zNwyM{o`zI#(fAYP0F%EoB&c+sC`0@~7y|JJk~8MNUh=H};US$PGuez82SUNEKfR)l zMVle%uiWUwvBdg7(^Sos_C}>a?GQ*-vLgI5_fRGk?qY+Kdk?)9N2(m_0w49Xv3J_- z2%s5X%dEdK$(#GqHw@xQkoZt-1LZ7L_BW+{vJTi-qb=s|DP@F>x!?N@0w)Ot|18i9 zc>z8N&6VaTBhkkFVUc6%n!29a({fnBJP!GkzKcI&tk1*`?5j zi!_xxF58zv?&Z_DrRFrxA;$ak83eG> zerSJUM5-9{;p|Um}l}QmP;||!aXLw?X`T*!;p0R^vmN>EsxEjKr)L8pa z@S*$x(?iB*E%Mcis7+Pa+L{e_JG&f>8op=&+Y?6=@TNxtze~$*lkg4d_MQ>yd;IkR z7uSzqh9ny-wOTAM#B{iv%hur{7aDd~Y=zN!pYEXXSp+!+Nu*HGQwf7)g|C!YU#lc! 
z9JT_8i>msna_1-P`C*nUmg_J>CIF~pEq5D;{PKxf+b4vJV!RB#ufejU)j2z1^ zHb8BtW5(Jt^8gTwjYw)~(hFKsEiBkt9A!XY(C6tu7%$F6@gfbropbo04IbwUIGH#y z(502@YvH%~^wgs`+u2aN${aFcI-5(wKe)e!5E?6bpl(AlpdrYSNbmv92g+;pJ!DoT zW>0t9mwU)M51}7@bwr$o0)p#OO|Bu%9j29jJPe(S&?y5UvEo zGEpv{zc4rbja(EFH{k?-3ch9Ddnka@k#Z8KkP``#Acdw-;Cozk?wA~|_t^{TaMm)) zflLTQF&zB%`bVUELe*VK_r(`w&k#Q02v0!5w zN9h}Vfm9NXAu-%$d+1A0G%b}60@UwNT?fvEe`(tDR4b*y4(?%RP_c>%RJwen_r!ht zC7xrGu@CLQIlSufsswUA0;3du+)0skCm3%aCB@WS9Bup{0wQRSR|Xh@+mSurQiQqDcL+axP3p z;>>>9r+eX~0Gd4cAP%tdsz)-U^KR`Yw^wfB=ZmUQ9OxC*%I6k&5Rlay_;D+(Z6dl< zy~b}-)Y>+(;`LG%Gb_0%TXQrk=1i_OthW{OJnK6Pv2pF-=6jH$)8{28KcDI;);!|8 zIXnUoQ>D)7q?cL?xuz+}5|Ia>o}v2r3#XUQan5F#Vm#Rh%S;*Gtj6qe+C}Unk6eF( z4xJL9w>e{ME)uEd9NlKy#C5C#IH5S($>*bABF>e9-Y%E+x@Omk{KtUVL#_O=#K7QI zx9{`0HB^Amsb1X;xvt;_6$zZB*0T?ty#{O*&v3mRN9jIT9|~iu@Ha)I4o=lb-sKQL z0yA@zH*|zQ*N+70ja!TuKn<^>BBMc2026b1SCe+2=vxD|tA=(04~Q$>IK*iTDw=qr zW_=!sCUwjM)C1K&Bxr+_8l=sY;nRvFn9hJY1kP2&J77nQsObEVsD{$y9iFQAkB|MR zaRap4xH%s(qMX#Yj5+5N^h>6YN3`m7Rj*joy^Sj5I8eB00$c12>y1pHIw}ZseMI`p zb*$O1*&VBjHxt9jiSW2~MAbmh%)!eC zMthU&Xww~ORym+U5z!{jabXaG6K03BbAT*}l5HVeKsN7(%a3O+atRg@;Rc{71k|)x z`HNrs;!->Jo>Fs#j!KovxRK(-0vnFVJHL2LG+U>Kn5t%0VXM8OZ3Qg+&+a=ZV#%l| zwUi4arNikrkR$^?2&-bWE9ledB=(NSjY48JIDnJoFYpD2G$rNdEtz8P%z{c=bAWm> zC26Nl00?x=P$Ii3xEU%2;Og!{Uyb6f`l_8H=I{4r&UXF9JAXWCzXyh%8|n4_6**GN zegW9-z%2QAMCHLb`&(Nh>);4-TRtqdf>e-#eBa^Ij9{_K7+#xmzQQC?iWuNGeJrl_^|TmlAp+1%lY1}dDx%R@l$xI?eg>*eY^eZ9z^V*cSJj?0js zqOX+CcbGkZU!-#Q0l1GzAbhhXoC_L)@Q4KK8z9>pqdq%Z49)1KB@_j1w8UjftoETZ zO2jYt12#c)mEOx|c*JThRBJ*Q#8-ujCk5#KQuJ%K!2?kuZX_c?c9!#QyaLIsBAHZ$ z3y^j1F}dWl9{tXYo3sLC6SFsV;c?*vqUxNL`bv^D!pw8z_~UtjjXqtt5(g(!y%q~b zZlGFKdJdi*Bku7Ny$e$tJElk&Y_sl~+E76%UEY2CitIXyv?4j|77Q77$OA}lzB9ou zgIn7=PU8ir?^)NpWRV~2C{xwp5ARa_b%?u?FZ(7OU*>(0jMFav=S{RHm3}dvLWR9Y zVy*c67qH8oV$P2117w{$sO049=gNuH2EtmKm^=BXL$*HLO)M`05pZ31h-`8XOsxt) zoib8QEV5K0EkY|O*T}^BrB^~=CaTuB#aJ&>th6|hLb|;D_?2A9a#zq~AC1#}sJUV7 zO?*6!WoC3f%VP#-dwY7my>=cIxUe0-TkPaNy*~53|?J63o4Kw 
z@2ieD-{Fo=jE5u)oo7)G47nITzw6|?IUw!RnNAlyb4>08Ja##6Hj;ud0GAv-a!nNS`m(WqARD#vd@#^oc1qa7!+IgMa=@Vclv<#&HM z#3)!2$*xaO)~z1|hp6Z^pP`IKi+oCZe;7Nwf*m7_c>r~A2(S9iWV|O;hAW4gA7`63 z2)h}s^h0)Xi(OyZ^~q?I(GJOt+&Hco9Gj(~vX$gj^tSEN5P45i9&aX#Ja2a$D$QMt zNY78pX4HhHlR?GLwwtXp9|}Iud&N+_z?4#Jlp^sS6r|&nG<<2lBWW?59sIJ3WlEVt z8R`N!&BCZMrHJt-Ox~ABkHtBHTfg}TB(%2*WA~W}>Da6854SOhpJwk#^Twl=E{mIe zxWLEZocl2ev(|`cS?d4*Kq~@KfR-6ugJ)50F{?6G_vL%U&fiRKDxcp0@yipCoiy$0 zU&yIgBJhvcjTX!AKfN@;=>;(xUF4T945B{BH6;LElev6avXO_g9aJ@yF^|%KQvX+xj@o>S#R=W27oy<|I ze1qC3vubjg#V0ljbqWg7Gp>5PkUz`5B2^%{MANCds!=?vS;5bm#iauWGRXw6xkbV*@tXn5K0S?eIGQWv5hTT_I~F*mfQXO z?&m(f|9&5j@B7goVjlB;t><;ld7bk-5eD)s-B}h_o+E6UW#RxSxWs(xvj^q|2O8y~ zFf%PqYf)_L--23`>(Rb1LBgj2Cgkvb7$YDok9X5oSaxlS)7)y z9L{^^0_T+DOwvS3yc#vkUM=$K{aAbM_HVoMxf4e5Af#Wk67dcRC+*adQ~YMpYT8sx zMV#jr`aBulT!Zq|y)P9aD$ahQXFapG&a@12Ua!HOWZ=cx{Rf>E5ljohV)jN(adOx` zZF=Fl7Z0{y7YzK;B!2is1VsFpw)KNNyzbF2S1=;G;5Ix47s4kIMzbijj^eXmxY+qJ zz49rP%P&bRV=ood22rPNx(?w-Vg6f(Kv%~;EKx>OF{X5Fs7E@cMA)aw+5LMj4wFxi-wyLBjmj5o$Pz}7O`;8ZyA|XXT3$EcmkE) z^8$$rI>Bw{_8F?cSaa9Rl$SNzYBu=bJEWmXF9qG^{{Vn@?}IArv7M>1PQRtuC5}PV zRWgfYbkt14;*eUN!0=WQP23M8au19&Ek_58IpqHIu=3AL20Cv$@CfZ8#ml>&%3X$@ z`0$acP~G#h3}rNqaM73})$_<=@-8mH<;fwonaLx(biP-Bjox^pwD9oA>35-nGz99#gNajTpaIb9FY#!e3X8~ znYn=>6xGO}AHB0;x-+NW+fGnx8^eI@Zcg4W42pWrrz3NxqOOe)7-gD&E-IH>FrCV9 z>b5bD0UC4a#LF{KZl{jVBZaFMXWhA9Iin0rh-J@oo?p@V&aCi0khc_Lq%C3*K1`LV@Qw=1CMG|-2_qjQH zJdp5gI-LvX$G9h;(#1@iTdpk>S3z#t58a6^6u>4q70}vbm_wTS^mz+$D$<_u-Q<4& zk`dzOV&su9I?6Oa89ODPGWGtG2s>?t08!Qf$8L{)?zeyT2P^D8#O#rb-O|l9b zKk5z#?{%)Qh5m-g_t&~#kL3*7?{5|PwV~#0tge4xj%?-k5AvMd+tk5S5Nr;vHtf9 z%L`CwVFPM5bwMNeAh*_ib%T$Xa*IgtDtyxf%+4y}<3l~dNR~KP02vJR*l_03>DN;) z*MphG7N}Pq@q z`H*QtYLg)f`5s!@2Q>2t?MpmdlKpWJ`A?$K673JKBG!{@VeX@R8_Nu+JHo%A+=dd6 zNJ!S1S`P5qnme$V5Ao~#_ZOyd zPv`IpidM!(02b_RbyDsui*_(LCO=Sqas3I>Z>2tQs@aDrvqD5EJG)!c7`}mz9~INj zMpw6I%d1Ui`c?a&I7E<050D#KC}vv6-Z%HUBgMsbN>V3*0X@g1v zv|&m>A4G)crE++UL<~`(7;Y^(_s!fCbJel9?>sr4}I`-!UG{Vr* 
zgq*A0o6;k>62cgLXi&Eg1M}3F@&N=x)&M$}`zVNzMCy&PgB`0#54&PNkhPNgUfXm( z?_G>^;NwfqO?!MYqx$*5XGX2179?hVJvtMneiVo)@~M@sOj-1I7u?sStDo7M_nVh z)S0labrpk$N03aZj%i+CeVL^JSxo0ZuHY@iRkF3jwW6taDKD6kZnnXEqP%mM-}cMv zYjFAefIo~;QjQ18U2b_(iJViGzsLi~Zz}XYnFEn74PNMI{K+}@nxw#3yZ}#Lb*xBo zWuk1rjru|f1ZQ>PSsondIy_K-W*+h{9?)Ewd&flMGg0eS1{f_QM^I>Ujp5z8fl>7q zup_%oPV%`u);WZT?ch6@c(!{ewN0^|MVWTw`e+g}QGZ(kd3jo<7P|GwDFaEs#-Q$$ zks3AHcpwMsP%wl#5KNwn1{pIM2=*^jL{(!U!Pf#|m{4RhzogK}<>|YOwbuEE+*){S z{;juAzfO-e63;s)Q3E-mfG2u#D7g|LNY&>_#8f}WA@i%2TK5LQJ<(ipu5ID+wp9_o zdPcnnOP8*)e{+3hp`sa9ppF#Z^CE|$B>Cch=_Gi(^{1l}`g%*$sv-M~Yic z|$I}AYX`E2(Mgk~Jw{OYO^X(}+OZpqYh_aOGd5J&!&b1%WoWx$%veJgyjCtM5w(BFsaX8hZkMumxP>5wmY5&-)hX| zBZC|YQ7>rZ8stoSP}hEUkEXl_!ePdc03PzoIEV8Yqk?Xq{2#m%>L<2;eKUXOZ?DwK z)YD_iDQWbSv&%pdDaHZ5i6p=*B~?Cr znXo2XcM|pu?c2VlE)4bcBg(&KU#N3Yq=!l@E`#{)_lPRJY1|MLC0- zYXFoKBYsK5p?AmV;3o~g7a++46d)Q3(fm=*to+)jjhQ`*el5y35NAYHFu~UK3azs7T;8;ll_c* zGtV{zpf4MbJVwFQ+^)Z?nEZ$Wa3#}F|4t%qk6rIbmlc~f%vdDK^dZ$Dst{yHAd_VP zQM|WR%iO!hd*jgJ25VfYHNp->VN9+ZY0Y>S@$(QMCP2`>plnG)E^~PbQDx%*97Yz& z4e+%k#JQ8^N|EUrttp(?Nlp}-^&d{4%g0ZrU&4WX{C;D`#z^6D<)pq|!9rGE1Ts?( z#{pS*lx|bR4=1QeK-kVn_~Yi3{ZkKxzP`Z*zmL^kXI8m=n88Efrb&GUfFGWOQzTC$ zC^vgMiNu+C(>saV=5&&v`1QuT=QxR?yLFgBlL$dEx#J1{4GlxD`#9G;0=ttj)EhkQHE#pA1qE4;8c_IPGKaiv>>=}J_h%;8 zcEIEJ_nau=BZnugKH>!3i+xnpFZ?pE*yTV7C=Ff9z>iY5mZsij=??XOSp1o~c@-AB z07pdwhb~+e{`V8#%WKRqMmyLDg$!aAs(=SrQ!U((I`Z}Ihr35Nmc2@>1QY5R8I3_L z6Rp_)hF7APhRr5$lhS8Elof5I0v!V?Xm05>X$O%EETnZ4Zexu=HrPc;y3Y?Kv;F@} z_Zgw=G=7Vw7s#4P7CQ=9LLCEGl()g%i?B%~PJ4Nbh44cayP9q1kga4>R#3$I+6!GZ~evA7wg9`1ZcCUUD@1lS* zav&$mt=4Vxlr?dqM6N8@swt7=gZC*YAQPOx2DbKpiff1_mGLSXk+0c!Ybq}s!$L3r z>|W5^mN$I58PTy6cY)L7Th-IaUITGAw@I(Ef|nGu;IFzzvL9*!F7U4d{(|dVaP-TU z$Z6OCR_H|vcL`AeHx%n*NP%zYt4GV{j_HgY$+;PH=?YgB*ZR4sYrFapL6B>~Ga%n+DhpzfIa*MfKUL+niou2V$;OkVyI8aLxRpf1ntyIEyutx0(R zOn|zj2@FCCMsrQJ`^nA0MPAA+Gma40vliqvgW{h~sz**_OU#II30iqgsnT=?ol-Fv zoMM%f*PgWjqUwjFTbO&crL$VMpZw{yfF-^CxlD)#B!rczwqz5MKTgr0`bYR7PwSS{ 
zOk!kp0E#30!TRztmyZY7^2Uj>X3T-RT-bfEzRXU0bRk?`T^HM(E~AHcJ8*&y&wp5H zQZ}x%<$OpWhzBHrRj)^O}2uCWt#RcY8K2!0{tf`kJ+zl#)+P*=3KtrY@~qcl{NDvuZ3bV zbTS>En;^2I@;A^7G;zfvb=?p&{}&*5P4l5Q>}Z|*RHFNCr=(?g!M});jsh#~A$e(5 z$s_aZ#{SYjr>>ohsZPyJ|Ezo64I*20_EcSke%l?lz8SAm>gII2x6B1F6F+{ss;wD{ zUw9BtEI0(yy_4PHFG=r1GOvrJVGW*qE!FZ>ZCRQ|0uF865 z2Noi7|Es+u^x=iOf~Qlfu=5S5xQ^@_%@ER6?SZ9Nb6^3wU(Syfk1`=}BBvHKVBni4?^R4WK z@3$t?>Xm3c2iX^G2@@{a8M9>>5@K7a&&SR*tb%I*$OB0K`bVyd0RE;dro?NeDi|4s z&=luwMXe7L{A>xfC_b6lEuuv++L395Jg;eW5y28=og==Z)%hoextZ9 z^lr#PYy;2V&84>GZ$nS{C}(;p*aGJOxx8TtXwx%+jdu^C!9+a8i-S`5@L+Lw4aHG-?=uygs5C;1zyoqC3rtY>E>FA>@3TTjJdROgt9r4 zrIyv{sr^?rS_4o4;qaDA_;o}9v+2-NTcjymZWNoIccUp#q5@nw+u?F>%xT_W7Gw8z zV%5Xh?p#-!l(iT0o_K+855N!m#9!Q{Io~`*I~I(Tf#4=5YUZUM6F-c?7f2I9w5v-u zo1qc0P|USqHJ>PffH5dNeKeBru>yk1ebkcVUYfxN0#L%-A;|nNP)3BFaYG(Mb0Jt? ze8e2N>3qj-TV`{7{5(M;6;r0#e^Z-t1FD(rsif(~s>y3c^s_L2xd=M=TIi`R3CQ8iebM5$zE;;68ij z-P<@XSA)r)@1OUNDU!_%ZDhJ~$1K@pBd7S&{k~jJsY{PR7oV78rGNwa%ILF?Bd)>o zY2s9YgcqN201o$q2vy$)Siv+1;}CIPWCH3Isl|Nydd2+)${m@W zT#Y3r7ryKLhgG%Cx^rOYZ$5W(Wo{nc{2%;q?s9_Q-d}$a|Du5jl9B&2mjHGkPH9UjLi|c`+uXcITj$+{&_US z1{Mb6=vSM)7F804y{g!ufU42t2R={!c|e3@Ai+Gdw(E<~eH@bJPP@Dwh_)IbzKO>J z)QHKLQ zb3&*DJ)GlSG*8Kr_iFe()%O%#Q0v$Ie8k$c}e%cdK78tbNV10Oz7(n0pJ}5^!i9J`1v;<&o5>_N-9F@)DIDhb(4v zOMD##n}!}MTwq?Bcros5Kmw_cJgU3|Qn(}a!0xHrFzw$!?xUB&_7VzCfwM=W8) z_!+AlDkfU#3SY5tT_Jw*%#Rgzhg?kJ4}TjKb@QXH0u8ZGTAZ!#EOuX(PnRPXxtSYU z9Mb0d(IJ$lJbd)gtyyQg;I!7BaEHNJ7ottao{Z2Dwhx|pHdm~=3KcIaXWXv~@>_@v zSb=gUbcCR;H0(^w5AN)Ik$w5n`=f zcow0Xm26Pc)@)3wBV@0Pqot!uii*qy9mp&Z5yiv;<{9gnPl+>$m$^f}eaBdy8oi^< z!Kh74ooifijH&F(#gzqwDJ&fg1_Gne@G_m)xlcL1NnJb8e$3ZvnU!#oR`@P^HLJF? 
z6q`alm1JK>K78OD8rR3ENbnWDl$1=JT+Qw$b}j~cGuf?#Ttc!j!`z4-u@$st63#Ll zT{4$Xj_Mm}*(3ZV=9Edfg%=^>6k~x|Wu0fRg3zd_#Rw;T*6_TL!coF8db6YZD4l(x z-Tl0FY+eo3KOn3YFz~c`P@hy-YtJn@sBXk)SYO@3>k?i>YA^d5ZdYq4#mnbZnD9D> zDV${_TOm6-zry}|>MAczQ7Cvq!)o6TOhB0}PO(X%Oq0#V5Sijj7Y1D#wq6Mm*6L?A z!oKp=ag?;GDhfQwVwWtJ)b)D+3)9WZ5365(%y$a1&@FtM^|M5_%bw-QV!3Y0i-4}l zFx}4kxZ!Z_ta};0X~UTw#VT?mqN3mZt?uJ2Z@XN{pvI|6Mu#17Q=HXxdQw#R_S8*5 z18gF`{L?pXEn_w>?G${!S^0+r-<1JglQI&gjhK{LxBZEoIkOrf(}QNkYe(9*2hK&N z8Sb^W#A}}=6|wPG?>i-bHORCmbYJZP50|#?OiT|)Lvb~>K+=T;J)XSs{(4v2Ty$e* ztk#@am)3%{q$rot=6qX6or&9vUFIxiZ*2`9J{oMRDX(9zxFoP*V0;*vbS=t~XO?gD zH4)EzoBS~%z=JP<{JR6o(?;L;uDi}G^}KEI0po@0`G&#=Y@`LO4v4-xl{d$tEt274 zz|qp0uKSK?bGRhXzHgk!+}d-)w1If0+t*%W_m4;sePcYy+P5U`__(d*bNPa%n^vQh z3*p#|v5PsxKkX_BJuVtv3J0Q{W(aPz?uRI|f0!Nwf#v&|VdE`u0KCv&X|y z!cEl$mc54K$k5E0WC{2W-j}VmzNZ+p59y1JXeUyBZtYm+;N5CR?zR%x zO_`AfdXSJ~_5v;|$~@79P9f6;Nl4SRii|VtbyitQ+xwgk6#Lu{wQSnOt-#$ZVCU*C zs@G+2=jN{!?UKu`&zUu@Rf#8Bmtcclo?VQNP|sqtJ4md0c=OO(jH+xz}UKEoN8CsT2#3k)QlyH|dUPR! 
z2x~Bl2ZwISzDW5wJ?EH*o^}$S%~>ZQvQ7c{+xE4_Q&AePTQb|#h+eAb@tZ0elJs*n zeMQtGd5%>lNT`%|_TF$g=QvY#`Kpp$qXjQYo=cBqteTh}gc+?2f0QAgAx3c4XH(GU z)Z<0u`Dudj&aEfCyHW$nGI;3%L-Ld}@G0YB7MTQrMrw>O-gI0Dvr^Ie2bPPfr_p@m zq7Ce4$Mj%WJweJr3W6}~q4vd?dbJSFeK@p1V#A2&yKuXjaTFt^y?6g;=DleZ5n2#^ zER{j*(*n=U77xEOgx0s7R$)s8+wineI8eB!{h zlwUJ;BY^?qw+Rs{nrADeI zsmIyE;bQG8)97RSuwo8tdg+vMNp~A&d#diZ<&SM&?c1Fl zL#B1ntnJ@9y9GYBzO}E@VspSed)E`xTGS~LYTo33)YYBKNoX`yw%haBV9AeriEDcr zy@V{HR^;oEB^1vzmVJNA!3(s<3Pa^+&kN&PB16)st^^vnM44fH*Lv3;@4ym8@T)dU z-Iq>x2}}H>{+Ku2&qjMm)u&-3$regx3cnB*I0x4O7{uwXkm?> z*)W5uuVN-Kji2l+9O}D^`Ys=1BKN|VY`dit`U5Dq;E~THeRkvk!T8UoWhNnkMjg%_>i*4 zS2fLC-q>=HUPT3T-`PGcVh{p@fwupC9MjUM+G(xZ+Etz9hW~NzPqkX{&R`Is!CFrF zW0r`#X{5-4yN#vSwauPEidR(7yn>%i8U8V6-~|}@)F6K zp<0fi0rJByg5z>t)!+xIg(3n=9 zGpTvsngn`coL)JT9qDS+*qzoRyWq4sR}hc5CkQuoUMOkw4nK-x&e?EIbY4J|3|_<+ zaZZ19SOdmFPL+@<6SDq+yKm3`@`3I&nP-Rb>cPidA1X^XkLB~NEzT`ZP~X0TuaDx6 z$d`TO9dlGjFz~qI6tBYi2QJYehbErl6k$uac!BRg`vDG-z=u&~&6i~4_OClazEM#G z;{R$-$>dMldOCCH_Fn2)6|AIG>YCWvdu-jgbNI@-+vi-9^C{680j0RoWn_>p%PL>Y Jx^T_={{g_I+4cYc literal 0 HcmV?d00001 diff --git a/docs/source/_static/imgs/pruning/regularization.PNG b/docs/source/_static/imgs/pruning/regularization.PNG new file mode 100644 index 0000000000000000000000000000000000000000..2feb6ae276edb8075818a654a62eb512c31e10fc GIT binary patch literal 37001 zcmbTec|4Tu+Xqb6WUG|jC>50m*<&bC$riF@zb$1M`#xih#E=rn8if$aGWH>|W@qe% z?1Qll!;CTSMcvQu?tXsH`*}a_AAPEs>pJJSj`e#U=kdKFwKdh47`PZHC@7fj-Bo!= zK|w`BK|%S9o)-9|;J*2D;4ez|hibPd3cGlgfd5e2DQhTGP!z|W*tZ0(q&V(!*T|iM z;_Su4Ka}?#URbA~c*%E9MOn|sY_)c;`tkQvbb8>qz}v76!Ufk$TDEmbZGXM;5m)Ew zemfjJ!^tw!Fr(0H9cC1^v&!(9KML9(uEc-g0yX`GbJTb5-aU5ix_H#J;&;5!@Rk|E z=2Pb~MTFi2RMQ|8+n<9jTG+eUR|~Du93-MX5dxmdlbep83uC=Sf3BH}h6eZx_;<7{ z1H;irVI1ykY)2o_i_HrO9bOg2YV|`$oB!zRrngxcN=IKm7gJLX{`IwWOvtg{3-bK` zSui6z<)&{RubfBg>S$Sq)5zEFpG^>5roMA;oHu@c$M&gX6TxdbwK5oIo@3o#2 zG8ZJ6mJ-8p+$?NtjGTPY8p%?XzFYHmKpA=hrhO0$l@O`#CG6G!yF&VtyT}_K(7W$5 
zEbgYrdA5HxM=BCBEF|niT_ons=I0A*Iy;HeQqp7$-FV0{pKFw}BIXhb+)qeM-&06; zpZHN3WrB|S7~u-Wj#2;d>#Cxpv)A6abR=B!mdB1P@r@$Tyt0IN|D6`FYf;TkfRU zwZqT~@E*sVKYvV`lptz8T|ViF@W(8j#!j%n2LHZV)d+bhBf}zS+<&dMH^O44PttkP zlQ^+232t}(piU?zzDn1*MfiA*6qYOMr`Z1z_QazP($8x$9=V`Zi9nO}y#SrXPIwRC z92giFXzL?Du;I~XphtwH(?eCVLpL$=leh~o~P z#SiFrp4{Ul-KidsTD;D$>mqb|ik<*ge4A+I*Y^2wrQ7{G>BUnK&M1t6^A1xt<9LNA z9@$dONl~IL?X7U2&v)%LF!zU~MP!9dJN!?F3 z{H%m%mBHlxoz>`oIB+uBa(T#ujqR`SGKg;WtZk<^*bv%8n##+;IDi&n0|+-?lPy0_ zzlJ^eM^La|NVRTJprOA&`mFa`0{jF6gShyd zVG(D^cH>Uv7i|N7~=Y>D+Z^X11lNwvhT)@!P#yGJ}2RsL(G1668S^3g8 z_P__#E6(`Zt7$y40_bv{P`}{k|5C+R?b=IXTRs@{>w-Q=injJkO0zbntC-Sfh^n>1 z0IR>?`(vAWq;}VTnF3)$q{2QkfGK`eJ#g~PLqW!YTZ4fRLR?+E<6h{3Hri-v<(^x& z$qdjx|3~N-!19Z#x)ku6bOMlgYi$CYn}Ok!tS3gV+RbX=+T>DQ*~%ht=a;$66n*|b z?z~0flXDp-?G7B?*_e=jh17hKh9R>4Y9dT04~-()1fMxuk=TJ8IlUC zT~qycW{JS}x@E8#z-c2D|fA3T<5`B<`)(^G@iweH#msOQFXGw0l<&4(J!L-Ttp&c%yZ`IP zt@%y|gVrL^@zuJoiyQyfjcUx`g&5?n*-W&X{zr60W=4kA%E0(DiA&?MC-@)*I*R6~ z$GRT>@&$xXo16E_GjDE(cu@pw1_%qqk#Z-K>hmz~9Sv;97KB6Q}rKTslQ{7{U9hG#zirtBC+!p?=R4m&22QO(u{y#T@ zfNJyA^tfo6SU1#lr=gj94ri0SfB7zW$pGbs5mb4zxTi;agv#rK9E9A#u19)vLEAr;Z6&1`4Xg$BuyY*@3 za{-{wcP5DHmNeh$4OLn9C26dX&|>H{lk(E5Qv zz38VnSofwZ4_EE9%;!C|g!qTphSgq9fY{r(&)9C!5wuTMO5KlT)aPWqOz?+v+*JqJ z?bDg}aRGVIs}A~02%80E{Nlzx@2}WwzdCkiT~uUrTG78Z>;FaWnA(;J0j=5*;3q0^ z+}><#xQ3>tr9^m&XD=T<=>*8BMn}_p{(BU=)|Fn+|CZf$DP$)?YUZV( zflgfo?c}iE+%HxTr!9xvuhjjrbQiff>~UBCzM*s#(&vhQeZY5yX zj%Nk}zfKvKTNIEk8<Es0(bQb%sy1*hJ=1|Xl`a-+uUIwk@a zx0}b_Y2+lDfBgO1#O8YUY}17UqPLgSWLFa406%2W6+)zT0TQnH{$VBlWp3})s_Jnl zow-AeX~_=GpYP%JFS7w|J$hz+KWd>Gyt|_SM{J3j^@waWgwhHAS7*QW))0e*t_(Jh zh1^ju`R%f=1+~hrPCe^is3H?OtGOZZ{fSANIsBv#=0+W#(pmNYM}M;J>=nF3bElfl zRK)e)&u>(P^Z1J}vR2@YB$ril{U7lyKBlRK;5>DM<4< z*jm1~)c=uDNbBKa6Q;2>eTG>9mD}8QfU~qko)#QEvq5C)SJg@&Ej`xJei@XS*{BG` zZetP1nnE#EU&lQ+t-e0fzls=ug!4?riotT;WyDzXD%V-K+pwEX*A)O8 z{sB6q$6f0us)z$e74opAs3N`gv{UkQOE!dHGR#` z)ZEGCoa2ax&4h=9FxG6NOja3QKo6GH#{*@Em^jHzNXPBHJe5CcXBgpgm8;1>FW(sf 
za`{dDWoc(2q1L8a{G|NH%!4+xh&%b;swz6xVRFrNb+om`3ZNy){$GkxrTa;VB?}dS2Q6E)&f|&1vHh3G zs>0qs(XbdPp`d1T;w$N8wLdwZN*dcia)MzB%unlRU#9@@a4gl9R~N#z3a#kiTfIB!vDRr-Vc%iLxZ$3=fM3M~%Z0JfDc*J;jA9;^-7 z>7#t&M3!2x@=rw%{!~XQ&IC&OjO{XQndrJ*WFy^oku(fd(?Dhe+p+rf``DvBUdUE< zr;8nVLKyFFem9vgOZ;%#ubAjU)`3b^{DF)Xv(euoZLsf>rK{z!@+)4vuv*B>udAcK z!ZehW8(zwIV&YAZFevVQy-0<qzcICKc%n5CCdj+w`h_cdN&fP$APRqzADdLQSAkC$FBzVt}5;u zFPp>W@c~_)kGt_!OO_gaka$4De*s8#TFPm3cD^&29ja|5Wv#jpe)&e$yyXbho7I+m~F%xr3;C6{!w-*4e90s*8 zOF$8AHU^)gy_0h=UA0{3KtnxTY~3>6)YKGj_lVRe7)GL61;2BesM06hraRaGk$Y>m zy5z4h@%hfiC0GZJIUy>OA$23Bo`d?%dz*942m5>1YR?KAPCY6xe1Bj`4QTe*Q~_x{ z0_-TQU8^cyBgWgFojTJJi{M*yoqon{wphh?iX_*0*5zkIci_QRTl_l~&nVh7!W=iw z%$BN`SKjME_JuGG+Va4ui&|B*9d+)FL`9yEReL_7T=T$I54%(R50Br{8WrdPI%qeR}Q%-CT^*U z#t0VaavlG9nH%ExaeF=)Qr^dH z!*MxEa%VqU#OpHSP%!W1pYTXA(pizm!|%kMkI7&aS?N1QI$tcd3>33kt$ScVlRh)> z%Q2ip#(l{=9=j$)mV~V59TL8Z?8I1O+I3wg(MzelYrwe5i`a6I!s% z*Qm87`;3f?`Nx2^R#l&q`|By;b*8GNcHMmV+1FZCgZT!Czy)|2dU~y3dSG?3wp#L) z_Ysf0l-dL4yYLPU?4|7zKg zG17p%eoy|HqE|eTghxY2ll`R!lo6@o;aSA@8=T6sv4Q(D5{iu*cD^US4Aqm<*1|fv zRuAGycA2~PYe;Jx?40JPwP0%u#IC-$TIk|E>6>Fx?X}98 zZZtE?P}s=1-wA<9Bw^N-PJ1j443EXUa9E|6?J%`+F^)iH=fa}Tiaph%6A}yrwS)S< zbEj|MZJVw>x>WkDjb9<%IS+R0`O~RA;Xbg7akS@;NuX2Krqx^>NFm?e>6XYr!WDU9 zK=jo6k;=8lC~cVxMb4VsYQj79s8mVSYI45_B=o137xhN9sZ!C$OzO%Hxwf1%zr4YqZ9nDX$@$Kea|x)nv!RnRDkH9j)-6$e_7MWZN()m( z?_XQ2Iz{V|>{n=+Y?N72cASGL{KTx9>Dl+9uL=C*;%4*#cCKNq>b(b)`&+Tot)xcq ze&eiQ`t=wR6?5Sb^>S&Y9~%%rN-t74nif_lEvP{XRt*$r!D~q2*8V3W`t*U^$!0k) zl>s_H*f7-}QX;k4PuGy-gPpP)UD1qTJ=!i-$I{12#Z;Aiz63ALS7WQL${n+UGa27_ zsjoPDLnvNh{ztgL*jYK{Wnp(yX*Z0)nZ=+izW#4qsr*BGpRk&Q_uc&4r*F-^cO(Zj zHcL*5MZt$-bjd+{C`#;&_EpYj2k*xsl~EiU_~0ONn&*10Cj%`%W9|y$Ek>+eKU3&K z9Z|qX8M(y9TE+TI!u0bWl_4ANoldn-2`fn;m7#ok>HDF4noLu>_p)DG5Bbl07ZFys z{A?vcaJ5zNYELAyH%wklgk9ccE;8T`;*YEn6H*9`K=5@ilRDX53l=Jtx|o)pBdkIIRSKdYTX7wdbV$nEuSIm?xt(Qdx#r^H?aG zCY7;4WyOELuJy*jb=>J&YyxG06xACgt$Q097*M@jnGEVqsxgNuAaC$_L0Yk=u{*`$ z8}f=#G^g)`P)>0r(}l!{v&ssc>fuHATKSvW1ZSx%W*ti?G}KJ4M=R2Kz6`c`&cP64y> 
z*DUaEm&JS4x<)Ru))FW7qxo!5IA&e&ITbsfv${mOjMeQJ&iUBOt9MQl=tc#X2by4!q25!jD1Vw`)F-H!CU_hbW3P#wCz%l`g&#wZCTBP8LtmwIw5ccp-btOJg1*}` z)%zSrC3t6;HUe&GV{zd!irzg&a#_tkRe0lvM0W+y$Eg4sAn9@8lA5*^)8X7*GQA;R zC7zl!Cq4U`ye8+mGF*IlX?7s?lP4-WoViBw7>!&t5b*0pIX0>?EH=wprhsNs?iC(T zy2#X8hrcmu7t142O@$G}@iPOr-jJ>;(=K_QpmxEM4uZ@CR;j5_ad=fis?pCj-RmAnw z9-sV6kgk=Sl1An7mfmUM91As%hq$bpimA@wr`M>0&1RWtGhztG&ncesCR3E2o)pXa ze8vC#520G2V9ysAzC#*;q8xjHEEQW1t}*?{x1 zoyvs=9bAG&FU?i;UWA-pMxwjFCbHtP+_#>b9gHQ4f766nB~KT0zLU(~_7)v&(?7}R zgUL*1{rSKR&k|()+!~Ww0@DfuQV`|JbKl<6?sB~l@{4(D0hvLVEDNQXuux{Ipv4u5 z^IC)nO}1~9YjpCB54}LDjK`W&l*3!2LPE~Hb{J7@wT@Mgx;XI_ZcpEzEvA+znJ{5y zIM|tF`o*Gw6Ir4In;swJww88vXWH#Vj*X4YSTBje!1 zbw{9>r+&%{3|g<*gC)%RU`D|~rXtTIMvL4XFOSw8-WRDXTx(nxuor)$*o*-5j*6K> zDWffN(H|9AQE`tupt%lG6Fk9%t)YNy-$E_8Z*#Sz4EKXLI@=m%V!>DdKK5!3ajSK1 zB(=zw6gTtPp}d_7l;^I!<(77K)JP2<@)O`q_}H_ztD5RR9P|8SS<3n`zVvcHn?G;r z#sJ=!eo|Qb5qj}WFG~dfSw`)7m?qzG&IF)jPOb+$8UZU+{Afbxk{;#&PWtwpNX z;=%=e8S#klXM${{s-9CkGZ{Mxfk8O|iCU=}ojxmp@#Ft767nN?}eqs?1$H7Miz zd~N@U?IsHVgJ1Y&!#%;($i^42-f$V;RtsBuAHnB#f?XuVEw*Qo9;iT1n!HzrPx8th zhimeCkL+@PmkNERY8M_l5vyye0_NFCX`l#8W_SN_vnjXg;CQbJ?I2ibF=dYDB%dw0 zzK)j1-dqH@=(>Pww*vk_rC`U4f&`8jxm7;7M9+fTq+#6#^`GDTr%Lf9krk(@d8H~Y z6E#CJ*VAYNSrUTh3`Z~aX=`7nw%=d5>w*OWzQ%SEePTgYx@NVwzm=&!O;+xP)VIUK z*TX~CUZT%Ts}|Tw_w0d)M{}wwrgr*44)w8fp*^Bu`$YeB@|9wU?B`5C$_um;;2;IJ zQ^cuImP_D!c7`M>%1}|3m>Wp-B@jgmA00ZTPnR}Oi(m{>BPg=oeFm>&EWF-m70R>n zDJQL6@r9Q0EE+A~Y3(KYHkE6jRs{msKiFkNwd>>*$4!p|7)O7eY<)0x{-#Nmbw`== zOH~E0*q(%ICMZ8K#?*&`0sQrPwY;)xww0=bl|`Oi)kd16R-{&<`0Lm!@2Sdc+VlZS zpAW@?Zp!>MDLeC5rkE@rzW|Q8y-&n#UbKvNl0Ka#l+DEa=bCiap@V5>Az37<=*)h7;%gQG_=a@^6(%JXP66#cA`7;(xOThVe>ZU2=MokT4HhuK) z*jnFnk5HQ{H8dq-I$j*2qd!+uZm3R7xf5$M3B8uiMKb>`^{tJ;V zmbc7i%dTinz!Pv!L6peWQuz}uI#3SbYlleGCGdU1Of4PrBm4^@7@lou%pnS~ zbg~p|jWSKYUuQM7nCVoJFo?WdGA2yf90zeUTrQoCJjoI0bQ;i|IN`XM$iTGPy zE$uygz*?+PxBaxnNJ>)mT^!h`GoCynI zuhVomcUjv0#iC$Sg-+r@L!&z;Rf5rOOt)o+$;I(=*4oyO2Xy3E_&1X+LE7nz92`q6 z!GML5p$pL|@kok;{v6n5yQJhcE|xSYeeyG|xFG8qLa2ev`YG4C129 
zttw`*ta>A@9FG6?q*S!(mS}Pg6&-WwjgJEGxk2u9F}H7H5%Qj2A0+X~dE|Od{790> zTCxBN7tGOEK!KPv7<;l#lQ4Hsjn4<|fi9(D0L$N0X~ytxGLwohqu&r!-a9i71=?^RHCuV73; z=DQUz#+xJ%fV%vsBfKV%{>j~TxjcR4tYIfAEPu)fYs}d)LBK_JC@d6nEk9hzsk<@BlgjSC}3(q&64%YE@fg}j1S$ z$QK#zfApm)`r&agddwtvn3EAgO;5evhyl4^e5;F05o_r4j!blkA7zB$ZJhKTnY!e9 z^?siuqqksGr^Iz}#zl~oph(=1+&0VfW?2=ru!fql@|_-gl zvzxkkhD-PTjZ=+5HD26sM6)N4O^bIprGR~L(pexGsv}==Lu!gsMGr)qWUfCYHP-lU z84E6dVS9FRJXVolzsWt=ztqI9iayR<(67N)1hf!Qegw+~7$pxR!I==?dmF52XmuF{ zBq3W_EA?-f0kBKvtDzVH%RAe^{*hju!8qa&LiiIU_6oT`^te%>6}VC8@Fh`A!!Lqh zz2kdjsTIS=rHfCj+s-ePaoDwGodXs!93 zf_w9dK(y?dm?+5e-n+U9w9}3ioW2+8z>QMw?MPx0z+Nq?`3hwcY@7}9?zt zw@Iks+YKJ>Z*7qS%E+%xz=Tg^v=w=3+VJ68hNa9VDa5@U?|NUhyg%E4N$sUlP{Xlm zaJ^hqKzcXrM4hYF<{4Tl4&R;SDQnU)P{MILzPKu10L2e0#f{kL@B*_lr!`OX?=qj_ ziqY&6PF2bYfA-{-zd{xpNhwP?r_UjfBHi6|xRp(zBw6PQDt^?Vb^|pIQ!8G~kY7@x zoBX8n!L@8Cf*{uS&E%pea)0yk(6eSgL#e*vdxfBIU|J0a%)4E>o<|CORyHxIEI>wm zTA>*_Mc-5Ro#zQ80JYl0zr#7a7Gbx{k_pY?DK=sX3BGQK8ALMY1y0qTkSf~e2@T5E zBvJW|2Ar;W{uG#-ImvT>Y`AZi`2`>^sxzr<9ZKd!$(-eE_{k*~ZWc_jcOF!%RDau8 z3Vxmtch=5+%-?sc4q6TL%HNZ29msDkg}$Td zZ>?7+LTYpwHv z$D(7-a0Z{Z_=3Kc1rn99{G442Za(=wy58gdRRru&tR}&TWBMn}O&;r~YrI@7k~^c$*@O``&&_W-GcJ0Hsxdq8 zo1=+OeX{~4LMXL*4+dYA1kw-9C%Wb>_kdrTW1>WF7g^}xB0@qQgmp;mR^HEhx*`j& z&z_6*nt{cC0ycI5%{Ok?IVV05q8E3pBBhOVK4IH=s}6B|44k{ZGqxE0ExXfK;q ztmEMjz2cqCP;GgKs^F?Wnvee%huhFXb|%(PW)k%6=zm#b%(f+w>^e{pl9_5U)~0$ zIdAn7=vcZx@>YJV{mPsyozgp?RNvTIsgcMT2@~q8^E^RIuS%%JX8>80Gv=_15Qvz2 zbcYkzBE7m~o{*`Yqv8t4%5O8Gs&n^sFWS&rE28mKD$n(hQtOtOMSscySFQHW?N4(| z(&#yk@0-_+t>kCl`jXs86Ncbwi-r&{LILr?v*uW!ailV{)@-aLSHk1b7kgUbEvhi& z3g7^SH$DZ{>)2K@uSPM^-`WbM5a=Vxb)TEwJQ)(LXSK?mH7SEF=8q8nNGkh_{$16o%gB3m$ z$CJ%qC6QqVi){hYxOUyJ!-SYrfPSwJ?@b?ke^yuiywUj>HXiaX49 z12yF&cRa)|!xhk1PtgyL9OHUE=_lRV61tI)N0Orx5cY@htCWd!RMD-b|6+0-kh#_i7wi4q%m zI?-v?Yli1-wk+UAr&Yhb?Go{y#|bS1V`|a2$P_Y)RtXg#??AY)=j`v3S?es5{a*lQ)woDcMb5IN9Oa#!VTlk3 zi0g-Z$(^oUyvsFSYTvJ^5A6%2zsLQB%QKja#FFqnZ{00A)Xjj7A7k+hem9AT>xe6FA-}?+2>;fSAbDs=!}`5v}E&N6|^ 
z+PGq(sagdb6H8sZ26>RPBY)9Mnq)7-*xdEaFtlp%P6hD-cg!wRX@%n7hgCHwqs?i+7z02-#9%>(yk&YUSQCw(2O4xkZ@ofi zM)#q^VS_BxLQ+MceMKOjd+p6|?(Pb9RVu^sX})q;ZN}DY7=!A$>e`$TroV1rQhYm; zdQ)?=fO)z$0PxgDinv>`cKa79@l&^SPnM($2sm-gRyvx=(@_05%bNQkp*Cifdv0@1 zr37a8$Z8e~%I)5HuIE5nw6n`~BOaRp?0rgm$?^L&zXN9^kZ7#0yrUD|F@*#)hu(*| z$wi(^5wTqBSfaL30-G=&OFXZADP>xd5a;~ni)}Ao>=InV5I^BYV2jbnXe#(?Qaw4(-wSNue4 zBph;H*Ty|^ye5Ng7_pALTkv|l+0NsF2SX;Wkw4cm1h2*!Lfb;&+|Dj6;C&aIPle^-bSiGF?#(KQB8NRf zREohF-d!oOVDHNL&H8?aFu7P$fl}k7TAIVI83)gXgq$8PBU3fh`qN8Mi9ljrheKO0 z_zO-*AH09Ah*DY3|FMcJAo_*J<-Jb0@a8mH1=R4&N`yeohdTt|NK^S@wjicgsKBEuL zz#S_;i8h`u3!EBRGyw0MVtwqx+fJQ%q~jo6zJe0XXKU2GPgX25sJ?Uw51sLK;Sl>- zWhobYPA!%%^_3dejPooo1HdBBLGSxBQc&*Tb9E{|Pzgh6>B7QCV_r2!pS+;@ibH3U z@jm!m-iH?llxplP%sJVZ)1i6cWq3uTjAN}-@YxBWC{=3S%YqF0Z5gA|9$&Zy+D`JR zlIzW=P1wUZ`QhgQ(u2j-8{}Bo?AjC~Ip)ImKU)g7nzzS8*9I=L(id|VJo;=bxi=>B z;3LuDvOkzGc$tIVvi7f)fQ1fGQ>7laGqbR%3YOrF;hkXZwl@uRpz)1OeKXg`Vj{t6QT` z%mIKyOAZUrhjNGvLPlMtr1n^Fo)%-?tMe`K4@H(2TU2W4YoF z+Z zy;9Y^B2;99pR+D+wDM%wJ^^|Pj6U6L)d*s;rk3_9ZQ*{`JAK=T^o~NUXX7hh_IK!> zRk_cG6+jXxkbnWLCcpz9kd=tdxk3W(P&&uqO7AtfpSfGdUY;ZK@jT|{e;-U%Xi(y~ zxgmiBy|LiGphmU`6~@`$PrW<}zV6VIANXZkBUJUfwssGtSN+(FX5)&t5@SwIJk6m$ z8nx_tRfH0+6@hs(a$);Fo*V0KTaNAnoAuO(wvfY~^ykP@O}!^P zb=ysvssQrG8#X62#|10u*NKY03r$>IJXy6G%+$ivn&ta9$rikidE{aM9thBJes7># zia`X@YuRrq>-FDV1+CmuG1R5y%?E})skua|nd8Y3aH!x+RW{wRbN)LkPZHA;-&}`3 z*|xr|{Q&I2#SXWRe#^%sSzqu-jKmSgL+3NX*mb7Av2Nv#@PO8`+MPFSt014-*Ops9 z>xKXaUtNN<`jMuW!it$E?5w_N)%@c%tA1~O;By)(nOYe)nfjTj`t9>Kvj;G<tCEe1UM+I{*YhC^n6yQJ=Rm>5pSfZ9@oRWV_?Fmx;3j zo&yrqg`3$oY!{{PCqH-Z%MY~p#KpBYXE39gbP$|GHEF~DGulbJKAw94y{TGzNE*1p z02z+bpz^oYWo5Y%vsyuSOpJ8t(xuF*VoSD(=Y4x=GM_G%INaf1x>E5<1t(UNetAQr~6;qh#j!;dCbu~ z`XM8c8P(r$BCmPdciRsbjxiMNR`1$1SRK>z+h!S%q=f@hT%7qCL2(i1Tg;xFjS1K~ zq^`MUCcYKLB31qR~@?{i_bj*qubPb}#pYg|R-$Zmw=%rt5b#21SlN4}!g^pT41 zbJKuJa4R1ZA?U(70_G7#J91ibxB`XD+=ibyk-`Q{X8|*SH{mP{yN%AHrJrzBvze+@ zAJ78~7k~-Dd~=-^R5DJzBzsw~s$Yv_b@qjHm-zi)3vI-++;)YgTJRA 
zB^E@Sp+_^6@7&H79?on6WIB~Mxv;s9iD^-1H3E2){y%d7xos5m$pANTu(u*32!1cs zYfbmwyzto6ym`scwZ}w9bTj^51kMUSeK2dyc1Z9!oCD82HCR;R(eJ*udB%C!)Vn$- zO>ugXG-UwgK4e8kP1wcb^hkhM`dV2bA)2C;NwpsEWbX->bYN%fSMH@(u$8)5yZFQD z&qFTcl%?)uMF!zbqNbm7m35T-#{Ome4iWjAEQm7I8a%wp!d^vteL4c9YHly){?k}` z;hoyVcyijO@d_6Fi7lnNR`b-~;-S~4evq>1X!Fc{mp&}~7c!t6<`dzNtSKx2Vq3q( zLHsJ{*c<|?GRntSj7Z;$92mcakl^|r*CQ3pM+{KG3p^VPxWuTb13pw zOAA2NZgi>rJ?;A;2LsF%L)N1`wAX=|yR^OYNj8XT&LOaZhX6>*eD?J|>iFSEHlP+J z5ZP<;678XYkxP{`lcrcZ2-)q|_;JRsWn`iqwd+TEc4FQBU$}w5QiD_=HVa@X)cJwM zp$4_PjY@2*p+uJIp-F;`Q}q5z(d2H+rC*QBC&0bVP>XA-Lg(|vnuH|Is4+RaAokL)uW|1yXRXJtlE znTc+8{Qp52R6ZP%225vOOCNg7F`kMFaGEebk;gLAW2_&#oA?&mo0rYz$x4{Wsi z5OU1RcT7XD9~{#oV(2-nVFE4K0cO>Uy5;9MaD|?KOBj1uIXH;{^u1nb`>N>sw2p)# zkM8t`+ES1*81H-Qg41oFb?UFqyx~7n-+yMb;h-dU$OfVjLk;Eu1mjTJ092$+oy7<1G6cx2`x#b_1n`l}y5 z_w=8g#MD)9MjmkxbhodT91f`;K7+ZVwo980H&|BRdN3`H;m83om4DC=-IkB+o6jzA zlFf0lrt;eQf2AM-wjfxAhD5PLJlM^kH8$*~igLlh7z49{w0}?%v#$KI#&+h8a>FOx zoY9~Eot6l!=4TxmazfBKsmj;+yR&rd{Lx~!6hQxEDPB!X%L_dGWgTLM1&OfjWcgp2 ziiehHrlkQJdPEV4`YqYRdmW0*jJ5;P*8w+-8r-Vl9;&p{szY;R-K_j=idbVh8i12+ zAN2z2|5F)X))8|9;G0|n&2>ip&WM{Xr0pRCTlIt#Mb)*CXWZkyiD1`oXy6i68RKs~ zl3Ta1YG5exh~@I|_sXw53u|#Y0j}U39#(;Gso;5bg-oLNX*PLj8#3 z1<_$vQ&3c@w-M>L?+iIsG%Ao@q`bs=yb#+)O{%~W`}$UTcm#&(CqmO0ukx%E#?7506)j=S0LvLmN7A`4D|hr z5BcD4=_0haiCvkgZ;Tze#zztCD1l!}bl-S2haEn#29TD_^L|_9;aG+RKx%h7hm*okJYIrYI`!DjxHo9&P` zd2P1R9qSq>i?;x1Q+r<@VtI~o0N~9aMJHjYQ<99tYF|8)t#D5>5`-9vCB$WhRBx}0Jj=>i5U~^aMmaB!!($tY)+Z`C$ zz=reQyG`-{k8q?2cuEYY-S|y6#Q7;F8_!LjFW076L`-M9(zXp};BEwQkerA8!$^Qd z0gxmChEvBU^XFRZ#J-f!vadxys=TKTT^dNF+mB0qlH_qKV{k-dN5b~!985YP;_IWi zt>k25S{h&~`00sVxcX25dktDe8j|i2OxnI!n0kEV&rZ`=%?gViLJEYqrAVI zIOyK_d2GVEyJiBog9KEhoP+lT`g{BH*$K)Gz#Pixu{8fBCv3qI9YD7H54Y`q9{=pJ z3&z5{&Lg7QN9k`dYyLyeEHMY-cmNzJ|Hf1NYgZ_N2b*clJ+hVk2LB#QRVsi zJ}%;ruBbuy2n@K~3aA=hP^>dBT@@9r^ymM3+3~Mi8UDCklmDXA$G}jJ6D9d)CNH*x zU19XUKm2g*@?YjP6@4=A%7lB!uK;62 zI0tr4(R3vcMt*NRX4H>$W7kTb=z^T-hyZW|N;H3=iv)f`L#u%x?jVN||MXU5&Qy1f 
zQ>>SwM#&L1bS=eU=ffd2^oUV=Vq?iFaJN>WtgIVK?rNtx24qR#od9Vj-n`z`EaZZX z^10xCh@?pVpN_iHwQ~$+IeH1d6f2*SeaKC=^@bg>d?l9>ZDqF)mrXywQk9iJH=I(5AoRqb53%otyYS;?_n;#p>;Fhg zCduOmyAD=rG^kF{|8|4IN@!v=xn~)UkWAJTT@d=0v^#L`g$LExnHW2}nbnjhL**SQ zTkzn%qMN^|y@z21-bKY2kB|?r&B3&P%UWdWomjm#k9s!;ozgffo(FKwWUo9){4p>Ntr3%4la$>@MWrGx?ePlJh z*nk^y;_S6m6Rkn;HjB;Y_62F_GDZe~@-uRF{o9iJL?CH?;tx9We=>}y`IzwuqB+_O z93MpS+ZV=tPfWy+iVo??NFWn_3KVkCS438RveBkcPfUxM1rt-Zv3R7H!GFoCE8fT| z-^HCA^y3+ks=s6?gVg|J^r)L@jHPllw(gLI`aj9lDKx}sOcONi(Sra#q0xX9L6a`k)yr`PK%EfPKf;0L&`(a-N4r#Ae2fafOFY?}1gj5&MZWj1QX`kvZ8a-I^epG=zSoYK*W+AJ0W`Zni ziKCXZ)O<(lN{_q$n5bNv19-3PztL9OfSk~9=@RY)=wRNzV|*>83 zhs8Z&xd>S&UgF@7++DT7f=Pp&cEjYG_`Fe?L-zv8aeF?*H5p$w06>DuUz?o4L%iVi z;y4jkIaG~4*sN;im@A&^P$&a7ghyK(!d5*MOX(Jw9c_#`m`QX_0fMak4fY$?k;?*@ z{SPkf-%1cHV`cylQ@=a{Nx+}(jVsWR{Ku;u4gr(xeeLzmi&#XRkLj2qNjm>2H>(fe zOIF*D|3k19JdD-f_-kt5ldhBHA<1@|Gs`H;9(ajFl-r}Dw;>#sT{sg%*Fm!t{!>Yj z^GG0hxx)d(8U(<@TMCehnV@P<{HHkScFT@8>c7lR%7toEBuhh_qlleb><>6Qx%9Jm z>K?=w-T$r2L+HBxtOy{2lJ^F&NT~0#>h+%ly5HszOfWs;`Bkk`NKOBD+5di*ng&w4 z{m8^(Wy{?B=RxC$L2N->pb1~Q!NuRuCSO!HB&6t?N8oxQfDh?yO9Kt?NAYtCQ;BJI z(oyr*iw0iUF^l>INkmo83k3mV0^Tor0yhyI<4E$iHv)FEV+o;dOh=^T!wuAFZnl6u z6L&Qc{`DiEzV!~yqRNY{;@&Ht%f0{G#W|8PC{VyFg3c0OElG^8k>?ojYX{wVz&l6O z>!6%M-Tc3|p5~-4$_?KW12#pa?*AQxoDz1G$>n^Ja+98tz~ywmoslnE?`S_;_2#NU z`5o|D$Osmpi2Ozx>;7C&3%o&Qc}r9FUkl@w`c23&R!S7W#_{+zkcE=QzHIH~Qkh8~ zzB%RPZ#_2LrJ-2|UTjopRHCUyW`BI6ABrjXh(+&@d>!T56G&I=o=taS{5v)Wqpk~a z3}y@~TW*u3Mp*CeG#4@qQ0!nQeeQUk`~CF3=gT=i4tulq-q%{|TG#cDtAE{Z zrbTN7t>Q5f<;B~FZY1B&+h>kC3-Ag4JXu_Dy8Uu(slDQM!%;9)i|_2CcHE zfLvC#``o!O5Y;|OC)IGY35^%| zt0?fx3Hjl#yAjLEQsMo%+bu1(!ct=lyUXtJGr}{XaL>Kd=ouSl*iXdipSB9i-}na$ zcq=S+=uhEdp@LW!TfhNRx;EwKS=6Amo);fnE86!-8!itk*Dkh$tkBIQg`Ry3f(IPF z=hXJjk=UH z>t*Hw!vhGB=A%)%uGDxyczvt4Bt4Ai(X(3q_ma;<-EEiF*4Po2T^8q3vdYnah(4V? 
ztp2v(t?O(55VlgP$I^lq!{EJRZT5(R=>f0~*o9(H`7=M%yqx1P(U4~(|D6{6k22F% zm&3~8D~QDQgAvljRUnG6YG1S}I=(XK)dJKs6pAow7MY0s4`Z8ReRfh3Y?1oZR)&jLLM2vg0YCT+oE4PnDw7YAMo2}5$FWr2tRKJM%n9*Zl)(8JA=H|n5;*j| zl}EQkP#d>wzwan;Mgq{f#3vuCvps>@fOKVBUb#|FyZ*HH$aUbU2j8FW91HU}p?LJT zT~GLyyK@(DpfeEevIcF5%o8Epdgo+Hpj z@d_~MML&VIYo0$J1T{<^i5gd5zeT5c;m;MY2D05_%&HpPLbAGen-}@U{Kd9>wjG-AoXeQk zE(3=d%Cap^EOV78JXZhYpf7WWHEsK&Mb_(HsLcXI+;1~ratF|(;wH-6FzaE6;W<(i zs;ZcjMThyBes&Ku4%{cZ^0>s~cz-sn2Vzk)2KUerFd^=NI)Yv{ezJqUPsXF3i?yxpwlDaE*ZjEkbz^aS+Z6oGT2MCjd>k)eyDzo66rc}lp{<~*ti z+AMvAh+9T|dLCBa6(S}o)_M+mus2%UHN_2ijKYR_{b?oJ}9Hw5Q2Ik6CdQG*2(IkOfRtl{H6i$IOt zX8q$l7J7rYHQ$s)#11-m=c8zINJMkUUCU3gj3Vw31L zIUFGaZnE^mP@bPD_z39m1Ri7CiNff?i`#=1*@f_vIx02g*V5&=T3@?=QjgN|>J4i{ z?iwEhI&k-Wo5$7jg()Qy1;ap~L-O-dcG<@L#oakN2mB8Kv;MEH$xKq+DnghcfkI|a-r?h!^LJ`=hC(+a#;xnBFJu=4#jP*{4R<5I&I*kNqX<#*5@t~G%FkyhQM>53KC#08u6MK zqHbB-T)bO^eq$eqq#j4vr=r`fjz5IqqgZGHJ-9$Ygt777flY-*hZ zz=wf$>FbI+q2Z>Cq2e;IU*?QdUL`^;PZmV3HwrL5jM7Z8B1QdZ%W`8Wz{*%T9a>6} zGXx%2`@&2M4IM>ih;%I9{`#hD*)#q4GNN20uGx?kcAw86`hnYKH4f&cc&GodPbEL& zo(x-0HKVv4Y6OyeZ0HKcOZ2|)X&V4cs=zM=hLb3IE?vMK70v!gSTBc%X6W^#)w4mi zMrX9od?IF^ldY|7@|)SL?a`&Aug;yGE94F$_ziY}u=ruS%~lceOuSErA(H-Xa#V^{ z9>^||`;|A=d~l{icy>1w_l>sVCt(4Z+`9WnFBDZ8FO=c%z+Fwgx;c46&%<-hoq&1w}Br6J1h^k>z0kaM6_SkeJGDkK@ z*3<(~>k2AOk(ll|@^f)I$DtErehEL7iaKT0RF5=Gp4_Q?+$ivXsnMdSq|p@r+!A>Z zvZY5BJN(vnqwBA&J|R0lfq*XD=w;xJ2wV&5x?GKKtH%5Sqf5f_Z8$?kCqy8m)?&4} z9sh#D;DY)RBz^KwmkXxpY#~iCzRKuvs|Si@-(MYP&$k$LBMO#-)tlTt(X|jPQ$Z)|Z<006H|l)OYFAp2CEo?QkH{BE3j%ev!}DlZt4CScT9 zkUkY|oY)eQ(@}+PT1K|hzklI)jh09T1@P?mZJn2hT#AIHK3zES``ovlq7cNQu8nWP zNZ=I0H5~##IjXiFJdpQKWIlY!ZZWCHUQZPHL28bg?HeK;a`EE*nswXnzoT@~a+9GP z#@%aZ`C*H%kpTfsHhmOmlEBbuk>?3`tbCa99SNR>g72-INF|23j+L^_HZF5l`98RK z0=;N)G2_kjU^uEOH%)IKJSt{gtWP~&)d#2N3HG%bbNBMsu-~Xn-yx@Emg(AN9G7&q zzgS9n>*o!LuJ#SCtpan`$z27L0|{9-`}24YcaM|ij&Y=S!5ltn{RQoz1L|?MSjx^# z2+{`1XH*@)mJljr!uH1U-S3IX^VfDVe#h7`vvS-EiQ6J3|Ed;tl4grpE-LEQF1DYZ zvT3d+ssq_nuWAD$hjqw{E%_C6lg4{^+@mh?$b2(48Mqf)N3ePaQ0>dLXsRv8RbVIt 
z;BYUmeY=m%1d6ndYv$WbRXM|NoEy?$$g-#ln)ZZv@o~#Kp~pVdCembqc>t_<}n;Wy>yg{4BIh{_4Vzg{Fr zymjOUc-j?rNm}K{ zLQJaAXqV@Bl}{+(*1X!0y~Zmxe$`eO@`WkD&Y3G%g$kBG(uR`IK|Tu6L(<-o zh|~MLx~4fm(We);qF$*>$AaA++GKLP|1Bt|iy-v)0v`tWFIM;CZg7G9gcf$>)!AL6H7v1`d>U2gGlxfgND5OIcdZC(2d0c}N zx7%PM2V%(uL?P)K&Iwr}QO)Crx0^e;=o_6rcJmz&o_k!$ovyt z=mf8|RnSAjnwzVG@{w7+h&2TJr35!+Cj2`$TgQvt++gw$%CC@?LF1e|!&!Ijt{_A8% zTJVLKp_OHJgs54Sz3@{CRg2q~6R0-sbI^w}w#D$nn%bw>g(z@VloDwymXy?iV3LpS zL5;LuTpM>!5@I~YIx7j86xKKr;IZ^BK{cP|sOH^`(k}yyDf5wB&-iej6xmF@|Ew}0 z0oS}p8^r_ev`yX$z~KZ-bKIsg&u~dSmzZBFT8ds1q7lwmr@gbGW=l2NfIV61q#4<^ z_iRV9F81dI2yu?O&G?h*klAE>X%(xT% zm59>Z-N#7@6tz{MZK*3#i+6MZ4w!!yP>sJRMWWZ*@=us3dkl7@M@7+s&mKNgz$j`N z@m@+H+wo_~?P5&tQq~UDHZ{~5=cQc@n)c!-w zZ0R$bnoY(2%v4dcJf3m4GRwq<kthQYI%{d*Q06aeuZ1vRL2XIOopSmytBHE)C)Z-?B=+ z7U~u08y&oVbtpgsx#=f_U9L5G7ZB~azE2jP(TIVGWSw(ZCt;qUuf~$JtoR;~nAFXi zYA-z;9qG16vA+*kSdmw=IeOJ3`grJxc1ie{I#>Bq`2h)fYRrb3Y||Loew4>{V1rQG zCQk;!y|-oStfSM10j(<{;V~iU6;<{J?uTCG{BjyQR?SuNGu=Td&HS(K2U1Dk)-qaX z-}}}=X)ioCUjM?FYe=(m&*5W$)r+$2A-A0_UV=XA3;2zuMMt=R(89>u%*W)R1_cSN zOMkF&*y1Oa%#Sw!f5j?|<-<-W91NULsc)>PWw#kXhmAw|mldJQO#3%35j_mG#^Cip zgDwM+!o~oi!4*s;A8j`) z3;{|dDq`=>^AE$3%UKl>|3&aTycSoG)DMdbl#~Fh>@xYXM&bnHL zh-0ZB+ECl9f3fcK?PVsn%jI#rmo!e;-Xtcn30T%`ypL-1d<*u}n`_H{lgL*6kyIC~!{*_vT0Iv$h8Mvj*P$ynY9CxeB_O24_~K~eNZhvopO(x_Pmz53kr zoj^Xu9-CRMJJ77^;xwOS8lp8X#_~MU6~65U?1je{_%HT5ErcA9y^92yE#6C(ZoM|X zsRfASPEeO>nm8p%|9Cx9s9Td4~0RIbGQXpv6$m&xrE!er0EEX za=>C^P`so`wR&?EK>wlpKgSI;`cN*T18sgniZ6_m(3I;K3)i`3@f7xusSaHJdec=1 zBiU`HB>G%jMy^uz&~iq0!KZ@vlVHoJ`5yF}%A^qvqfCw2fqT%X{WS(s?HGNFxjP?z zNGo8(=laNt7ZWI){KIV**Y=%l-fne6U1OdU?yovqi2!UOH!AdcJr}3`>AfNi*!;CF z`d;#YgkbuwN}>iuvXU(EgAC!B{7mv6epFbp8K|GIxhDa(WB-T^B=xyXJ~uJxRf|n4 zy)u5IagqC-MEwNbol9{K;j9aKtHf1yCbj&rolC3c+_-_o8Zd=fN*&XLbENRJwg^hz zG4_Ci&W;0_DmbU$MvGJNX6yX39qHPx?CVeZ00j$Q^}W+x!1OqPT}q0kI5UuiTE6jH z;2MJNX$012py*owSL3bC;-^`E7iL?14>cRJ5c$<15dspeN7Hyd{VrwUrdiD>o3puh z!?$p*;*vh7ctljPkO|(3Bb(TSAxS%TS}UKZHBXp1Y=n8~*taZM63=osFy6GJK+=#( 
zFy+^Hz_3jVl;#w|e>*}uF>+91Ry(KrSTxA{i1{THR~!~_s_wP7{mjg3U z70bP4zK5~x>>y>jY^_ITvS~b~Ad|w^NZ=hF-vB`jTlMF#m;4kF6#lkPYEcV9w)1kf zVMWOYa(w%;yBQ=I#mhVIk_<(5SkGNaNRtIGFyOpuf)i1#`o1B>B7|@7dDI$-cqJIp zpxB_mj-Oka$J=U|yJ;|GEfeZnLK{9f%TA$M(a9h_QGjQDijaL~ruf#Oyy_|qV{-pp z5|I1>DY%Xc6vauhlJG-Ui>dNHVmr~5(5u>lmVW-`h}2tRs@Xc6^DM8GKFXxy& z6mNn8M2@0ueMOM0%Lf0^k}`#HI+df0w9O%c9ygMr)}N}iET!@19Vg%Jc1`UjLk8)v zkl}nsQ+gmwRxlErI}=kiB| z_uwX5CvYl~0En*G)dYH_sO7Or1fX75*M?5A5EHl(o9vaUq6hD$zBNz8yeb2W_2G45 z#0zNhAcaK z3J$^YRI{IieKcO-^t-07-=38Bg+*7_`Qj<&e~md%z?+NWu|2tZWfa__A7o5PuLD*f#ODDco7mG*~w=}SlLdTLNCZl+Y3W8z5@bl z#VJZ*s(Qbm!(j{&{ZftO$R{zJ5^~dOY~zd@wPqnwnb!psH^g-PJPy1=R_!5-4Pd?9 zP1#&?TlfswHGJHZ>ZeX98hVRnC0f4HuoIm0(HBfEWlrk+dK7(ooM0>bWHR+^-q^9vRx!CMJX?m%Gh8!#C$o<~l?yaAp+SJKLPDgd<*CpR zqsL4jNF+@zTE_i;9$({ii)FtjOpq-yjazbfIE+wAIf8(@SJQ2dWNAA$oQv_X;%oOz zfzN&hBbCsf*9A9f1|Oty*B(Z8hT#|tbC`7(kWU%2QjlBbS{%G{e%B1)O@J41 zvLyiYTEq0F3&GSD1L8X}n(@DHN_OY?U5ksVU0*)W!KlnTIDLE6_2gWd33U=!rQsz) zPicFR2dlYy3E@eqeD9U_ja1!Q=7?fam%cmaLXNyQ>Q!6D9$@4g|x@F?D5YlLMttKbQAAL|nl1_;?~FQS%}4POxgJNI%Z z8(^>KIAvbBwct@53pl_i^xw#bWpafwUz9cwV}Sa(7BGdiNRi`i*#UQpTw z(1lnx(9KJp$K`hI{ns3~$u9%(M}ih0Jw8MOfz=sNmJXN>OIdR;n@BjMv9r zqm+qZi0O|pbW@F@Rl+R~Jj(%~{L}P+gKCtC5H&4>U$6E&^*5F zm(4JwO1a=u$4g(v`y*)6p4Y8RBPpq~g26GIAmsrdXs;HN`^bbw?>^jlthw1F1>3E* zUzj!A>a?cFD51>C15!|eHBRe(u2gW>L$lEDq@E`-dtc1E@Xz(` zb39DH$P8P&+Z{E{nA@NpW|ioL^@9P>J?%DApsDD_%}8WWkTfEEG1?I9NCJ%^zg=$V zgjO|y(yb@ZOS-c}{8Y0xhAHmlqH_0-sdKZjM9{dW#xEqCyU5V<1m&uNZ0&+18GTE_ z#?7YAf_@R6_EZaW67Y6Lo4prJWsR?yMz%OuFcLP7$zn>RVzEPMt&bh8zh)rh1X)Ta zwgA{8qOz5oWAIqIg?7p|?w+6H@{*s3@D4g|{xT3A!@=O$`ohdTCIy(+`TBqU1g{Qm z@|FlZKpzwCqpWG^1qhl7d{zJLWN<$3_m87eyzMzCcums(3tuvoMFyW9{*2Osd4W-^ zo0i8(_1GLQBb zIi~@T-Zg#h$Ny?%KE3=l31H~o;nzePvQ+fX3w-fsiZ!QtWKd9|!RM{9GE{RLJKp`@ z7*Oh0T)V({sI1h#r5&gWus^gu>ZzcN=E${BY| ztHRSM$j$4xYp3V5$D&xb&&qK=SFk)6{TA~Mp3#~3e8*i+FT(Hg0sm!r@0v*Kv;j(+ zIPN=cuvs^k?y&GIRZhc)E{}xqYWRS_sAEIu@BCD$*;F-V*TolU89ogccrTIoueoI) z3Gd=nLYx6cmw=AT2w0TnVJMv5ahdS8>{VRDL0+U1AOTnDsb 
z$nuq$H5%^G7+=^<(NJX%1hRc<23PvU-MGWTBQ6t>01K%8OMfYAcdu>$@jr96ymb-EqX^g zy|tTiGqYkGB_ePaQuR#W(nvnhWE`cRFu9<RF>}5`H)bf_{ls`A$zx9r&~s~YqTA+ z$Jx!NSuqwKWd{ojEQQ9hclHA5WU{$VFe| zOlUVEzZEKljFWoAZ2~mdr#@@oRizX1db~zP^3@OWu;q4`53VMgG7x-82tMaNbEAsLn;gHXN?a|K3&wFp(B09f zoDFd-(NgAUVao%`MEJd)8_BVHHkKv>6ylxLuWZL6!-c*7^E#1_7A< zB+g7D4YISFF&b*#6a;vO2{Jf@`Jmp)r01NH!>M#qC1nMFV_jthx*YpfEE@sNHXt#W zdV*3v>F~sg4PW*u-8XBGmx@Sm3=}#%mJqx%!um80qY0~*w_v(CmO}$#%|`}Gs!I~} zXat}fmJ6eLMdKq>)2KpR`{^cg2=a-Z%tfC=-AsYys?_(!up}nUIFgccSY$LTo4&B3 z0CK4Zej4ch>x-!3WX}0WF*x^zvZUMqilp=Y9LEY#I9h~n!;fPe^sWGTVd(t!MSS}3Yi5mz5-u$B(4HT~H?KJy1a=pypa{-8j)2hDuDDklpO-x*M?dGQ$ifrfrfj_Nket z>H$926CqoEvw|YaflIC3sVcFTE%p7b>D|7H(N<{(5Qu0OTXRHkz2zJ5iR-NMR@r!XJO){rra^6me8tT{ND zZJ3#Ck8Jn*$$gC*$H znlG7x@kP!<5*sN$Pxe4_n7?$BP~=O0mt25+1TwRX4kt&uMUd;l&t7G7o6d)xFmG~H z99oz%mW88#J=2$(HY7u~FHjTJCHf2k)h)pjLGQROTu^`exWT{@_c;S@=6B1`Q8|@+=gbY(&g_%gDfv9K`?$L`5y!iu^!yF# zE5b}wrcM{}jX-UvhU5MR%G?l(6Pd9pbLXc_Hd~~}%llxIuYvA9OpUS1aI*tw8pfN8 zv5J=_H83$nyG`8P^RW3;yDM5^A9>l0kv&xgW^6R&t2hc*m6-w>@_p-WMWmWeeq2t9 z-&1_-T31sxP~6#mM1Vk;fF;n&H>d(-8|3PS6v1bL zV(#njFCY2+s%?w>irAZSWH*Vn(p^JyUjh&ts318A!$5)osAdfG>AS#|6Rf#pW`4-B ze1Eg2yrR6weIFT;)VnYw6B%#wrzPzJLWpP1HtO=hx&W%`y3h_kJ;Rl8*z1Qy<9sIg z$>m$!1vwP8?99tMvGTR8jiQw!COC4F+RV_wP<8N%92X&?pIf7b7J>0YLGD(Nl=)N6 z5Se#c93I-q2Z5bRveiT~ep3)WJ);ePRVhHAT|qiH-<@`N%OnG_)$IFm@2&mCb4u0$ z(_~7uZVytA#3Xk=qT}Z$N^T|G_aTcn*EKmue<5#r#aLRDXq!U{_$3(dl)Lowb2sJ1 z^Wh{~6+jz8!ksF)&3f>B=t1^R5L!k+^6pi$UJumMXpda<=zE(%oMN31%`p(iO96UB zl-@#~JFgy(sM6%jF!4f&LWo;Z9d$&+Y(~rGR&!H@cTiQ722{eFcQsjzWK&O2%U2k< zsN_h4Pv!-I|@%v9Yd( z9RT)G#D0rc=#NOrIDz(`7s`bR-6nmesxzAB32VDk

_SWC zXO)VBx%1z_zar#%NP!zDP+Nric)x~;NuXM0R1Q>r*#N9o<^bI}-?cl*G01Olz@YVI zul1`YeprWMObgpN%&5Gp}Ha#mg0Btd88?rFdN`K^?ku|~g-TDyAFZnm-^?zb z9`!m&>8(6$S>2CY{1Lxe-v#VdzIbiktz5?ng&OkA0&A8O^7G^aamGx;hZZ{}uPhRl zhyDVZCU}Ni#v;6dG35M?=Pmt;8msa}>)oDi&f!wX;n%-RXVSeEtrsAl^drFGNygRi z{t|o4fAWe^dMbL)usB%Y%Gx!*KkV2*>+*YFV`X_4@qXlP^cHOQ38r_b2m0v)Xpe)k z1kT#b9gooLqW|VCN>edfZhcC4rr7@6y;*EBLa@8!z&WC`>h<^xk}JEc!>@7j)?@*) z0MxMGOsKq@U|18NJ?v=JE^lb9nE2qsv^#w)48_`GQexQa{e9UIhUjp z@=aQuF*^|F?DFmALHd1mePQ4(ct7SKP*CH94-~^5<4|U1Hb%E;{l2AF*skliy*|jW ze;FZMSsU8;$3zL-rTN+Om3Yq?h25su%Jefl__v+RA72o6LG{89K>bWKF~Y|8t$RIp z{eCG8lV`bqt=O?UZcJ+z*oX8oO8qK zAD<qR)*IEx8dwWd0PdH;e{PS8`aBY8? zn?tFup5W}oT@;O4NSQjB{u?f23(&y_*em-tzIfd<1<*wh!a`@pbt=mj_x~ID_Uu_dlDhmzLu{Pd zgKq!%t8g&fDxjP?NVBm>>{6P|?OyhGZVy-uz;OTWEBdz!J2;(RuGBD4t;C%V6Tw>g zS!8jE{|U?f*}wBhr8&~X0h|+)7?6UQ^M`PT&PvzXkL0bUSgJYH`;;x z-%SLUHr-AtU@LQBW9Av^6~pamgGE4cVeS-hv@!M5`RaeA#h!W|f=ApZrY)}J95pXf zZ+i|d6#>ShJd z;OXvc^pee~-zTVys{CQ@oq3h+?Ej6g`e6|mu3kH8`gjID^}~v|f;;>-zUrC5D#r6~ z?9_k1`t|E>-e%FoWT_PaI_WdAYcL&e{J&V8E^Y2tRVS2qHH=#R=L76D1s=_xCr4O@ zh4?JTK3bv15=kY6S-{p;p0mAGodR|6NN`PA|32Hect%iN*1?iZ)gpoGE`;LsLDTMs zO_yS*^vMKz$dp&qD;bxAlIx%LvLIS#j}r>c`K1ndVf0&ez~sP^d(S?ck8b|%Z1?xl zll`MWMw^6K=Yzs|V5fZ_>-w3=K(k#ho$zl2(x;fGqi3rD7P2Sd7M{8XZE!NKYE?nc>Wpde40bL0(KGmIOI}{Auk_w z)ybdV+8_l>AQ(E>)f$1MsdSDFiKOYE(@G`c?}X6cQ$nas1~qp0eOaZ=BQ9%&=pi8s zwFIi{oy@YcZRqsxgR{!ZXH}cL@vFzn(r!K#^ym8WfM}0Pbrg zrjr+Y;n4SPB?Hvv?ziDpukAKK!QV9gJS_H?m!6k2!nJmJB}+-~Kb8-Bz|wS9I6PL^ z9A1t0@c28A6d2xu_xE@AQ7Fg-03|6|AE>zT*^JIN?J4P34jj#94-fub4&iJ?{=54q z)}k1DAu!yyl4IUIPiJ!2x*JtICT1%CZ|0@oFW$_qsJ9x$JipEM#RLpyOVL)7EH{&}(LV2^$M zKTJr$?mlYUfa|ijqH$`#{a{%~>@4^D3YORZ;X(=?m8v=Gjrc%{32^=Xv!-7MIphCy zAq5%D->8l0|JW%D|6kaUg6yo%(vxq&wr`}dh$nEc$Q}eA2;#?KzP=%+i>Vgx2kF|Q zWBn}Kla-SUM&PqPH5BdhPxnypy3l}gmzt@R9O|NrJbbrm_}BL(0{Ev5Ajoq{V7$5P zM%Ut8xxG)bG6uIc{>$=R){Wql7QSb*gI|hVJwEF zKDX^u1|-y|62zuy^Qp5uaG*yt@ss>Badnuv<;b*(7_}O7`1)P(xY3jayH8;!2n{hc}VRzsC`j-?5?05h4WTSJ=zUK2#pdi#r9*U&%1}*)4V_ztw0-Mj8WK 
z>Gm^p@Nb>De@6Fjy0s22TMuz$pZ>q-e|#tsaG0g39TX`oCd3T735+1bJVG(E2uYi9 zv{y0c?(BbCWR&{nx#H^0Z090iTD&rPvQUM@*dt;gg*NqPYbd=mXmsL~PA%IRjJECf zD(?=Nwj!mOi;)=HNuI>=~Zl|Hi>U9N}x zb%@*Y%&Itk>N1-(X+4|N_eb&R<(PZy>CxgL6m&G{^?sg`59Mt~3m8_GDr146GO@Xa zi)Vu4U0!F%3iN9>!C$Fy(X7M&iFVji;Uuz6-j)3m^nNa-siN!``?ABNsr%2_SgOD|K+s$1F~y%?yu2rd$yq}*I;SzFQtY@ zy4FWCx+^;@gu}X}VZ}hFVa^!>&-`;*7SEdX$lc;Aw0Suu*=2)m>iwG+=_91$y#$Ql zUq`>WedSZFE;P1A;Y$buML+BLuFsu>>FMiUT?|EpK!)S=yzMUCeM%i)mjRswF}ICR z60U2BLxmR6LwUvzEa;BkKhv8BGHcr8y}#>&b)eZgMOoWoeJnuQU|Wj)=utRau7?g3 z7fzK|9Mm7O`BTUdvDd|YyK7V3Kr`y(QDH1qlMr$3Sl5xaQt z^hf^m4%F1AR}k^fT)%#Hg;C4@zc*CZ!1}Wu|Ag*y@&NDIqbZwL;E4{g!wdSDXFE~j z{IKd|u_l~DX&9(Uj$pNK_hUQ#$}|X?E`z5651vfmxuB8oQlgR{vy}#2zE!p+2Y7ew zDsY%&I_zOp$gB2lD8}@}a-%4P&~10RgFA#m4tx6^9N_7cYiB>cKu_H-X_)ecM_#vp zMlQ%O8N641M>)dQ!CB~u+pgNlJQQV5YO&ma+hpVP1&_OU9YKC9?BolM;2V9!9z4dE zsq45zO z`(Ak5zw$gpJL(a%QA~%X<=oEis4F)SBC%v{<9fN1@>a*CTImpk4!qsVo#ny({r%WM zs^C4n@Pj)a4qXn%e8<#TV_D6S&3X*%ccidELoYtcUsGbpQm+~sH$pV@(>A_T%vaR3 zyeo&+`bT~BW`dJn*}0X^4_DP40S{fyMOsaN#H8Ln^1QYj5kC<&bOS=2-iRBbzNj)G?OH6g zMKwiBfE05?Z#4%pCdE>zhF>vqUmAU~Y&!LNEHGu%@8ZS75Pb9rS4#U$w|uwdDa%2R z{&0(|S%D@wZPr**Y2pd(3Oa!ksg{>EC)QKdFISZ2mt8VPKTT#A;v!E_MQb)zCyJh| zHQR>XcceYF_9}lBX&A^1?l7Jp^i{hquBI^HvDfiEJbf3v>=&y$Yh(-i>f%ImRINml zmvYS~-oDy1z8d2cAl3V`mOq3)J<&VQHk(}h|S*RdW-5fHJ^DjuPkrv8)& zRc77Gv53%A7H3n}=SDnzyyF8PR1GnpqqTf1wb6{n< zCvw;ml z7k_avdFLzNyNN1PQqt>tg99z<820OPYxgRTG9qdarl(H8=Y_A_r^ z&+)l$3H7TdfR$%-do_#x+8KNpZJkg*mwJz({9i}i)5@^X?4A>o64jiP#}rkn1%OZ^ zJF=RAxe19?FKeWJdbmPk6_tzhc8RzarUtKymUk8A%i@bYhY(*r6asajRI+{xdEKS> zh`i>&8UW8A{*U|TwOLpWggQ+bwb`g2HcHG-EDdy8$Jr_tl%f>zMwnHDo#W18;x*h}Ap(sEYjrE*K^)l^UsY{7c+Q^mnK`n>DVT z$PtAz6-vqlB~7vrgLNIXS3$7%Khz`HZ9qhMLd>E5_1@bbfzX zUDIPh7BybZX!`+%5WVJrD*a%mn&Tr0vA*6fD^B_zR_VlJ+X<9c0`hrj7m_MziL;kK zN7v8XBVs4`D}o;Knl^?u25|fkg(>u6JZxubs%pgj6io2NDQ^W{V0jGxj*Cm{&@nAN zexI(Z!MGbC5M4V6-r!%HsllWa{2ShA>64w+Ag7DWCG#{!n^!=qOOYoGyX`2Y2MN4D zS;bXR!0E)pQQY4PI32%xXmOK!YYgzPFw3k=Bgjxs$6ZFG$x-7SKPpi?L1LBVE0kTW 
zKMNDKZ%dT=kO|AYVIZq;`B*B}hw8e}Y{Ie@bE|4Kxaa&yQAnpsHbpEayFb0)OE-V) zMx`g}Sf0~tVoMF?w_Gp39+1OEN*=-437Em$7J88Y47 zYd6Au!(&l2?&r~O@cQ;!wI-NS8El{1o}Act!;NgaC}BIV{}Gd54nm+yj<Oe{q;i^QrIL$Wc zh_R?uHv>tKQ4e-urike89M|iobUsao6j`wU;(twg(Vd4Yw+|h4`2zUO6n|{i6jOKW zf|0IB^J`7T7rQjJDnImeL9#^ebec0&+w5=~J#Wsy=2`p;r`ZXP3BIV-HAy>yNtYJ3 pEpaDB>~CNF|2D(YoE$zO&vy78ORHiqi39$*r=WT#OU}Ue{{a!RCBOgx literal 0 HcmV?d00001 diff --git a/docs/source/pruning.md b/docs/source/pruning.md index 89e6567737e..fe951fc98e4 100644 --- a/docs/source/pruning.md +++ b/docs/source/pruning.md @@ -32,7 +32,7 @@ Neural network pruning (briefly known as pruning or sparsity) is one of the most Pruning patterns defines the rules of pruned weights' arrangements in space. - Sparsity Pattern + Sparsity Pattern diff --git a/neural_compressor/conf/config.py b/neural_compressor/conf/config.py index 64a100dc113..208f754ddf8 100644 --- a/neural_compressor/conf/config.py +++ b/neural_compressor/conf/config.py @@ -20,15 +20,13 @@ from ..adaptor import FRAMEWORKS from ..strategy import STRATEGIES from ..objective import OBJECTIVES -from ..pruners import PRUNERS from ..utils import logger from ..version import __version__ import re import copy -import itertools from collections import OrderedDict from .dotdict import DotDict, deep_set -import os, datetime +import datetime def constructor_register(cls): yaml_key = "!{}".format(cls.__name__) diff --git a/neural_compressor/experimental/pruning.py b/neural_compressor/experimental/pruning.py index 9f4e5bb128a..9280fa3e9b0 100644 --- a/neural_compressor/experimental/pruning.py +++ b/neural_compressor/experimental/pruning.py @@ -17,7 +17,7 @@ # limitations under the License. 
from .component import Component -from ..pruners import PRUNERS +from neural_compressor.pruner.pruner_legacy import PRUNERS from ..utils import logger from ..utils.utility import GLOBAL_STATE, MODE from ..utils.create_obj_from_config import create_dataloader, create_train_func, create_eval_func @@ -210,14 +210,9 @@ def generate_hooks(self): def generate_pruners(self): """Functions that generate pruners and set up self.pruners.""" for name in self.cfg.pruning.approach: - assert name == 'weight_compression' or name == "weight_compression_pytorch", \ + assert name == 'weight_compression', \ 'now we only support weight_compression and weight_compression_pytorch' - if self.cfg.pruning.approach.weight_compression_pytorch != None: - from .pytorch_pruner.pruning import Pruning as PytorchPruning - self.pytorch_pruner = PytorchPruning(self.cfg) - self.pruners.append(self.pytorch_pruner) - if self.cfg.pruning.approach.weight_compression != None: for pruner in self.cfg.pruning.approach.weight_compression.pruners: diff --git a/neural_compressor/experimental/pytorch_pruner/patterns.py b/neural_compressor/experimental/pytorch_pruner/patterns.py deleted file mode 100644 index a12b375cbb4..00000000000 --- a/neural_compressor/experimental/pytorch_pruner/patterns.py +++ /dev/null @@ -1,574 +0,0 @@ -"""pattern module.""" -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging - -import torch -from .logger import logger - -PATTERNS = {} - - -def register_pattern(name): - """Class decorator used to register a Pattern subclass to the registry. - - Decorator function used before a Pattern subclasses. - Make sure that this Pattern class can be registered in PATTERNS. - - Args: - cls (class): The class of register. - name: A string. Define the pattern type which will be included in a pruning process. - - Returns: - cls: The class of register. - - """ - - def register(pattern): - PATTERNS[name] = pattern - return pattern - - return register - - -def get_pattern(config): - """Get registered pattern class. - - Get a Pattern object from PATTERNS. - - Args: - config: A config dict object. Contains the pattern information. - - Returns: - A Pattern object. - - Raises: - AssertionError: Currently only support patterns which have been registered in PATTERNS. - """ - name = config.pattern - name = name.split('_')[-1] - if "x" in name: - return PATTERNS["NxM"](config) - if ":" in name: - return PATTERNS["N:M"](config) - assert False, f"currently only support {PATTERNS.keys()}" - - -class Pattern: - """Pruning Pattern. - - Every Pruner object will contain a Pattern object. - It defines the basic pruning unit and how this unit will be pruned during pruning. - - Args: - config: A config dict object. Contains the pattern information. - - Attributes: - pattern: A config dict object. The pattern related part in args config. - is_global: A bool. Whether the pruning take global pruning option. - Global pruning means that all pruning layers are gathered to calculate pruning criteria. - Local pruning, on the contrast, means that pruning layers are to calculate criteria individually. 
- """ - - def __init__(self, config): - """Initialize.""" - self.pattern = config.pattern - self.is_global = config.prune_domain == "global" - - def get_masks(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): - """Call when new masks for pruning are to be calculated. - - Args: - scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. - target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. - pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. - max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. - - Returns: - A dict with the identical size as pre_masks. Update the 0/1 values in it. - - """ - if self.is_global: - return self.get_masks_global(scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer) - else: - return self.get_masks_local(scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer) - - def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): - """To be implemented in subclasses.""" - raise NotImplementedError - - def get_mask_single(self, score, exact_sparsity_ratio): - """Obtain a mask for one layer. - - Args: - score: A Tensor. Store the pruning scores of one layer. - exact_sparsity_ratio: A float. After pruning, the layer's sparsity will reach this value. - - Returns: - A Tensor with the identical size as score. a new mask. 
- """ - flattern_score = torch.flatten(score) - k = int(exact_sparsity_ratio * flattern_score.numel()) - threshold, _ = torch.kthvalue(flattern_score, k) - if not k < 1: - zero = torch.tensor([0.]).to(score.device) - one = torch.tensor([1.]).to(score.device) - mask = torch.where(score <= threshold, zero, one) - else: - mask = torch.ones(score.shape, device=score.device) - return mask - - def get_block_size_dict(self, data): - """To be implemented in subclasses.""" - raise NotImplementedError - - def get_masks_local(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): - """Obtain layers' local masks. - - Args: - scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. - target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. - pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. - max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. - - Returns: - A dict with the identical size as pre_masks. Update the 0/1 values in it. - """ - masks = {} - if isinstance(self, PatternNxM) and not isinstance(self.block_size, dict): - self.block_size = self.get_block_size_dict(pre_masks) - for key in scores.keys(): - score = {key: scores[key]} - pre_mask = {key: pre_masks[key]} - mask = self.get_masks_global(score, target_sparsity_ratio, pre_mask, max_sparsity_ratio_per_layer) - masks[key] = mask[key] - return masks - - def get_sparsity_ratio(self, pre_masks): - """Calulate the zero elements' ration in pre_masks. - - Args: - pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. - - Returns: - A float. The zero elements' ratio in pre_masks. 
- """ - zero_cnt = 0 - total_cnt = 0 - for key in pre_masks.keys(): - pre_mask = pre_masks[key] - zero_cnt += torch.sum(pre_mask == 0.0).data.item() - total_cnt += pre_masks.numel() - return float(zero_cnt) / total_cnt - - def get_pattern_lock_masks(self, modules): - """Obtain masks from original weight map, by masking where weights' are zero. - - Args: - modules: A dict{“layer_name”: Tensor}. Store weights. - - Returns: - A dict with the identical size as modules, containing pattern lock masks. - """ - pattern_lock_masks = {} - for key in modules.keys(): - weight = modules[key].weight - shape = weight.shape - mask = torch.ones(shape) - mask[weight == 0] = 0.0 - pattern_lock_masks[key] = mask.to(weight.device) - return pattern_lock_masks - - -@register_pattern('NxM') -class PatternNxM(Pattern): - """Pruning Pattern. - - A Pattern class derived from Pattern. In this pattern, the weights in a NxM block will be pruned or kept - during one pruning step. - - Args: - config: A config dict object. Contains the pattern information. - - Attributes: - block_size: A list of two Integers. The height and width of the block. - Please be aware that the vertical direction of a Linear layer's weight in PyTorch refer to output channel. - Because PyTorch's tensor matmul has a hidden transpose operation. - """ - - def __init__(self, config): - """Initialize.""" - super(PatternNxM, self).__init__(config) - pattern = self.pattern.split('_')[-1] - self.N = pattern.split('x')[0] - self.M = pattern.split('x')[1] - if self.N == "channel": ##channel-wise pruning mode - self.block_size = ["channel", int(self.M)] - elif self.M == "channel": ##channel-wise pruning mode - self.block_size = [int(self.N), "channel"] - else: - self.block_size = [int(pattern.split('x')[0]), int(pattern.split('x')[1])] - - def get_block_size_dict(self, data): - """Calulate the zero elements' ration in pre_masks. - - Args: - data: Dict{"layer_name": Tensor}. Store weights or scores. - - Returns: - A dict. 
Dict{"layer_name": [block_size_1, block_size_2]}. - Containing layers' corresponding pruning pattern's block shape. - Please be aware that because in channel-wise pruning, - different layers can have different pruning patterns. - """ - block_sizes_dict = {} - if self.N == "channel" or self.M == "channel": - for key in data.keys(): - if isinstance(data[key], torch.nn.Module): - shape = data[key].weight.shape - else: - shape = data[key].shape - if self.N == "channel": - block_sizes_dict[key] = [shape[0], 1] - else: - block_sizes_dict[key] = [1, shape[1]] - return block_sizes_dict - for key in data.keys(): - block_sizes_dict[key] = self.block_size - return block_sizes_dict - - def get_sparsity_ratio(self, pre_masks): - """Calulate the zero elements' ration in pre_masks. - - Args: - pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. - - Returns: - A float. Calculate the zero elements' ratio in pre_masks. - """ - zero_cnt = 0 - total_cnt = 0 - if isinstance(self.block_size, list): - self.block_size = self.get_block_size_dict(pre_masks) - for key in pre_masks.keys(): - block_size = self.block_size[key] - pre_mask = pre_masks[key] - shape = pre_mask.shape - if len(shape) == 4: - shape = pre_mask.reshape(pre_mask.shape[0], -1).shape - if shape[0] % block_size[0] != 0 or shape[1] % block_size[1] != 0: - logger.warning(f"layer {key} is not support under current pattern, ignoring") - continue - - new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], block_size[1]] - pre_mask = pre_mask.reshape(new_shape) - pre_mask_sum = pre_mask.sum(-1).sum(1) - zero_cnt += torch.sum(pre_mask_sum == 0.0).data.item() - total_cnt += pre_mask_sum.numel() - return float(zero_cnt) / total_cnt - - def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer, - keep_pre_mask=False): - """Generate masks for layers. - - Gather all layer's scores together and calculate a common threshold. 
- This threshold will be applied for all layers. - - Args: - scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. - target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. - pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. - max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. - keep_pre_masks: A bool. If True, keep the masks unchanged. - - Returns: - A dict with the identical size as pre_masks. Update the 0/1 values in it. - """ - if isinstance(self.block_size, list): - self.block_size = self.get_block_size_dict(scores) - new_scores = {} - not_divided_keys = [] - for key in scores.keys(): - block_size = self.block_size[key] - current_score = scores[key] - if len(current_score.shape) == 4: ##TODO need to verify whether it's ok for transposed conv - current_score = current_score.permute(0, 2, 3, 1) ##cout,k,k,cin - current_score = current_score.reshape(current_score.shape[0], -1) - shape = current_score.shape - if shape[0] % block_size[0] != 0 or shape[1] % block_size[1] != 0: ## only consider input channel - not_divided_keys.append(key) - continue - - new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], - block_size[1]] - current_score = current_score.reshape(new_shape) - current_score_sum = current_score.mean(-1).mean( - 1) ##TODO sum or mean is quite different for per channel pruning - new_scores[key] = current_score_sum - global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) - k = int(target_sparsity_ratio * global_scores.numel()) - masks = {} - if not k < 1: - threshold, _ = torch.kthvalue(global_scores, k) - for key in new_scores.keys(): - block_size = self.block_size[key] - score = new_scores[key] - zero = torch.tensor([0.]).to(score.device) - one = torch.tensor([1.]).to(score.device) - mask = torch.where(score <= threshold, zero, one) - mask = mask.repeat_interleave(block_size[0], 
dim=0).repeat_interleave(block_size[1], dim=-1) - if torch.sum(mask) / mask.numel() < 1.0 - max_sparsity_ratio_per_layer: - ##to prevent some layer not be purned too much - ##this is differnt with our original implementation - masks[key] = self.get_mask_single(new_scores[key], max_sparsity_ratio_per_layer) - masks[key] = masks[key].repeat_interleave(block_size[0], 0).repeat_interleave(block_size[1], -1) - # if pre_masks != {}:##when use one shot, this is not right - # masks[key] = pre_masks[key] - # else: - # masks[key] = mask - else: - masks[key] = mask - # if len(scores[key].shape) == 4: - # ##we need to revert back - # masks[key] = masks[key].reshape(scores[key].shape) - - for key in not_divided_keys: - p = scores[key] - masks[key] = torch.ones(p.shape).to(p.device) - logger.warning(f"{key} shape {scores[key].shape} cannot be divided by {self.pattern}") - - else: - for key in scores.keys(): - p = scores[key] - masks[key] = torch.ones(p.shape).to(p.device) - - for key in masks.keys(): - if len(scores[key].shape) == 4 and len(masks[key].shape) == 2: ## need to permute - mask = masks[key] - mask = mask.reshape(scores[key].shape[0], scores[key].shape[2], scores[key].shape[3], - scores[key].shape[1]) - mask = mask.permute(0, 3, 1, 2) - masks[key] = mask - return masks - - def get_pattern_lock_masks(self, modules): - """Obtain masks from original weight map, by masking where weights' are zero. - - Args: - modules: A dict{“layer_name”: Tensor}. Store weights. - - Returns: - A dict with the identical size as modules, containing pattern lock masks. 
- """ - pattern_lock_masks = {} - if isinstance(self.block_size, list): - self.block_size = self.get_block_size_dict(modules) - for key in modules.keys(): - block_size = self.block_size[key] - weight = modules[key].weight - if len(weight.shape) == 4: # conv - weight = weight.permute(0, 2, 3, 1) - weight = weight.reshape(weight.shape[0], -1) - shape = weight.shape - new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], block_size[1]] - p = weight.reshape(new_shape) - p_mag = p.abs() # avoid the scene which sum is zero but weights are not - weight_block_sum = p_mag.sum(-1).sum(1) - mask = torch.ones(weight_block_sum.shape) - mask[weight_block_sum == 0] = 0.0 - mask = mask.repeat_interleave(block_size[0], dim=0).repeat_interleave(block_size[1], dim=-1) - orig_shape = modules[key].weight.shape - if len(orig_shape) == 4: - mask = mask.reshape(orig_shape[0], orig_shape[2], orig_shape[3], orig_shape[1]) - mask = mask.permute(0, 3, 1, 2) - pattern_lock_masks[key] = mask.to(weight.device) - return pattern_lock_masks - - -@register_pattern('N:M') -class PatternNInM(Pattern): - """Pruning Pattern. - - A Pattern class derived from Pattern. In this pattern, N out of every M continuous weights will be pruned. - For more info of this pattern, please refer to - https://github.com/intel/neural-compressor/blob/master/docs/pruning.md - - Args: - config: A config dict object. Contains the pattern information. - - Attributes: - N: The number of elements to be prune in a weight sequence. - M: The size of the weight sequence. - - """ - - def __init__(self, config): - """Initialize.""" - super(PatternNInM, self).__init__(config) - pattern = self.pattern.split('_')[-1] - self.N = int(pattern.split(':')[0]) - self.M = int(pattern.split(':')[1]) ##m is bigger - - def get_sparsity_ratio(self, pre_masks): - """Calulate the zero elements' ration in pre_masks. - - Args: - pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. 
- - Returns: - A float. Calculate the zero elements' ratio in pre_masks. - """ - ##simply use elemwise sparsity - non_zero_cnt = 0 - total_cnt = 0 - for key in pre_masks.keys(): - non_zero_cnt += (torch.sum(pre_masks[key])).data.item() - total_cnt += pre_masks[key].numel() - return 1.0 - float(non_zero_cnt) / total_cnt - - def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): - """Generate masks for layers. - - Gather all layer's scores together and calculate a common threshold. - This threshold will be applied for all layers. - - Args: - scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. - target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. - pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. - max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. - - Returns: - A dict with the identical size as pre_masks. Update the 0/1 values in it. 
- """ - N = self.N - M = self.M - target_sparsity_ratio = target_sparsity_ratio / (float(N / M)) ##recover sparsity for block wise - all_nm_masks = {} - new_scores = {} - not_divided_keys = [] - for key in scores.keys(): - current_score = scores[key] - shape = current_score.shape - if shape[1] % M != 0: - not_divided_keys.append(key) - continue - if len(current_score.shape) == 4: ##TODO need to verify whether it's ok for transposed conv - current_score = current_score.permute(0, 2, 3, 1) ##cout,k,k,cin - current_score = current_score.reshape(current_score.shape[0], -1) - shape = current_score.shape - new_shape = [shape[0], shape[1] // M, M] - current_score_new = current_score.reshape(new_shape) - - threshold, _ = torch.kthvalue(current_score_new, N, dim=2) - threshold = threshold.unsqueeze(-1) - - threshold = threshold.expand(shape[0], shape[1] // M, M) - threshold = threshold.reshape((shape[0], shape[1])) - - one = torch.tensor([1.]).to(current_score.device) - zero = torch.tensor([0.]).to(current_score.device) - mask = torch.where(current_score <= threshold, zero, one) - current_score_new = current_score_new.reshape((shape[0], shape[1])) - ##to get the sum of N scores in each block with M - current_score_new = current_score_new * (1.0 - mask) - current_score_new = current_score_new.reshape(shape[0], shape[1] // M, M) - score_sum = torch.mean(current_score_new, dim=-1) - all_nm_masks[key] = mask - new_scores[key] = score_sum - - global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) - k = int(target_sparsity_ratio * global_scores.numel()) - masks = {} - if not k < 1: - threshold, _ = torch.kthvalue(global_scores, k) - for key in new_scores.keys(): - score = new_scores[key] - zero = torch.tensor([0.]).to(score.device) - one = torch.tensor([1.]).to(score.device) - mask = torch.where(score <= threshold, zero, one) - mask = mask.repeat_interleave(M, dim=-1) - ## both zero will be zero - mask = (mask + all_nm_masks[key]) - mask = torch.where(mask <= 
0, zero, one) - if torch.sum(mask) / mask.numel() < 1.0 - max_sparsity_ratio_per_layer: - ##trick, to prevent some layer not be purned too much - masks[key] = self.get_mask_single(new_scores[key], max_sparsity_ratio_per_layer) - masks[key] = masks[key].repeat_interleave(M, dim=-1) - ## both zero will be zero - masks[key] = (masks[key] + all_nm_masks[key]) - masks[key] = torch.where(masks[key] <= 0, zero, one) - else: - masks[key] = mask - for key in not_divided_keys: - p = scores[key] - masks[key] = torch.ones(p.shape).to(p.device) - logger.warning(f"{key} shape {scores[key].shape} cannot be divided by {self.pattern}") - - else: - for key in scores.keys(): - p = scores[key] - masks[key] = torch.ones(p.shape).to(p.device) - for key in masks.keys(): - if len(scores[key].shape) == 4 and len(masks[key].shape) == 2: ## need to permute - mask = masks[key] - mask = mask.reshape(scores[key].shape[0], scores[key].shape[2], scores[key].shape[3], - scores[key].shape[1]) - mask = mask.permute(0, 3, 1, 2) - masks[key] = mask - - return masks - - def get_pattern_lock_masks(self, modules): - """Obtain masks from original weight map, by masking where weights' are zero. - - Args: - modules: A dict{“layer_name”: Tensor}. Store weights. - - Returns: - A dict with the identical size as modules, containing pattern lock masks. 
- """ - pattern_lock_masks = {} - N, M = self.N, self.M - for key in modules.keys(): - weight = modules[key].weight - if len(weight.shape) == 4: # conv - weight = weight.permute(0, 2, 3, 1) - weight = weight.reshape(weight.shape[0], -1) - shape = weight.shape - ##TODO need to check whether it can be divisible later - new_shape = [shape[0], shape[1] // M, M] - weight_new = weight.reshape(new_shape) - mask1 = torch.ones(weight_new.shape) - mask2 = torch.ones(weight_new.shape) - nonzeros = torch.count_nonzero(weight_new, dim=-1) - zeros = M - nonzeros - mask1[weight_new == 0] = 0.0 - mask2[zeros >= N] = 0.0 - mask3 = mask1 + mask2 # zero in mask3 means its block has been completely pruned. - zero = torch.tensor([0.]).to(weight.device) - one = torch.tensor([1.]).to(weight.device) - mask = torch.where(mask3 == 0, zero, one) - mask = mask.reshape(shape) - orig_shape = modules[key].weight.shape - if len(orig_shape) == 4: - mask = mask.reshape(orig_shape[0], orig_shape[2], orig_shape[3], orig_shape[1]) - mask = mask.permute(0, 3, 1, 2) - - pattern_lock_masks[key] = mask.to(weight.device) - return pattern_lock_masks diff --git a/neural_compressor/experimental/pytorch_pruner/prune_utils.py b/neural_compressor/experimental/pytorch_pruner/prune_utils.py deleted file mode 100644 index 2c4223f3576..00000000000 --- a/neural_compressor/experimental/pytorch_pruner/prune_utils.py +++ /dev/null @@ -1,221 +0,0 @@ -"""prune utils.""" -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import yaml - -try: - from ...conf.dotdict import DotDict -except: - from .dot_dict import DotDict ##TODO -from .logger import logger - - -def check_config(prune_config): - """Functions that check key-value is valid to run Pruning object. - - Args: - prune_config: A config dict object. Contains Pruning parameters and configurations. - - Returns: - None if everything is correct. - - Raises: - AssertionError. - """ - assert prune_config['start_step'] >= 0, "start_step should be greater than 0" - assert prune_config['end_step'] >= -1, "end_step should be greater than 0" - assert prune_config['end_step'] >= prune_config['start_step'], \ - "end_step should be greater than start_step" - assert prune_config['target_sparsity'] >= 0 and prune_config['target_sparsity'] < 1.0, \ - "begin_pruning_step should be in range [0,1)" - assert prune_config['update_frequency_on_step'] > 0, "update_frequency_on_step should be greater than 0" - assert prune_config['max_sparsity_ratio_per_layer'] >= 0 and prune_config['max_sparsity_ratio_per_layer'] < 1, \ - "update_frequency_on_step should be greater than 0" - assert prune_config['prune_domain'] == "global" or prune_config['prune_domain'] == "local", \ - "only support 'global' and 'local' prune domain" - if "x" in prune_config["pattern"]: - pattern = prune_config["pattern"].split('_')[-1].split('x') - if pattern[0]=="channel" or pattern[1]=="channel": - pass - else: - try: - N = int(pattern[0]) - M = int(pattern[1]) - except: - assert False, "N or M can't convert to int" - assert N > 0, "N should be greater than 0" - assert M > 0, "M should be greater than 0" - if ":" in prune_config["pattern"]: - pattern = prune_config["pattern"].split('_')[-1].split(':') - try: - N = int(pattern[0]) - M = int(pattern[1]) - except: - assert False, "N or M can't convert to int" - assert N > 0, "N should be greater than 0" - assert M > N, 
"M should be greater than N" - max_ratio = float(N) / M - assert prune_config['target_sparsity'] <= max_ratio, \ - "in N:M pattern, the max sparsity is N/M={}".format(max_ratio) - prune_config['max_sparsity_ratio_per_layer'] = min(max_ratio, prune_config['max_sparsity_ratio_per_layer']) - -def reset_non_value_to_default(obj, key, default): - """Functions that add up undefined configurations. - - If some configurations are not defined in the configuration, set it to a default value. - - Args: - obj: A dict{key: value} - key: A string. Key in obj. - default: When the key is not in obj, Add key: default item in original obj. - - """ - if isinstance(obj, dict): - if (not key in obj.keys()) or obj[key] == None: - return default - else: - return obj[key] - else: - if not hasattr(obj, key) or getattr(obj, key) == None: - return default - else: - return getattr(obj, key) - -def process_and_check_config(val): - """Functions which converts a initial configuration object to a Pruning configuration. - - Copy parameters and add some non-define parameters to a new Pruning configuration object. - - Args: - val: A dict directly read from a config file. - - Returns: - A dict whose contents which are regularized for a Pruning obejct. 
- """ - val = val["pruning"]['approach']['weight_compression_pytorch'] - start_step = reset_non_value_to_default(val, "start_step", 0) - end_step = reset_non_value_to_default(val, "end_step", 0) - excluded_names = reset_non_value_to_default(val, "excluded_names", []) - prune_layer_type = reset_non_value_to_default(val, "prune_layer_type", ['Conv2d', 'Linear']) - target_sparsity = reset_non_value_to_default(val, "target_sparsity", 0.0) ## be care of this val - update_frequency_on_step = int(reset_non_value_to_default(val, "update_frequency_on_step", 1)) - prune_domain = reset_non_value_to_default(val, "prune_domain", "global") - prune_type = reset_non_value_to_default(val, "prune_type", "snip_momentum") - sparsity_decay_type = reset_non_value_to_default(val, "sparsity_decay_type", "exp") - max_sparsity_ratio_per_layer = reset_non_value_to_default(val, "max_sparsity_ratio_per_layer", 0.98) - names = reset_non_value_to_default(val, "names", []) - extra_excluded_names = reset_non_value_to_default(val, "extra_excluded_names", []) - pattern = reset_non_value_to_default(val, "pattern", "tile_pattern_4x1") - - pruners_info = [] - for info in val['pruners']: - pruner = {} - pruner['start_step'] = reset_non_value_to_default(info, 'start_step', start_step) - pruner['end_step'] = reset_non_value_to_default(info, 'end_step', end_step) - pruner['excluded_names'] = reset_non_value_to_default(info, 'excluded_names', excluded_names) - pruner['prune_layer_type'] = reset_non_value_to_default(info, 'prune_layer_type', prune_layer_type) - pruner['target_sparsity'] = reset_non_value_to_default(info, 'target_sparsity', target_sparsity) - pruner['update_frequency_on_step'] = reset_non_value_to_default(info, 'update_frequency_on_step', \ - update_frequency_on_step) - pruner['prune_domain'] = reset_non_value_to_default(info, 'prune_domain', prune_domain) - pruner['prune_type'] = reset_non_value_to_default(info, 'prune_type', prune_type) - pruner['sparsity_decay_type'] = 
reset_non_value_to_default(info, 'sparsity_decay_type', sparsity_decay_type) - pruner['max_sparsity_ratio_per_layer'] = reset_non_value_to_default(info, 'max_sparsity_ratio_per_layer', \ - max_sparsity_ratio_per_layer) - pruner['names'] = reset_non_value_to_default(info, 'names', names) - pruner['extra_excluded_names'] = reset_non_value_to_default(info, 'extra_excluded_names', - extra_excluded_names) - pruner['pattern'] = reset_non_value_to_default(info, 'pattern', - pattern) - check_config(pruner) - pruner_info = DotDict(pruner) - pruners_info.append(pruner_info) - return pruners_info - - -def process_config(config): - """Obtain a config dict object from a config file. - - Args: - config: A string. The path to configuration file. - - Returns: - A config dict object. - """ - if isinstance(config, str): - try: - with open(config, 'r') as f: - content = f.read() - try: - from .schema_check import schema - - except ImportError: - from ...conf.config import schema - - val = yaml.safe_load(content) - schema.validate(val) - except FileNotFoundError as f: - logger.error("{}.".format(f)) - raise RuntimeError( - "The yaml file is not exist. Please check the file name or path." - ) - except Exception as e: - logger.error("{}.".format(e)) - raise RuntimeError( - "The yaml file format is not correct. Please refer to document." 
- ) - - elif isinstance(config, DotDict): - val = config - else: - assert False, f"not supported type {config}" - - return process_and_check_config(val) - - -def parse_to_prune(model, config): - """Keep target pruned layers.""" - modules = {} - if config["names"] == None or config["names"] == []: - config["names"] = [".*"] - for raw in config["names"]: - try: - pattern = re.compile(raw) - except: - assert False, f"regular expression match does not support {raw}" - for name, module in filter(lambda t: pattern.search(t[0]), model.named_modules()): - if type(module).__name__ in config["prune_layer_type"]: - modules[name] = module - return modules - - -def parse_not_to_prune(modules, config): - """Drop non pruned layers.""" - exclude_names = config["extra_excluded_names"] - exclude_names.extend(config["excluded_names"]) - - patterns = [re.compile(s) for s in exclude_names] - if len(patterns) <= 0: - return modules - new_module = {} - for name in modules.keys(): - if any([p.search(name) for p in patterns]): - continue - new_module[name] = modules[name] - return new_module diff --git a/neural_compressor/experimental/pytorch_pruner/pruner.py b/neural_compressor/experimental/pytorch_pruner/pruner.py deleted file mode 100644 index ebba4a0afaa..00000000000 --- a/neural_compressor/experimental/pytorch_pruner/pruner.py +++ /dev/null @@ -1,347 +0,0 @@ -"""pruner module.""" -# !/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from .patterns import get_pattern -from .scheduler import get_scheduler - -from .logger import logger - -PRUNERS = {} - - -def register_pruners(name): - """Class decorator to register a Pruner subclass to the registry. - - Decorator function used before a Pattern subclass. - Make sure that the Pruner class decorated by this function can be registered in PRUNERS. - - Args: - cls (class): The subclass of register. - name: A string. Define the pruner type. - - Returns: - cls: The class of register. - """ - - def register(pruner): - PRUNERS[name] = pruner - return pruner - - return register - - -def get_pruner(modules, config): - """Get registered pruner class. - - Get a Pruner object from PRUNERS. - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Returns: - A Pruner object. - - Raises: AssertionError: Cuurently only support pruners which have been registered in PRUNERS. - """ - name = config["prune_type"] - if name not in PRUNERS.keys(): - assert False, f"does not support {name}, currently only support {PRUNERS.keys()}" - return PRUNERS[name](modules, config) - - -class Pruner: - """Pruning Pruner. - - The class which executes pruning process. - 1. Defines pruning functions called at step begin/end, epoch begin/end. - 2. Defines the pruning criteria. - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Attributes: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - masks: A dict {"module_name": Tensor}. Store the masks for modules' weights. - scores: A dict {"module_name": Tensor}. 
Store the score for modules' weights, - which are used to decide pruning parts with a criteria. - pattern: A Pattern object. Defined in ./patterns.py - scheduler: A scheduler object. Defined in ./scheduler.py - current_sparsity_ratio: A float. Current model's sparsity ratio, initialized as zero. - global_step: A integer. The total steps the model has run. - start_step: A integer. When to trigger pruning process. - end_step: A integer. When to end pruning process. - update_frequency_on_step: A integer. The pruning frequency, which's valid when iterative - pruning is enabled. - target_sparsity_ratio: A float. The final sparsity after pruning. - max_sparsity_ratio_per_layer: A float. Sparsity ratio maximum for every module. - """ - - def __init__(self, modules, config): - """Initialize.""" - self.modules = modules - self.config = config - self.masks = {} - self.scores = {} - self.reg = None ##TODO need to add reg - self.pattern = get_pattern(config) - self.scheduler = get_scheduler(config) - self.current_sparsity_ratio = 0.0 - self._init() - - def _init(self): - """Auxiliary function for initializing.""" - self.global_step = -1 - self.start_step = self.config['start_step'] - self.end_step = self.config['end_step'] - self.update_frequency_on_step = self.config['update_frequency_on_step'] - ##this is different with original code - self.total_prune_cnt = (self.end_step - self.start_step + 1) \ - // self.update_frequency_on_step - self.completed_pruned_cnt = 0 - self.masks = {} - for key in self.modules.keys(): - module = self.modules[key] - self.masks[key] = torch.ones(module.weight.shape).to(module.weight.device) ##TODO support bias or others - - self.target_sparsity_ratio = self.config['target_sparsity'] - - self.max_sparsity_ratio_per_layer = self.config['max_sparsity_ratio_per_layer'] - - def on_epoch_begin(self, epoch): - """Functions called in the beginning of each epoch.""" - pass - - def mask_weights(self): - """Functions called when masks are applied on 
corresponding modules' weights. - - Weights are multipled with masks. This is the formal pruning process. - """ - with torch.no_grad(): - for key in self.modules.keys(): - module = self.modules[key] - module.weight.data = module.weight.data * self.masks[key] - - def on_step_begin(self, local_step): - """Functions called on the beginning of each step. - - Judge if the current step should execute a pruning process. - If so, using scores and criteria to update the masks and pruning the model. - Or, simply train the model with its original structure. - """ - self.global_step += 1 - if not self.check_is_pruned_step(self.global_step): - return - - if self.current_sparsity_ratio > self.target_sparsity_ratio: - return - - current_target_sparsity_ratio = self.scheduler.update_sparsity_ratio(self.target_sparsity_ratio, - self.completed_pruned_cnt, - self.total_prune_cnt, self.masks) - logger.info(f"current target ratio is {current_target_sparsity_ratio}") - self.update_scores() - self.completed_pruned_cnt += 1 - if self.scores == {}: - return - self.masks = self.pattern.get_masks(self.scores, current_target_sparsity_ratio, self.masks, - self.max_sparsity_ratio_per_layer) - self.mask_weights() - - self.current_sparsity_ratio = self.pattern.get_sparsity_ratio(self.masks) - logger.info(f"current sparsity ratio is {self.current_sparsity_ratio}") - - def on_step_end(self): - """Functions called in the end of each step.""" - pass - - def on_epoch_end(self): - """Functions called in the end of each epoch.""" - pass - - def on_before_optimizer_step(self): - """Functions called before the optimizer.step().""" - pass - - def on_after_optimizer_step(self): - """Functions called after the optimizer.step(). - - Prune the model after optimization. 
- """ - self.mask_weights() - - def on_train_begin(self): - """Functions called in the beginning of training.""" - pass - - def on_train_end(self): - """Functions called in the end of each training.""" - pass - - def on_before_eval(self): - """Functions called in the beginning of evaluation.""" - pass - - def on_after_eval(self): - """Functions called in the end of evaluation.""" - pass - - def check_is_pruned_step(self, step): - """Decide whether the current step should execute a pruning process.""" - if step < self.start_step or step > self.end_step: - return False - if int(step - self.start_step) % self.update_frequency_on_step == 0: - return True - return False - - def update_scores(self): - """Update self.scores.""" - pass - - -@register_pruners('magnitude') -class MagnitudePruner(Pruner): - """Pruning Pruner. - - A Pruner class derived from Pruner. In this pruner, the scores are calculated based on weights. - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Attributes: - Inherit from parent class Pruner. - """ - - def __init__(self, modules, config): - """Initialize.""" - super(MagnitudePruner, self).__init__(modules, config) - self.scores = {} - - def update_scores(self): - """Update self.scores.""" - with torch.no_grad(): - for key in self.modules.keys(): - p = self.modules[key].weight.data - self.scores[key] = p - - -@register_pruners('snip') -class SnipPruner(Pruner): - """Pruning Pruner. - - A Pruner class derived from Pruner. In this pruner, the scores are calculated based on SNIP. - Please refer to SNIP: Single-shot Network Pruning based on Connection Sensitivity - (https://arxiv.org/abs/1810.02340) - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Attributes: - Inherit from parent class Pruner. 
- """ - - def __init__(self, modules, config): - """Initialize.""" - super(SnipPruner, self).__init__(modules, config) - assert self.config.end_step > 0, "gradient based criteria does not work on step 0" - self.scores = {} - - def on_after_optimizer_step(self): - """Functions called after the optimizer.step(). - - Prune the model after optimization and update the scores based on weights and gradients. - """ - self.mask_weights() - with torch.no_grad(): - for key in self.modules.keys(): - p = self.modules[key].weight - self.scores[key] = torch.abs(p * p.grad) - - -@register_pruners('snip_momentum') -class SnipMomentumPruner(Pruner): - """Pruning Pruner. - - A Pruner class derived from Pruner. In this pruner, the scores are calculated based on SNIP. - Moreoever, the score map is updated with a momentum like process. - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Attributes: - Inherit from parent class Pruner. - """ - - def __init__(self, modules, config): - """Initialize.""" - super(SnipMomentumPruner, self).__init__(modules, config) - assert self.config.end_step > 0, "gradient based criteria does not work on step 0" - # self.scores = {} - for key in modules.keys(): - p = modules[key].weight - self.scores[key] = torch.zeros(p.shape).to(p.device) - - def on_after_optimizer_step(self): - """Functions called after the optimizer.step(). - - Prune the model after optimization and update the scores based on weights and gradients. - """ - self.mask_weights() - with torch.no_grad(): - for key in self.modules.keys(): - p = self.modules[key].weight - self.scores[key] *= 0.9 ##magic number - self.scores[key] += 1.0 * torch.abs(p * p.grad) - - -@register_pruners('pattern_lock') -class PatternLockPruner(Pruner): - """Pruning Pruner. - - A Pruner class derived from Pruner. In this pruner, original model's sparsity pattern will be fixed while training. 
- This pruner is useful when you want to train a sparse model without change its original structure. - - Args: - modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. - config: A config dict object. Contains the pruner information. - - Attributes: - Inherit from parent class Pruner. - """ - - def __init__(self, modules, config): - """Initialize.""" - super(PatternLockPruner, self).__init__(modules, config) - assert self.config.end_step == self.config.start_step, "pattern_lock pruner only supports one shot mode" - - def on_step_begin(self, local_step): - """Functions called on the beginning of each step.""" - self.global_step += 1 - if not self.check_is_pruned_step(self.global_step): - return - self.masks = self.pattern.get_pattern_lock_masks(self.modules) - - def on_after_optimizer_step(self): - """Functions called after the optimizer.step().""" - self.mask_weights() diff --git a/neural_compressor/experimental/pytorch_pruner/pruning.py b/neural_compressor/experimental/pytorch_pruner/pruning.py deleted file mode 100644 index ae85df778ff..00000000000 --- a/neural_compressor/experimental/pytorch_pruner/pruning.py +++ /dev/null @@ -1,163 +0,0 @@ -"""pruning module.""" -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import torch.nn - -from .prune_utils import process_config, parse_to_prune, parse_not_to_prune -from .pruner import get_pruner -from .logger import logger - - -class Pruning: - """Pruning. - - The main class that users will used in codes to do pruning. - Contain at least one Pruner object. - - Args: - config: a string. The path to a config file. For config file template, please refer to - https://github.com/intel/neural-compressor/tree/master/examples/pytorch/nlp/huggingface_models/text-classification/pruning/pytorch_pruner/eager/ - - Attributes: - model: The model object to prune. - config_file_path: A string. The path to a config file. - pruners: A list. A list of Pruner objects. - pruner_info: A config dict object. Contains pruners' information. - """ - - def __init__(self, config): - """Initialize.""" - self.model = None - self.config_file_path = config - self.pruners = [] - self.pruner_info = process_config(self.config_file_path) - - def update_items_for_all_pruners(self, **kwargs): - """Functions which add User-defined arguments to the original configurations. - - The original config of pruning is read from a file. - However, users can still modify configurations by passing key-value arguments in this function. - Please note that the key-value arguments' keys are analysable in current configuration. - """ - for item in self.pruner_info: - for key in kwargs: - if key in item.keys(): - item[key] = kwargs[key] - - def get_sparsity_ratio(self): - """Functions that calculate a modules/layers sparsity. - - Returns: - Three floats. - elementwise_over_matmul_gemm_conv refers to zero elements' ratio in pruning layers. - elementwise_over_all refers to zero elements' ratio in all layers in the model. - blockwise_over_matmul_gemm_conv refers to all-zero blocks' ratio in pruning layers. 
- """ - pattern_sparsity_cnt = 0 - element_sparsity_cnt = 0 - for pruner in self.pruners: - modules = pruner.modules - sparsity_ratio = pruner.pattern.get_sparsity_ratio(pruner.masks) - cnt = 0 - for key in modules.keys(): - cnt += modules[key].weight.numel() - pattern_sparsity_cnt += int(cnt * sparsity_ratio) - for key in pruner.masks.keys(): - element_sparsity_cnt += torch.sum(pruner.masks[key] == 0).data.item() - - linear_conv_cnt = 0 - param_cnt = 0 - for name, module in self.model.named_modules(): - if type(module).__name__ in ["Linear"] or "Conv" in type(module).__name__: - linear_conv_cnt += module.weight.numel() - - for n, param in self.model.named_parameters(): - param_cnt += param.numel() - blockwise_over_matmul_gemm_conv = float(pattern_sparsity_cnt) / linear_conv_cnt - elementwise_over_matmul_gemm_conv = float(element_sparsity_cnt) / linear_conv_cnt - elementwise_over_all = float( - element_sparsity_cnt) / param_cnt - - return elementwise_over_matmul_gemm_conv, elementwise_over_all, blockwise_over_matmul_gemm_conv - - def _generate_pruners(self): - """Functions that obtain Pruner objects.""" - assert isinstance(self.model, torch.nn.Module) - - for info in self.pruner_info: - modules = parse_to_prune(self.model, info) - modules = parse_not_to_prune(modules, info) - if modules == {}: - logger.warning("one pruner hooks no layers, please have a check") - - self.pruners.append(get_pruner(modules, info)) - info['modules'] = [key for key in modules.keys()] - info['len_of_modules'] = len(info['modules']) - logger.info(info) - - def on_train_begin(self): - """Functions called in the beginning of training process. - - Before training, ensure that pruners are generated. 
- """ - self._generate_pruners() ##TODO is there better place to place - - def on_epoch_begin(self, epoch): - """Functions called in the beginning of every epoch.""" - for pruner in self.pruners: - pruner.on_epoch_begin(epoch) - - def on_step_begin(self, local_step): - """Functions called in the beginning of every step.""" - for pruner in self.pruners: - pruner.on_step_begin(local_step) - - def on_before_optimizer_step(self): - """Functions called before optimizer.step().""" - for pruner in self.pruners: - pruner.on_before_optimizer_step() - - def on_step_end(self): - """Functions called in the end of every step.""" - for pruner in self.pruners: - pruner.on_step_end() - - def on_epoch_end(self): - """Functions called in the end of every epoch.""" - for pruner in self.pruners: - pruner.on_epoch_end() - - def on_train_end(self): - """Functions called in the end of training.""" - for pruner in self.pruners: - pruner.on_train_end() - - def on_before_eval(self): - """Functions called in the beginning of evaluation.""" - for pruner in self.pruners: - pruner.on_before_eval() - - def on_after_eval(self): - """Functions called in the end of evaluation.""" - for pruner in self.pruners: - pruner.on_after_eval() - - def on_after_optimizer_step(self): - """Functions called after optimizer.step().""" - for pruner in self.pruners: - pruner.on_after_optimizer_step() diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md new file mode 100644 index 00000000000..16481b50630 --- /dev/null +++ b/neural_compressor/pruner/README.md @@ -0,0 +1,194 @@ +Pruning +============ + + + +1. [Introduction](#introduction) + + + +    1.1. [Neural Network Pruning](#neural-network-pruning) + + + +    1.2. [Pruning Patterns](#pruning-patterns) + + + +    1.3. [Pruning Criteria](#pruning-criteria) + + + +    1.4. [Pruning Schedule](#pruning-schedule) + + + +    1.5. [Pruning type](#pruning-type) + + + +    1.6. [Regularization](#regularization) + + + +2. 
[Get Started With Pruning API](#get-started-with-pruning-api) + + + +3. [Examples](#examples) + + + +## Introduction + + + +### Neural Network Pruning +Neural network pruning is a promising model compression technique that removes the least important parameters/neurons in the network and achieves compact architectures with minimal accuracy drop and maximal inference acceleration. As state-of-the-art model sizes have grown at an unprecedented speed, pruning has become increasingly crucial for reducing the computational and memory footprint that huge neural networks require. + +

+
+
+### Pruning Patterns
+
+
+
+Pruning patterns define the rules of pruned weights' arrangements in space. INC currently supports unstructured, N:M and NxM patterns. Please note that the N:M pattern is applied to input channels while the NxM pattern is applied to output ones. [Details](../../docs/source/pruning_details.md#pruning-patterns).
+
+
+
+### Pruning Criteria
+
+
+
+Pruning criteria determine how the weights of a neural network should be scored and pruned. In the image below, pruning scores are represented by neurons' color and those with the lowest scores are pruned. The magnitude and gradient are widely used to score the weights. Currently, INC supports **magnitude**, **snip** and **snip_momentum** criteria. [Details](../../docs/source/pruning_details.md#pruning-criteria).
+
+
+
+### Pruning Schedule
+
+
+
+Pruning schedule defines the way the model reaches the target sparsity (the ratio of pruned weights). Both **one-shot** and **iterative** pruning schedules are supported. [Details](../../docs/source/pruning_details.md#pruning-schedule).
+
+
+
+
+### Pruning Type
+
+
+
+Pruning type defines how the masks are generated and applied to a neural network. Both **pattern_lock** and **progressive** types are supported by INC. [Details](../../docs/source/pruning_details.md#pruning-type).
+
+
+
+### Regularization
+
+
+
+Regularization is a technique that discourages learning a more complex model and therefore performs variable selection. In the image below, some weights are pushed to be as small as possible and the connections are thus sparsified. The **group-lasso** method is used in INC.
+[Details](../../docs/source/pruning_details.md#regularization).
+
+
+
+
+## Get Started with Pruning API
+
+
+
+Neural Compressor `Pruning` API is defined under `neural_compressor.pruning`, which takes a user-defined yaml file as input.
+Users can pass the customized training/evaluation functions to `Pruning` in various scenarios.
+
+In this case, the pruning process can be done by pre-defined hooks in Neural Compressor. Users need to put those hooks inside the training function. The pre-defined Neural Compressor hooks are listed below.
+
+
+
+```
+on_train_begin() : Implement at the beginning of the training phase.
+on_epoch_begin(epoch) : Implement at the beginning of each epoch.
+on_step_begin(batch) : Implement at the beginning of each batch.
+on_step_end() : Implement at the end of each batch.
+on_epoch_end() : Implement at the end of each epoch.
+on_before_optimizer_step() : Implement before the optimization step.
+on_after_optimizer_step() : Implement after the optimization step.
+```
+
+
+
+The following section is an example of how to use hooks in a user pass-in training function to perform BERT training. Our pruning API supports multiple pruner objects in a single Pruning object, which means we can apply different pruning configurations for different layers in a model. Since these pruning configurations share the same parameter names, we introduce a global-local configuration structure to initialize a Pruning object. First, we set up a dict-like local_config, which refers to some unique configurations for specific pruners. Afterwards, we pass this local_config dict and common configurations for all pruners (known as "global setting") to Pruning's initialization function. Below is a code example of how to utilize our global-local configuration method to initialize a Pruning object.
+ + + +```python +from neural_compressor.pruning import Pruning + +prune = Pruning(config_dict) +prune.update_config(start_step=1, end_step=10, pruning_frequency=1) +prune.model = model +prune.on_train_begin() +for epoch in range(num_train_epochs): + model.train() +    prune.on_epoch_begin(epoch) +    for step, batch in enumerate(train_dataloader): +        prune.on_step_begin(step) +        outputs = model(**batch) +        loss = outputs.loss / gradient_accumulation_steps +        loss.backward() +        if (step + 1) % gradient_accumulation_steps == 0: +            prune.on_before_optimizer_step() +            optimizer.step() + prune.on_after_optimizer_step() +            scheduler.step()  # Update learning rate schedule +            model.zero_grad() +        prune.on_step_end() + prune.on_epoch_end() +... +``` + +```python +config_dict = { + 'target_sparsity': 0.9, + 'pruning_type': "magnitude_progressive", + 'pattern': "4x1", + 'op_names': ['layer1.*'], # A list of modules that would be pruned. + 'excluded_op_names': ['layer3.*'], # A list of modules that would not be pruned. + 'start_step': 0, + 'end_step': 10, + 'pruning_scope': "global", + 'pruning_frequency': 1, + 'min_sparsity_ratio_per_op': 0.0, # Minimum sparsity ratio of each module. + 'max_sparsity_ratio_per_op': 0.98, # Maximum sparsity ratio of each module. + 'sparsity_decay_type': "exp", + 'pruning_op_types': ['Conv', 'Linear'], + } +``` + + +## Examples + + + +We validate the pruning technique on typical models across various domains (including CV, NLP, and Recommendation System) and the examples are listed in [Pruning Examples](../../docs/source/pruning_details.md#examples). A complete overview of validated examples including quantization, pruning and distillation results could be found in [INC Validated examples](../../docs/source/validated_model_list.md#validated-pruning-examples). 
+ + +Please refer to pruning examples([TensorFlow](../../examples/README.md#Pruning), [PyTorch](../../examples/README.md#Pruning-1)) for more information. \ No newline at end of file diff --git a/neural_compressor/experimental/pytorch_pruner/__init__.py b/neural_compressor/pruner/__init__.py similarity index 87% rename from neural_compressor/experimental/pytorch_pruner/__init__.py rename to neural_compressor/pruner/__init__.py index 359a68d8260..d33331cae08 100644 --- a/neural_compressor/experimental/pytorch_pruner/__init__.py +++ b/neural_compressor/pruner/__init__.py @@ -1,5 +1,5 @@ -"""PyTorch Pruner module.""" -#!/usr/bin/env python +"""prune init.""" +# !/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2022 Intel Corporation @@ -14,4 +14,4 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. +# limitations under the License. \ No newline at end of file diff --git a/neural_compressor/pruner/criteria.py b/neural_compressor/pruner/criteria.py new file mode 100644 index 00000000000..0397fca4c82 --- /dev/null +++ b/neural_compressor/pruner/criteria.py @@ -0,0 +1,188 @@ +"""pruning criterion.""" +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from neural_compressor.utils.utility import LazyImport +torch = LazyImport('torch') + + +CRITERIAS = {} + + +def register_criterion(name): + """Register a criterion to the registry.""" + + def register(criterion): + CRITERIAS[name] = criterion + return criterion + + return register + + +def get_criterion(config, modules): + """Get registered criterion class.""" + name = config["criterion_type"] + if name not in CRITERIAS.keys(): + assert False, f"criteria does not support {name}, currently only support {CRITERIAS.keys()}" + return CRITERIAS[name](modules, config) + + +class PruningCriterion: + """Pruning base criterion. + + Args: + config: A config dict object that includes information about pruner and pruning criterion. + modules: A dict {"module_name": Tensor} that stores the pruning modules' weights. + + Attributes: + scores: A dict {"module_name": Tensor} that stores the scores of pruning modules. + """ + + def __init__(self, modules, config): + """Initiliaze a pruning criterion.""" + self.scores = {} + self.modules = modules + self.config = config + + def on_step_begin(self): + """Calculate and store the pruning scores of pruning modules at the beginning of a step.""" + pass + + def on_after_optimizer_step(self): + """Calculate and store the pruning scores of pruning modules after the optimizer step.""" + pass + + +@register_criterion('magnitude') +class MagnitudeCriterion(PruningCriterion): + """Pruning criterion. + + The magnitude criterion_class is derived from PruningCriterion. + The magnitude value is used to score and determine if a weight is to be pruned. + + Args: + config: A config dict object that includes information about pruner and pruning criterion. + modules: A dict {"module_name": Tensor} that stores the pruning modules' weights. + + Attributes: + scores: A dict {"module_name": Tensor} that stores the scores of pruning modules. 
+    """
+
+    def __init__(self, modules, config):
+        """Initialize a magnitude pruning criterion."""
+        super(MagnitudeCriterion, self).__init__(modules, config)
+
+    def on_step_begin(self):
+        """Calculate and store the pruning scores based on the magnitude criterion."""
+        with torch.no_grad():
+            for key in self.modules.keys():
+                p = self.modules[key].weight.data
+                self.scores[key] = p
+
+
+@register_criterion('gradient')
+class GradientCriterion(PruningCriterion):
+    """Pruning criterion.
+
+    The gradient criterion_class is derived from PruningCriterion.
+    The absolute value of gradient is used to score and determine if a weight is to be pruned.
+
+    Args:
+        config: A config dict object that includes information about pruner and pruning criterion.
+        modules: A dict {"module_name": Tensor} that stores the pruning modules' weights.
+
+    Attributes:
+        scores: A dict {"module_name": Tensor} that stores the scores of pruning modules.
+    """
+
+    def __init__(self, modules, config):
+        """Initialize a gradient pruning criterion."""
+        super(GradientCriterion, self).__init__(modules, config)
+
+    def on_after_optimizer_step(self):
+        """Calculate and store the pruning scores based on the gradient criterion."""
+        with torch.no_grad():
+            for key in self.modules.keys():
+                p = self.modules[key].weight
+                self.scores[key] = torch.abs(p.grad)
+
+
+@register_criterion('snip')
+class SnipCriterion(PruningCriterion):
+    """Pruning criterion.
+
+    The snip criterion_class is derived from PruningCriterion.
+    The product of magnitude and gradient is used to score and determine if a weight is to be pruned.
+    Please refer to SNIP: Single-shot Network Pruning based on Connection Sensitivity.
+    (https://arxiv.org/abs/1810.02340)
+
+    Args:
+        config: A config dict object that includes information about pruner and pruning criterion.
+        modules: A dict {"module_name": Tensor} that stores the pruning modules' weights.
+
+    Attributes:
+        scores: A dict {"module_name": Tensor} that stores the scores of pruning modules.
+    """
+
+    def __init__(self, modules, config):
+        """Initialize a snip pruning criterion."""
+        super(SnipCriterion, self).__init__(modules, config)
+        assert self.config.end_step > 0, "gradient based criterion does not work on step 0"
+
+    def on_after_optimizer_step(self):
+        """Calculate and store the pruning scores based on the snip criterion."""
+        ##self.mask_weights()
+        with torch.no_grad():
+            for key in self.modules.keys():
+                p = self.modules[key].weight
+                self.scores[key] = torch.abs(p * p.grad)
+
+
+@register_criterion('snip_momentum')
+class SnipMomentumCriterion(PruningCriterion):
+    """Pruning criterion.
+
+    The snip_momentum criterion_class is derived from PruningCriterion.
+    A momentum mechanism is used to calculate the snip score, which determines if a weight is to be pruned.
+
+    Args:
+        config: A config dict object that includes information about pruner and pruning criterion.
+        modules: A dict {"module_name": Tensor} that stores the pruning modules' weights.
+        alpha: A parameter that determines how much of the snip score is preserved from the last pruning step.
+        beta: A parameter that determines how much of the snip score is updated at the current step.
+
+    Attributes:
+        scores: A dict {"module_name": Tensor} that stores the scores of pruning modules.
+ """ + + def __init__(self, modules, config): + """Initiliaze a snip_momentum pruning criterion.""" + super(SnipMomentumCriterion, self).__init__(modules, config) + assert self.config.end_step > 0, "gradient based criterion does not work on step 0" + for key in modules.keys(): + p = modules[key].weight + self.scores[key] = torch.zeros(p.shape).to(p.device) + + self.alpha = 0.9 + self.beta = 1.0 + + def on_after_optimizer_step(self): + """Calculate and store the pruning scores based on snip_momentum criterion.""" + with torch.no_grad(): + for key in self.modules.keys(): + p = self.modules[key].weight + self.scores[key] *= self.alpha + self.scores[key] += self.beta * torch.abs(p * p.grad) diff --git a/neural_compressor/experimental/pytorch_pruner/logger.py b/neural_compressor/pruner/logger.py similarity index 90% rename from neural_compressor/experimental/pytorch_pruner/logger.py rename to neural_compressor/pruner/logger.py index fb5c26a035e..f39f1198a65 100644 --- a/neural_compressor/experimental/pytorch_pruner/logger.py +++ b/neural_compressor/pruner/logger.py @@ -1,5 +1,5 @@ """logger module.""" -#!/usr/bin/env python +# !/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2022 Intel Corporation @@ -17,7 +17,7 @@ # limitations under the License. try: - from ...utils import logger + from neural_compressor.utils import logger except: import logging logger = logging.getLogger(__name__) diff --git a/neural_compressor/pruner/patterns.py b/neural_compressor/pruner/patterns.py new file mode 100644 index 00000000000..8ad1d1fb6f0 --- /dev/null +++ b/neural_compressor/pruner/patterns.py @@ -0,0 +1,1110 @@ +"""pruning patterns.""" +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from neural_compressor.utils.utility import LazyImport +torch = LazyImport('torch') +from .logger import logger +from collections import namedtuple + +PATTERNS = {} + + +def register_pattern(name): + """Class decorator used to register a Pattern subclass to the registry. + + Decorator function used before a Pattern subclasses. + Make sure that this Pattern class can be registered in PATTERNS. + + Args: + name: A string. Define the pattern type name which will be included in a pruning process. + + Returns: + cls: The class of register. + """ + + def register(pattern): + """Register patterns.""" + PATTERNS[name] = pattern + return pattern + + return register + + +def get_pattern(config, modules): + """Get registered pattern class. + + Get a Pattern object from PATTERNS. + + Args: + config: A config dict object. Contains the pattern information. + modules: torch neural network modules, which will be pruned with the pattern + + Returns: + A Pattern object. + + Raises: + AssertionError: Currently only support patterns which have been registered in PATTERNS. + """ + name = config.pattern + name = name.split('_')[-1] + if "x" in name: + return PATTERNS["NxM"](config, modules) + if ":" in name: + return PATTERNS["N:M"](config, modules) + assert False, f"currently only support {PATTERNS.keys()}" + + +SparsityInfo = namedtuple("SparsityInfo", ['zero_cnt', 'total_cnt', 'sparsity_ratio']) + + +class BasePattern: + """Pruning Pattern. + + It defines the basic pruning unit and how this unit will be pruned during pruning, e.g. 
4x1, 2:4
+
+    Args:
+        config: A config dict object. Contains the pattern information.
+        modules: torch neural network modules, which will be pruned with the pattern
+
+    Attributes:
+        pattern: A config dict object. The pattern related part in args config.
+        is_global: A bool. Whether the pruning takes the global pruning option.
+            Global pruning means that all pruning layers are gathered to calculate pruning criterion.
+            Local pruning, in contrast, means that pruning layers are to calculate criterion individually.
+        keep_mask_layers: A dict. The layers whose mask will not be updated
+        invalid_layers: the layers whose shape don't fit the pattern
+        modules: torch neural network modules, which will be pruned with the pattern
+        config: A config dict object. Contains all the information including the pattern's.
+        max_sparsity_ratio_per_op: A float. The maximum sparsity that one layer could reach
+        min_sparsity_ratio_per_op: A float. The minimum sparsity that one layer could reach
+        target_sparsity: A float. The sparsity ratio of the modules will be reached after pruning.
+
+    """
+
+    def __init__(self, config, modules):
+        """Initialize the basic pruning unit of a pattern."""
+        self.pattern = config.pattern
+        self.is_global = config.pruning_scope == "global"
+        self.keep_mask_layers = {}
+        self.invalid_layers = []
+        self.modules = modules
+        self.config = config
+        self.max_sparsity_ratio_per_op = self.config['max_sparsity_ratio_per_op']
+        self.min_sparsity_ratio_per_op = self.config['min_sparsity_ratio_per_op']
+        self.target_sparsity_ratio = self.config['target_sparsity']
+        # Not using deterministic_algorithms for all examples
+        torch.use_deterministic_algorithms(False)
+
+    def reduce_tensor(self, data, dim):
+        """Reduce the data along the given dimension.
+ + Args: + data: The input data + dim: The reduced axis + + Returns: + The reduced tensor + + """ + name = self.config['criterion_reduce_type'] + if name == "mean": + return torch.mean(data, dim=dim) + elif name == "sum": + return torch.sum(data, dim=dim) + elif name == "max": + return torch.max(data, dim=dim)[0] + else: + assert False, "currently only support mean, sum and max reduce type" + + def get_masks(self, scores, target_sparsity_ratio, pre_masks): + """Generate the weight masks according to the weight score and the current target sparsity ratio. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the sparsity of the modules will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The previous masks generated after the last pruning step. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. 1 means keep, 0 means drop + + """ + if self.is_global: + return self.get_masks_global(scores, target_sparsity_ratio, pre_masks) + else: + return self.get_masks_local(scores, target_sparsity_ratio, pre_masks) + + def get_masks_global(self, scores, target_sparsity_ratio, pre_masks): + """Generate the weight masks for global pruning, please refer to function get_masks for more information.""" + raise NotImplementedError + + def get_masks_local(self, scores, target_sparsity_ratio, pre_masks): + """Generate the weight masks for local pruning. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the sparsity of the modules will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The previous masks generated after the last pruning step. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. 
1 means keep, 0 means drop + + """ + masks = {} + if isinstance(self, PatternNxM) and not isinstance(self.block_size, dict): + self.block_size = self.get_block_size_dict(pre_masks) + for key in scores.keys(): + score = {key: scores[key]} + pre_mask = {key: pre_masks[key]} + mask = self.get_masks_global(score, target_sparsity_ratio, pre_mask) + masks[key] = mask[key] + return masks + + def get_single_mask_per_target_ratio(self, score, exact_sparsity_ratio): + """Generate a mask for one layer with the exact_sparsity_ratio. + + Args: + score: A Tensor. the pruning scores of each weight elements. + exact_sparsity_ratio: A float. After pruning, the layer's sparsity will reach this value. + + Returns: + A Tensor with the identical size as score. a new mask. + """ + flattern_score = torch.flatten(score) + k = int(exact_sparsity_ratio * flattern_score.numel()) + threshold, _ = torch.kthvalue(flattern_score, k) + if not k < 1: + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + else: + mask = torch.ones(score.shape, device=score.device) + return mask + + def get_block_size_dict(self, data): + """Get pattern size for each module. + + this is mainly for per-channel pruning when each module has different pruning size + + Args: + data: the input data + + Returns: + To be implemented in subclasses. + """ + raise NotImplementedError + + def get_sparsity_ratio(self, pre_masks, return_dict=False): + """Calculate the zero elements' ratio in pre_masks. + + please be noted that the implementations in subclass are little tricky + TODO: need to refactor this function + + Args: + pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. + return_dict: Whether need to return more information like zero_cnt and total_cnt + Returns: + A float. The zero elements' ratio in pre_masks. 
+ """ + zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + pre_mask = pre_masks[key] + zero_cnt += torch.sum(pre_mask == 0.0).data.item() + total_cnt += pre_masks[key].numel() ##FIXME + if return_dict: + return {"sparsity_ratio": float(zero_cnt) / total_cnt, "zero_cnt": zero_cnt, "total_cnt": total_cnt} + else: + return float(zero_cnt) / total_cnt + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map according the pattern and weights' zero positions. + + Args: + modules: a dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. + """ + pattern_lock_masks = {} + for key in modules.keys(): + weight = modules[key].weight + shape = weight.shape + mask = torch.ones(shape) + mask[weight == 0] = 0.0 + pattern_lock_masks[key] = mask.to(weight.device) + return pattern_lock_masks + + def check_layer_validity(self): + """Check if a layer is valid for this block_size.""" + pass + + def get_reduced_masks_from_data(self, data, key): + """Obtain the unpruned weights and reshape according to the block_size.""" + raise NotImplementedError + + def update_residual_cnt(self, masks, target_sparsity_ratio): + """Update the number of parameters yet to be pruned. + + Args: + masks: the current pruning mask + target_sparsity_ratio: A float. After pruning, the sparsity of the modules will reach this value. + + Returns: + An int. How many weights still need to be pruned to achieve the target sparsity ratio + """ + self.total_params_cnt = self.get_sparsity_ratio(masks, return_dict=True)["total_cnt"] + to_prune_cnt = int(self.total_params_cnt * target_sparsity_ratio) + for key in masks.keys(): + if self.keep_mask_layers.get(key, False): + zero_cnt = self.get_sparsity_ratio({key: masks[key]}, return_dict=True)["zero_cnt"] + to_prune_cnt -= zero_cnt + + return to_prune_cnt + + def get_sparsity_ratio_each_layer(self, masks): + """Calculate the sparsity ratio of each layer. 
+ + TODO: need to refactor this function + + Args: + masks: The current weight masks + + Returns: + infos: the sparsity information for each layer, sparsity_ratio, zero_point and total cnts + SparsityInfo: the sparsity information for the model + """ + infos = {} + zero_cnts = 0 + total_cnts = 0 + for key in masks.keys(): + if key in self.invalid_layers: + continue + reduced_mask = self.get_reduced_masks_from_data(masks[key], key) + zero_cnt = (int(torch.sum(reduced_mask == 0.0).data.item())) + total_cnt = int(reduced_mask.numel()) + sparsity_ratio = float(zero_cnt) / total_cnt + val = SparsityInfo(zero_cnt, total_cnt, sparsity_ratio) + infos[key] = val + zero_cnts += zero_cnt + total_cnts += total_cnt + sparsity_ratio = float(zero_cnts) / total_cnts + return infos, SparsityInfo(zero_cnts, total_cnts, sparsity_ratio) + + def adjust_ratio(self, masks: dict, layer_name: str, key_new_sparsity: SparsityInfo, + max_sparsity_ratio: float, min_sparsity_ratio: float, \ + final_target_sparsity_ratio: float): + """Limits the sparsity of a layer to the set threshold interval. + + Args: + masks: the weight masks + layer_name: the to be examined layer name + key_new_sparsity: the proposal ratio for the layer + max_sparsity_ratio: A float. The maximum sparsity that one layer could reach + min_sparsity_ratio: A float. The minimum sparsity that one layer could reach + final_target_sparsity_ratio: the final target sparsity ratio + + Returns: + A bool indicating if the ratio needs to be adjusted and the adjusted sparsity ratio. 
+ adjust_sparsity_ratio: the ratio adjusted + """ + need_adjust = False + adjust_zero_cnt = key_new_sparsity.zero_cnt + adjust_sparsity_ratio = key_new_sparsity.sparsity_ratio + adjust_total_cnt = key_new_sparsity.total_cnt + + if adjust_sparsity_ratio > max_sparsity_ratio: + need_adjust = True + adjust_sparsity_ratio = max_sparsity_ratio + adjust_zero_cnt = int(adjust_total_cnt * max_sparsity_ratio) + + if adjust_sparsity_ratio < min_sparsity_ratio: + return need_adjust, adjust_sparsity_ratio + + ##TODO no need to calculate each time + infos, net_info = self.get_sparsity_ratio_each_layer(masks) + + any_exceed_target_ratio = False + for key in infos.keys(): + if infos[key].sparsity_ratio > final_target_sparsity_ratio: + any_exceed_target_ratio = True + break + if adjust_sparsity_ratio > final_target_sparsity_ratio: + any_exceed_target_ratio = True + if not any_exceed_target_ratio: + return need_adjust, adjust_sparsity_ratio + + zero_cnt_below_min_sparsity = 0 + total_cnt_below_min_sparsity = 0 + zero_cnt_above_min_sparsity = 0 + for key in infos.keys(): + info = infos[key] + if key == layer_name: + info = SparsityInfo(zero_cnt=adjust_zero_cnt, total_cnt=adjust_total_cnt, + sparsity_ratio=adjust_sparsity_ratio) + if info.sparsity_ratio < min_sparsity_ratio: + zero_cnt_below_min_sparsity += info.zero_cnt + total_cnt_below_min_sparsity += info.total_cnt + else: + zero_cnt_above_min_sparsity += info.zero_cnt + + gap_cnt = int(total_cnt_below_min_sparsity * min_sparsity_ratio) - zero_cnt_below_min_sparsity + remaining_cnt = int(net_info.total_cnt * final_target_sparsity_ratio) \ + - zero_cnt_above_min_sparsity - zero_cnt_below_min_sparsity + if remaining_cnt >= gap_cnt: + return need_adjust, adjust_sparsity_ratio + else: + new_zero_cnt = adjust_zero_cnt - (gap_cnt - remaining_cnt) + new_sparsity_ratio = float(new_zero_cnt) / adjust_total_cnt + ##adjust_zero_cnt = new_zero_cnt + adjust_sparsity_ratio = new_sparsity_ratio + return True, adjust_sparsity_ratio + + 
+@register_pattern('NxM')
+class PatternNxM(BasePattern):
+    """Pruning Pattern.
+
+    A Pattern class derived from BasePattern. In this pattern, the weights in a NxM block will be pruned or kept
+    during one pruning step.
+
+    Args:
+        config: A config dict object. Contains the pattern information.
+
+    Attributes:
+        block_size: A list of two integers. The height and width of the block.
+            Please be aware that the vertical direction of a Linear layer's weight in PyTorch refers to the output channel.
+            Because PyTorch's tensor matmul has a hidden transpose operation.
+    """
+
+    def __init__(self, config, modules):
+        """Initialize the basic pruning unit of NxM pattern."""
+        super(PatternNxM, self).__init__(config, modules)
+        pattern = self.pattern.split('_')[-1]
+        self.N = pattern.split('x')[0]
+        self.M = pattern.split('x')[1]
+        if self.N == "channel": ##channel-wise pruning mode
+            self.block_size = ["channel", int(self.M)]
+        elif self.M == "channel": ##channel-wise pruning mode
+            self.block_size = [int(self.N), "channel"]
+        else:
+            self.block_size = [int(pattern.split('x')[0]), int(pattern.split('x')[1])]
+        self.total_params_cnt = -1
+
+        self.block_size = self.get_block_size_dict()
+        self.check_layer_validity()
+
+    def get_block_size_dict(self):
+        """Calculate the pruning pattern's block shape for each layer.
+
+        Args:
+            data: not passed in; the layers' weights are read from self.modules.
+
+        Returns:
+            A dict. Dict{"layer_name": [block_size_1, block_size_2]}.
+            Containing layers' corresponding pruning pattern's block shape.
+            Because in channel-wise pruning different layers can have different pruning patterns.
+ """ + data = self.modules + block_sizes_dict = {} + if self.N == "channel" or self.M == "channel": + for key in data.keys(): + if isinstance(data[key], torch.nn.Module): + shape = data[key].weight.shape + else: + shape = data[key].shape + if self.N == "channel": + block_sizes_dict[key] = [shape[0], 1] + else: + block_sizes_dict[key] = [1, shape[1]] + return block_sizes_dict + for key in data.keys(): + block_sizes_dict[key] = self.block_size + return block_sizes_dict + + def check_layer_validity(self): + """Check if a layer is valid for this block_size.""" + block_sizes = self.block_size + datas = self.modules + for key in datas.keys(): + data = datas[key].weight + data = self._reshape_orig_to_2dims(data) + shape = data.shape + block_size = block_sizes[key] + if shape[0] % block_size[0] != 0 or shape[1] % block_size[1] != 0: ## only consider input channel + self.invalid_layers.append(key) + logger.warning(f"{key} shape {data.shape} cannot be divided by {self.pattern}") + + def get_reduced_masks_from_data(self, data, key): + """Obtain the unpruned weights and reshape according to the block_size.""" + assert key not in self.invalid_layers + block_size = self.block_size[key] + data = self._reshape_orig_to_2dims(data) + shape = data.shape + new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], block_size[1]] + data = data.reshape(new_shape) + data = data.sum(-1).sum(1) + reduced_mask = data != 0 + return reduced_mask + + def get_sparsity_ratio(self, pre_masks, return_dict=False): + """Please note that the zero cnt and total cnt are all block_wise for supporting channel-wise pruning. + + Args: + pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. + + Returns: + A float. Calculate the zero elements' ratio in pre_masks. 
+ """ + zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + if key in self.invalid_layers: + continue + reduced_mask = self.get_reduced_masks_from_data(pre_masks[key], key) + zero_cnt += (int(torch.sum(reduced_mask == 0.0).data.item())) + total_cnt += int(reduced_mask.numel()) + if total_cnt == 0: + sparsity_ratio = 0.0 + else: + sparsity_ratio = float(zero_cnt) / total_cnt + if return_dict: + return {"sparsity_ratio": sparsity_ratio, "zero_cnt": zero_cnt, "total_cnt": total_cnt} + else: + return sparsity_ratio + + def get_sparsity_ratio_progressive(self, pre_masks, return_dict=False): + """Calculate the sparsity ratio of each layer.""" + zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + if key in self.invalid_layers: + continue + # progressive masks are unstructured, therefore directly find zeros + zero_cnt += float(torch.sum(pre_masks[key] == 0).data.item()) + total_cnt += float(pre_masks[key].numel()) + return (zero_cnt / total_cnt) + + def _reshape_orig_to_2dims(self, data): + """Mainly for processing layer dims not equal to 2, for example conv layer. + + Args: + data: the input + + Returns: + a reshaped data + """ + ##TODO need to verify whether it's ok for transposed conv + if len(data.shape) == 4: + data = data.permute(0, 2, 3, 1) ##cout,k,k,cin + data = data.reshape(data.shape[0], -1) + return data + + def _reshape_2dims_to_orig(self, data, orig_shape): + """Mainly for recover layer dims not equal to 2, for example conv layer. + + Args: + data: input + orig_shape: target shape + + Returns: + a reshaped data + """ + if len(orig_shape) == 4: + data = data.reshape(orig_shape[0], orig_shape[2], orig_shape[3], + orig_shape[1]) + data = data.permute(0, 3, 1, 2) + return data + + def reshape_orig_to_pattern(self, data, key): + """Reshape the data(s1,s2) to [s1/N,N,s2,s2/M]. + + Args: + data: the input + key: the layer name + + Returns: + The reshaped input tensor. 
+ """ + block_size = self.block_size[key] + data = self._reshape_orig_to_2dims(data) + shape = data.shape + new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], + block_size[1]] + data = data.reshape(new_shape) + return data + + def reshape_reduced_to_orig(self, data, key, orig_shape): + """Reshape the data [s1/N,s2/M] to [s1,s2], also permute dims for conv layer. + + Args: + data: + key: + orig_shape: + + Returns: + Original shape data + """ + block_size = self.block_size[key] + data = data.repeat_interleave(block_size[0], dim=0).repeat_interleave(block_size[1], dim=-1) + data = self._reshape_2dims_to_orig(data, orig_shape) + return data + + def reduce_scores(self, scores): + """Recalculate the pruning scores after reducing the data.""" + new_scores = {} + for key in scores.keys(): + if key in self.invalid_layers: + continue + if self.keep_mask_layers.get(key, False): + continue + self.keep_mask_layers[key] = False + current_score = scores[key] + current_score = self.reshape_orig_to_pattern(current_score, key) + ##sum or mean is quite different for per channel pruning + current_score_sum = self.reduce_tensor(self.reduce_tensor(current_score, dim=-1), dim=1) + new_scores[key] = current_score_sum + return new_scores + + def get_mask_per_threshold(self, score, threshold, block_size): + """Get the mask per threshold.""" + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + mask = mask.repeat_interleave(block_size[0], dim=0).repeat_interleave(block_size[1], dim=-1) + return mask + + def get_masks_global(self, scores, cur_target_sparsity_ratio, pre_masks, + keep_exact_sparsity_ratio=True): + """Generate masks for layers. + + Gather all layer's scores together and calculate a common threshold. + This threshold will be applied for all layers. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. 
+ cur_target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_op: A float. The maximum sparsity that one layer can reach. + keep_pre_masks: A bool. If True, keep the masks unchanged. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. + """ + ##keep the masks if the layer exceed max sparsity ratio + + masks = pre_masks + + k_blockwise = self.update_residual_cnt(masks, cur_target_sparsity_ratio) + if k_blockwise <= 0: + return masks + new_scores = self.reduce_scores(scores) + global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) + residual_k = k_blockwise + not_exceed_layers = [key for key in new_scores.keys()] + if self.min_sparsity_ratio_per_op > 0: + sparsity_infos_perlayer, _ = self.get_sparsity_ratio_each_layer(masks) + + while True: + threshold, _ = torch.kthvalue(global_scores, residual_k) + for key in not_exceed_layers: + block_size = self.block_size[key] + score = new_scores[key] + mask = self.get_mask_per_threshold(score, threshold, block_size) + info = self.get_sparsity_ratio({key: mask}, return_dict=True) + zero_cnt = info["zero_cnt"] + total_cnt = info["total_cnt"] + current_sparsity_ratio = float(zero_cnt) / total_cnt + key_new_sparsity = SparsityInfo(zero_cnt, total_cnt, current_sparsity_ratio) + need_adjust, adjust_ratio = self.adjust_ratio(masks, key, key_new_sparsity, + self.max_sparsity_ratio_per_op, + self.min_sparsity_ratio_per_op, + self.target_sparsity_ratio) + if need_adjust: + # uptade status + self.keep_mask_layers[key] = True + masks[key] = self.get_single_mask_per_target_ratio(new_scores[key], adjust_ratio) + masks[key] = masks[key].repeat_interleave(block_size[0], 0).repeat_interleave(block_size[1], -1) + if keep_exact_sparsity_ratio: + zero_cnt = self.get_sparsity_ratio({key: masks[key]}, return_dict=True)["zero_cnt"] + residual_k 
-= zero_cnt + else: + masks[key] = mask + if not keep_exact_sparsity_ratio: + break + new_not_exceed_layers = [key for key in new_scores.keys() if not self.keep_mask_layers.get(key, False)] + if not_exceed_layers == new_not_exceed_layers or len(new_not_exceed_layers) == 0: + break + not_exceed_layers = new_not_exceed_layers + global_scores = torch.cat([torch.flatten(new_scores[key]) for key in not_exceed_layers]) + + for key in masks.keys(): + if key in self.invalid_layers: + continue + if len(scores[key].shape) == 4: ## need to permute + mask = masks[key] + orig_shape = scores[key].shape + mask = self._reshape_2dims_to_orig(mask, orig_shape) + masks[key] = mask + layer_ratio = torch.sum(masks[key] == 0.0).data.item() / masks[key].numel() + logger.info(f'layer {key} sparsity_ratio is {layer_ratio}') + return masks + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map, by masking where weights' are zero. + + Args: + modules: A dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. 
+ """ + pattern_lock_masks = {} + for key in modules.keys(): + weight = modules[key].weight + ori_shape = weight.shape + if key in self.invalid_layers: + mask = torch.ones(weight.shape, device=weight.device) + pattern_lock_masks[mask] = mask + continue + reduced_mask = self.get_reduced_masks_from_data(weight, key) + mask = self.reshape_reduced_to_orig(reduced_mask, key, ori_shape) + pattern_lock_masks[key] = mask + return pattern_lock_masks + + # ---------------progressive related-------------------- + def count_new_masked_cnts(self, new_added_masks): + """Cound the number of elements to be masked.""" + # count how many elements are to masked, + new_masked_cnts = 0 + for key in new_added_masks.keys(): + new_masked_cnts += torch.nonzero(1 - new_added_masks[key]).size()[0] + return new_masked_cnts + + def update_new_added_masks(self, pre_masks, cur_masks): + """Obtain the new set-to-zero mask during a pruning procedure. + + Pre_masks, cur_masks should have identical keys bacause they stands for one model. + """ + # obtain the new set-to-zero mask during a pruning procedure. + # pre_masks, cur_masks should have identical keys bacause they stands for one model. 
+ new_added_masks = {} + for key in pre_masks.keys(): + pre_mask = pre_masks[key] + cur_mask = cur_masks[key] + zero = torch.tensor([0.]).to(pre_mask.device) + one = torch.tensor([1.]).to(cur_mask.device) + new_added_masks[key] = torch.where(pre_mask == cur_mask, one, zero) + return new_added_masks + + def update_progressive_masks(self, pre_masks, cur_masks, scores, progressive_step, progressive_configs): + """Generate the progressive masks.""" + # Generate the progressive masks + use_global = progressive_configs["use_global"] + if use_global: + return self.update_progressive_masks_global(pre_masks, cur_masks, scores, \ + progressive_step, progressive_configs) + else: + return self.update_progressive_masks_local(pre_masks, cur_masks, scores, \ + progressive_step, progressive_configs) + + def update_progressive_masks_linear(self, pre_masks, cur_masks, progressive_step, progressive_configs): + """Generate the progressive masks along the block's larger dimension.""" + progressive_steps = progressive_configs["progressive_steps"] + progressive_masks = {} + new_added_masks = self.update_new_added_masks(pre_masks, cur_masks) + for key in pre_masks.keys(): + block_size = self.block_size[key] + new_added_mask = new_added_masks[key] + # conv + new_added_mask = self._reshape_orig_to_2dims(new_added_mask) + shape = new_added_mask.shape + # progressive masks are generated in the direction of block's large dim. 
+ if block_size[0] >= block_size[1]: + # NxM (N>=M), output channel pruning + new_shape = [shape[0] // block_size[0], progressive_steps, block_size[0] // progressive_steps, + shape[1] // block_size[1], block_size[1]] + new_added_mask_reshape = new_added_mask.reshape(new_shape) + new_added_mask_reshape[:, progressive_step:, :, :, :] = 1.0 + else: + # NxM (N N + return reduced_mask + + def get_least_ninm_mask_from_data(self, score): + """Generate the least N scores in M.""" + current_score = score + M = self.M + N = self.N + current_score = self._reshape_orig_to_2dims(current_score) + shape = current_score.shape + new_shape = [shape[0], shape[1] // M, M] + current_score_new = current_score.reshape(new_shape) + + threshold, _ = torch.kthvalue(current_score_new, N, dim=2) + threshold = threshold.unsqueeze(-1) + + threshold = threshold.expand(shape[0], shape[1] // M, M) + threshold = threshold.reshape((shape[0], shape[1])) + + one = torch.tensor([1.]).to(current_score.device) + zero = torch.tensor([0.]).to(current_score.device) + mask = torch.where(current_score <= threshold, zero, one) + return mask + + def get_sparsity_ratio(self, pre_masks, return_dict=False): + """Please noted that the zero cnt and total cnt are all block_wise for supporting channel-wise pruning. + + The return sparsity ratio is elementwised(confused, TODO). + + Args: + pre_masks: + return_dict: + + Returns: + An elementwise sparisty ratio. 
+ """ + ##simply use elemwise sparsity + zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + if key in self.invalid_layers: + # total_cnt += pre_masks[key].numel() // self.M + continue + reduced_mask = self.get_reduced_masks_from_data(pre_masks[key], key) + zero_cnt += int((torch.sum(reduced_mask == 0)).data.item()) + total_cnt += int(reduced_mask.numel()) + sparsity_ratio = float(zero_cnt) / total_cnt * self.N / self.M + + if return_dict: + return {"sparsity_ratio": sparsity_ratio, "zero_cnt": zero_cnt, + "total_cnt": total_cnt} + else: + return sparsity_ratio + + def _reshape_orig_to_2dims(self, data): + if len(data.shape) == 4: ##TODO need to verify whether it's ok for transposed conv + data = data.permute(0, 2, 3, 1) ##cout,k,k,cin + data = data.reshape(data.shape[0], -1) + return data + + def _reshape_2dims_to_orig(self, data, orig_shape): + if len(orig_shape) == 4: + data = data.reshape(orig_shape[0], orig_shape[2], orig_shape[3], orig_shape[1]) + data = data.permute(0, 3, 1, 2) + return data + + def reshape_orig_to_pattern(self, data, key): + """Reshape the data based on the pruning pattern.""" + data = self._reshape_orig_to_2dims(data) + shape = data.shape + new_shape = [shape[0], shape[1] // self.M, self.M] + data = data.reshape(new_shape) + return data + + def reshape_reduced_to_orig(self, data, key, orig_shape): + """Reshape the reduced data to its original shape.""" + data = data.repeat_interleave(self.M, dim=-1) + return self._reshape_2dims_to_orig(data, orig_shape) + + def reduce_scores(self, scores): + """Calculate the pruning scores after reducing the data and obtain the least N scores in M.""" + ##to get the least N scores in M + M = self.M + N = self.N + least_ninm_masks = {} + new_scores = {} + for key in scores.keys(): + if key in self.invalid_layers: + continue + if self.keep_mask_layers.get(key, False): + continue + current_score = scores[key] + mask = self.get_least_ninm_mask_from_data(current_score) + current_score_new = 
self._reshape_orig_to_2dims(current_score) + shape = current_score_new.shape + current_score_new = current_score_new.reshape((shape[0], shape[1])) + ##to get the sum of N scores in each block with M + current_score_new = current_score_new * (1.0 - mask) + current_score_new = current_score_new.reshape(shape[0], shape[1] // M, M) + score_sum = self.reduce_tensor(current_score_new, dim=-1) + least_ninm_masks[key] = mask + new_scores[key] = score_sum + return new_scores, least_ninm_masks + + def get_ele_mask_per_threshold(self, score, threshold, block_size, least_ninm_mask): + """Get the elementwise mask per threshold. + + Args: + score: + threshold: + block_size: + least_m_in_m_masks: + + Returns: + mask: + """ + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + mask = mask.repeat_interleave(block_size[1], dim=-1) + ## both zero will be zero + mask = (mask + least_ninm_mask) + mask = torch.where(mask <= 0, zero, one) + return mask + + def get_masks_global(self, scores, cur_target_sparsity_ratio, pre_masks, + keep_exact_sparsity_ratio=True): + """Generate masks for layers. + + Gather all layer's scores together and calculate a common threshold. + This threshold will be applied for all layers. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_op: A float. The maximum sparsity that one layer can reach. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. 
+ """ + masks = pre_masks + + block_sparsity_ratio = cur_target_sparsity_ratio * self.M / self.N + k_blockwise = self.update_residual_cnt(pre_masks, block_sparsity_ratio) + if k_blockwise <= 0: + return masks + new_scores, least_ninm_masks = self.reduce_scores(scores) + global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) ##block_wise + residual_k = k_blockwise + not_exceed_layers = [key for key in new_scores.keys()] + + while True: + threshold, _ = torch.kthvalue(global_scores, residual_k) + for key in not_exceed_layers: + score = new_scores[key] + mask = self.get_ele_mask_per_threshold(score, threshold, (self.N, self.M), least_ninm_masks[key]) + info = self.get_sparsity_ratio({key: mask}, return_dict=True) + zero_cnt = info["zero_cnt"] + total_cnt = info["total_cnt"] + current_sparsity_ratio = float(zero_cnt) / total_cnt + key_new_sparsity = SparsityInfo(zero_cnt, total_cnt, current_sparsity_ratio) + need_adjust, adjust_ratio = self.adjust_ratio(masks, key, key_new_sparsity, + self.max_sparsity_ratio_per_op * self.M / self.N, + self.min_sparsity_ratio_per_op * self.M / self.N, + self.target_sparsity_ratio * self.M / self.N) + + if need_adjust: + self.keep_mask_layers[key] = True + masks[key] = self.get_single_mask_per_target_ratio(new_scores[key], adjust_ratio) + masks[key] = masks[key].repeat_interleave(self.M, dim=-1) + ## both zero will be zero + masks[key] = (masks[key] + least_ninm_masks[key]) + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + masks[key] = torch.where(masks[key] <= 0, zero, one) + if keep_exact_sparsity_ratio: + zero_cnt = self.get_sparsity_ratio({key: masks[key]}, return_dict=True)["zero_cnt"] + residual_k -= zero_cnt + else: + masks[key] = mask + if not keep_exact_sparsity_ratio: + break + new_not_exceed_layers = [key for key in new_scores.keys() if not self.keep_mask_layers.get(key, False)] + if not_exceed_layers == new_not_exceed_layers or len(new_not_exceed_layers) == 0: + 
break + not_exceed_layers = new_not_exceed_layers + global_scores = torch.cat([torch.flatten(new_scores[key]) for key in not_exceed_layers]) + + for key in masks.keys(): + if key in self.invalid_layers: + continue + if len(scores[key].shape) == 4: ## need to permute + mask = masks[key] + orig_shape = scores[key].shape + mask = self._reshape_2dims_to_orig(mask, orig_shape) + masks[key] = mask + layer_ratio = torch.sum(masks[key] == 0.0).data.item() / masks[key].numel() + logger.info(f'layer {key} sparsity_ratio is {layer_ratio}') + return masks + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map, by masking where weights' are zero. + + Args: + modules: A dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. + """ + pattern_lock_masks = {} + for key in modules.keys(): + weight = modules[key].weight + orig_shape = weight.shape + if key in self.invalid_layers: + mask = torch.ones(orig_shape, device=weight.device) + pattern_lock_masks[key] = mask + continue + mask = self.get_least_ninm_mask_from_data(weight) + mask = self._reshape_2dims_to_orig(mask, orig_shape) + pattern_lock_masks[key] = mask + return pattern_lock_masks diff --git a/neural_compressor/pruners/__init__.py b/neural_compressor/pruner/pruner_legacy/__init__.py similarity index 100% rename from neural_compressor/pruners/__init__.py rename to neural_compressor/pruner/pruner_legacy/__init__.py diff --git a/neural_compressor/pruners/gradient_sensitivity.py b/neural_compressor/pruner/pruner_legacy/gradient_sensitivity.py similarity index 99% rename from neural_compressor/pruners/gradient_sensitivity.py rename to neural_compressor/pruner/pruner_legacy/gradient_sensitivity.py index e6ae10e0ee6..46683c14e23 100644 --- a/neural_compressor/pruners/gradient_sensitivity.py +++ b/neural_compressor/pruner/pruner_legacy/gradient_sensitivity.py @@ -18,7 +18,7 @@ import numpy as np from .pruner import 
pruner_registry, Pruner from heapq import heappush, heappop -from ..utils import logger +from neural_compressor.utils import logger import re @pruner_registry diff --git a/neural_compressor/pruners/group_lasso.py b/neural_compressor/pruner/pruner_legacy/group_lasso.py similarity index 98% rename from neural_compressor/pruners/group_lasso.py rename to neural_compressor/pruner/pruner_legacy/group_lasso.py index fc659bdafa1..045fa18d07d 100644 --- a/neural_compressor/pruners/group_lasso.py +++ b/neural_compressor/pruner/pruner_legacy/group_lasso.py @@ -20,7 +20,7 @@ import numpy as np from .pruner import pruner_registry, Pruner from .magnitude import BasicMagnitudePruner -from ..utils import logger +from neural_compressor.utils import logger @pruner_registry class GroupLassoPruner(BasicMagnitudePruner): diff --git a/neural_compressor/pruners/magnitude.py b/neural_compressor/pruner/pruner_legacy/magnitude.py similarity index 98% rename from neural_compressor/pruners/magnitude.py rename to neural_compressor/pruner/pruner_legacy/magnitude.py index 752e1cf2268..9544d9474b2 100644 --- a/neural_compressor/pruners/magnitude.py +++ b/neural_compressor/pruner/pruner_legacy/magnitude.py @@ -17,7 +17,7 @@ import numpy as np from .pruner import pruner_registry, Pruner -from ..utils import logger +from neural_compressor.utils import logger @pruner_registry class BasicMagnitudePruner(Pruner): diff --git a/neural_compressor/pruners/pattern_lock.py b/neural_compressor/pruner/pruner_legacy/pattern_lock.py similarity index 100% rename from neural_compressor/pruners/pattern_lock.py rename to neural_compressor/pruner/pruner_legacy/pattern_lock.py diff --git a/neural_compressor/pruners/pruner.py b/neural_compressor/pruner/pruner_legacy/pruner.py similarity index 98% rename from neural_compressor/pruners/pruner.py rename to neural_compressor/pruner/pruner_legacy/pruner.py index 64d2e44cdda..6384235af30 100644 --- a/neural_compressor/pruners/pruner.py +++ 
b/neural_compressor/pruner/pruner_legacy/pruner.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..experimental.pruning_recipes.patterns import patterns +from neural_compressor.experimental.pruning_recipes.patterns import patterns PRUNERS = {} diff --git a/neural_compressor/pruners/util/block_mask.py b/neural_compressor/pruner/pruner_legacy/util/block_mask.py similarity index 100% rename from neural_compressor/pruners/util/block_mask.py rename to neural_compressor/pruner/pruner_legacy/util/block_mask.py diff --git a/neural_compressor/pruner/pruners.py b/neural_compressor/pruner/pruners.py new file mode 100644 index 00000000000..c9a7cf436ae --- /dev/null +++ b/neural_compressor/pruner/pruners.py @@ -0,0 +1,565 @@ +"""Pruner.""" +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +from neural_compressor.utils.utility import LazyImport +torch = LazyImport('torch') +from .patterns import get_pattern +from .schedulers import get_scheduler +from .criteria import get_criterion, CRITERIAS +from .regs import get_reg +from .logger import logger + +PRUNERS = {} + + +def register_pruner(name): + """Class decorator to register a Pruner subclass to the registry. + + Decorator function used before a Pattern subclass. 
+ Make sure that the Pruner class decorated by this function can be registered in PRUNERS. + + Args: + cls (class): The subclass of register. + name: A string. Define the pruner type. + + Returns: + cls: The class of register. + """ + + def register(pruner): + PRUNERS[name] = pruner + return pruner + + return register + + +def get_pruner(config, modules): + """Get registered pruner class. + + Get a Pruner object from PRUNERS. + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + + Returns: + A Pruner object. + + Raises: AssertionError: Cuurently only support pruners which have been registered in PRUNERS. + """ + ## do the ugly work here + if "progressive" not in config["pruning_type"]: + name = config["pruning_type"] + config["progressive"] = False + else: + # if progressive, delete "progressive" words and reset config["progressive"] + name = config["pruning_type"][0:-12] + config["progressive"] = True + if name in CRITERIAS: + if config["progressive"] == False: + config['criterion_type'] = name + name = "basic" ##return the basic pruner + else: + config['criterion_type'] = name + name = "progressive" ## return the progressive pruner + + if name not in PRUNERS.keys(): + assert False, f"does not support {name}, currently only support {PRUNERS.keys()}" + return PRUNERS[name](config, modules) + + +class BasePruner: + """Pruning Pruner. + + The class which executes pruning process. + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + + Attributes: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + masks: A dict {"module_name": Tensor}. Store the masks for modules' weights. + scores: A dict {"module_name": Tensor}. 
Store the score for modules' weights, + which are used to decide pruning parts with a criterion. + pattern: A Pattern object. Defined in ./patterns.py + scheduler: A scheduler object. Defined in ./scheduler.py + current_sparsity_ratio: A float. Current model's sparsity ratio, initialized as zero. + global_step: An integer. The total steps the model has run. + start_step: An integer. When to trigger pruning process. + end_step: An integer. When to end pruning process. + pruning_frequency: An integer. The pruning frequency, which's valid when iterative + pruning is enabled. + target_sparsity_ratio: A float. The final sparsity after pruning. + max_sparsity_ratio_per_op: A float. Sparsity ratio maximum for every module. + """ + + def __init__(self, config, modules): + """Initialize.""" + self.modules = modules + self.config = config + self.masks = {} + self.global_step = 0 + self.handled_global_step = -1 + self.start_step = self.config['start_step'] + self.end_step = self.config['end_step'] + self.pruning_frequency = self.config['pruning_frequency'] + ##this is different with original code + self.total_prune_cnt = (self.end_step - self.start_step + 1) \ + // self.pruning_frequency + self.completed_pruned_cnt = 0 + for key in self.modules.keys(): + module = self.modules[key] + self.masks[key] = torch.ones(module.weight.shape).to(module.weight.device) ##TODO support bias or others + + self.target_sparsity_ratio = self.config['target_sparsity'] + self.current_sparsity_ratio = 0.0 + self.init_sparsity_ratio = 0.0 + self._init() + + def _init(self): + """Auxiliary function for initializing.""" + pass + + def on_epoch_begin(self, epoch): + """Implement at the beginning of each epoch.""" + pass + + def mask_weights(self): + """Apply masks to corresponding modules' weights. + + Weights are multipled with masks. This is the formal pruning process. 
+ """ + with torch.no_grad(): + for key in self.modules.keys(): + module = self.modules[key] + module.weight.data = module.weight.data * self.masks[key] + + def mask_weights_general(self, input_masks): + """Apply input masks to corresponding modules' weights. + + Weights are multipled with input_masks. + + Args: + input_masks: A dict {"module_name": Tensor}. Store the masks for modules' weights. + """ + with torch.no_grad(): + for key in self.modules.keys(): + module = self.modules[key] + module.weight.data = module.weight.data * input_masks[key] + + def on_step_begin(self, local_step): + """Implement at the start of each step.""" + if self.handled_global_step == self.global_step: + return + self.update_masks(local_step) + self.handled_global_step = self.global_step + + def update_masks(self, local_step): + """Update the masks at a given local step.""" + pass + + def on_epoch_end(self): + """Implement at the end of each epoch.""" + pass + + def on_step_end(self): + """Implement at the end of each step.""" + pass + + def on_before_optimizer_step(self): + """Implement before optimizer.step().""" + pass + + def on_after_optimizer_step(self): + """Implement after optimizer.step(). + + Prune the model after optimization. + """ + self.mask_weights() + self.global_step += 1 + + def on_train_begin(self): + """Implement at the beginning of training phase.""" + pass + + def on_train_end(self): + """Implement at the end of training phase.""" + pass + + def on_before_eval(self): + """Implement at the beginning of evaluation phase.""" + pass + + def on_after_eval(self): + """Implement at the end of evaluation phase.""" + pass + + def check_is_pruned_step(self, step): + """Check if a pruning process should be performed at the current step. + + Args: + step: an integer representing the number of current step. + + Returns: + A Boolean. 
+ """ + if step < self.start_step or step > self.end_step: + return False + if int(step - self.start_step) % self.pruning_frequency == 0: + return True + return False + + +@register_pruner("basic") +class BasicPruner(BasePruner): + """Pruning Pruner. + + The class which executes pruning process. + 1. Defines pruning functions called at step begin/end, epoch begin/end. + 2. Defines the pruning criterion. + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + + Attributes: + pattern: A Pattern object. Define pruning weights' arrangements within space. + criterion: A Criterion Object. Define which weights are to be pruned + scheduler: A Scheduler object. Define model's sparsity changing method as training/pruning executes. + reg: A Reg object. Define regulization terms. + """ + + def __init__(self, config, modules): + """Initialize.""" + # self.modules = modules + # self.config = config + # self.masks = {} + super(BasicPruner, self).__init__(config, modules) + + def _init(self): + """Auxiliary function for initializing.""" + self.pattern = get_pattern(self.config, self.modules) + self.scheduler = get_scheduler(self.config) + self.criterion = get_criterion(self.config, self.modules) + self.reg = get_reg(self.config, self.modules, self.pattern) + # if switch off progressive but use per-channel pruning, give a warn + if "channel" in self.pattern.pattern: + logger.info("UserWarning: use per-channel pruning pattern without progressive pruning!") + logger.info("Instead, enabling progressive pruning would be a better choice.") + else: + pass + + def set_global_step(self, global_step): + """Set global step number.""" + self.global_step = global_step + + # def on_step_begin(self, local_step): + # """Implement at the start of each step. + # + # Update the masks at a given local_step. 
+ # """ + # self.update_masks(local_step) + + def update_masks(self, local_step): + """Update the masks at a given local step.""" + if self.global_step == self.start_step: + if self.config['lock_init_sparsity']: + self.masks = self.pattern.get_pattern_lock_masks(self.modules) + self.init_sparsity_ratio = self.pattern.get_sparsity_ratio(self.masks) + self.current_sparsity_ratio = self.init_sparsity_ratio + + if not self.check_is_pruned_step(self.global_step): + return + + if self.current_sparsity_ratio > self.target_sparsity_ratio: + return + + self.criterion.on_step_begin() + current_target_sparsity_ratio = self.scheduler.update_sparsity_ratio(self.target_sparsity_ratio, + self.completed_pruned_cnt, + self.total_prune_cnt, self.masks, + self.init_sparsity_ratio) + logger.info(f"current target ratio is {current_target_sparsity_ratio}") + + self.completed_pruned_cnt += 1 + if self.criterion.scores == {}: + return + self.masks = self.pattern.get_masks(self.criterion.scores, current_target_sparsity_ratio, self.masks) + self.mask_weights() + + self.current_sparsity_ratio = self.pattern.get_sparsity_ratio(self.masks) + logger.info(f"current sparsity ratio is {self.current_sparsity_ratio}") + + def on_before_optimizer_step(self): + """Implement before optimizer.step().""" + self.reg.on_before_optimizer_step() + + def on_after_optimizer_step(self): + """Prune the model after optimization.""" + ##the order of the following three lines can't not be exchanged + self.reg.on_after_optimizer_step() + self.mask_weights() + self.criterion.on_after_optimizer_step() + self.global_step += 1 + + +@register_pruner('pattern_lock') +class PatternLockPruner(BasePruner): + """Pruning Pruner. + + A Pruner class derived from BasePruner. + In this pruner, original model's sparsity pattern will be fixed while training. + This pruner is useful when you want to train a sparse model without change its original structure. + + Args: + modules: A dict {"module_name": Tensor}. 
Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + + Attributes: + Inherit from parent class Pruner. + """ + + def __init__(self, config, modules): + """Initialize.""" + super(PatternLockPruner, self).__init__(config, modules) + self.pattern = get_pattern(self.config, modules) + assert self.config.end_step == self.config.start_step, "pattern_lock pruner only supports one shot mode" + + def update_masks(self, local_step): + """Update the masks at a given local step.""" + if not self.check_is_pruned_step(self.global_step): + return + self.masks = self.pattern.get_pattern_lock_masks(self.modules) + + def on_after_optimizer_step(self): + """Implement after optimizer.step(). + + Prune the model after optimization. + """ + self.mask_weights() + self.global_step += 1 + + +@register_pruner('progressive') +class ProgressivePruner(BasicPruner): + """Pruning Pruner. + + A Pruner class derived from BasePruner. In this pruner, mask interpolation will be applied. + Mask interpolation is a fine-grained improvement for NxM structured pruning, + By adding interval masks between masks of two pruning steps + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object. Contains the pruner information. + + Attributes: + Inherit from parent class Pruner. + """ + + def __init__(self, config, modules): + """Initialize.""" + super(ProgressivePruner, self).__init__(config, modules) + + def _init(self): + """Auxiliary function for initialization.""" + self.pattern = get_pattern(self.config, self.modules) + self.scheduler = get_scheduler(self.config) + self.criterion = get_criterion(self.config, self.modules) + self.reg = get_reg(self.config, self.modules, self.pattern) + # progressive pruning set up, including check up paramters. 
+ self.use_progressive = self.config["progressive"] + # progressive parameters + # dict passed to Pattern's functions + self.progressive_configs = { + "progressive_steps": 4, + "progressive_type": "scores", + "use_global": True + } + self.progressive_steps = self.progressive_configs["progressive_steps"] + self.progressive_type = self.progressive_configs["progressive_type"] + self.use_global = self.progressive_configs["use_global"] + self.progressive_logger = False + self._init_for_progressive() + + def _init_for_progressive(self): + """Auxiliary function for initializing progressive pruning.""" + # detailed progressive parameters will be stored in patterns.py + # step 1: check if pattern is NxM + if "x" not in self.pattern.pattern: + raise NotImplementedError(f"Currently progressive only " \ + f"support NxM and per-channel pruning patterns.") + + # step 2: check if current set up will "degrade" into non-progressive + degrading_flag = False + if (self.end_step - self.start_step) <= self.progressive_steps or self.progressive_steps <= 1: + logger.info("Current progressive setting will degrading to non-progressive pruning.") + self.use_progressive = False + return + + # step 3: log hyper-parameters and check validity.
+ if self.use_progressive: + logger.info(f"Progressive pruning is enabled!") + logger.info(f"Progressive pruning steps: {self.progressive_steps}") + logger.info(f"Progressive type: {self.progressive_type}") + logger.info(f"Progressive balance: {self.use_global}") + self.check_progressive_validity() + self.pre_masks = copy.deepcopy(self.masks) + self.progressive_masks = copy.deepcopy(self.masks) + if self.pruning_frequency < self.progressive_steps:##TODO trick + self.progressive_steps = self.pruning_frequency + # if self.progressive_steps == 3: + # self.progressive_steps = 2 + self.pruning_frequency_progressive = self.progressive_steps + else: + self.pruning_frequency_progressive = self.pruning_frequency // self.progressive_steps + # this is a structural pruning step, it fits self.pruning_frequency + self.structured_update_step = 0 + + def check_progressive_validity(self): + """Check if the settings of progressive pruning are valid.""" + # check some problematic settings + if self.progressive_type == "linear": + if self.use_global: + # when global progressive is applied, the linear type is contradictory.
+ raise NotImplementedError("Global progressive pruning do not support linear pattern") + # When linear, progressive_step should not meet a indivisible + for key in self.pattern.block_size.keys(): + block_size = self.pattern.block_size[key] + progressive_direction = max(block_size) + if progressive_direction % self.progressive_steps != 0: + raise ValueError( + f"In layer {key}, its pruning pattern is {block_size}, " \ + f"while progressive steps {self.progressive_steps} is indivisible.") + else: + for key in self.pattern.block_size.keys(): + block_size = self.pattern.block_size[key] + total_block_size = block_size[0] * block_size[1] + if total_block_size < self.progressive_steps: + raise ValueError( + f"In layer {key}, its pruning pattern is {block_size}, " \ + f"while progressive steps {self.progressive_steps} is overflowing.") + + def check_is_pruned_progressive_step(self, step): + """Check if a progressive pruning process should be performed at the current step. + + Args: + step: an integer representing the number of current step. + + Returns: + A Boolean. + """ + # used in progressive pruning + if step < self.start_step or step > self.end_step: + return False + if int(step - self.start_step) % self.pruning_frequency_progressive == 0: + return True + return False + + def update_masks_progressive(self, local_step): + """Update the masks in progressive pruning mode at a given local step.""" + if self.global_step == self.start_step: + if self.config['lock_init_sparsity']: + self.masks = self.pattern.get_pattern_lock_masks(self.modules) + self.init_sparsity_ratio = self.pattern.get_sparsity_ratio(self.masks) + self.current_sparsity_ratio = self.init_sparsity_ratio + + # case 1: step is not in [start_step, end_step] or it is not either pruning or progressive pruning step. 
+ if (self.check_is_pruned_step(self.global_step) == False) and ( + self.check_is_pruned_progressive_step(self.global_step) == False): + return + if self.current_sparsity_ratio > self.target_sparsity_ratio: + return + + # case 2: step which does progressive update, but it is not a pruning step in case 3 + if self.check_is_pruned_progressive_step(self.global_step) \ + and self.check_is_pruned_step(self.global_step) == False: + # do not do global pruning, only do the progressive mask update. + step_offset = self.global_step - self.structured_update_step + progressive_idx = step_offset // self.pruning_frequency_progressive + if progressive_idx < (self.progressive_steps - 1): + self.progressive_masks = self.pattern.update_progressive_masks(self.pre_masks, self.masks, \ + self.criterion.scores, \ + progressive_idx + 1, \ + self.progressive_configs) + else: + # in the end, directly use new masks. + for n in self.masks.keys(): + self.progressive_masks[n] = self.masks[n].clone() + self.mask_weights_general(self.progressive_masks) + if self.progressive_logger: + self.print_progressive_sparsity() + return + + # case 3: a pruning step, generate new masks, progressive masks also update. 
+ tmp_step = self.global_step + self.structured_update_step = tmp_step + current_target_sparsity_ratio = self.scheduler.update_sparsity_ratio(self.target_sparsity_ratio, + self.completed_pruned_cnt, + self.total_prune_cnt, self.masks) + logger.info(f"current target ratio is {current_target_sparsity_ratio}") + self.criterion.on_step_begin() + self.completed_pruned_cnt += 1 + if self.criterion.scores == {}: + return + for n in self.masks.keys(): + self.pre_masks[n] = self.masks[n].clone() + # update new masks + self.masks = self.pattern.get_masks(self.criterion.scores, current_target_sparsity_ratio, self.masks, ) + self.progressive_masks = self.pattern.update_progressive_masks(self.pre_masks, self.masks, \ + self.criterion.scores, 1, \ + self.progressive_configs) + self.mask_weights_general(self.progressive_masks) + if self.progressive_logger: + self.print_progressive_sparsity() + return + + def on_step_begin(self, local_step): + """Update the masks at a given local_step.""" + """Implement at the start of each step.""" + if self.handled_global_step == self.global_step: + return + + if not self.use_progressive: + # As _init_for_progressive() works, when degrades to non-progressive + # just call BasicPruner's update_masks(). 
+ self.update_masks(local_step) + else: + self.update_masks_progressive(local_step) + self.handled_global_step = self.global_step + + def on_before_optimizer_step(self): + """Implement before optimizer.step().""" + self.reg.on_before_optimizer_step() + + def on_after_optimizer_step(self): + """Prune the model after optimization.""" + ##the order of the following three lines can't not be exchanged + self.reg.on_after_optimizer_step() + if not self.use_progressive: + self.mask_weights() + else: + self.mask_weights_general(self.progressive_masks) + self.criterion.on_after_optimizer_step() + self.global_step += 1 + + def print_progressive_sparsity(self): + """Output the progressive sparsity.""" + cur_sp = self.pattern.get_sparsity_ratio_progressive(self.progressive_masks) + logger.info("Step: {} -> Current progressive sparsity: {}".format(self.global_step, cur_sp)) diff --git a/neural_compressor/pruner/regs.py b/neural_compressor/pruner/regs.py new file mode 100644 index 00000000000..12f4a1b6f28 --- /dev/null +++ b/neural_compressor/pruner/regs.py @@ -0,0 +1,127 @@ +"""Regularizer.""" +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .patterns import BasePattern +from neural_compressor.utils.utility import LazyImport +torch = LazyImport('torch') + +REGS = {} + + +def register_reg(name): + """Register a regularizator to the registry.""" + + def register(reg): + REGS[name] = reg + return reg + + return register + + +def get_reg_type(config): + """Obtain the regularizer type.""" + for key in REGS.keys(): ##assume there is only one reg + if config.get(key, None) != None: + return key + return None + + +def get_reg(config, modules, pattern): + """Get registered regularizator class.""" + reg_type = config["reg_type"] + if reg_type == None: + return BaseReg(config, modules, pattern) + if reg_type not in REGS.keys(): + assert False, f"regularizator does not support {reg_type}, currently only support {REGS.keys()}" + return REGS[reg_type](config, modules, pattern, config["reg_coeff"]) + + +class BaseReg: + """Regularizer. + + The class which performs regularization. + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object that includes information of the regularizer. + pattern: A config dict object. The pattern related part in args config. + """ + + def __init__(self, config: dict, modules: dict, pattern: BasePattern): + """Initialize.""" + self.modules = modules + self.config = config + self.pattern = pattern + + def on_before_optimizer_step(self): + """Implement before optimizer.step().""" + pass + + def on_after_optimizer_step(self): + """Implement after optimizer.step().""" + pass + + +@register_reg("group_lasso") +class GroupLasso(BaseReg): + """Regularizer. + + A regularizer class derived from BaseReg. In this class, the Group-lasso regularization will be performed. + Group-lasso is a variable-selection and regularization method. + + Args: + modules: A dict {"module_name": Tensor}. Store the pruning modules' weights. + config: A config dict object that includes information of the regularizer. + pattern: A config dict object. 
The pattern related part in args config. + + Attributes: + reg_terms: A dict {"module_name": Tensor} of regularization terms. + alpha: A float representing the coeffient related to group lasso. + """ + + def __init__(self, config: dict, modules: dict, pattern: BasePattern, coeff): + """Initialize.""" + super(GroupLasso, self).__init__(config, modules, pattern) + assert "x" in self.config.pattern, "group lasso only supports NXM pattern" + self.reg_terms = {} + self.alpha = float(coeff) + assert self.alpha >= 0, "group lasso only supports positive coeff" + + def on_before_optimizer_step(self): + """Calculate the group-lasso score map.""" + with torch.no_grad(): + if self.pattern.invalid_layers == None: + self.pattern.check_layer_validity() + for key in self.modules.keys(): + if key in self.pattern.invalid_layers: + continue + grad = self.modules[key].weight.grad + reg_term = self.pattern.reshape_orig_to_pattern(grad, key) + reg_term = self.alpha / (torch.norm(reg_term, p=2, dim=[1, 3]) + 1e-12) + reg_term[torch.isinf(reg_term)] = 0.0 + self.reg_terms[key] = reg_term + + def on_after_optimizer_step(self): ##decoupled with grad descent + """Perform group lasso regularization after optimization.""" + with torch.no_grad(): + for key in self.modules.keys(): + if key in self.pattern.invalid_layers: + continue + reg_term = self.pattern.reshape_reduced_to_orig(self.reg_terms[key], key, + self.modules[key].weight.shape) + self.modules[key].weight -= reg_term diff --git a/neural_compressor/experimental/pytorch_pruner/scheduler.py b/neural_compressor/pruner/schedulers.py similarity index 64% rename from neural_compressor/experimental/pytorch_pruner/scheduler.py rename to neural_compressor/pruner/schedulers.py index 915022a5a6e..78e985da05f 100644 --- a/neural_compressor/experimental/pytorch_pruner/scheduler.py +++ b/neural_compressor/pruner/schedulers.py @@ -1,5 +1,5 @@ """scheduler module.""" -#!/usr/bin/env python +# !/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 
(c) 2022 Intel Corporation @@ -23,14 +23,14 @@ def register_scheduler(name): """Class decorator used to register a Scheduler subclass to the registry. - + Decorator function used before a Scheduler subclass. Make sure that the Scheduler class decorated by this function can be registered in SCHEDULERS. Args: cls (class): The class of register. name: A string. Define the scheduler type. - + Returns: cls: The class of register. """ @@ -44,12 +44,12 @@ def register(scheduler): def get_scheduler(config): """Get registered scheduler class. - + Get a scheduler object from SCHEDULERS. - + Args: config: A config dict object. Contains the scheduler information. - + Returns: A Scheduler object. """ @@ -59,17 +59,17 @@ def get_scheduler(config): return SCHEDULERS[name](config) -class Scheduler: +class PruningScheduler: """Pruning Scheduler. The class which defines a sparsity changing process during pruning. Mainly contains two types: 1. iterative scheduler. Prune the model from dense to target sparsity gradually. 2. one-shot scheduler. Prune the model in a single step and reach the target sparsity. - + Args: config: A config dict object. Contains the scheduler information. - + Attributes: config: A config dict object. Contains the scheduler information. """ @@ -78,21 +78,21 @@ def __init__(self, config): """Initialize.""" self.config = config - def update_sparsity_ratio(self, aggressive_ratio, current_prune_step, total_prune_steps, masks): + def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_steps, masks, init_ratio=0.0): """To be implemented in subclasses.""" raise NotImplementedError @register_scheduler('oneshot') -class OneshotScheduler(Scheduler): +class OneshotScheduler(PruningScheduler): """Pruning Scheduler. - + A Scheduler class derived from Scheduler. Prune the model to target sparsity once. - + Args: config: A config dict object. Contains the scheduler information. - + Attributes: Inherit from parent class Scheduler. 
""" @@ -101,18 +101,29 @@ def __init__(self, config): """Initialize.""" super(OneshotScheduler, self).__init__(config) - def update_sparsity_ratio(self, aggressive_ratio, current_prune_step, total_prune_steps, masks): - """Return the aggressive ratio.""" - return aggressive_ratio + def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_steps, masks, init_ratio=0.0): + """Update sparsity ratio. + + Args: + target_ratio: A float representing the sparsity ratio after pruning. + current_prune_step: An integer representing the current pruning step. + total_prune_steps: An integer representing the total number of steps of the pruning process. + masks: A dict {"module_name": Tensor} that stores the masks for modules' weights. + init_ratio: A float representing the sparsity ratio before pruning. + + Return: + A float representing the sparsity ratio that the model will reach after the next pruning step. + """ + return target_ratio @register_scheduler('iterative') -class IterativeScheduler(Scheduler): +class IterativeScheduler(PruningScheduler): """Pruning Scheduler. - + A Scheduler class derived from Scheduler. - Prune the model to from dense to target sparsity in several steps. - + Prune the model from dense to target sparsity in several steps. + Args: config: A config dict object. Contains the scheduler information. @@ -123,9 +134,9 @@ class IterativeScheduler(Scheduler): def __init__(self, config): """Initialize.""" super(IterativeScheduler, self).__init__(config) - # self.decay_type = config["sparsity_decay_type"] - def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_steps, masks): + def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_steps, masks, + init_sparsity_ratio=0.0): """Obtain new target sparsity ratio according to the step. Args: @@ -133,30 +144,32 @@ def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_st current_prune_step: A integer. The current pruning step. 
total_prune_steps: A integer. The total steps included in the pruning progress. masks: A dict{"module_name": Tensor}. The masks for modules' weights. + init_sparsity_ratio: - Returns: - A float. the target sparsity ratio the model will reach after the next pruning step. + Returns: + A float representing the target sparsity ratio the model will reach after the next pruning step. """ aggressive_ratio = target_ratio - # if self.config.prune_domain == "global": - # aggressive_ratio += 0.02 - - aggressive_ratio = min(self.config.max_sparsity_ratio_per_layer, - aggressive_ratio) ##lagacy issue + aggressive_ratio = min(self.config.max_sparsity_ratio_per_op, + aggressive_ratio) ##legacy issue decay_type = self.config.sparsity_decay_type if decay_type == "cos": - current_target_sparsity = (aggressive_ratio) * ( - 1.0 - math.cos(float(current_prune_step) / total_prune_steps * (math.pi / 2))) + current_target_sparsity = (aggressive_ratio - init_sparsity_ratio) * ( + 1.0 - math.cos(float(current_prune_step) / total_prune_steps * (math.pi / 2))) + init_sparsity_ratio elif decay_type == "exp": - target_dense_change_ratio = (1.0 - aggressive_ratio) ** (1 / total_prune_steps) - current_target_sparsity = 1.0 - target_dense_change_ratio ** current_prune_step + target_dense_change_ratio = ((1.0 - aggressive_ratio) / (1.0 - init_sparsity_ratio)) ** ( + 1 / total_prune_steps) + current_target_sparsity = 1.0 - ( + 1.0 - init_sparsity_ratio) * target_dense_change_ratio ** current_prune_step elif decay_type == "linear": - current_target_sparsity = (aggressive_ratio) * float(current_prune_step) / total_prune_steps + current_target_sparsity = (aggressive_ratio - init_sparsity_ratio) * float( + current_prune_step) / total_prune_steps + init_sparsity_ratio elif decay_type == "cube": - current_target_sparsity = (aggressive_ratio) * ((float(current_prune_step) / total_prune_steps) ** 3) + current_target_sparsity = (aggressive_ratio - init_sparsity_ratio) * ( + (float(current_prune_step) / 
total_prune_steps) ** 3) + init_sparsity_ratio else: assert False, "{} is not supported".format(decay_type) diff --git a/neural_compressor/pruner/utils.py b/neural_compressor/pruner/utils.py new file mode 100644 index 00000000000..5598167dee5 --- /dev/null +++ b/neural_compressor/pruner/utils.py @@ -0,0 +1,247 @@ +"""prune utils.""" +# !/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import yaml + +try: + from neural_compressor.conf.dotdict import DotDict +except: + from .dot_dict import DotDict ##TODO +from .logger import logger + + +class WeightPruningConfig: + """ + similiar to torch optimizer's interface + """ + + def __init__(self, pruning_configs=[{}], ##empty dict will use global values + target_sparsity=0.9, pruning_type="snip_momentum", pattern="4x1", op_names=[], + excluded_op_names=[], + start_step=0, end_step=0, pruning_scope="global", pruning_frequency=1, + min_sparsity_ratio_per_op=0.0, max_sparsity_ratio_per_op=0.98, + sparsity_decay_type="exp", pruning_op_types=['Conv', 'Linear'], + **kwargs): + self.pruning_configs = pruning_configs + self._weight_compression = DotDict({ + 'target_sparsity': target_sparsity, + 'pruning_type': pruning_type, + 'pattern': pattern, + 'op_names': op_names, + 'excluded_op_names': excluded_op_names, ##global only + 'start_step': start_step, + 'end_step': end_step, + 'pruning_scope': pruning_scope, + 'pruning_frequency': 
pruning_frequency, + 'min_sparsity_ratio_per_op': min_sparsity_ratio_per_op, + 'max_sparsity_ratio_per_op': max_sparsity_ratio_per_op, + 'sparsity_decay_type': sparsity_decay_type, + 'pruning_op_types': pruning_op_types, + ##reg_type=None, reduce_type="mean", parameters={"reg_coeff": 0.0} + ##'resume_from_pruned_checkpoint': resume_from_pruned_checkpoint ##resume_from_pruned_checkpoint + }) + self._weight_compression.update(kwargs) + + @property + def weight_compression(self): + return self._weight_compression + + @weight_compression.setter + def weight_compression(self, weight_compression): + self._weight_compression = weight_compression + + +def check_config(prune_config): + """Functions that check key-value is valid to run Pruning object. + + Args: + prune_config: A config dict object. Contains Pruning parameters and configurations. + + Returns: + None if everything is correct. + + Raises: + AssertionError. + """ + assert prune_config['start_step'] >= 0, "start_step should be greater than 0" + assert prune_config['end_step'] >= -1, "end_step should be greater than 0" + assert prune_config['end_step'] >= prune_config['start_step'], \ + "end_step should be greater than start_step" + assert prune_config['target_sparsity'] >= 0 and prune_config['target_sparsity'] < 1.0, \ + "begin_pruning_step should be in range [0,1)" + assert prune_config['pruning_frequency'] > 0, "pruning_frequency should be greater than 0" + assert prune_config['max_sparsity_ratio_per_op'] >= 0 and prune_config['max_sparsity_ratio_per_op'] < 1, \ + "pruning_frequency should be greater than 0" + assert prune_config['pruning_scope'] == "global" or prune_config['pruning_scope'] == "local", \ + "only support 'global' and 'local' prune domain" + try: + prune_config['resume_from_pruned_checkpoint'] = bool(prune_config['resume_from_pruned_checkpoint']) + except: + assert False, "resume_from_pruned_checkpoint should be bool value" + if "x" in prune_config["pattern"]: + pattern = 
prune_config["pattern"].split('_')[-1].split('x') + if pattern[0] == "channel" or pattern[1] == "channel": + pass + else: + try: + N = int(pattern[0]) + M = int(pattern[1]) + except: + assert False, "N or M can't convert to int" + assert N > 0, "N should be greater than 0" + assert M > 0, "M should be greater than 0" + if ":" in prune_config["pattern"]: + pattern = prune_config["pattern"].split('_')[-1].split(':') + try: + N = int(pattern[0]) + M = int(pattern[1]) + except: + assert False, "N or M can't convert to int" + assert N > 0, "N should be greater than 0" + assert M > N, "M should be greater than N" + max_ratio = float(N) / M + assert prune_config['target_sparsity'] <= max_ratio, \ + "in N:M pattern, the max sparsity is N/M={}".format(max_ratio) + prune_config['max_sparsity_ratio_per_op'] = min(max_ratio, prune_config['max_sparsity_ratio_per_op']) + if prune_config['reg_coeff'] != None: + prune_config['reg_coeff'] = float(prune_config['reg_coeff']) + assert prune_config['reg_coeff'] >= 0, "only support positive reg_type" + assert prune_config["min_sparsity_ratio_per_op"] >= 0 and prune_config["min_sparsity_ratio_per_op"] <= \ + prune_config['max_sparsity_ratio_per_op'], \ + "min_sparsity_ratio_per_op should in[0, max_sparsity_ratio_per_op]" + + +def reset_none_to_default(obj, key, default): + """Functions that add up undefined configurations. + + If some configurations are not defined in the configuration, set it to a default value. + + Args: + obj: A dict{key: value} + key: A string. Key in obj. + default: When the key is not in obj, Add key: default item in original obj. 
+ + """ + if obj == None: + return None + if isinstance(obj, dict): + if (not key in obj.keys()) or obj[key] == None: + return default + else: + return obj[key] + else: + if not hasattr(obj, key) or getattr(obj, key) == None: + return default + else: + return getattr(obj, key) + + +def update_params(info): + if "parameters" in info.keys(): + params = info["parameters"] + for key in params: + info[key] = params[key] + + +def process_and_check_weight_config(val: WeightPruningConfig): + default_global_config = {'target_sparsity': 0.9, 'pruning_type': 'snip_momentum', 'pattern': '4x1', 'op_names': [], + 'excluded_op_names': [], + 'start_step': 0, 'end_step': 0, 'pruning_scope': 'global', 'pruning_frequency': 1, + 'min_sparsity_ratio_per_op': 0.0, 'max_sparsity_ratio_per_op': 0.98, + 'sparsity_decay_type': 'exp', + 'pruning_op_types': ['Conv', 'Linear'], + + } + default_local_config = {'resume_from_pruned_checkpoint': False, 'reg_type': None, + 'criterion_reduce_type': "mean", 'parameters': {"reg_coeff": 0.0}} + + params_default_config = {"reg_coeff": 0.0} + + default_config = {} + default_config.update(default_global_config) + default_config.update(default_local_config) + default_config.update(params_default_config) + + pruning_configs = val.pruning_configs + pruners_info = [] + global_info = val.weight_compression + if len(pruning_configs) == 0: ##only one + pruner_info = global_info + for key in default_config.keys(): + pruner_info[key] = reset_none_to_default(pruner_info, key, default_config[key]) + update_params(pruner_info) + check_config(pruner_info) + pruner_info = DotDict(pruner_info) + pruners_info.append(pruner_info) + + else: ##TODO need update, in this mode, we ingore the global op names + for pruner_info in pruning_configs: + for key in default_config.keys(): + pruner_info[key] = reset_none_to_default(pruner_info, key, global_info[key]) + pruner_info[key] = reset_none_to_default(pruner_info, key, default_config[key]) + update_params(pruner_info) + 
check_config(pruner_info) + pruner_info = DotDict(pruner_info) + pruners_info.append(pruner_info) + + return pruners_info + + +def process_config(config): + """Obtain a config dict object from a config file. + + Args: + config: A string. The path to configuration file. + + Returns: + A config dict object. + """ + if isinstance(config, WeightPruningConfig): + return process_and_check_weight_config(config) + else: + assert False, f"not supported type {config}" + + +def parse_to_prune(config, model): + """Keep target pruned layers.""" + modules = {} + if config["op_names"] == None or config["op_names"] == []: + config["op_names"] = [".*"] + for raw in config["op_names"]: + try: + pattern = re.compile(raw) + except: + assert False, f"regular expression match does not support {raw}" + for name, module in filter(lambda t: pattern.search(t[0]), model.named_modules()): + for layer_type in config["pruning_op_types"]: + if layer_type in type(module).__name__: + modules[name] = module + break + ##remove not to prune layers + """Drop non-pruned layers.""" + exclude_names = config["excluded_op_names"] + patterns = [re.compile(s) for s in exclude_names] + if len(patterns) <= 0: + return modules + new_modules = {} + for name in modules.keys(): + if any([p.search(name) for p in patterns]): + continue + new_modules[name] = modules[name] + return new_modules diff --git a/neural_compressor/pruning.py b/neural_compressor/pruning.py index 3205ffff99b..0094b0fcdf9 100644 --- a/neural_compressor/pruning.py +++ b/neural_compressor/pruning.py @@ -1,7 +1,8 @@ -#!/usr/bin/env python +"""Pruning.""" +# !/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright (c) 2021 Intel Corporation +# Copyright (c) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,144 +15,188 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from neural_compressor.utils.utility import LazyImport +LazyImport('torch.nn') +torch = LazyImport('torch') -from .utils import logger -from .utils.utility import singleton -from .experimental import Pruning as ExpPruning -from deprecated import deprecated +from neural_compressor.pruner.utils import process_config, parse_to_prune,\ + check_config, update_params +from neural_compressor.pruner.pruners import get_pruner +from neural_compressor.utils import logger +import re +from neural_compressor.pruner.utils import WeightPruningConfig -@singleton class Pruning: - """This is base class of pruning object. + """Pruning. - Since DL use cases vary in the accuracy metrics (Top-1, MAP, ROC etc.), loss criteria - (<1% or <0.1% etc.) and pruning objectives (performance, memory footprint etc.). - Pruning class provides a flexible configuration interface via YAML for users to specify - these parameters. + The main class that users will used in codes to do pruning. + Contain at least one Pruner object. Args: - conf_fname_or_obj (string or obj): The path to the YAML configuration file or - Pruning_Conf class containing accuracy goal, pruning objective and related - dataloaders etc. - + config: a string. The path to a config file. For config file template, please refer to + https://github.com/intel/neural-compressor/tree/master/examples/pytorch/nlp/huggingface_models/text-classification/pruning/pytorch_pruner/eager/ + + Attributes: + model: The model object to prune. + config_file_path: A string. The path to a config file. + pruners: A list. A list of Pruner objects. + pruner_info: A config dict object. Contains pruners' information. 
""" - def __init__(self, conf_fname_or_obj): - self.exp_pruner = ExpPruning(conf_fname_or_obj) + def __init__(self, config): + """Initialize.""" + self.model = None + self.pruners = [] + self.pruners_info = process_config(config) - def on_epoch_begin(self, epoch): - """ called on the begining of epochs""" - self.exp_pruner.on_epoch_begin(epoch) + def update_config(self, *args, **kwargs): + """Add user-defined arguments to the original configurations. + + The original config of pruning is read from a file. + However, users can still modify configurations by passing key-value arguments in this function. + Please note that the key-value arguments' keys are analysable in current configuration. + """ + for item in self.pruners_info: + for key in kwargs: + if key in item.keys(): + item[key] = kwargs[key] + + update_params(item) + check_config(item) + + # def _call_pruners(self, func): + # """Function which decorates the Pruning class's functions. + # + # It can simplify codes by calling same-name functions in Pruning's Pruner objects. + # For example, when it decorates on_step_begin function of Pruning, + # it automatically calls its Pruners' on_step_begin functions without a "for" code. + # However, when this trick is enabled, the pylint validation on INC cannot passed, therefore commented out. + # """ + # def warpper(self, *args, **kw): + # func_name = f"{func.__name__}" + # func(self, *args, **kw) + # for prune in self.pruners: + # prun_func = getattr(prune, func_name) + # prun_func(*args, **kw) + # + # return warpper + + def get_sparsity_ratio(self): + """Calculate sparsity ratio of a module/layer. - def on_step_begin(self, batch_id): - """ called on the begining of batches""" - self.exp_pruner.on_step_begin(batch_id) + Returns: + Three floats. + elementwise_over_matmul_gemm_conv refers to zero elements' ratio in pruning layers. + elementwise_over_all refers to zero elements' ratio in all layers in the model. 
+ blockwise_over_matmul_gemm_conv refers to all-zero blocks' ratio in pruning layers. + """ + pattern_sparsity_cnt = 0 + element_sparsity_cnt = 0 + for pruner in self.pruners: + modules = pruner.modules + sparsity_ratio = pruner.pattern.get_sparsity_ratio(pruner.masks) + cnt = 0 + for key in modules.keys(): + cnt += modules[key].weight.numel() + pattern_sparsity_cnt += int(cnt * sparsity_ratio) + for key in pruner.masks.keys(): + element_sparsity_cnt += torch.sum(pruner.masks[key] == 0).data.item() + + linear_conv_cnt = 0 + param_cnt = 0 + for name, module in self.model.named_modules(): + if type(module).__name__ in ["Linear"] or re.search(r'Conv.d', type(module).__name__) != None: + linear_conv_cnt += module.weight.numel() + + for n, param in self.model.named_parameters(): + param_cnt += param.numel() + if linear_conv_cnt == 0: + blockwise_over_matmul_gemm_conv = 0 + elementwise_over_matmul_gemm_conv = 0 + else: + blockwise_over_matmul_gemm_conv = float(pattern_sparsity_cnt) / linear_conv_cnt + elementwise_over_matmul_gemm_conv = float(element_sparsity_cnt) / linear_conv_cnt + if param_cnt == 0: + elementwise_over_all = 0 + else: + elementwise_over_all = float( + element_sparsity_cnt) / param_cnt + + return elementwise_over_matmul_gemm_conv, elementwise_over_all, blockwise_over_matmul_gemm_conv + + def _generate_pruners(self): + """Obtain Pruner objects.""" + assert isinstance(self.model, torch.nn.Module) + + for info in self.pruners_info: + modules = parse_to_prune(info, self.model) + if modules == {}: + logger.warning("one pruner hooks no layers, please have a check") + + self.pruners.append(get_pruner(info, modules)) + info['modules'] = [key for key in modules.keys()] + info['len_of_modules'] = len(info['modules']) + logger.info(info) + + # @_call_pruners + def on_train_begin(self): + """Implement at the beginning of training process. + + Before training, ensure that pruners are generated. 
+ """ + self._generate_pruners() ##TODO is there better place to place + # @_call_pruners + def on_epoch_begin(self, epoch): + """Implement at the beginning of every epoch.""" + for pruner in self.pruners: + pruner.on_epoch_begin(epoch) + + # @_call_pruners + def on_step_begin(self, local_step): + """Implement at the beginning of every step.""" + for pruner in self.pruners: + pruner.on_step_begin(local_step) + + # @_call_pruners + def on_before_optimizer_step(self): + """Implement before optimizer.step().""" + for pruner in self.pruners: + pruner.on_before_optimizer_step() + + # @_call_pruners def on_step_end(self): - """ called on the end of batches""" - self.exp_pruner.on_step_end() + """Implement at the end of every step.""" + for pruner in self.pruners: + pruner.on_step_end() + # @_call_pruners def on_epoch_end(self): - """ called on the end of epochs""" - self.exp_pruner.on_epoch_end() - - @deprecated(version='2.0', reason="please use neural_compressor.prepare and neural_compressor.fit instead") - def __call__(self, model, train_dataloader=None, pruning_func=None, eval_dataloader=None, - eval_func=None): - """The main entry point of pruning. - - This interface currently only works on pytorch - and provides three usages: - a) Fully yaml configuration: User specifies all the info through yaml, - including dataloaders used in training and evaluation phases - and pruning tuning settings. - - For this usage, only model parameter is mandatory. - - b) Partial yaml configuration: User specifies dataloaders used in training - and evaluation phase by code. - The tool provides built-in dataloaders and evaluators, user just need provide - a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create neural_compressor dataloader before calling this - function. - - After that, User specifies fp32 "model", train dataset "train_dataloader" - and evaluation dataset "eval_dataloader". 
- The trained and pruned model is evaluated with "eval_dataloader" - with evaluation metrics specified in the configuration file. The evaluation tells - the tuner whether the pruned model meets the accuracy criteria. If not, - the tuner starts a new training and tuning flow. - - For this usage, model, q_dataloader and eval_dataloader parameters are mandatory. - - c) Partial yaml configuration: User specifies dataloaders used in training phase - by code. - This usage is quite similar with b), just user specifies a custom "eval_func" - which encapsulates the evaluation dataset by itself. - The trained and pruned model is evaluated with "eval_func". - The "eval_func" tells the tuner whether the pruned model meets - the accuracy criteria. If not, the Tuner starts a new training and tuning flow. - - For this usage, model, q_dataloader and eval_func parameters are mandatory. - - Args: - model (object): For PyTorch model, it's torch.nn.model - instance. - train_dataloader (generator): Data loader for training. It is iterable - and should yield a tuple (input, label) for - training dataset containing label, - or yield (input, _) for label-free training - dataset. The input could be a object, list, - tuple or dict, depending on user implementation, - as well as it can be taken as model input. - pruning_func (function, optional): Training function for pruning. - This function takes "model" as input parameter - and executes entire training process with self - contained training hyper-parameters. If this - parameter specified, eval_dataloader parameter - plus metric defined in yaml, or eval_func - parameter should also be specified at same time. - eval_dataloader (generator, optional): Data loader for evaluation. It is iterable - and should yield a tuple of (input, label). - The input could be a object, list, tuple or - dict, depending on user implementation, - as well as it can be taken as model input. - The label should be able to take as input of - supported metrics. 
If this parameter is - not None, user needs to specify pre-defined - evaluation metrics through configuration file - and should set "eval_func" paramter as None. - Tuner will combine model, eval_dataloader - and pre-defined metrics to run evaluation - process. - eval_func (function, optional): The evaluation function provided by user. - This function takes model as parameter, - and evaluation dataset and metrics should be - encapsulated in this function implementation - and outputs a higher-is-better accuracy scalar - value. - - The pseudo code should be something like: - - def eval_func(model): - input, label = dataloader() - output = model(input) - accuracy = metric(output, label) - return accuracy - - Returns: - pruned model: best pruned model found, otherwise return None - - """ - logger.warning("This API is going to be deprecated. Please import " - "neural_compressor.experimental.Pruning, initialize an instance of `Pruning`," - "set its dataloader and metric attributes, then invoke its __call__ method.") - self.exp_pruner.model = model - self.exp_pruner.train_dataloader = train_dataloader - self.exp_pruner.pruning_func = pruning_func - self.exp_pruner.eval_dataloader = eval_dataloader - self.exp_pruner.eval_func = eval_func - return self.exp_pruner() - - fit = __call__ + """Implement the end of every epoch.""" + for pruner in self.pruners: + pruner.on_epoch_end() + + # @_call_pruners + def on_train_end(self): + """Implement the end of training phase.""" + for pruner in self.pruners: + pruner.on_train_end() + + # @_call_pruners + def on_before_eval(self): + """Implement at the beginning of evaluation phase.""" + for pruner in self.pruners: + pruner.on_before_eval() + + # @_call_pruners + def on_after_eval(self): + """Implement at the end of evaluation phase.""" + for pruner in self.pruners: + pruner.on_after_eval() + + # @_call_pruners + def on_after_optimizer_step(self): + """Implement after optimizer.step().""" + for pruner in self.pruners: + 
pruner.on_after_optimizer_step() diff --git a/test/pruning/test_pruning.py b/test/pruning/test_pruning.py index 5871f6bcc34..ba1e6589d4b 100644 --- a/test/pruning/test_pruning.py +++ b/test/pruning/test_pruning.py @@ -6,126 +6,71 @@ import torchvision import torch.nn as nn -from neural_compressor.config import Pruner, PruningConfig -from neural_compressor.data import Datasets +from neural_compressor.data import DATASETS from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.training import prepare_compression - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 2 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 2 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - """ - with open('fake.yaml', 'w', encoding="utf-8") as f: - f.write(fake_yaml) +from neural_compressor.pruning import Pruning, WeightPruningConfig class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - @classmethod - def setUpClass(cls): - build_fake_yaml() + def test_pruning_basic(self): + local_configs = [ + { + "op_names": ['layer1.*'], + 'target_sparsity': 0.5, + "pattern": '8x2', + "pruning_type": "magnitude_progressive" + }, + { + "op_names": ['layer2.*'], + 'target_sparsity': 0.5, + 'pattern': '2:4' + }, + { + "op_names": ['layer3.*'], + 'target_sparsity': 0.7, + 'pattern': '5x1', + "pruning_type": "snip_progressive" + } + ] + config = WeightPruningConfig( + local_configs, + target_sparsity=0.8 + ) + prune = Pruning(config) + prune.update_config(start_step=1, end_step=10) + prune.model = self.model - @classmethod - def tearDownClass(cls): - os.remove('fake.yaml') - shutil.rmtree('./saved', 
ignore_errors=True) - shutil.rmtree('runs', ignore_errors=True) - - def test_pruning(self): - pruner1 = Pruner(start_epoch=1, end_epoch=2, names=['layer1.0.conv1.weight']) - pruner2 = Pruner(target_sparsity=0.6, update_frequency=2, names=['layer1.0.conv2.weight']) - conf = PruningConfig(pruners=[pruner1, pruner2], end_epoch=2) - datasets = Datasets('pytorch') - dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - compression_manager = prepare_compression(self.model, conf) - model = compression_manager.model - - epochs = 2 - iters = 3 criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - compression_manager.callbacks.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - compression_manager.callbacks.on_step_begin(cnt) - print('.', end='') - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - compression_manager.callbacks.on_step_end() - if cnt >= iters: - break - compression_manager.callbacks.on_epoch_end() - - model.save("./saved") - - def test_pruning_external(self): - from neural_compressor.experimental import common - from neural_compressor import Pruning - from neural_compressor.conf.config import PruningConf - pruners = [Pruner(1,3,names=['layer1.0.conv1.weight']), - Pruner(target_sparsity=0.6,update_frequency=2,names=['layer1.0.conv2.weight'])] - conf = PruningConfig(pruners) - - datasets = Datasets('pytorch') - dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True) + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) - compression_manager = 
prepare_compression(self.model, conf) - model = compression_manager.model - epochs = 2 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - compression_manager.callbacks.on_epoch_begin(nepoch) + prune.on_train_begin() + prune.update_config(pruning_frequency=4) + for epoch in range(2): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 for image, target in dummy_dataloader: - compression_manager.callbacks.on_step_begin(cnt) - print('.', end='') - cnt += 1 - output = model(image) + prune.on_step_begin(local_step) + output = self.model(image) loss = criterion(output, target) optimizer.zero_grad() loss.backward() + prune.on_before_optimizer_step() optimizer.step() - compression_manager.callbacks.on_step_end() - if cnt >= iters: - break - compression_manager.callbacks.on_epoch_end() - model.save("./saved") + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() if __name__ == "__main__": diff --git a/test/pruning/test_pruning_config.py b/test/pruning/test_pruning_config.py new file mode 100644 index 00000000000..c3cdd37ce61 --- /dev/null +++ b/test/pruning/test_pruning_config.py @@ -0,0 +1,80 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.pruning import Pruning, WeightPruningConfig + + +class TestPytorchPruning(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_class_config(self): + local_configs = [ + { + "op_names": ['layer1.*', 'layer2.*'], + "excluded_op_names": ['downsample.*'], + 'target_sparsity': 0.6, + "pattern": 'channelx1', + 
"pruning_type": "snip_progressive", + "pruning_scope": "local", + "start_step": 0, + "end_step": 10 + }, + { + "op_names": ['layer3.*'], + "pruning_type": "pattern_lock" + } + ] + config = WeightPruningConfig( + local_configs, + pruning_frequency=2, + target_sparsity=0.8, + ) + prune = Pruning(config) + prune.model = self.model + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(12, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + + prune.on_train_begin() + prune.update_config(pruning_frequency=4) + assert prune.pruners[0].config['pruning_frequency'] == 4 + assert prune.pruners[0].config['target_sparsity'] == 0.6 + assert prune.pruners[1].config['target_sparsity'] == 0.8 + assert prune.pruners[0].config['pattern'] == "channelx1" + assert prune.pruners[1].config['pruning_type'] == 'pattern_lock' + + for epoch in range(1): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_criteria.py b/test/pruning/test_pruning_criteria.py new file mode 100644 index 00000000000..75063d9bdbb --- /dev/null +++ b/test/pruning/test_pruning_criteria.py @@ -0,0 +1,87 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from 
neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.pruning import Pruning, WeightPruningConfig + + +class TestPruningCriteria(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_criteria(self): + local_configs = [ + { + "op_names": ['layer1.*'], + 'target_sparsity': 0.4, + "pattern": '8x2', + "pruning_type": "magnitude_progressive", + "pruning_scope": "local", + "sparsity_decay_type": "cube" + }, + { + "op_names": ['layer2.*'], + 'target_sparsity': 0.45, + 'pattern': '2:4', + "pruning_type": "snip", + 'start_step': 6, + 'end_step': 6 + }, + { + "op_names": ['layer3.*'], + 'excluded_op_names': ['downsample.*'], + 'target_sparsity': 0.7, + 'pattern': '4x1', + "pruning_type": "snip_momentum_progressive", + "pruning_frequency": 4, + "min_sparsity_ratio_per_op": 0.5, + "max_sparsity_ratio_per_op": 0.8, + } + ] + config = WeightPruningConfig( + local_configs, + target_sparsity=0.8, + sparsity_decay_type="cube" + ) + prune = Pruning(config) + prune.update_config(start_step=1, end_step=10) + prune.model = self.model + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + + prune.on_train_begin() + prune.update_config(pruning_frequency=4) + for epoch in range(2): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + 
+ +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_patterns.py b/test/pruning/test_pruning_patterns.py new file mode 100644 index 00000000000..3850c373ab3 --- /dev/null +++ b/test/pruning/test_pruning_patterns.py @@ -0,0 +1,83 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.pruning import Pruning, WeightPruningConfig + + +class TestPruningPatterns(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_pattern(self): + local_configs = [ + { + "op_names": ['layer1.*'], + 'target_sparsity': 0.5, + "pattern": '5:8', + "pruning_type": "magnitude" + }, + { + "op_names": ['layer2.*'], + "pattern": '1xchannel', + "pruning_scope": "global" + }, + { + "start_step": 2, + "end_step": 20, + "op_names": ['layer3.*'], + 'target_sparsity': 0.666666, + 'pattern': '4x2', + "pruning_type": "snip_progressive", + "pruning_frequency": 5 + } + ] + config = WeightPruningConfig( + local_configs, + target_sparsity=0.8, + sparsity_decay_type="cos", + excluded_op_names=["downsample.*"], + pruning_scope="local", + min_sparsity_ratio_per_op=0.1 + ) + prune = Pruning(config) + prune.update_config(start_step=1, end_step=10) + prune.model = self.model + + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + + prune.on_train_begin() + for epoch in range(5): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + 
loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_regs.py b/test/pruning/test_pruning_regs.py new file mode 100644 index 00000000000..412e0557ebc --- /dev/null +++ b/test/pruning/test_pruning_regs.py @@ -0,0 +1,98 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.pruning import Pruning, WeightPruningConfig + +local_regs_config = [ + { + "start_step": 0, + "end_step": 10, + "pruning_type": "magnitude", + "op_names": ['layer1.*'], + "excluded_op_names": ['layer2.*'], + "pruning_scope": "global", + "target_sparsity": 0.5, + "pattern": "4x1", + "reg_type": "group_lasso", + "parameters": {'reg_coeff': 0.2} + }, + { + "start_step": 1, + "end_step": 1, + "target_sparsity": 0.5, + "pruning_type": "snip_momentum", + "pruning_frequency": 2, + "op_names": ['layer2.*'], + "pruning_scope": "local", + "target_sparsity": 0.75, + "pattern": "1x1", + "sparsity_decay_type": "exp", + "reg_type": "group_lasso", + "parameters": {'reg_coeff': 0.1} + }, + { + "start_step": 2, + "end_step": 8, + "target_sparsity": 0.1, + "pruning_type": "gradient", + "pruning_frequency": 2, + "op_names": ['fc'], + "pruning_scope": "local", + "target_sparsity": 0.75, + "pattern": "1x1", + "sparsity_decay_type": "cube", + "reg_type": "group_lasso", + "parameters": {'reg_coeff': 0.0} + } +] + +fake_snip_config = WeightPruningConfig(local_regs_config, target_sparsity=0.9, start_step=0, \ + end_step=10, pruning_frequency=1, sparsity_decay_type="exp") + + +class 
TestPruningRegs(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_regs(self): + prune = Pruning(fake_snip_config) + prune.update_config(start_step=1) + prune.model = self.model + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + prune.on_train_begin() + prune.update_config(pruning_frequency=1) + for epoch in range(2): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_schedulers.py b/test/pruning/test_pruning_schedulers.py new file mode 100644 index 00000000000..4e23a14f8fe --- /dev/null +++ b/test/pruning/test_pruning_schedulers.py @@ -0,0 +1,81 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.pruning import Pruning, WeightPruningConfig + +local_schedulers_config = [ + { + "start_step": 0, + "end_step": 2, + "pruning_type": "magnitude", + "op_names": ['layer1.*'], + "excluded_op_names": ['layer2.*'], + "pruning_scope": "global", + "target_sparsity": 0.5, + "pattern": "4x1" + }, + { + "start_step": 1, + "end_step": 10, + "target_sparsity": 0.5, + 
"pruning_type": "snip_momentum", + "pruning_frequency": 2, + "op_names": ['layer2.*'], + "pruning_scope": "local", + "target_sparsity": 0.75, + "pattern": "32x1", + "sparsity_decay_type": "exp" + } +] + +fake_snip_config = WeightPruningConfig(local_schedulers_config, target_sparsity=0.9, start_step=0, \ + end_step=10, pruning_frequency=1, sparsity_decay_type="exp") + + +class TestPruningCriteria(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_schedulers(self): + + prune = Pruning(fake_snip_config) + prune.update_config(start_step=1) + prune.model = self.model + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + prune.on_train_begin() + prune.update_config(pruning_frequency=1) + for epoch in range(2): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_types.py b/test/pruning/test_pruning_types.py new file mode 100644 index 00000000000..68948911848 --- /dev/null +++ b/test/pruning/test_pruning_types.py @@ -0,0 +1,87 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.data import DATASETS +from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from 
neural_compressor.pruning import Pruning, WeightPruningConfig + +local_types_config = [ + { + "start_step": 0, + "end_step": 0, + "pruning_type": "pattern_lock", + "op_names": ['layer1.*'], + "excluded_op_names": ['layer2.*'], + "pruning_scope": "global" + }, + { + "start_step": 1, + "end_step": 1, + "target_sparsity": 0.5, + "pruning_type": "snip_momentum_progressive", + "pruning_frequency": 2, + "op_names": ['layer2.*'], + "pruning_scope": "local", + "pattern": "4x1", + "sparsity_decay_type": "exp" + }, + { + "start_step": 2, + "end_step": 8, + "target_sparsity": 0.8, + "pruning_type": "snip_progressive", + "pruning_frequency": 1, + "op_names": ['layer3.*'], + "pruning_scope": "local", + "pattern": "16x1", + "sparsity_decay_type": "cube" + } +] + +fake_snip_config = WeightPruningConfig(local_types_config, target_sparsity=0.9, start_step=0, \ + end_step=10, pruning_frequency=3, sparsity_decay_type="exp") + + +class TestPruningTypes(unittest.TestCase): + model = torchvision.models.resnet18() + + def test_pruning_types(self): + prune = Pruning(fake_snip_config) + prune.model = self.model + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) + datasets = DATASETS('pytorch') + dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + prune.on_train_begin() + prune.update_config(pruning_frequency=1) + for epoch in range(2): + self.model.train() + prune.on_epoch_begin(epoch) + local_step = 0 + for image, target in dummy_dataloader: + prune.on_step_begin(local_step) + output = self.model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + prune.on_before_optimizer_step() + optimizer.step() + prune.on_after_optimizer_step() + prune.on_step_end() + local_step += 1 + + prune.on_epoch_end() + prune.get_sparsity_ratio() + prune.on_train_end() + prune.on_before_eval() + prune.on_after_eval() + + +if __name__ == 
"__main__": + unittest.main() diff --git a/test/pruning/test_pytorch_pruning.py b/test/pruning/test_pytorch_pruning.py deleted file mode 100644 index 5fb2047b7b2..00000000000 --- a/test/pruning/test_pytorch_pruning.py +++ /dev/null @@ -1,203 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torchvision -import torch.nn as nn - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml_basic(): - fake_snip_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 0 - sparsity_decay_type: "cos" - end_step: 10 - prune_type: "magnitude" - names: ['layer1.*'] - extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "tile_pattern_4x1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "snip_momentum" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "tile_pattern_2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "tile_pattern_16x1" - sparsity_decay_type: "cube" - - """ - with open('fake_snip.yaml', 'w', encoding="utf-8") as f: - f.write(fake_snip_yaml) - -def build_fake_yaml_channel(): - fake_channel_pruning_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 5 - end_step: 5 - prune_type: "pattern_lock" - names: ['layer1.*'] - 
extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "channelx1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "pattern_lock" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "1xchannel" - sparsity_decay_type: "cube" - - """ - - with open('fake_channel_pruning.yaml', 'w', encoding="utf-8") as f: - f.write(fake_channel_pruning_yaml) - - -class TestPytorchPruning(unittest.TestCase): - - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml_basic() - build_fake_yaml_channel() - - - @classmethod - def tearDownClass(cls): - os.remove('fake_channel_pruning.yaml') - os.remove('fake_snip.yaml') - shutil.rmtree('./saved', ignore_errors=True) - shutil.rmtree('runs', ignore_errors=True) - - def test_pytorch_pruning_basic(self): - from neural_compressor.experimental.pytorch_pruner.pruning import Pruning - - prune = Pruning("fake_snip.yaml") - ##prune.generate_pruners() - prune.update_items_for_all_pruners(start_step=1) - prune.model = self.model - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets('pytorch') - dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune.on_train_begin() - prune.update_items_for_all_pruners(update_frequency_on_step=1) - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - 
prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - def test_pytorch_pruner_channel_pruning(self): - from neural_compressor.experimental.pytorch_pruner.pruning import Pruning - prune = Pruning("fake_channel_pruning.yaml") - ##prune.generate_pruners() - prune.model = self.model - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets('pytorch') - dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune.on_train_begin() - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - -if __name__ == "__main__": - unittest.main() - - diff --git a/test/pruning/test_gradient_sensitivity.py b/test/pruning_v1/test_gradient_sensitivity.py similarity index 100% rename from test/pruning/test_gradient_sensitivity.py rename to test/pruning_v1/test_gradient_sensitivity.py diff --git a/test/pruning/test_pattern_lock.py b/test/pruning_v1/test_pattern_lock.py similarity index 100% rename from test/pruning/test_pattern_lock.py rename to test/pruning_v1/test_pattern_lock.py diff --git a/test/pruning_v1/test_pruning.py b/test/pruning_v1/test_pruning.py new file mode 100644 index 00000000000..5871f6bcc34 --- /dev/null +++ b/test/pruning_v1/test_pruning.py @@ -0,0 +1,132 @@ +import os +import shutil +import unittest + +import torch +import torchvision +import torch.nn as nn + +from neural_compressor.config import Pruner, PruningConfig +from neural_compressor.data import Datasets +from 
neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader +from neural_compressor.training import prepare_compression + + +def build_fake_yaml(): + fake_yaml = """ + model: + name: imagenet_prune + framework: pytorch + + pruning: + approach: + weight_compression: + initial_sparsity: 0.0 + target_sparsity: 0.97 + start_epoch: 0 + end_epoch: 2 + pruners: + - !Pruner + start_epoch: 1 + end_epoch: 2 + prune_type: basic_magnitude + names: ['layer1.0.conv1.weight'] + + - !Pruner + target_sparsity: 0.6 + prune_type: basic_magnitude + update_frequency: 2 + names: ['layer1.0.conv2.weight'] + """ + with open('fake.yaml', 'w', encoding="utf-8") as f: + f.write(fake_yaml) + + +class TestPruning(unittest.TestCase): + + model = torchvision.models.resnet18() + + @classmethod + def setUpClass(cls): + build_fake_yaml() + + @classmethod + def tearDownClass(cls): + os.remove('fake.yaml') + shutil.rmtree('./saved', ignore_errors=True) + shutil.rmtree('runs', ignore_errors=True) + + def test_pruning(self): + pruner1 = Pruner(start_epoch=1, end_epoch=2, names=['layer1.0.conv1.weight']) + pruner2 = Pruner(target_sparsity=0.6, update_frequency=2, names=['layer1.0.conv2.weight']) + conf = PruningConfig(pruners=[pruner1, pruner2], end_epoch=2) + datasets = Datasets('pytorch') + dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + compression_manager = prepare_compression(self.model, conf) + model = compression_manager.model + + epochs = 2 + iters = 3 + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) + for nepoch in range(epochs): + model.train() + cnt = 0 + compression_manager.callbacks.on_epoch_begin(nepoch) + for image, target in dummy_dataloader: + compression_manager.callbacks.on_step_begin(cnt) + print('.', end='') + cnt += 1 + output = model(image) + loss = criterion(output, target) + optimizer.zero_grad() + 
loss.backward() + optimizer.step() + compression_manager.callbacks.on_step_end() + if cnt >= iters: + break + compression_manager.callbacks.on_epoch_end() + + model.save("./saved") + + def test_pruning_external(self): + from neural_compressor.experimental import common + from neural_compressor import Pruning + from neural_compressor.conf.config import PruningConf + pruners = [Pruner(1,3,names=['layer1.0.conv1.weight']), + Pruner(target_sparsity=0.6,update_frequency=2,names=['layer1.0.conv2.weight'])] + conf = PruningConfig(pruners) + + datasets = Datasets('pytorch') + dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0., high=1., label=True) + dummy_dataloader = PyTorchDataLoader(dummy_dataset) + compression_manager = prepare_compression(self.model, conf) + model = compression_manager.model + + epochs = 2 + iters = 3 + criterion = nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) + for nepoch in range(epochs): + model.train() + cnt = 0 + compression_manager.callbacks.on_epoch_begin(nepoch) + for image, target in dummy_dataloader: + compression_manager.callbacks.on_step_begin(cnt) + print('.', end='') + cnt += 1 + output = model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + optimizer.step() + compression_manager.callbacks.on_step_end() + if cnt >= iters: + break + compression_manager.callbacks.on_epoch_end() + model.save("./saved") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pruning/test_pruning_group_lasso.py b/test/pruning_v1/test_pruning_group_lasso.py similarity index 100% rename from test/pruning/test_pruning_group_lasso.py rename to test/pruning_v1/test_pruning_group_lasso.py diff --git a/test/pruning/test_pruning_pattern.py b/test/pruning_v1/test_pruning_pattern.py similarity index 100% rename from test/pruning/test_pruning_pattern.py rename to test/pruning_v1/test_pruning_pattern.py diff --git a/test/pruning/test_pruning_pure_yaml.py 
b/test/pruning_v1/test_pruning_pure_yaml.py similarity index 100% rename from test/pruning/test_pruning_pure_yaml.py rename to test/pruning_v1/test_pruning_pure_yaml.py diff --git a/test/pruning/test_tensorflow_distributed_pruning.py b/test/pruning_v1/test_tensorflow_distributed_pruning.py similarity index 100% rename from test/pruning/test_tensorflow_distributed_pruning.py rename to test/pruning_v1/test_tensorflow_distributed_pruning.py diff --git a/test/pruning/test_tensorflow_pruning.py b/test/pruning_v1/test_tensorflow_pruning.py similarity index 100% rename from test/pruning/test_tensorflow_pruning.py rename to test/pruning_v1/test_tensorflow_pruning.py diff --git a/test/pruning/test_tensorflow_pruning_utility.py b/test/pruning_v1/test_tensorflow_pruning_utility.py similarity index 100% rename from test/pruning/test_tensorflow_pruning_utility.py rename to test/pruning_v1/test_tensorflow_pruning_utility.py From 1190bd9bf4654752ef8dc76468259dbb4449471a Mon Sep 17 00:00:00 2001 From: wenhuach21 Date: Mon, 12 Dec 2022 12:05:30 +0800 Subject: [PATCH 2/9] DATASETS->Datasets Signed-off-by: wenhuach21 --- test/pruning/test_pruning.py | 5 ++--- test/pruning/test_pruning_config.py | 4 ++-- test/pruning/test_pruning_criteria.py | 4 ++-- test/pruning/test_pruning_patterns.py | 4 ++-- test/pruning/test_pruning_regs.py | 4 ++-- test/pruning/test_pruning_schedulers.py | 4 ++-- test/pruning/test_pruning_types.py | 4 ++-- 7 files changed, 14 insertions(+), 15 deletions(-) diff --git a/test/pruning/test_pruning.py b/test/pruning/test_pruning.py index ba1e6589d4b..57fdb9fd604 100644 --- a/test/pruning/test_pruning.py +++ b/test/pruning/test_pruning.py @@ -5,8 +5,7 @@ import torch import torchvision import torch.nn as nn - -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ 
-44,7 +43,7 @@ def test_pruning_basic(self): criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) diff --git a/test/pruning/test_pruning_config.py b/test/pruning/test_pruning_config.py index c3cdd37ce61..4430affbb49 100644 --- a/test/pruning/test_pruning_config.py +++ b/test/pruning/test_pruning_config.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -41,7 +41,7 @@ def test_pruning_class_config(self): criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(12, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) diff --git a/test/pruning/test_pruning_criteria.py b/test/pruning/test_pruning_criteria.py index 75063d9bdbb..03a54d60d7c 100644 --- a/test/pruning/test_pruning_criteria.py +++ b/test/pruning/test_pruning_criteria.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -54,7 +54,7 @@ def test_pruning_criteria(self): criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = 
datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) diff --git a/test/pruning/test_pruning_patterns.py b/test/pruning/test_pruning_patterns.py index 3850c373ab3..f5f6db91f34 100644 --- a/test/pruning/test_pruning_patterns.py +++ b/test/pruning/test_pruning_patterns.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -51,7 +51,7 @@ def test_pruning_pattern(self): criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) diff --git a/test/pruning/test_pruning_regs.py b/test/pruning/test_pruning_regs.py index 412e0557ebc..7da5f44852f 100644 --- a/test/pruning/test_pruning_regs.py +++ b/test/pruning/test_pruning_regs.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -66,7 +66,7 @@ def test_pruning_regs(self): prune.model = self.model criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) prune.on_train_begin() diff --git a/test/pruning/test_pruning_schedulers.py 
b/test/pruning/test_pruning_schedulers.py index 4e23a14f8fe..272b766f661 100644 --- a/test/pruning/test_pruning_schedulers.py +++ b/test/pruning/test_pruning_schedulers.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -49,7 +49,7 @@ def test_pruning_schedulers(self): prune.model = self.model criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) prune.on_train_begin() diff --git a/test/pruning/test_pruning_types.py b/test/pruning/test_pruning_types.py index 68948911848..3adbc78452e 100644 --- a/test/pruning/test_pruning_types.py +++ b/test/pruning/test_pruning_types.py @@ -6,7 +6,7 @@ import torchvision import torch.nn as nn -from neural_compressor.data import DATASETS +from neural_compressor.data import Datasets from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader from neural_compressor.pruning import Pruning, WeightPruningConfig @@ -55,7 +55,7 @@ def test_pruning_types(self): prune.model = self.model criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = DATASETS('pytorch') + datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) prune.on_train_begin() From c034cf8f94193eaf37c3396ad6244ba0ce4cabd8 Mon Sep 17 00:00:00 2001 From: "Lu, Yintong" Date: Mon, 12 Dec 2022 13:20:10 +0800 Subject: [PATCH 3/9] pruner README v2 Signed-off-by: Lu, Yintong 
--- docs/source/pruning_details.md | 341 +++++++++++++++++++++++++++++ neural_compressor/pruner/README.md | 90 ++++---- 2 files changed, 392 insertions(+), 39 deletions(-) create mode 100644 docs/source/pruning_details.md diff --git a/docs/source/pruning_details.md b/docs/source/pruning_details.md new file mode 100644 index 00000000000..2c6909c8ebf --- /dev/null +++ b/docs/source/pruning_details.md @@ -0,0 +1,341 @@ +Pruning details + +============ + + + + + + +1. [Introduction](#introduction) + + + + + + +>>>[Neural Network Pruning](#neural-network-pruning) + + + + + + +>>>[Pruning Patterns](#pruning-patterns) + + + + + + +>>>[Pruning Criteria](#pruning-criteria) + + + + + + +>>>[Pruning Schedule](#pruning-schedule) + + + + + + +>>>[Pruning Type](#pruning-type) + + + + + + +>>>[Regularization](#regularization) + + + + + + + + +2. [Pruning examples](#examples) + + + + + + +3. [Reference](#reference) + + + + + + +## Introduction + + + + + + +### Neural Network Pruning + +Neural network pruning is a promising model compression technique that removes the least important parameters in the network and achieves compact architectures with minimal accuracy drop and maximal inference acceleration. As state-of-the-art model sizes have grown at an unprecedented speed, pruning has become increasingly crucial for reducing the computational and memory footprint that huge neural networks require. + + + + + + + +### Pruning Patterns + + + + + +- Unstructured Pruning + + + + + +Unstructured pruning means pruning the least salient connections in the model. The nonzero patterns are irregular and could be anywhere in the matrix. + + + + + +- Structured Pruning + + + + + +Structured pruning means pruning parameters in groups and deleting entire blocks, filters, or channels according to some pruning criterions. 
In general, structured pruning leads to lower accuracy due to restrictive structure compared to unstructured pruning but it can significantly accelerate the model execution as it fits better with hardware designs. + + + + + + + + + +### Pruning Criteria + + + + + + +Pruning criteria determines how should the weights of a neural network be scored and pruned. The magnitude and gradient are widely used to score the weights. + + + + + +- Magnitude + + + + + + The algorithm prunes the weight by the lowest absolute value at each layer with given sparsity target. + + + + + +- Gradient + + + + + The algorithm prunes the weight by the lowest gradient value at each layer with given sparsity target. + + + + +- SNIP + + + + + + The algorithm prunes the dense model at its initialization, by analyzing the weights' effect to the loss function when they are masked. Please refer to the original [paper](https://arxiv.org/abs/1810.02340) for details + + + + + +- SNIP with momentum + + + + + + The algorithm improves original SNIP algorithms and introduces weights' score maps which updates in a momentum way.\ + + In the following formula, $n$ is the pruning step and $W$ and $G$ are model's weights and gradients respectively. + + $$Score_{n} = 1.0 \times Score_{n-1} + 0.9 \times |W_{n} \times G_{n}|$$ + + + + + + +### Pruning Schedule + + + + + +Pruning schedule defines the way the model reach the target sparsity (the ratio of pruned weights). + + + + + +- One-shot Pruning + + + + + + One-shot pruning means the model is pruned to its target sparsity with one single step. This pruning method often works at model's initialization step. It can easily cause accuracy drop, but save much training time. + + + + + + +- Iterative Pruning + + + + + + Iterative pruning means the model is gradually pruned to its target sparsity during a training process. The pruning process contains several pruning steps, and each step raises model's sparsity to a higher value. 
In the final pruning step, the model reaches target sparsity and the pruning process ends. + + + + + + + +### Pruning Type + + + + + + +- Pattern_lock Pruning + + + + + +Pattern_lock pruning type uses masks of a fixed pattern during the pruning process. + + + + + +- Progressive Pruning + + + + + +Progressive pruning aims at smoothing the structured pruning by automatically interpolating a group of interval masks during the pruning process. In this method, a sequence of masks are generated to enable a more flexible pruning process and those masks would gradually change into ones to fit the target pruning structure. + + + + + +### Regularization + + + + + +Regularization is a technique that discourages learning a more complex model and therefore performs variable-selection. + + + + + +- Group Lasso + + + + + + The Group-lasso algorithm is used to prune entire rows, columns or blocks of parameters that result in a smaller dense network. + + + + + + + +## Pruning Examples + + + + + + +We validate the pruning technique on typical models across various domains (including CV, NLP, and Recommendation System) and the examples are listed below. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Reference + + + + +[1] Namhoon Lee, Thalaiyasingam Ajanthan, and Philip Torr. SNIP: SINGLE-SHOT NETWORK + +PRUNING BASED ON CONNECTION SENSITIVITY. In International Conference on + +Learning Representations, 2019. + + + + + + + + + + diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md index 16481b50630..fb34c70c6ed 100644 --- a/neural_compressor/pruner/README.md +++ b/neural_compressor/pruner/README.md @@ -7,27 +7,27 @@ Pruning -    1.1. [Neural Network Pruning](#neural-network-pruning) +>>>[Neural Network Pruning](#neural-network-pruning) -    1.2. [Pruning Patterns](#pruning-patterns) +>>>[Pruning Patterns](#pruning-patterns) -    1.3. [Pruning Criteria](#pruning-criteria) +>>>[Pruning Criteria](#pruning-criteria) -    1.4. 
[Pruning Schedule](#pruning-schedule) +>>>[Pruning Schedules](#pruning-schedule) -    1.5. [Pruning type](#pruning-type) +>>>[Pruning types](#pruning-type) -    1.6. [Regularization](#regularization) +>>>[Regularization](#regularization) @@ -39,6 +39,10 @@ Pruning +4. [Citation](#citation) + + + ## Introduction @@ -69,7 +73,7 @@ Pruning patterns defines the rules of pruned weights' arrangements in space. INC -Pruning Criteria determines how should the weights of a neural network be scored and pruned. In the image below, pruning scores are represented by neurons' color and those with the lowest scores are pruned. The magnitude and gradient are widely used to score the weights. Currently, INC supports **magnitude**, **snip** and **snip_momentum** criteria. [Details](../../docs/source/pruning_details.md#pruning-criteria). +Pruning Criteria determines how should the weights of a neural network be scored and pruned. In the image below, pruning scores are represented by neurons' color and those with the lowest scores are pruned. The magnitude and gradient are widely used to score the weights. Currently, INC supports **magnitude**, **gradient**, **snip** and **snip_momentum** criteria. [Details](../../docs/source/pruning_details.md#pruning-criteria). -### Pruning Schedule +### Pruning Schedules @@ -90,7 +94,7 @@ Pruning schedule defines the way the model reach the target sparsity (the ratio -### Pruning Type +### Pruning Types @@ -119,18 +123,18 @@ Regularization is a technique that discourages learning a more complex model and Neural Compressor `Pruning` API is defined under `neural_compressor.pruning`, which takes a user defined yaml file as input. Users can pass the customized training/evaluation functions to `Pruning` in various scenarios. -In this case, pruning process can be done by pre-defined hooks in Neural Compressor. Users need to put those hooks inside the training function. The pre-defined Neural Compressor hooks are listed below. 
+In this case, pruning process can be done by pre-defined hooks in Neural Compressor. Users need to place those hooks inside the training function. The pre-defined Neural Compressor hooks are listed below. ``` -on_train_begin() : Implement at the beginning of training phase. -on_epoch_begin(epoch) : Implement at the beginning of each epoch. -on_step_begin(batch) : Implement at the beginning of each batch. -on_step_end() : Implement at the end of each batch. -on_epoch_end() : Implement at the end of each epoch. -on_before_optimizer_step() : Implement before optimization step. -on_after_optimizer_step() : Implement after optimization step. +on_train_begin() : Execute at the beginning of training phase. +on_epoch_begin(epoch) : Execute at the beginning of each epoch. +on_step_begin(batch) : Execute at the beginning of each batch. +on_step_end() : Execute at the end of each batch. +on_epoch_end() : Execute at the end of each epoch. +on_before_optimizer_step() : Execute before optimization step. +on_after_optimizer_step() : Execute after optimization step. ``` @@ -141,9 +145,13 @@ The following section is an example of how to use hooks in user pass-in training ```python from neural_compressor.pruning import Pruning +from neural_compressor.config import WeightPruningConfig -prune = Pruning(config_dict) -prune.update_config(start_step=1, end_step=10, pruning_frequency=1) +config = WeightPruningConfig( + local_configs, # An example of local_configs is shown below. 
+ target_sparsity=0.8, start_step=1, end_step=10, pruning_frequency=1 +) +prune = Pruning(config) prune.model = model prune.on_train_begin() for epoch in range(num_train_epochs): @@ -152,35 +160,34 @@ for epoch in range(num_train_epochs):     for step, batch in enumerate(train_dataloader):         prune.on_step_begin(step)         outputs = model(**batch) -        loss = outputs.loss / gradient_accumulation_steps +        loss = outputs.loss         loss.backward() -        if (step + 1) % gradient_accumulation_steps == 0: -            prune.on_before_optimizer_step() -            optimizer.step() - prune.on_after_optimizer_step() -            scheduler.step()  # Update learning rate schedule -            model.zero_grad() +        prune.on_before_optimizer_step() +        optimizer.step() + prune.on_after_optimizer_step() +        scheduler.step()  # Update learning rate schedule +        model.zero_grad()         prune.on_step_end() prune.on_epoch_end() ... ``` ```python -config_dict = { - 'target_sparsity': 0.9, - 'pruning_type': "magnitude_progressive", - 'pattern': "4x1", +config_dict = [{ + 'target_sparsity': 0.9, # Target sparsity ratio of modules. + 'pruning_type': "snip_momentum", # Default pruning type. + 'pattern': "4x1", # Default pruning pattern. 'op_names': ['layer1.*'], # A list of modules that would be pruned. 'excluded_op_names': ['layer3.*'], # A list of modules that would not be pruned. - 'start_step': 0, - 'end_step': 10, - 'pruning_scope': "global", - 'pruning_frequency': 1, + 'start_step': 0, # Step at which to begin pruning. + 'end_step': 10, # Step at which to end pruning. + 'pruning_scope': "global", # Default pruning scope. + 'pruning_frequency': 1, # Frequency of applying pruning. 'min_sparsity_ratio_per_op': 0.0, # Minimum sparsity ratio of each module. 'max_sparsity_ratio_per_op': 0.98, # Maximum sparsity ratio of each module. 
- 'sparsity_decay_type': "exp", - 'pruning_op_types': ['Conv', 'Linear'], - } + 'sparsity_decay_type': "exp", # Function applied to control pruning rate. + 'pruning_op_types': ['Conv', 'Linear'], # Types of op that would be pruned. + }] ``` @@ -188,7 +195,12 @@ config_dict = { -We validate the pruning technique on typical models across various domains (including CV, NLP, and Recommendation System) and the examples are listed in [Pruning Examples](../../docs/source/pruning_details.md#examples). A complete overview of validated examples including quantization, pruning and distillation results could be found in [INC Validated examples](../../docs/source/validated_model_list.md#validated-pruning-examples). +We validate the pruning technique on typical models across various domains (including CV and NLP) and the examples are listed in [Pruning Examples](../../docs/source/pruning_details.md#examples). A complete overview of validated examples including quantization, pruning and distillation results could be found in [INC Validated examples](../../docs/source/validated_model_list.md#validated-pruning-examples). + + +Please refer to pruning examples([PyTorch](../../examples/README.md#Pruning-1)) for more information. + + -Please refer to pruning examples([TensorFlow](../../examples/README.md#Pruning), [PyTorch](../../examples/README.md#Pruning-1)) for more information. 
\ No newline at end of file +## Citation \ No newline at end of file From 8a00d9f0b763cd58e77951421380b7e8a0ff52f1 Mon Sep 17 00:00:00 2001 From: "Lu, Yintong" Date: Mon, 12 Dec 2022 14:43:41 +0800 Subject: [PATCH 4/9] prune README v2 Signed-off-by: Lu, Yintong --- .../scripts/codeScan/pyspelling/inc_dict.txt | 5 ++ .../_static/imgs/pruning/Pruning_patterns.PNG | Bin 0 -> 60008 bytes docs/source/pruning_details.md | 28 +-------- neural_compressor/pruner/README.md | 56 +++++++++--------- 4 files changed, 34 insertions(+), 55 deletions(-) create mode 100644 docs/source/_static/imgs/pruning/Pruning_patterns.PNG diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt b/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt index 3ade5e4de45..83a0d9c1521 100644 --- a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt +++ b/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt @@ -2453,3 +2453,8 @@ QuantizationAwareTrainingConfig Startup doesn startup +Ajanthan +WeightPruningConfig +Namhoon +Thalaiyasingam +Torr diff --git a/docs/source/_static/imgs/pruning/Pruning_patterns.PNG b/docs/source/_static/imgs/pruning/Pruning_patterns.PNG new file mode 100644 index 0000000000000000000000000000000000000000..d453622ed5a71955d2c8ca0db6dfbc1f0b0bd4eb GIT binary patch literal 60008 zcma&NbwHC}_&01LpdwPzN{F^*NDQPLFc>nrr5mKXH)=3s zbU)+!d!IL+zurH#-8=Vv?sJ{bxz5$+o2I(LLlQcY8#it|R8st)edES$KEidw{dD@Z|3@P+ztZymzWu+STw+%;`M>-8Uyb!_-&Fpe7yZ{5n#|_@|9<;*<39)+ z-~VSwW$h=XkhcGW4&@dVNcI0ooX`JH;@Sn4@d&dEWj?b;u7<#nFJYCzrNt)ERNgSlI-s5OFI9; zQczSFA2Z-0=4oRZbXbQ}=k=D-Fcq)AT8lI=$&%D5 zJ`sG89B{l8?s0zXZtv=vTv*7(`SPXaUIP2c$;tArPi|hGTnvL4%TD7N^1L(nymMJ( zj;2FCoK?X#w*E6=eM1D=~Vw4e+?%`(>aD8v)mpvJU4<6 zm|b^R_EU2XdGO#-cYm4)MkDxiEz-cmYTEi!6M{8nYB(QeLm+ zHsk<`t&7nZ8cyH{6xowZ1sdmiAmy=dH@lL@k1H<+YPPs zabHd;APYY7cmSVr(1>rh>ksJpsin5zYc6X&UhN#nzYSJ}19UYzbKrjbgXr(v9RHN9 z)ewoJ(Mnpa?woe%&@Zc`kc9v(lGFj^cdbg^4|TMSXSk`jjw{M5yYp0%(N#?s+ZGxF 
zy=Oa3{8EuOk_>!uQI){R&5pdsz_iuG4EYIPPAADcFIXaKnu*hZ*t$-67-`m&r4&m4 zh2oY6jO2c=-msz3Oo10n(+{SLy^^W5APwtt+zw5g0CBD%heJuV59J;`i&J|0Gb`)e z=H_ND0-^Ht>sR!@rB*>HDP9GtfB2oYw$KOM9z6?KEY@2BwDm*%AaSfv#`U3K5?#+3m^*r_L*MR!+U4mF*2R22$`SQQ|l52O(3D6v1Gr@EA>#n zQn$2LgR`taQ{U8Io??;l#WI(>v5^t8kjh74V^Grq&0RSx2@+9V_7j1cYg!lQqY~QI zHZ?_(8n7kAF^Pm8g>D2z}a`tvXc4Cl73N##n1aeXCA%$E?MaGcH@^t?0ALN1;T4$Z*yBG$c*Em8mYib za*y&KnUmGh9<%DTe2g!;n?w&Y(y1I~3hmmrHj?S*!LZMJ=t=%#hB;ydU7=q82R- zgv?IUa_WT^8B z06=IrOM5l(EmA!r6?)ns%vrRVkX}3D5RJ^Q5WLJ9-{!~WrOtwllMVrsx2ni|K+QMIO^>Ob}+nJ`wtipEQx%|*+#Ci$bnx)PB0pW_4U=5Cv zIBr`w4rj*sNHBE5^0+?!ovQotzNqDq5Q<2rOU$$)x4fA!%iz*TAC&+nExo9^Na&FY}tlG5Q}>d}SK3iL~pend(l ziEF}xN6}p~u1zaXKR<7-@juGoONIIZphz7D0QG540 z>DKCNs3=({sBc=p_Lm>54Ve9{(>f7aflRq^vH)MFCicHENpjlBP1GAl-0?gl)dJd7 zeP_H;cxGv7>Gx1ELn(V(7~&QKjsNjJ;Mv=%vYG|+_h z%q*j|=L8$#Fp*Lpq^zM)!m6hl14h-|%4#J_VScZ_B|QH5;M1SCL6ex}`$k2W)#91u z1AP_qez(!;ggiHRE=mu$mz#4pBaiLQ-vqe2inUrVMm zz?*Hg?kgL6nWt^;tPbM0hwKWhqv3w3V5h%f54Yn1(!2;f7JPs(>n&a3prjUgnf<1C z?alj6Z&KJTp<=9>byadAGUwEgQz67stz7!-+%qLTHEFoV28tfAlNrJ?Cn1j#kv;ra zJ;5Euafw$|E^{8v@Gkorw_V7naCyG>Q=Ljr^C4VI?~ov!o@BllyWBhzFgIY~ripFi z+CZO*o_9#k-Ne{8T+GB^-d$^|I+TODVri9*d?yE^SpWOy$8Fss%LOh;JkY8S;S!W> zGu95L`K-DBM4C>D8DL_3RAbH#MXU~e2vSSH24%iIaO|j_vJJ5R;(fhGd42?`d`FAk z7+6XEz+I`hiK`$Q%vQ1CVpY6bnL7>~y*KlB8a}zy{{yxLOG~41buP>GeZgLQsqE!9 zOy{B;3D(CWPIFz|bms$)8rtKtDY!g3Xtkvj*ov1@50010ztTfCrFg^i<^G2@X>VWR*$*;_Bs?$wZjub z+2Gc}#L_kELCG`iYfbbO$h%tJ)MPgAvl4yX-zZE%Pe_df{EHIzDVExkO8s@U&J_0r z33%Rr=b}BnF+kSoIGQBAHHU)h-3FvQ^);!Xt&l4yEp=gZ+7n_h#XonEgCA00?qs`n zfj4NR=R3q9;&ZBjtDkqnM zUYXK861W=8W0RKfeNcWcciEjm`=@1LNkSXlrtAG$Xx~^y+f;2IY2GjVJaX z03kL;srUH9nVpWdWFg#3mRGrMM9kmVacF}B6^#e(L?=_WnMzlGtEKT!eScv;=A|>C zJ#Eb?-rGG#%kepKawh8O=}>KrhFo13GGH%W{yqAp+!6mIn&;JV znYQUj)~!pEt^M4wIY`J@q@7;0%HSV!Uk+tFo-ltS_4HIIWRp(}Ek#&$dnp$-wGF?Y zbR10a&GDQk$1Ceyq^f6KeM&%>jBcR*0kb8rL3H=$EmR7-lIb(vz`#JkSFe61U@17% z*YAi?NBav`8{C$qB*ovdFL2#Xey}4TX6Ucif8~tzCax+7dNghlzhB5YJSqH?g7C<$ 
zj0;Uv$EjN|xudXXmwWqqS5yb1obeyn`$#*{ZxUwuc>K)5h57=2B4GIE>Wb|5?L(AR z5=hB*@MpmOZf6v${0oA?Ut;BPS}i>hlz*DODU)RAs{=d}gH|751+t}5iS22n0zTjy z-WM+HO^@?*M&}mzfc{F9klMZO)R1dX*>5IdR=)ZdiY{cM3yvs8il9G^rStuSWk}^{ zyyKDkTuzy!kC;`!P?Vx1pgH6m!@c1L?nQNAGXMjx$F;BbzT=UGl8gI6_FTL0J?q4S zja$<*JN0#<7HHX0tD9gtnr*(MT2t-n5l$cp*h1^$lr}amvmd*9h(nj?7p~jcFtl4=!M=IPvuP{A) z;siOigKYViG$2FBpfe3)6TB18rzcOkuV(MV;T?&A`3-kGwY7B!*Y$GYLRNo->Tpsh!m)|IhS% z)k&m^Pb1uOCHOM~@CC-noZSUhr_iEu7Q=m$S>rVGBmp0C zrGK)EX40>wXm|hE(Cfb+iq_rlGR1i66S}O6Ac_FLV$%P1s?0$mB1mn=9qDzy0Hs+a z5Xjz`LO@J8ETb3liyg43ZZYK2gNu9(0SxiNH3q{1Uq79xA?T|lNFnuTtz*Ii-&{`aCQuM#|(_QcI1`{V*}6d(F?fW4v?BK=(z3I|7;F<1{lKkBrZ=b7EFd zW%$SpSFCqLCFj*Y6YX~{ANJ;EMmgN#7bsW+DZjtV#8E^Jm-DP6kFv& z4o*La1!DFf#cpfsLzXe~p{a^3?$h)lt_8V8dP}WE?e%TQZ7vza?W=eW$mph~Qgy>&tm%DDQd{`aHV;DnV8 zEv%>ffAgAFn*xAzxe7Odf#Sw*WN{(L^?JB!yB(2dCNLukq9srS*(!*@5wjjG1+@?N z$U`;KD;ZezoekQ3p*aV=@wAYwf23K*qX$@#irAXZ5;Y`XQ3nR4=U?XfJlFV@-{s+L zp=rj^RQCEh-LLIp5R_zhVXFK&2X=f4XlrE3gE=gVXUo@{8ChJ6B z)+}Yx_dXX`BC*AdsMK2fPl;h`V(sgYfntbyKuo4+MgHf&CGqEcjj^Jj!XX)S0!Iu? 
zCOAmzwm)HBx<~sDIcGOO-N`iW@Lw7`e8IG#u+OJSOkt!sr>DGY?D}d4O=ZUsKAZlS z1Ce=?(LWi4z>$LXg7~vK(xDOdB3;X|s{LLCbA71T*rM}RGEk0!Z^KXTkn(|IR3v-T zv78Ut_a$>0jCY^|Fs2sn7!>OTBI{vJdb_)I#pTgnw{}Qfo1o#uFUEOyUK(ifz#!YS2ixP%+&q{Ul*IfGu>lHRjs%Lr33NXV_i5rO-Wg6%jL;pnBeaT+l3Fg9?n(&Z)LbncQH)H4_$38V8Zd7^J#_Xc#e{mqf zR#L4mOa&|3n)p^(dW_oA`kWJN*K)Wwpmy-8soHqMZysgFG2OAOf@*Sm`$&VS3>f&a zbLBj&7os)Gmlnxyc!>QlV;G?Ii2AUrG{KXcImGS#Zw-*KPvp}bs?VyTb5Fz1?0iI2 zMKyMo%k(OWQ4Q<4_)hJS>s`7z@$IXOE%kW!wgi~b|J;iwbdGv{1OhNwp}QBKv&~1BjJz!U zJuf7W^StQb?dG%h>Q{K9&jooS8N^lu3x=JU)bnh#7b8!)WMjrTbQHE%9?8adIe<*H zqw0Wr@nHo6>Y5(rZBo=yax{#9?Otv$zagllNbS8`kGYpTiuRAL?h2=rU_6`s2I z?gaXWJ#ay;Har6C{ntDbKf>YNFXaH-raDEMH1Nsk!MH$b*Bxe3=JLQqlkAGRpLHuL z!M!o{t%JU#Ou?WiX{y;SJL{wJ9Q?w0r*(M!wub54(`$FotZY69UIT`tpska-LBMGBcph* zoT+y~aaSE1z=KT1798C5+Dn%~bSEd=PA?$GOXg+zr zo+}T_dA$?}kHMadS-uppAZ-JgFT}Ol&f?m&V!?7aBxb0tBikEAR7B zx3zDc!U&F0=cUhI+59tv=-GfkkI(YJz^wGe(;2rNS3@q88`I@OrEbL7grg9ddIhwG zhTw|!Y^9VvFMSeF>w!sS=yKMs_U=H*aL`*abvgh2W|U9gGjy=ORln8mJhA(yP=;e> zb5=~LyLhPcUxYw&sPCE-O`V7Ts9$>7)?jy~xUIe*Uh-5_m1Q0)i`&~XJC{Z6sQ@#X zm+y!A+WE|mH{=%y5%bl=&eS6N8uFd;HDVZ~TpZzKK;H;8WmDKG@~pZ{B$jd1^SL`w zbLLdDWZXn{lj9g%Cwli0S{TdPt1i>U!n52$nlemEhlXzLp?J6Vhc6Z1G3(ft>q{V8Q!&sNI~4_D-8eh9J5-OgH|d|(d~6X1~W ztmQd#&vEYFOkQez9aI04(|GAVyGA`rcbH=J+$836wOfwm%OM3~V(&etF0%LUwkaE(s_LJ~%#n}2{Vs3Z;02bVMV3A) zN}@sS32l=@sM;X-Ow!hK+NV-a@K0YUxN^j#CvHzQBwbVwRoZ|Y8D$v-mW=O>3AsD# z6PiF)72Vw{L7TNjpu&(mR9obENs4Ilg%T6o31kPJK=nSDAykU{Ubg zM(3{ac*9*(X>-kBD1uK&80b&E;5UESHj^M z>o8SBW4g->irTGn1ODf$oXJ=D4lBojPxU|CO-;M1(GGwNcVlg3A}{ZkKhAe!gNOP5 zP5ltFB2cnD_I>3vzLia|Idtu;3KZRS+DG0SG;7B#7^AG!A2-|R>8i!;c!D0u#0yNkI1oA~d^hr&GuewJ=f-8&P}@iwVvS)o;5c6#XJtW#`x z*QPcOJyvO_47!E}7ezMSn^X(OEVu_pT?y3UTTL{}vcCCHpF#SYKW1kZLCFpPHE zH%7Gdmqu^DCkg4`Em_#82%R4fxiByI-&j_N?c#nfG&Pf`h@HDiQ8O14ZO$ynjwDS@(>(l%pRjDxxx5U z>**W2g1V7y=ywW;k-*QLyTt4a6plMg`Za^Ute-6;hev9BF0Y z`!mWR#CmD?LK~wDaqDa3!$wC?udB?Og4up7K9(H3#Ei0DbCId46z3VKA7-m&@d*9T zqdy%5GD;G+yzY@Vx&qO*y2@Y>32U2vOf!FT8MJaQFyo6Hgi+aex>i?Ph;Q&$Mp?Qo 
ze8#h25A`no05_i{7hM##^3>bg`?U6%jsC>8mnBg&ywZJ8<<-)xzmZ--3(UUP5tQ>; zq7Hvb^*kLox*5eT$;n++B;@jR-(r(kOmmVwx#V83-%P{A{9pd~+a=c!^QuU_plxT2 z{BdVttUdr!%fPicG+{GFAQCg0;{cpNx5j=kX!4zGov0hx^U1QO_GRIyk<_WG4Z?@1N889xf_fbG-(0dO|TR z&;zggk3Mc80P7s+ayk|AAN5k6n&d4-<8cy3yN@$o*u+G>W0`e%{6bO1+xvh(d^)HBdJib>KWSEaG#VwzY7cnD5dKO7jzsx8ZR9)t4S{|*g-Zfl5a-0h)JYKh*aSfzvO(ag!5mLFd<3ghs2%V7; z)T$;p-5Zv0VJ|1XtN!M5b#CsO9b>T@AjK=Ci3qeC&$XfOW9kyj~zVE!={EfpQ^PrhaN3& zX|}O9)(-S^ypJYUGEsC9)BXTqEa`|Lm7vVZ%`G>{nDy7~-$-dnu@spOahSC|icwK3 zmRi2`ysXx=$~Z8v;Zo$s`RPPce8@%^@h1r>LE#`DEd+nkTNA}+Ow$yZVnLe4+h5dAw1e>s$$uYk6uM@Idc6D@&BJ3G*xf5W*Y--wODNlW}UXlW_%}@F^PFcl>NvVTy24JC)6DNw zmXYcVW`*%b$HPXhVPl$fSJh|UgtR6oP*|+q9B< zZAk>7RzmBmT@_P}#iTZcNLHq7B!`$ZM%MwF#J`j00 zEPs7R#&tdgnSX*`T>)^}?esZ~`dAa>X0+IJ%Fttc*FA;y%G|0A(T{|-hnZq=Kni^R z4_i+so1S_NcpVtiSG~BGw^%rm`JGof6Q3Q$4v(6Ua#)Yv8d5=Z|x-l z^>v+&g2dbf_b)!xqrT)=?6$#*H~J4_BHL;GnY2C2xpPc$^C~}cEW97~2H!uC7Qgg~YvU)i3qiSyuR$H`mXH?w*%~V6C}alw@UK z*R{nKGSguVut`f7qYt3Dz1Gj?oS;zkKUn*my z0I#!RSK6L$o13+_76DKcl*vbHTrz{Z%KioYUjd+5^9mSc0W6{apn^`f*rG7Z(9o{M69NHhh$AzBAn zNmTiucVO$0n6LQpzzx0d&r9w`m58xxfLj4X=#S=blWH&TYnyDWB#3ugiWT27Ap9Sh z&f5unD3=;{?&EQ$9p5F|?Lpjn`L(_qJ!InCKkT7*ZEVQbTkaxcVc)nfaT>DCxQN8O z*3;Mjs`@)V#j?}-B1`{d=$Cu1FQ|5|$ye3D7jCQ0N6=o6lfQi<=H%zEs<#oLb5u^t zBs+L|D5HF}XlUZTMjj>+PKh(APxq04ls+yjSKEG%pB*^<_?KeFpS=X7fcxqbbr8-R z^m6{!J6}KlDr*^+a$As>l997n7x25tLV!vn;%vX-TD|$ZD1_^ZkSXR@tu54g15iEw z=%M0^>#b}$JH@5O7;58hG z<~{SMm8x`pJFHTRo{nBT`t*ENU~7;|qGGdm<1iQB9}p&SWJ-1+YV{kAa(|mKPtLsM zYrLdh&;eVFHILPAVc`Ss%tHfH^zb1Etaw<3C9+aM7_4IN9bUhu{?GGweQAOS*hRw7 z<=QCQaNZ`Q2RWF7DC@u6>0Z8@NPlQxbS+_m{L5H0Eb$|mftC5t3b#A4G@8hHoohqR z`2>0I;+E+AqjbBL6Q{1*{GSPOWNwn`S@RkOl675WlUROESL53H^LjrpO-^#xut%RN zc@u%=0kdV^gVr~F6OY>@8Y&cxKD^lv<8d?1U1+Z!h}k7ygo)JAqI+ruK2xDUQ11A%?k& z`SKr8!Txv;MH-;O)u_KpXIlbshE6F~8~!_4_|5<2b0ds2&3uL{!F?27Eh{hAl>zc3 zA{QSi$c%U@qc6Q-NfS7$+h8x0pN}@jU9%52LqthRIp%UoCLEa39TxOJbuPiteK0Zl zC&6C3s_~QCoO*cHFo6Dr*wjSNyzAzh7ygs zk+JW-`teu;{wua;v!TM_Vs3(}NjlLeJM&&-6G!S#3WeW-o{`>8Rkeip3RsuH*Iprn 
zP$4zb;yr#!$nxpdXxN(s__k+=i+Sw(bTK?^&h*vdt{FU9{)s)k81t~XOy>pe+Tw1` zd*mxO!W;kdDl*R5MqsJl7qY>Ctgk1Ssh`@Zuj}8G=rHGZmD@N=A|IvYtp& z>Bmc08mWdF{YXh(Vqew1uI>a5q0gV{(+95|I%;TR>_q;bN*T2H}OXcS8ur-rZH3fLP><>$e7XQ z#f&){6G^n>;G!gp@#tUT*Gth`>6`bfR7bk(BoT?7a8<61G>UbxD<_0h4tpNO#*EDl zCgsbvOwLbn^E>H)jtURFMTc-^GldJxg28RySCPj^TfQ&h9$0@kZcC&Xs#CS4W zV|9|sl-g8p0V9r+%-)iO7wI*Pjw3Z6@FN|PJB8m|$GiLkbaE&Q+!wC?Oj%kePPSn# zjuw)OZk3$d>E)cCp@dIF!e=JTT%a7nX;RDUph)ApPfuG)B3=wBOzg;EptrLlZRY!`p9^^W}e!V^g?+nMLF0c&G6 zy+JU$g`hm6@ofZzwNsq~J!QelV7(aMR<9BUy!FHkPQUWj*eCVr8eqGplR21&J>%Zp z<%f77jHNyyR{WJ;rZP*N%W-BO@OPV}v;0|TA7YF_bf~ZXaDPAl=RujA+85I5y$*WU zo-EPD0o1n8@SMzx%qz)VrV%=ote4)qc)hT9RCF{&tE}VEC%_tnatAzSO8Qo_`clDE z8EWG#)X{JFspH6JWuZf{3qczReA>dfJRw&VH|AfcYvQIcv6Ju9S^A5Vri5#lP%Zk; zfJ^^jw0aL7Hs%K`I`z&bPdmy>?(GMFDLgtC?$`0THe6IPbn$reW*&bv4qvMrS5#U1 z*u}cm*dmFiECLo-BQX@Dh8xhvtZ%akw-$8iwXVTiv%}0i@W(vD=&=IFqKMQHiK{+HVG?o^aa?KgEwDv_8)%l!aL{TclgB-9NT7k{%+9*GK}mDFJj}qlNE>sntK+Ur_t9I3NRS~ zI8Ufv`TA3RVAW;X1o&a5qA|$zo~If`M~X(?;Xx>)Y*LYp1go<5e;R1nn$(3(Z5e{3 z{ob5cSy6{bWMzu)$vuAZ2o#h#g?4XuY3`X7%t)j-T{SdYVp0I+&1o*t=}VWD%GN)e zJ0*5n>H@en@UqW?e4T)Kjvrd7Rw2N|l$)#s8CPPs&)j?1YQSZ!@zp?mFk|RQB9xWQ zhEOK~P0yzD{Rr)J2k!Wfv4(pT9+3aYd;BK<{v$Fn>>W`N5d%CP&*029S^xfPno}#( zj8Ga)8Y@4)8DG+{n9e-%4?`ufi+LXSNWx@Et#_7fP)I_mWPcF%$m*Mem#A#}UQVp^ z6Ae9>Bni&2PmU+88J1$nu`d;2cJF40PpO5?yNcPW>|qa6i?PNtkGw6#Vr@gQ67H0x z`+C!k6G#>LchR#=EtXTkMRcIU1SpfzV9B_MUATWH#bggDVGx7fSW|D1`p`j^9u*;} zle5Ek|4332>$Z&MoJv`R?r@(~(Xr}31Tt#{nQRt}DeTD3y-vJ;>z=#dFV&3)HJ*Y; zC#x!^MrCki`OX)t1htfr3|LFnmB6ts80qKjBw*%~+}s(P;eTKcFqTJRTda!%$a1a} zm;^mJa%FH9>85$IN0uakz#i#c5uxfmHcq+JicLPdp-rCw>v4!k%aF>07$ZMdC-?}le(YLhDx zau>hSvz5t7*Ss!PNiit*CIxvijqm_*^=C%+J!le3^?Q!YK=KBs=Z8@klvF#bf{nWP zWn!cu$-5Ad;xdUi7_4~V68C|%dsT|nwch;d9MM#NaX6z5T;S4h$&V|MwrrptstgXS zl+Bpaeg3c?_&k&3scN-)249CapkJYgPJ-{^;=-J>hYEm8|r#`nP8lcZG?$W&!5>!%X9aMM}|>9@j-{!gJ;^dc~I(F!4b7Lo3~G zaI#o9biaY4L3^;yv0~uD+_>E()?=$o8Cl{S7U$ni2yNbr-#hsbdc&H8UabYeER4|ztsKySv>4^;P 
zgyhOZ<@b(F^=BCkX9Y!}BLaK2RQ3Z!am;++j^u8C(>c>ZGa2#$0qfwK;F!!i;6z4# z&qv^K9N$Pag?-Fdw|Xaw(`u(B;yQk)5rMEX97=GiyhQbp24k|>m3V;Q(}EAGCXX|I zLv+^TGwY4nu8w_5&o6S?@!;5`4`b^q2o0*<*I3fL`a*$hUMaKZ-$db%t@Lc;8du-= z^F4~vp@c)S&D5q#r+AMDLHmuTiQt83+)IjBLn|6Kp$YDV-~!D{r40um7q*AMrh|ae zq8Xj6!4`#;`uid;ne;*}%zha(9xb<=0;1`aa!PjUHQCb0bh{S2bae;l`F*!5Cic@N z3g+Q(xQel{aUb2qy9l3i`~kMv?5nMU%WOS6)mSmUZTiW{1Uw3JB}tuDk((LpFMopn z`lO&}TeJIouAQ7UncD!l-bwlXRnh8;>3f`n%JJdY?=kX!7`Y~m!p35j1|k_T!_x(B zw!{osUb6Z1vC$emxI~td)jc`w7SqH_sVWuJEnsw+iuv}tlK|Ans? zmN5tI$46S{p!D0zT?;3vxq(-vLKZt@W_*yuEiN_5xiOJjLZ|1TSFBa)ia}A4!6qhil+5wC8_#gkvoeG|9W0DxZc67qd(A~D zs>-{O7X6%mtNyL&BSvw`gd_+gI{_4<*8ZjZ>F4@Up~_xVe;7X6)&sKx_q__;G_=FQohr>l3+VeZ8!=+~IYu ziZbt3d%1L>V)6ol!kqpi+~_m@RnDTa43Rp*#QKw6Ws*7Q50jKV{hNZ)Bvw9f*PEGd zb_po;7emgd8b5pLuY`~MnK?^dQ+80#Q&sH`^}g-;YRPc*N6UO5Nl0)xk3lCHVz<@4 zt&~VtvX^haXUK9Wftr-9>3LW9U)S z{zJk!8qVaM#>WlFfGdf?weHFg>)|O0r?YRLN)z)VqR0nE$3-f;5r8Ok-5{n*Mkev};u7~9`co!@^{63{6`=bTw?!K#G9LlNe+*6D6MFNeXOmHRa82)@&l`nH z@Ke9x%fVJL{Eo(ImnHXJVu)TBdbGUJTq;S{JG|Z=9J8Vexv!2xWY+L<4tcLIPLr(351 z-TcTzBdS+VPKYf)VP{1?>HJ6axxaq(e7;R&wa^%7F4EX}L?})rUyLv)3S)JVl4?L5 zye&{h;?am6j$Jjx0m-9su588YJl#{Gd+eh#b*$vbOpfZ(_qTQqNg29#XPp^{~MW|n(0AAWMXW77uFU8w#OZ7n0-WPY=;_8LQyqfJJg zQeM^)?F^Fy=AqM-*kcS>_Yz+iStBG=;jy>6{ewz!H>p`IMz_1j z5$UCKV|$w!WCf>zD8|>+qx3U?Y4GiI$qr@sTQzK<#orLFd+!><sD74>eTj3k#kRNoxSY5x+S@XCj^mCkXfx(LjHNU;Dnl6d z6w)1_|KWDD=#k1G0^kvEnMa8-&MKZg` zqdJWGDr1;x{I>ljMVzLoZtRU{$UuimiMQM0zU)XQ7ekV_nleV@fc@#0Ik(IR+hzFR z+ZgG<-=R5LiQDD5@5L8ood0v9`8sfkOC5OU(d6OQRE|vR+Ej(Ti%SyWG{?r< z?L@Z$##cy3Cup>PB&M^MGE4Vz7uuN^{8HwI-PJO4;coDGNK30o#y}F0SvS;~NF^_; z2FT2wH@ubjii|@&{qNCWuw)5JLH7?R13^;!d3jMgo@suXZKmE`)V!~^m+Pbnj~?yRb2OI>#XNbB)Z8Z zIwX@SDofR9EG*RSe0R&pJgYw?QrMT$94zXuh=p#x4mdOlD>-2|g-H%4>vaBj7FxfO z2X|Ez+@s>u+zqY%qIPMi38X=;%?#Sud+2FvVDUU>t8 z%ACLQh;h;rh_G~ud&c3;xX`L#Jkt#0QhVFd3?rh%ZK-o7Pu~lZFenl8^Q;T1{?iW_ z9-U?rM&^nY5K4u}S-k9_DsE(u$=g$dXGK0dql)qU~=@!U2j?b)TivEebO$^l=P?!*T 
z^HLKDfl5SDzFKq4A!@rZ$6OzmM{{)Y@NR97X5o$MGK2#Ck2H;cH~$go{zMcbMh13! zDp`brPs9t(ku0wlv}y&L$V$3Kp!HJrR0J`=IwuwAu{X{;k+J_)lO^u(@S(D|T5+zG z!RA4*u0X&zgH-8f=7P)0HX2rvm;Y~UgW@H`M*&rrsO{Lch=T<^SrO> zUcFcn{t&kCUf<`Yg`*=PsD{0}1R%TnX>UlapO>q-FzLWw8NE?);>Qt|n^tk&93_^T zFIm_4R(P9xOo7{YX4)ErQ~jXvAyhCZ{U3FscPKd_-)V&+-Sal+}kfTJ3oXbG7B0RU5w5I?TEj56#C8JWBtc70sw zWhd=#z}7xdk4w{mS5kmR|K0_^Eq3!f>9)1Dt|{+BHRsdqQI`gj$G2KrU-(ht^n4xB zANBKETEEB@nMBAOKLwS3g5EuSJvQcj+8t02?<+pztNxtPVvx*oH=~d)Q1YwjUPPn_ zsk@X)=ksN9cl(oZmIg;X0DvRpeU7*}$Z_pfi_*K&_a`bTaCw>fo4_fhv#iK9d4uJ1 zik=;PCw|BSDjh7AzT6D;{!)$xw}54SO6{r^vej>BsInSfabBrh+thjfJINtve48)c zMD91Il{@;orGiUOga1+aoE}%*LcutG%KO+OrbBVNcQCD@FCm)i9##HR-4ZX%0WBf> z?@7R`E6}CDq?ZPqVVnSWUppJLwFfp`wa9*MTYMC1x-u}ehg=rF3^>@-v$w~NmWEk& zuHX44So;Nfezj3?_cVgGaddPzD^oY=zL;^}w%ebm1EbJ1l$(2d8!0K;#<(Fp2Brh~Fp&&E z=a#J72OB>>d4Y2m&X-&|em0v)2?jrZc-1}N9QL_5eF*#Bu49lMLh^D4&RJ5FwEsOU ztsuqVme6NwD=%3NTHV-{0>sR%u6U<1($|85*RXh}rXW3XuUG}BmuEnEz2xpG5Cn5iKZ`@vm z`dd8X>JzcL`($1BU?S$}Y|4)x!?8{p8l5u3dHMN<4p=(x8@bB;EXLEUL-b?RjHz3diej)FNk?EPEhm8DnavG3rf}rm?KF%%oK2)qwemOGo zdY{k2M!ymjYpixdaXA87Dt^X?lvVC3s0=6Pxr@za6cvwWfoE-8yhg!_j<{KT28pw!k_wLxK&|Z4E=SQNhEF_zRjG5qSx>UGs_;nRm2z zgBO<>M5;(Z;%olh`I|cJXYb@PvFmZUC3eBq<+ChqSyrYfP?24z-@bPX3b$Uj9QkOS z!@@<`H&wx6`a(Nq^{IR5lX=&7LWy#@%f4Ha0x-L{>Ww%nO3#tCii0dbCXGT`zHMph5Mv+59*<_{Uj#Z7Tm{NM_(2@ z>x-E^+IM?>Ac(vd$3%N_pX%IKI?T>P-aZ)ra2he08%iB>?@P)gzuYZFp5`>MSeobm zsJwpO_~XJ&cKGbu;zsD>oA=dO*6)KTI7;=H7zZR`J32eP&7YCSyb@Gczu&zQzp4wvd;|S(|`Rt7~f&rgwyI5e@Vj1$*5|T*+e|_(SL68uOgTw8UUR(^Oyo z7kB4*W)sJ^-eC%-OywtRDIM`5Ul=r&SxQe?nD?T>7L)H7MKD3tNgh4+rTcuFdFe>$ z$$I)*lb5QWGy6mYB0j2(k#AWAE`L+ovilwrqwM>?So-#OrrZDjZMNAQw>eFQjUhQx z4iR(SQjruwW)l@TH>t%o$1x0vLMRr>XzjL@Aj<<7|Ax9;Ef|2?kj zwd-}gPS4l#dc89qmay?3XY*SGS5In=@FK-_UWq9wfx}mvQrH`6y`&NG{skX5dVU>P z;Zkp@{#b{%n20MEdQ!em?NIfLsMdc|5pIA~bGT=~q)?l!nA!JwG>dl!KI!>|Ivi`eH|L?DNc?m_ zC&braCm(B*3uZY5&6EqeLve4fK0i`!SLmkx3id&Ody{{ex`4G7!X_`P+5?j((#fVB z(2L?M{O5#ugpDQ@RnIOHT9-y&+g@}C{}2I6`@H)2{UfA)ryfnA;H|n2E=#KZRmpX4 
zTGYo1ff0`o-ex+tspdnT3-a_cP1Au?b(@RJ zDvxYjoeVOB0)Zg1uoSvc@Xe1DZeq!EDM7dgPESwYt>BF?8gJw*WT8@FtS~TC9`ZjT zhFs@p=>@&)seivaBN3`NI8JU(m{Upq2HvZr2=z}P9!BiIngRyvoYU_Pfx{UW9;R+z zcDF(WrDgL7i?Ep4?k756YYZ!zp=)p_k0idmHMbDSNdGk#dOSTSYeAXLd~S^09q28G zkx74HGpkrFXzGeN3F($K-*wM!IXM9G;O;?DIUqZ;G8MxuU zlPjcg1_-URY;`hq>WCEbtMnFkvd2!-*8UOmupn_!ZnCdmg8U)5GOL=5vxF@jNwPfG z9aDD01;nsER+|gPNb+2gG%<`6g{IJuG(+vlSOG!{q*cPH;2t4tvId~Hy*7qr?>jY7 z8{Upf%QIidFSVLkdaH3o@k2(trHI5eiTN zrU2xXH_UpX{-Jg!XGGE2_&~*+wVqoI1tOOHAPpCK8S&+;Zno*-ps-nkY7>c%nmu~R zplE}+TdE@)kJ8wU&=O4T;Eu{{bUlA{~>d)gj`` zX(YfoYrcyA67gC zG5EZ}u<>_9Sl_J$IB_uWTpLP@!{q}rWQ=rtwcQPXgPQUzpb&k-NiCtz^KQSD;_hus zT{*(wSf5NPa{eU2KuJ=@pkf#}QrO)UY>%W`3#GGY*F|qtF1eGmR-lpW?xv(EvS0Bv zob_TD5JXL2T8VVxx+9>e+=5sOK}>AM`Xhe6{z&|D?v1G3DTKqX@VIpW1YpwGr5SoU z=V=*dA`H{YIefe72+M-HB+^)C7gvZTbVRz{@+g2>+5(AQV+iTUlzHr-BDZ8<9L|{h zgv7vpB4dOqty|+1uij2*HlmfddBweIc54E-0sYGef?SG*!})!it)>1SCff0aR~ed$ zb=7jK=bfy(<$EZHgon3Lyk$3h;>&Tm$-y&H*U5c@_7GqSJk3k%hy0xvU5`fw_HYur z>i%5v`Zh+Cx*=|<`QH1kcb8S$3Wt~91-cX{PEsR<1~hqNa=H&+ldk?l{MTOHDoxgdRhj*8PUHwKJLOl4rs*am{e#EuZWWn2lY}SetNMiA0 zshU;}K(&M!8&iipACwWgkD?jZJ?;o+_!BfGiZ?f=ohKB=sk>0R<5TiaTMaB>_j|?* za%F!)fC_Z)Qi1y3RLk7+;~9GH7z4NniBZdv*4+Zje)fo)sA@m}Naam4JVep!jv**%^A#J)aN8mO@Qx)R@%Gb*AR> zgc;e@=~&acRtv(c;IC=Zm}a#m_PAXn85XAybN-T8%U!%D9D&IdwH@Yl? 
z-2N#TB=cJH4Q71(60!t*WF=-M$H~BE8|%)6hR&xk@#}||a-Ae6j{N7z4V#TRJI6C+ z>w-diKCQ}O>YwEaU6+j3{Py!ASdgh~6Rn9Q<7B(aqbPfr8*?m}ub!arz##e0k%!2( z;rqAtsgpT~bnF#yCb1A{&G}cpVgXq$PRfn0*)~(nz6Cb4gt3w9Q}qlr-?twPO9GM< zd}WWHy~>!-b)BdVbcOVrek=>AC8)g)8@sBV4=&5FUdq)@p(s?(%1uju?H=M+3lg&; zWEsd^*^c;tPm^~E=N9ZX?s+Otm|HkK{~r7mpXMe`l*C9a%S(JHOtF{M)uWZ$rZ&R!6B$dg30|R_JdjLqp4X;!Nu|sKY%}6V zRdld1_MmF&HK|R<9rX(NmSM0JY)8L&&L=69t+DRop39G+KZDihj)4L>nqmIqFxSu8`h?~V-_pBWRfte~s;X3W-xTnhr9r>=| zI$4(`?sSkRz33<`HR^lty@Qi~JUz2OG0ykXvl6YT5oO#E*FT{7-`+rhg_!k}njYi( zEK+|}_mcl8-y!RSM~M6zd1vEu)%#q@$nA|qPTJs$3wK_BusA`2$zmo}t_@Ir3upVq zTmjm-1WbFjGB_@EVQVt$6*hO82nT%~^%{s=_*wiPO9YfeA$Y4Nx|6VIv@(`40Y<4k zPJ)5*v-p?YdS>RsD>NkXvb!cK(E~puv||S}00aUS3qnsTJwPss9QA>@BL2qt)8HNF z)!y_h_0?C(=B$Yo`&ECvIC$`F7FNsx{DU~$_xF5yD|}Pt9&h!L!+xhtVVTb?6~??! zdBYxLiE0a637Ew@jtZ|hvaI0f&V&rWrt<%o!XNqi9ne0xFSC?G;!seU^oC2#RPhPu zi`j~tn)!W*Z2s^0``R|Yx`wuf%qE(N=+?3@9W24oFsqZ$jhRin{is25yVX^M_virP z*k#qR+>f3Ii62(~^^)l@r>R8ij0XlZ;acq(9`$|KEXJ&`Q*$od!8Z4BNu0HAWOv7Es&zc#y^)nc@9iIDA+Ssz zATaWBnXgV41)PN|1v~Q%B)z6*#1?SR@M<#SLQ|fuW>yq4#D$Zc3!LI07b4b6?4o7? 
zn8dTOzXzUf=f@`PsXk_%t?CnwR(Cg&$!JNS_HbtYq%-|ff=dr%5?&s{hJ6M z%XwvO!Yn@@Ajox+?<87MxZjYt9~*dcG_9+x=9% zv@T2I*cQU`PRE!LD=l}*$b#JA)cA4(aQpT3uJKl_JNpU;9d*Hm8jE=Aa-~MIPKJm) zr>|qLk#OgQcwWbcuj87Z6}qNNLTSwyp_8K`3U-iz~dY`8Z+6xTGxbr zZ1fQDRRSEJd_q@oF|xEir=ddeY%EY*6XnmXgd){ea?c_z^UPocR`zPY(fS4qE48q- z6OFUpN{2CKJCM$T+sU)omhLhl9=_;eQH5h+~|}cw&LtA@lm`Q*VNTE^+VfF-GQ`4$n1)oagVnFc(z=< z^Bw8YjTOn6n-H`CiDV7;ktGzL=kbP&t52;p^Wwj2bT$C?p!5l~UIqNljlY#R>rqk+ z33x8^6YgH>lmiBjE6r9)-4 zE5GxQ3LuQ3=#<>Djcl1AKbcwZdgjoqQYH)-msn!HTyNpalM_Sjcc(yxSoH#Y&(*%{ z4e-u1=^2@f5+{mwlW>c6@0$^g-$uC47xyhL!)2+D z65#U09Y%8{&w5qaXXuS>&!iO{$^eIihH#38f8;2Dh}XN7|73VZNm1m^az~KPSPt^` z6__2=OU`IB+Bi?~`o7nS=jQ$h|FgTcoDekykUNvA+yU2SrSjhmB0;XPuifgitC7(+ z_6W;l;~~QvL&%`uh9-qS+m|c3=C6Ubs}(DY>gg&^0VlKHKB{0@OkRtrQh$H#R8eL} z<}rE&zxbn?kOkS(QL0}tYwKhEpOL61yYu7RUVwZNmZn9Ce%n2W{k?QZNc6$R+TKu@1bASgdfGv$t zf*UFB&FrH7*v-=vf9_X8$UtnXNy^T`w8pf1hMdFJ5N9KKNN_VuaU*|#yD=)k1ma`z>3 z-e#QRFV^1aZzvn;S^5jeIXL;>btdQF?9& zYd&eCeE&FEw_mUAxl{mLF`j5e#lN&nc6T7!cqu-GH<{sca|+OWOV@VHfk!F3@$UFs zTHk$Lu}8dt>+L(w;V`i^sp_?g0^7P(E2Wn{nuXP^HraDT)IM-{^M8qz?3xLyJ9oX= z#vRNxTx1q|9P1=FpXn)YLu|BZT31W+bfM4pN_TXd(W@U@p8c4MT9tF|$!|Tau&`K~ zMV?$wWwvlnxcbFMeY*5I>ISJx7PGtoF2Hx>Skuq|*B1tN80a1Df=J?~B-<@ub6X$K zTbyIS{yRJWhpy@HaDwDP_^%#hVY~Pw7B(u}xs$t5fhof`=0D}$C78oX7Wn!dWIg z5rm>B6sO+#k?iW(QvKOnRm8p#GxfLm=A){qU!1d;(WVHkY5a)FJssJV#foAZ9-E+| zb_+lAf6X-;fs*<6vo#t#+3dYS%sOcoc3vSST>x8E&MM8@IuSYbOaSu?3HeQ?sTwz! 
zb-dUfTDD=8jmIauhXma(BAK(k?ZKL3R7#N*X?J+j)!JFvbzL#a*fM0+EHZBRb~SP>u|SbGECAIbBdJ1bCqe<{?+YZOK@8}l0hhDr9e=wxl{ zm^YI$Njn!z%IgEF`Iz#YWDXXyRw3xlXq5p(@km?7-}x&l_vMaa-;lq?%%CRPMO#Sc zNn80*)_Qbkx>JX@Qs=dhtP(9&MJH-=WM?L6mELOvz2s&y`r%scMJfJ~$AH8p&DeM$ zy)cHJU;hmzfKVtD_=?zH;Ia?F-`Kr6IY0Xxll){rer%vk=$4$jeEWxY+CT zzbYH~G1~~x5G%X-gd&;8BtJ8rXG5>`yYa%}yY}p|>2mSTt`aIw2CjB%@ooe(vE%F^ zQ5y!#h^Urbdv>mqvQ{n8QIv2VD69Jcuhc5FmKY*igcu|%HUQz)QJtYgB>->h%yZ!0 z%=2uw|-CBL_Elg=n;h|RwMs@!c{u2i;#dXCETBsGa}7CPls{U z<%k^~MnNzja)7CxdpTVOvRX|bw7~SOU-fU^esPfMkI4o_wx;vW|Mo915#GJUs1w=I zayO86!lS^_&W0pef!Yb(VCxGk7mYQmKPpBxMy44ZXw)fQSXNmW`QDb-)?hY?@O4B% zk+@%CPvM=ctl8%~>gN_IOI)K=y7~Wv_$sLp)g~_@q6(DUdEoY)JO6GMWIxXFlm%h@ zJkI}4joGro$>)!#b`$0L)08U>RXnm8}g!jZ6(6 zP8>pTUL<^GyCxZ6>i^NkgM5`V=cFomiXp&HTwq^G78j0R986CFw`fRA0#Mj&C%Z{8 zfWuY;Z3i;~RB{L{A*$2HLD&c7UM7||zJ$u&sW%Td=D)**{L{74T138ij@D@%F77&5 zW07#vn+A@U)ps@`>_zZ@b+ziy8S|ozpx0>q6qP7=%vY&Auy9z-9F{^uZOL)+4ldu) zp;q!=iD{XP>j%~^s8Pyvqt^J2bTA!S)7nnBUlg&Rn^bdoi=gDghw$T`!p4ZFO{zUj z2xyC%+`|KNPF#uZKEmX>KTWgK-I;YriozGg33{;xg1vuH>TZd21fe}SIihQ`l$nF< zWPI(M!Gdo4?Vtj`gL@uM9Z3743wzM-*CvqNyoFx3kkX`_dp)k5I~3$--#!rW0tl;X z)I&jrGR}>CUmAVQy_=M>l?OO|cvnYJ=_GX^f)@up-R8P4db;gK9p>8$EPQu3f}`MT zz+^POLu#J$;{2lq-@M;^i(g~8UWdGha>UVk{4y%n6Z*cS2e7OnU@l-$;4RM3o>*%N zjox;&Ou>$T*j24oZaSSaFOm+F;mjL*ayb)sx!XZzOLxGE2|l|Zn@kpO>AkDWF|pKs zSbi{Tf1scIzXwq{-y$C(9c z_|0Gfo|jy12qN+R#q;+5op%?p>wHbXr1nHAU26{Z;M9MhIZ77muj4)2THV{(_;s(x zECM3~ABrDp%$bk6hF`>!@Jm92{yo;5o?qzF2t5kyJ{Z?(7_O!fF?VjBIgckX!Mf~x zsd{v-leh1kXD9_fiE;&?e8ByGbDFT-wMHtCqW%Ma6mcx`a?^ofKU(Ouug+EzC@t_V zBi}A)m+3F08_!wd^ua(CrIOs49+CyyZd2gM(iIke6F^GR7a~^e&RUyHB~LvTVB>W+ z?s|qGewb0rR{|d^?q^7Ele%O`vs~{HYk>1o5RdPOVyo=89f|^lfc4usl{qESli!APHcxw-ygU1JDlY0SobR*!7vwfDzB-JsJ1b1UG_Fu=%D!xW}0J*;Nmk-mn zbPXs15eCHGL*fFCD#lX0jeJdSFwtef3bl2km3MTyq*W3{(W4@=3*e3M zdiwa7WDAiz+^H4N{vg%7CL?wa3DjAdWRr0NQRTV3`}Wgc=P=h@3_}k`w3PpmgQa}? 
zsC9`VvPRroId$R+=3L46E33GItOot%O%Skz zAyt1Kv#hj`oq^XpPrW$tZbd8N95@+>7+fY0-K`l@iHyeiJs{GK*VsNVBSAizlxa-$c`VpL7?w_3Zp zRqU$yMl0XeqipP`vZtt}8A#|mx#$TnI0*p(rVED`B0!0o8(_xlJ>h{bQx8`y-9Ppq zI{35E+pv+p=7RwQ)ZQ6l&ex`wM^QCN?lytqE=O(#N)>e_!2YWhpeQYAfyZJhwyw-oV}I~LBYZbAK5;{=L9CDcT+k;Vww_1Q{x zg;1^)>fGxpb+vz9;?9dM{&(q#lm2gkaqC{{B-!z)eANis^^1N@tV>~_K!Lamt1r6H zrl}B$kD-^2H<~?lShKZXs6u-y8Qka^m+CGQMO+DBZ_CP8npncekP)Yn&j=3C5?I}b z(+p*U5NvEh!j?IED#e{eL95rj%9@AOollxxsMiI(al)f@Tg*X z@d6yCxUSb9Q6h6sJwr#AU}|R)>aesAmDq3Qo*<{5TWYIo29W zL1wb7ke<>A$6PDBM8xU%S{JSQ0=#`zXmr0UJE8*GW_!0JiNL%=N{p$Voy22FyOcIb zuIc~PG+y6^#<6?b0NVW8-$$^JJOp~;G{7Ey>2)Y|eJGqi1o!bFUXZ#_lT;P#DTaoOQ#PxZQciC2k&!Wb~Q=)ea9%Ykp}N~Q6G^-6yo@2@{6 zn=Q9-^SJqTE7@qJEB9M);$xAQU?f%h{9^A%&{3IuI$KG|pV}=5*-(~~3@BW0Nsf!p z@!ZY_uD$|elCZU8DtMr=T%LXV$Im2Lqo=m&h|F!i^aJ_ZMr%-s=A;q1WqBcUjdGfs zu(IG5IIt3i)H1~T2z8Wen2)(3%e}Tl$m36OrPZ zS#+3J_=pb6lVjvlHnQ-Ru-<oZyXW*H9m3ZJd2&86pIR5l9nG1PpaVW!A= zMv68GC~#L-l~@(XXX;wgYA#u4Dgh~&zrrm|vn>1y+py>>PT61m)I;KFN=YvFaoKR6 zUFGgcz@X%<8%?qn-H4ljNnm)AuQME}pzCzK*qjh;vS9OID|#FYJ$c!%=h%=-1pwIW znElpg z9?z|W1XEugn5nDy`+Qws@OaZVvT3tKKffv+VwGq=gD zGx2Tc<@!twfy8s8sW&pjfu*vZJ4`W=9>xFaOXnS?b2Bg9W8;&P4_z{{EO0th^klh5 zd6nrU|66JQ)Wqqa1Fh6IAJZQ=Ba$bU$WAmr7xDw{a6qW>GOmL)j;Yvsin(9p@at%% zuiNye-Kk*B_uc9I44M^NXAypYK-j!ha$4MSt3}pnJ)@oP*nI+NHQKB4sljZVIfaf` z*4lyASyj+{=0S2PcrjgT^Wr2bA*?bh^v&msT)E}rj}TwfGuY?au*UPu_?7R0+(|~_ zT!(wd<;qMqF{>D7STFw+KyFFccqLP+7)u7d{(+Q4{ z_5L34emfZoC0h5+*b&eNwP`W?QU zXAT8U)7$ym)3)WvM=ZC?=nBD$1b?Lf#*MfHu;fvr9`DH|uCs=gil_XoY_ zO7;D!m$C_n2iyPWaNpAO2WxQe7hAZ`R++|F ziKIA*XigiL=}e+1br1rh3ltanF{OErC#_a3xvJT(MW}U;EFTlG5(Gqn$`A%VD-L^T z#G%^1RpCst7(wWcZ;wcl2}688=Hnais1%$kqx3M8RX27)$O^kNkDvuH<+qx&b8vQK-e`=@9< zuPB;yw1%?PDV&F~J}Y{d$>EdoDi?93MHFezBeJkQpg%(O3WY5kr zpWbmmc9?riH3J&EEdeR(rHSNW=_R-iEvLbgf#a54M{eD{z{Gmxy1e^{Ofw!8+zQRbFE0s?M>6F-OPC86Q zPsL3!V&qYgh136ocR+w9Popj8F6NL5@0o{+K@2%A+6v(NbTlXc*(V2wmmCNWZ~p6$ zqaM=|1@&ji$xfOM&Adz{4r?NA*x&R7>4 zC%>g%KcM$E>r>gUsr7xg7aDeg)iW9uDhj3`&U`%yl8^S#Ek2HB8zsBz_u1ahQ?7|t 
zID-o>6Y6&`6Uzbu>7tdEuA2@&s-)8 z{Fi*l?bFB?Mgi|pwS4%vNCyM@Oyx9_A!cP&G9GxB5 z`^AfFV~N^smyWA`zgRKztK$TfHA1CPJuk-mCB@n`JwfgQHIr0d%XH1TJwHJ=(Xe8gD5cIRD-%=H1epTP6% z%9Ad7yB$AbEoC!!UYvRF-~D*U?bMxb=5JLdcpn6`dEqSvZs8_cU3M}=CPEq>u0T>9 ze-`|Ga+oE4RJ_hdrR*BbkW1mf$*Z@Z+nO~sZV1ilRjaX#* ze8*4MsHk$!vSCXw>YTa-V{Y> z?xgSgz4d+4q?K#V-#;Vfs_6N_V6x6Edbb`MG06v(+-fwNDtuVbB&}1AllkXdK|VnB zDDVToAl2US!=xE{$C)THqv_!dZU=D{5Y5ri++&~*BP2s<&Qg_nUwx&jjmgKeJF%S& z#VtYCl=hP8Ti~5T@-oh+5$4LoPkeN3_tbUN81osDI9i#Za_zQYTTKm48J)BP>^g09 zVE>i^&9&{{1mPPiXJiwW27RgT^xkbZj$7LA7wn(aRjxU$`Ei#4=Ig_f99g^WwJpnF zyjy-&z8>4(dH+Vhgx##}_XOy8zNDJq^;FnQTEr$!`$VsQ-k2Cr{HpXOKt@lh4M{JD z(uen?TsYf@m-+tppmv-Y*?t{Lqr9K^PDVT>3i!t*I?mHXn^Tm_2sA3Fe9NjyfDyVT zCkSy(VnJ8R!ZPjmN3T39S$50pYU_!Ws;q@A>y1xdz7sWvJ?~hj$TQOal-%7X70OCd(10R;+#RC?9%v&&E~$d-gaYaZX9!_y8hZ z0#DF@^~S8_x)iz&kmL)I(m2OfW7EdN{YWjoQW8{v1EZ~c6R(D(P4-F{?2lqk=}Vp$iXE2LMm_(CLcxXUdOhvX3U#%E%>J*6-Cb zcqVTYPs^n*IA_NCO7-B6a4KZ04!;cLz_ib_(R z-I=m;z_zXXDZ(br2(I+n-HMq?u9@hTsD$kiJM`J5y^+I>6^nNHSjq)cuWwM6Jv1RX|+b1dn-gBR( zDn^BIfuI-V29{`G1B?Mm>R_t^@{yBhG{t)2j;zQ>69$)vWP0&{tjmse6Ah+Y@8Z<{ zg9384AXVG}KXwE{tEqfE2`WU^$|XtMQamI~tSAJ5CZYk1Oh-*HsBU(A_79VDJ+FmR z)G{}PK*+&W zqxOVa#g`Zcm+6JUjbuIVpU&E9eyN znZVAv8~;^ix)bwr4Yu~+JhaE-s9j=6tJ3(j>)o+m)M_9bYiCs4Y@B`YCt&rs^M)!i zu8q&Q0M|41NZLNCwqRoEGoll*;FqRcNk-jSHXoacOd|x-Ycp&g`4}uV-_-o zK&b=*Q+z~zC30PlTLJxMAD&9+Olcyp>RxF*dg*3g``CXpBQ#I|;m!QUv|kc`vjXT5 zUaVO%%k63YSb4k!J_yjb|B}c*oP$t8*Z&S`OUxaPk~&_xMQt>zgmB(g)uM642Kl`2 zHI9vn+ZQ6r{S)FR%}p47Ba*S#g)$7N+FBp1%Tj+727!3Cfa?6F?Z8$oMnUCTJ7u}W z`z6_kgZIa}jtw9GyCSh=_V-`EE^n*5eJu8&fyKvDDu2%zA;CJ>Q1VP!PLHXKAjGo* z$sQ-ju$5u{N0HSmGmFvEMuY zKeoeG^wujkjeoD8*qMNezp&rH%L6)8PAFrrhS%#Ay}~(ULy@a0y)#DmxsjW9tJ;yU znyv2c0gA9{-w|Gv)v2Iw{6-We+V)7Y`7r(6xGzWs2F(h>MQ$qz) z;+K=_;YKpGeWg_$>^V>VHd;KGrIFR~c!uS5Hn|tldJy$+Y(-FC6&({-fZW z#|3n`_X^Q=s~3m4I_{f6%kEfl7kVmx3NW>;Ct>xce%j|HDZ8?z5nKyZZ9DY63|B&) zCnX;`r<4pFAY~3XjT1b|2OaTt9hJCD{WhvDKPMx)PsC2w5*WoJ*cV_#*6ny-p7&fOAb^7OlPgNZGo+sQ 
zuO5-jre`MP@L3%sy)1{!zH$130RCT*xk(KAUe?68f_FdA_3nI=zS~y&>@z6Cf(WuQ zTX$jL`Bna5GchRPPBsaH)xNB%C-tc9+_Ssc!{3u5sU&wWpDdga10FTF*_;*T%VHaK zLOTb2=fAj z#n#Eaz2%n77#|GO^#N6G6T?W7zFASA(1j{LJvS@+uO3tu&WY#~WpN^WFEv>5$ zhz4KF;UG7|J<1Bh1V*T+znAm|xLIdfz1RP}60qM!#eo!fy*IA$rt8 z13bfF>O$l7yh!RTDX3rfm&S8rPx!j`CHE`m_+2N3JD4nN$r|(-s2$|m;Fg+SDoVvW zNbN`(KyGW@xj&Gdfsti1d;z>VHBOELpQGH7tzv8c{5v$Wb7?{L$vpyVW0RPric}-E zV3qM{0w51v)09PLw)uPjJR2!(U8oEQVQipOM~WO-7q1%ZDO1t+k{9dR6t+rI*Yh@P zGB=AU;M=~BZ62kZ;UZf4_b5>hb?T;d`##fn!1lwlJ~qEQbe%dfc5Yk0|AF8aLw9%X zm@h+pPr?U^S#AjVkjOa~;X$#gP$fe(H>Bgmz2tAr7H5(KWU^gr1UIbc27|C0iXiVrye*k2BVt)!~fif z;w4Yp?${BwR=~Tz2@Beo+OI&&^`Tc)AGfHumwc7Oz>>;{U20Ww+CM61{+SAUar1NK zaqkt!23&T8 zpfHFifjyS0ceww6vT>xGoQJso;@{42J|JE%{MW|_RXmy98Z3D`$7L)u_SxTu?fDT} z?i4>N+_5f-R5El$z2}8VYQ^QRk#`AQbkQq;9Lgpwck^$5pKT}f?N9e~~VXC)ia^R`BQ z$#eCF;kC_<>VW8A02PpJlN{Yg+KlV(T0Yrm)Wi|>wGgf==zth@di!^<07G#Op&)g1 z)iDUrE}XQgTa~ulqp0!Y$2pdf`$1l4yiU2^;GSyTP9MOTo2|;q%J07ORilGZ4s=TV!POLra!_mMsVe@te0hUA*-l)d8LioKbRmu#e-x(~j40ruc%~TK5G3 zo<6L)+0Z~xgNyY}zDW0+K&%g+d~8+}k){yY*g>UVa0J*>v+=Qw;6ORH0h79i6k=+L z@S77EJwj!j#M$(3!M|wvsP?;hI7zMdibwc?#201ymR1$t@}EZ-S^Ob07##mTc8}@| zi!0t;@l$dsF~!q)s%m~Ou`9Fh+sP$`j{Y`U;RB)KBZ?a>{TnZq&UW-&0G3BDZLbh> z@LQ9Ct0ZwU>^|@t9?^rlPCKvj9=J{>D{vQdmDEhIA%L4fkLL7{QQ?@2I{zHyrC@B( zybFfl0eGt1brfL^t-!4M%3fA4Q1Kb6XTm*AID$4qRa$|45v=$A|_O{6CFrmO|)kxSj zyK|1;i_M<(V=}%z*mv@oZzrTfKJ@liD_MIre!QlU=y&6569it>#_JI(#<_owGVz^s zRu45r*yuSa{X*;tu@l?K+VMEw$-!mL(exdo#ps>{{a@7H1fFl&wg(El$z$mzcBVt5 zE|eCdA{vgx19Em^q%Q7xACZ810mjfs2spsF851bZ9@Q^B*aKFpRuKykqPa*?%OrvePPDoB~?9?3V8OO20-GDvZ zZ5^d8jeiJoNd~P{P7;2U?dRi@x5pmvmzUO?H+aB=UJcJsxLG6SW?1}R)N5cJnl;XP zj}ZWD-{(Lhy*@t$wgsLS#Lj_xiMd|POLCoMUimo+x)Kxm0I7ZVA09kLMloe13LrFy z$NscdWm;^34gxq=g$6VE>HHm$jajaxb@n{3r;vRSrui z4m&I3qz!#oVn5l(p}0tLq@r7K*hYQYS*c|!1SQzJZ8bk+c%F|O|4MKWn{{w*O}-ac zOmmiR?<2_l9;x|yr(N-WP=R9m@qyg;GWF!Qk$s!39;z+yhaN>Kk}+TNzO{bh_*zDt zz>gtL(Yv_eo_(elN2@-bBy4c}_Bsc-Z#wk(+jOWR|W2 z@hqqS8nOE&yy22srJ31ns!?7WdggFAYWE7+vW*4;Q>&?mz(J9`*p^*aN(KM>P>b1> 
zbS&-jra5B}QO01LRb2g$Kq^c?;T9AMwM zDJeqO0{_%=K%%``4TxUddXA-E6Fr21h&I7nR!bi8$L7GZ#Qzd%8RxBgGp)6CEnwWn zRjxC)DS+2?5fP8Bl~3>7{YJ|?=I_A-c~_rp=TCaxIdwAx%Um6iKeeJao_oZut7C}2RjVmFI5+!9y+Dy|8}f`U@RSwoLhEHnPFG=?R3VOiJSxB0ChmBjTr20K zM+puZ(&=! z)j4~CRI2!Em+j2FKgZWLKOtzNmS`?}VgH9#%8!c2xZvGY@IvfDcAbi60_o+MZ~4O# z=lJ|7wg(EX@7qV@6u>*~4-LyMo9mH2Rq3~l47GZG8WAl}(3yEeoLc*a!Cgev7rb6y z=#N}w7u{l*2!d%x9`2#*?7&$vDF9d4uLiT9Q+^4}Im75n=q-mVA>yvu?xMR`*^;S) z?`~ggRETZHeHC1wWIkifc2EXIbMOobZy#@ESXudVFlrly&ekrN-tG;9;`uAPKo>CreUsRVHR%Jb> zn`q8fT>EXD309nQ`gKlIk9lMP%IH_KJVZ6{nT||*Qx8oail*Xl|84fEs!(shYG3~L2Ng_}D`bDcq3jN0WL!^4xz0~y&J9>MroQ)C0s4qp240?NBz0jdo3wr^1xpp2SHV)nifx3jz(-xjML=N_0>gn( zI&qY4(IJ3o9ki}MiD+*rG870r1uTTx7@#z%WfI}XL&y|g4g(2+O}2}V^+-VDfU}Je z5MU#=rzDzUB=HXkBsc;s(wPa)6dgigUC(@77cr!Y^9frQRr&ahA^S|_S2(_?v=~s% zlAu)!A;XzxKO!h%eVfPqX;1PC$%>o|w#G9A0>&vajE7ZdONkmNIUP|2{ObR5PJE0? zh`KWXf~iJJQDQj#@ia9$Z1DHV3!5yqZFQ>xo?oi0?Bb zRpNSDEVCgu$Q)}UZ1*FUoiekN{;9D1v~Qh4gONTiwMzAI^G$o0`h%XIf^-jt@VCFs zrw+#abqBXFMlu02GY~XbkO46;LxvD0C)66Y^jjjzCMDCJMeM+`2#P)=N}l$D>LP2uK#k?&bXscMn5h`UOCQa5g@@ zKt4uvjFC8Jvqh^$Ha51af_!zrD@D|qcg$NKTTDc8ni5Rwyn$IVufoD2^9k<6i(%Rr zFapa6ogn#zIs~));j;{Pg-v`v8Zk_~bL1zQD)_T!*o=a6SU)ic=?I@EN`rs!Icy=Y z)j=Hr1xBPihz4-gcz%jI9|@7E?&HYGH1Z$?J6)ec%;L37vA82XIw3#Prd;|mWCY`5 zvNZLJmlUbBM;G#e!#E+ zf^I54{S1>(@aKD^&Venx1p_pgereXt?z4MB044F}4P^|KHJ9OKeZO7wyA!#_2z;}1 zbSJ$uxj? 
zh3X#v_6t-*j)aNg2;J>hXGHrDesl|Y9(wEI?_I@Hzy9<6z?|dF02)fdojyFaX{Vhd z#<2zW+C9WtA6l0z*iuMbm}Y^id+#6^CcSr5_WXS;vLm?0i0gK*qF@2l#OD~?c<7)y zPj``U^poZBV&Fr2u;8X;APGOw_eRGy3#mDGlIQ@~Xhp~XUeh~-NhyB|0YA8g5lnfL zBpi#MCFDFB%`|{gy=MfSjsx@p+%5jLn`&S%Yvgu9}`n zwm3pUB@o1ddw`q27P~~J@c=X}(l&k|fLs7|sfHx|?BkBVkL=iQjDQcp2FV)lsISM; zjB)&o&H6|QA*xV_fB!lv4dn6OAU!%s3Q*u?dDxcfMpz(2@;#Jw!L_a_G zUu)WM`J&lL6%QDS`))n9HDKhDbYJT?Fq3K6Ee6&72+n+1(&52Ebm~gEz)94LcZHQ* zK1K=*KO$!hC3I~Bv4+c%f=-SGHjvsw6&>G*uEb1;5Hy4S>k9yudke7}xb%7fok{tl z`%T)p(^eH@%i$g?ZQv|YlnT@ps~6PJD~iQkVkU&|)Wx-*kp{W?Xv*p(woCU_iRVfY z=_PbNu)P=21ma>E4^UMThAWd9ybd5^`VYtZ&;6G`sxyWacO zmC!j2lQ>usM3i5i{?jdy-cU~qK{BFk!I3u1+3*|6{z}k%;-E~W;%w-fix_MR#}VFN z;d>JQy%qf~=TPAFaZ`t0+2g-+asLM930?)byYgS)#!hNA+wOfWbOVn zZ@pWCPl`i{Iy)4ebBV^j(C6|NGwWSSd4QziS6(5yD`+5ZS<~$tF0`-O4sYpt+&Ofnl*Vn*-}!Uhn@rs;MPR1e=F%yOK@Al5Q(NCPthq!e9UccUpj zjMpNC^Tu(%uqgf6(t6NRX4rxpL#rkr^|ffhawWZ=0h|2a%zQv2U%U@mZ^erBKn-?vzWXnFs6(!74|SQzYb zVik)=YJ>ZJ!G0>-pB4U-q!En)qP^^!2#s5$QqYKVy+q;|R#Yv$+|M{2>xQ!kMC7y8 z{9E2xy|@TekB{Sq?-mf)_SSGu>br{@Qrj$nh&A!WD$3{Zo3NzMd+Ut6>Ew(c7mml? 
zl(I__Dxk6MYFovJ^ob)VD_{b{`;i(H5;Y+sgA&Q_)O8L?kA={Q+l7IOfsFz|_u6mC zCJVMvT`|$5Bm!o+jrtN;?H{id^U45a$`uwxS&3%lLWB>DJMo0l=joo;``kM!@M}2d z8e)^mXC$hk94M~<08He}8p#DpVnELS0zm-dWAWAqwlB2wGwQk#H@yDnJp`mpx0ep= z7;rd>mB`0gb$P#WzJ4{L#iZ4$1l^-W2A>f~G5`VUmN7Jjo(~*hzoH2fmCc`h<2{0j z41HzSpojsL{fs8sACwW;=P%GUWbcXRzVD3zYgC`->ttLzeSHua|9q36=tU#G9kGl+ zU8isgI@`$o_1vXdsd_v^B#;-4`KMJBYmNRWDDzc=)lMh$Jj@%rE3m3HNZ6 z{*I#Zz0oMH7lSp0SE3ScH=)TgX5>P4mf(rfu!p76AyHTYoQv7>N$WH2+Z_(WW>EP$ z^TlKXS>Ig~vn~E|oMaU}-9;jRUW0-OO}kMr89ZSc0(2wbVjxGSNc^zKgbD78ETy;H zK<1sFV_@BBD7#PpbP6?lqm%aHxZ4JTW74}3zeFH0hWn~Bm0(W8$C1g zDgAu zO}-Z5oWf-GxZRFKmInwEZaiPPVbi^p-yx({Fq4pp-nO^`btz=)6z zaq3j6oa9dE)&S63yb>a?pE`A}cc12kgyOOjsKQ^nImP@cCHp^11o}nFFO$lWcfX_Z zu$aJAapJ;a_d~y>xnQY0Efe%yv=m1XKF29X&lgs_zWzj@7&FMkmXA(O zck_#$BSLgmNLZK?Bo7QaNLw1r>w?UKIU>Wssum=}h)q^JkIyj}T44$tNq^pV(})jU zuw>irYJ~S6UvhDFLz{jY1}t*pL-s>J`Ez%5<}`{R7219t{{?Pzgt06*>~8&u_X4d( z;|S&fM*i6?M~E$7U*_m#^o|j)#Q%kVz~(5GM!4;_L^!@jkqJ!%E;_)B`)aB1YwzAy zF`Nk3K3V)=iO}L4|A5ay_VbJ~7*aC#3s%Nbb4NFeNNMlMVPIW8p%my&0U@~0L4^S> z^N1G+(a>jPzmFqoQYJ8P4Iy)7iX+_N_JAf+kQ^lR8&|s-{a>T1rc`PTQ1Bc0QxN|y z45X<_Y4=&=))!3xs|Kx?(ogDZH1$}Gh91LRSAfeld?|D@yZ=72GGuDRM6b0gr%m$z zpoP{Mj?*eBtu9G+(@Z|cY<|k=vebf(4GHv|z``|T3x(~!sG|k`B9jU9hYy`PwjcDX zAQ4zVAEStv=q^$lfo_BzB1?)33P{QePRQUad?=W(Z%}fjL7^S2B%|CvAF%MC5MqZd zS^ciberMTXc4=&65Wqi(DtWy^=+l7WeG9Y{z@R>=TLH7~}sVFS>s`EVB}K zp7%%$>dfDVUL%Nv%6DC$egi1`@$*=3LI}Z*<RF5@RH|f^%Al*n^zVe+Mb!WC|EO6{xt+nNtf||0y`C>L$61sDKGjvU(;VCS%@5LaQ1ML~kbV75kpGj`{#~ z1;d5KqOHlA)SJ?@<BF+BzY+*(ux~bDHP+FxU zm&SOWNVY~50sr_>w3j^o9$x?YEe$2&NEX?Jo@QDcO~piman;}vA?!qsNj8tkC~d## zxkV|8EPPI@V1#xbOv%SB1lv0|Iv>GmIf8x>hOfW~6O|?vfy`&R^t^KX zp^16~R#Ngj+yo7gku9Ss4`1d2UVj1@bqk6D5`RLHGKqI0Xyi~MRTnrejN?p!pG@C~ zGN}AT&|S#}L>x>gJh6lUc0;~EAidP*wkehz)r$*ngco?V1`%CE0U2^3%gE|gV+!fS zmdu^LkU~=xk?M!>0nEM|+Z(P=iU8FWLysM4*IF+DiPisS@#Mk-kikC?F-07S-)JM{ z9J}ffDofegMIZtpHYs=Y+*=W%Fvazx@yX7K5GfZZFTT^#v5J`t=!x!DecnYT%v)cd z#vF1CAvTCzVzvB0KH^2-^B!uzYz8R?D$X-Sa<%u3adzYyb(V_>6xYr}vSBQRHm>XL 
zhrR_DJ+B_)_KE+C3$KZP#vYzX4w=PZs1zb;`SB$UZx(lrqJotz!i177a}Cf6N66q# z#Y?i;wAO)pgd@2?a9u3kmrGi!K!jk!y0EH;?sSgW^ zO@c-_q|WS>$6`?Dzdjp)#?_#UauXf3)Y4rL6HDhWWTXi|no9%wj1s0IuaHE^LKSl_ zP>=j;XmoT2k+MZClI$MWLuQ0#jcgZ#b|QJ3OGXb08DCOpAjP5`JN`Vb>1K8$ zCK4V@Y%4+aDj z8x&(1s;?#k@RKIb_$KDDWz^stsy<$)=*CW;BCMF`SUF$Qs3dhYSq)Gu4f6x6q>Anu zRif(DW16Y}WZX6Z@t;2h`E_lo0?9Mr#0*-iA)%m|C83fw|XkSg1vF%-9T?YUH ztv|b5baZ9$t|zV9k9&_{79a76Cihfc#>ZReMkczeg811v1SmKG!aPl-(p?VmK$Ebf z6wg}ubMDx!2O+xcVX{)>yTEtq1Wm=9>1i%$Cz&7hEsW+ap_0^-^UD-cIHE?WWZB6Y z&-j06UBpA07O*Jn z3~OIkbg>C1;^oW5?(v}2hI8-i?BCTG9A#1QNX)hSSz9W0_(ACQ2L0~?G9H$7gIy8A z6|8|O-Ra@M9KPD`-Q+w*CMNywvO_bi4TD9h1vjI27gTlks33j;c6!vIfuRpVShQV3 zzc<-m_$x1WM|GX*bay`ptnVHNb{Pv6z=rmDDb%RjRw&K?o1QGM+^p=6_86mUb{5<0 z?u(k7nqvX5%m)V{OTrD^nAZly4pAqsB&ce5*>Grp?;H=QvOUpZ?TK0N99RNa zYqm6%lzzHDe@ap6R7`4x7a&(G@aR|I2*J>|dL981N;xul-4Y{_$TFge4jl>#$bw8N zO8n#xs0Gv=4?!Pl4O4^aOTcTc3YUk9vf1tuNU}#CcnfC-<1)B!*k{lGrhUM{k#mv^ z-qwu!B&hgFaGZ{~LdI{&ZP1~O>LJDjY{<_fxe8#6Gi#!)eoAHTJLP=^g&;w@d}m8m zDqGc{3aJ=L!Oi5n)h|DXB@1Q~DbKCZy;OJn zRAjB*Orqi9Av?;@OfP6PgnLPtEI+MKJYH)PZ)<0h>V0SMB<8S%Ev=Z9olGCgl0+|Z zbt~uh60Otld1#iIWpNpYQ`qw647mh8zp(TRJ-q;5fKryb-1U@)nzM>Vc|a*q2B9XG zyDXAUG^wGga89bgaiz|JsZ}ryCkD}JpR+0%#CtpF220^LWl%mV{1ba~Jp(`q7Bjx> z84hrEXyASNQxMne*Be8;@@qp`zv$IwSy>2@%i2(*amp-^pPscPl3z}_lWc5%u1KP^ zvQQ0)zU6h(6n1C;iO-(-UT3QH@x^Jd@V>=2iaAtJVgH*z#HmH1Q0VWofiKN;E5E#7z@*=GgN~blw(=VEwx*I7l#p z)dVKzIe)v_O%IyCbt>tRGH#}bK=JW;=V>L}iN#eH@o@@A*=Yas6W>xYn9@$prPEXK z&{k5(jW8%186ldJa#+DaLTdAceoMT=uq2pCFRGieDXapwS}I&l6X2;Q+$G7Xszy=3 z%_`cvidEWeQWsG3=nR}v-jQC#)14x2YH{9aUVfH(Y@!X%QUm$(qIO1-a9@#BN)3#Z zGYdBCclR*<8(l`0-Lmu>Si48b+2OZ0Q6!7}=rkc=7v=%f$_D3pu?tvfw!!Iszrgi9w$pheTs1aU_44@F97B-;t5AwGy@}uE zpchO=YqE3qy}q7MKN{pLw6SVp=B*o0cb+bFw*;uKcG6rrdm$qes(MtQIzLr|r(CrU zJaxyH49AOd%B@#j(ko;^qsnxQ4)L+&Bu#heF5GU3jPa#tC^6DbMY66q+JpAZ)rBQ* zk8fq=3-X%O1ztKx!`AsykTXjT{BItT+@bV4O;>nomkU&q$ys1sf1QP8RJWJX-1K0y zwTF$hw;vhHvl(Dj|1XyUZG269Ze9wSLz!4u4^n~ zkkYu=ILji9jBgyx8dX;r?TGaO5{r|@GkfZE3~64AIe@kc(KyBOo3UR^+)IogYezBL 
z8&W}aTcVfW5#iF#VO^+oA$CwUKxW&1T}0K?SgM2PfLl2^WdC9dDL7+U(}t~**Dgn@ zaQYXEWmU;#sN7N6GfUPnPP+a(!!I}eOjVo;=hm%5J_d6S*RP470tA$q^`-B6b?&Zm z%94rOGoyw09!vq{&n<-|25-8^^U9pRUtEsQyL10^d=ra4Jt0g|Q4H=L7IGU+wH=M? zA5J#~8?MsKxRtTcEwxF;yoe93=8+U}{CJLY}Lr-jmY z79lFY7~pM7#z6HX@H67c&LaTzZqrdMp?{piZ``sGJ@HAq2_4y4WMXa5bj}DJoq2s# z8>C96v~P6H`S@@&ycXTQKgf)Qjug2UTnmuwl&>{X*SQUS6o3XVI4ZMeILKHW3CWQmQ{`TKH>+FL*pm8bGvzi&@hNroQx89hOJ~;v51*2n$}jhS>;x6e zht|IoEC0B)(P*o0sc0R}jJ-%UPM{ebot`S2HK=1=Pc4Ef-G_M*=1*n6T*>c{e(zLc z6eypGBph{EYiEd&X9902zbgA192EDX%*7=un$L{Ol^5uqwLSzo!lLV7)m)b zklxcvCfG4VQf&Dl&`PQI>D$O4qgjO16>hSIto_YBg+{Sc8NfgFq21zB=}0+2ayM3- zP(RU;DG^c2Ha;;7U4w;e99~uVA)gC<`1OSVEy>OZ-$g)F_odfh5pOld)dvfmL+naA z|6U6MoQ2Elqo>jN9}#!T*;<47!zA=F7FCMa zYR{>-lT&23Z0OPVJgL;XuXLrQ6K36!t>FdVoX0=#Gf~|La74CJXTp)M|H6$Yi3RiC z1VAF`6J>D13}fIjWCd}SWBN9^C$;;m`R@Obkk{KC&&GDJi!$@Dq{@Fy&*NHAKQRb7 ziOrkU*BIlZA3bSvzB}zE)H5Z=V>SR+BUR*Se3iBC;gpG#9rp%Ldg!VY?@<-Bs*)B( zCSyY%y0~1?BQWR-N>SBa{MotRs7>nE1#b#6dG3_4`*~1UuoOPOoV}3^C}vL1G)@nh zR!!9%hERmYJbENeB|FmY)XlB^<9E@(Kq8!Sc7a4y-=4GF6pGRu7FtDNTKIH;+u z0!QJo$k~avn0XZ>lNzdTN$w5M4aI$C7yvks%Q4!4^3tp^L8HAd8b}ky@(18;3ax9DF|6Xj#U?h`fYXH)y5)#5m;c{UyVP8h#G-} z&Sq3Vh5LB9PWJd)F(Nv#K%|t4eiO>7sPqh6d-&P;=jk7!!VbbOFB59lmoYl=rj&| zYBP~Qn>A3LbK7&M`273NW07)i_h&lJIyWy%W}POFgK9PITWPPG(#tlj#|u=M3O36U>8*|qOGy-(P1H9?a{wz#?~rcu z_%c{0bYLKth9$@Mwn^c#S}vUvZopnaU3{)sdyqxj16Hhddxug|=M#ZNGU$^(Pep%3 zemS=~iV#4$8raI?Ml^0^Oj}*5vtb^So|Cqabs4v4gTzvhSLQ9T>T^}rH$j0dJ^bUL zMrB=V)x}^zy2bBavUVGGB(Sz}sZ;)l>ik(4H>SO@AcXe&Wp=w(JRT^$u_0_rf7u1w z#`gdlYLCk^GOcmB-B(dl5S@-}QP)@Xc6K?Xt&Yw5THRNFQq6gG;xqn56+Rd~Hat1W z)6D;YZRRXy{!pf*iQ1cCobGb6#Nlh~?tTBLZiB0&wSS+?OhvkC$;EH0{lg!c+xe=o zV?ckU9_hkfj4<6Mo$DEyHF?o0u0f; zTXt%L;~%A5S9E>lC+X4T+}9k{BoQ3=O8)UOOQu9hgJVoBn7BFQFxb*5_0=2be|8?Ti1NBs#*#;LK6{=)12 z>eG9-7r#v;?4;s6w5XQZIY2a7_nfy%-3&VM>a~2-M$i76sy+F{Mnz1PtQ=MN%J=4Yw@eSRvdQJAJ-LCKUvs<*@$9; z0B(npoiC4V`BGVPb;pfpe*ua>r|bSYEku}-iVDL?*{7tJ?72B3PrC`O9nuzo)NP}S zpB&r`5-^TbEdsuKMhj`j#m&}&3Mh-^LvmmhzQVJcNDfG|@PDcY$uIds!t}(K=E1!A 
zZAj5llze!o06dlDdaTsOnDM|1^zyIG5#CdlVbmuo{^RdEsUDm{mG&uD*mgZ6LzdJ9 zwUT2aF# zoL5fQp$+Ofbkr1v3O+oH`;!YohnxFP1RM?!>)(Bdm6n?;^to5wB#qqgP)cZ!0;Ocn z-r9J|S6LUoo#b0m=jMr%A|hz=xa6qMQZ5aT@s^a#x@b5H3Oak;j%@C44DQc|Pg&<66ke0r32x@6<$sBA8n`HjZ2knh%0}6)v(s!8-rxK6CU1(5 zN6!FScf<$vG+3zEjoEq>tIM88ty|LJKhGY92I0}siUY^1M6|HZeJ$E(I3T)}3N1s` zQUVWcpRckuPuoK?M^Oax*hr8g2Z2!6uJ8=*IGe-bmtWEbmOe3F+2fK#_XLyi`0bdP zqL3t`1O_gLGGJ(e#5_`?bsyPr?+$to4SpM8aXA^wRgS1KEXnSgnI3R1Xn|HH>Wi+w zA2Vks#hyYg4Uju7qYFPvBgxX6%MEG|`(=I0kYyfx-6VhE(QdJf!ApD2c#LGwl^xT* zJ(5^bb5>UQj)N@>QX#dhq*TLb(j;%mli^CuY&f2atS{OQlGWiidp&*#DW`|F7l?IH z9IUmaZ?X1dVsjXRZ@|lmPhae>j75fIUR|U~&dYJ`i{nl)nCWdVs$)0~7Z2d=GdEb! z;H!A7+UM^_9a&KI(eio>>scg+au>Q^7Z2rEKi!IzjYU`d+II|hB7svJXQZUzlxs`Z z_Klac%05GuWw7hFJ#dqny-iTZS_XnAnJ-Qy!&;sOah?yC>Ps?Qh*Y6*wP5@F5`Gui zB{d%f=kJ8qzNMg&+7aP69bq;0pL2c1hO~>jn>Ap~<7P8Sd>SR%P4{J9qjd; z_D$uF_yi5U>|wmyzGR-%oaK9h;cBZcuDc#up6D%U$Dc$Dq$gLtEelWn`}dPno=9O8 z`ipa3S$X-Gx-XzPlq=LoQr$Gnl)2&NhN5w$49!$FQ58*-T)1P0cO02Ngc}Ma9lQR) zN%fl>`ZWLy_4T8$19^FW#&1XecD#-{Psb0+*4g+H1fW# z`ejT(F2&_)N{6sc16HeroJ2dR(*W1&*dZ1_W0s8_eWvU%$ORq8>GvOuWs)2a z5+ts*xW)=&{lZvW5~kvJSsC@^!pV#mbI8$acXm@wTo((>w=`1Q@X7&pi!1By6JfLW z_^PT6UG2p+E9e6aE$vOrl|4uQ;r{-=I9;^8SEbYO?R{K+F8iZlBDVS|^k~GWMkO3N zX1ZjzCR%gzR~+pG|C!`^_BXKYFk#|q`)mB3F|TQ?>h?C?@Zz%e@N1@R$tCmO@>U_= zt)E#}m934R8P;-L1?}EHMtzlcp#)*-_*n1n-prLdiEDg{A%o53F4L1sb=ep{1N>F` zjc$VY0Rg&k+0qifneJ;ysmiUj8>&KG@1wn`+f03N;JuT2vL@H|SmW_Ps=3Q* zNNs)HlF*z2=I@_BrDu0Xi_diLjwDP`8}?%M?{MZPa5A=|-4*;IXJ?8PX_tyyuL#^d zkaCEZwZC_}wOx(1fK!fUJd8Ne6*zbAMEqJZ8!kNbY&T}hbiv@YPd1k;Gl}(l?glEC zhc{c3C+mFNGbDcFkWG7A<7z34Dn91D?6%>d)vH(t4cWMoTt^4zUAX%lF+OKRdu$RW%z(Z9b~kJHF?!` zX+7rY$-DPG>ZH1Kl#gO2W`%D8ordaTYt)8_MgHR|b%)>bw^VcS+s0Zq^l%@S#?{Fd zkoc|nOwn1N)ZD0cFgq(dj&oFM+0bW89J*m^<+Z4|a^0F{^TT~QP0|)i%|@pw)hQaA z>Np$yvWl)EkhX@|xQv2RSs88g#qYk>NiXdmp9?x{Rtm6z6=+j7N#Xk`%LY|)JDz*Pp);f{R>)K{jlZYA}$8WeIE1Ww~#Rk3ZM5Y_;}2Ia?VB& z(3at9>Ke^{8gb#6gzby@NZy5nnO_~2D>;cL)dVg23YqEXoZ?L63AJ>fM<=$16juX`% 
z55&}Sx#(HzNNum&WqS5_ayZ{Ed%jRrX;xb>xw$tp4GWYoB(&<%CXEpB|B5qU8V3B$ z@L;`wi-yW~@0;O%F~k|x*jhW)kemFGsFWAp1r4*TBwNJ}zj66b?!GltyzKrr&Th+`iBh{qbi_Ou>`8{bq&obv* zCJr4hv1$#`^tM^G?+X<&RpNU>8y**5R{Uc8v(!&Z!J2rxgaiURb@{lCA=US_O4Yv9RoTU=}3ZhfIVqtor zD_QpA1v!bR+PRYxGYy5xMjM}fQqHt)fEdJ&Cs>$+y}eN0D6nywi(|O>`rc?vj=%-Z z{7`z|J6zhV_^2xjakzc?8%utUO5+qu!QL)j-J{9iK#mW6pE@xT%T}=H@Kl`K7ppkE z4$Msa_KV#mu;D_kknn%vk=Nsg3Yiy^EjZVxHP8X#cnAZ$w(2;v-#i^Gw$L{A=w6T^_Olg-nC#zd9c5MFp{~ zmXpAF6L2LJUuGkdXA=!#w@LRl8HVI)UVPKn*VloM4G6^|@#wVV=jUg7a?WMrQZ#f? z>84;d&$jmDy@?QT+iw-{+z%=_`5|s;CcayFjULNvZOYW~^l}DND0B?I@>&4vIax^J zT>#IJvTr;bDEiYZ^r2pg4LQ9Z!+8G4&{(GZ4^(AH|D!T@s=A-r*^0dH-*D2Dq*~40 zpkJb$LkMx7+N6TUUiUcAsyKH>?V;pbIFL_*`Al z3p<}sRbg_SqhW6sW1K*f|LnVRV?T}m?XLy6h`hXBZh)JVgJSUmS%8n0B>t^eVsoRbE7jPf7QOO;FdVC`!`rJ z21?G#>c?xTI_X%0r>EbWvDLS4I@2@&ktqA;nw^QZ0;Wf&_KksIbQC%=v^M@;(@}Ku z551)BXtzWATKlFRa>~-S?gbjXsKNN|J-;k|2opY6Jjg4Ts*uZvrO*Yf!vQ2ol|dOhz{i}EG%&? z?eT)N)Qp=dN?P=BXFn2hj?)vL+_RkO1S^kf8JC2~vR2xn7PW>Q{D))ZA_ZEH8sE(| zd~$h-8dO)*r(i@&FLVoadVNJLK$yM1&CzKWL<~nJc8%w^U-au%^XF>#zUBcx;pkrl zFEXYw8){F}JLMHM$s5db|2cM@RRQEd=;3;mj*hNGZ~9ZEnn&>t-Oa-N{Y7bd0j8DO z6^<+!GjCJq<^HKD5TbNtYNB1s8NU7d{Uy-ccp#qx(y>5F_}_Zer`8o;hcHc&Rbo;= zcDeF7!hIUXM>87Cy}iWssxFN=Z_y;P?bq5{06iNS&q<6aDjNbRyt_7Uk2}PLo>;kv z@bdG`LC)~z&Eg}%3)ogY9vc?m&o6e*X##CWj59GI2f5~1H zq`$KhCKp}=P0rjVo_uajpBMhHNs_wuVsQE0X1gkD>s@9^=AG1L^IZq06Skssw1*3F zUca&<9qmkHVgL7Zk&Q{0ozD!!?17Wut%tVDIYh}%!}R2oE7)o(D;)c9$mz{;=c>xr z{J3K-S$ekSHv;VQ5>S*8YH-);Kow4!o^9A4fAx5B8g9?D9T~rbhOR$zqGK)r(}H3K zeQ|wxS2{D*!@bNrIPMb^x49olQtvm%f|1;33qXU{6YGsNYCFN`!rH zP+|URot?=YnO5#$kaxZ_I4_{d>hRF6x2!+lF`9`ZjJY3Z zZhsR!mjZw}co{M#ahw*Mdf&a=YnFOi+Fj%Gg`z+B8C=|G{1195ZfJ4Z{zn>Lb74Z= zdrU@MQ>jq)Wc==$=Kt+pqwxw7K8w;ELwU6sSlQZ6Z&Q=R(55?$N33}3{Y!oK{a;K0 z9P*R*_)ur5WxG#s<+5jV>K&d--%fbJOKUBPjaRUyFsAN#1KmG9y&uif!m{B%NpFZ9 zial1~6~ucEbFa@Bw&B|@B5wQd5LUSupRV}D;7si^8B33oXzTCZ4Z64M(@!AVbl6Ka zVRn3;R4RXv^wjWG>+saI#bG#!aczz+Yvm=105;`x74l}m{)M>Y{RGf;uLVMxl0@_f zbQgCX^o_-bNTW3qCA#M(iOD!2 
zBp|SDn@%96h=5+JIvkGa*Au;d_maa!Q#I8GN^Q@MKoslnVyw9&t_w$1R;XWY(9#YZ z#(L`%{e)Is!+-v8YsoABXSGz-+BzcF-4;q5X@ce*htB(sTYDFH5WrnuJqIIlQ-aXH z4pr-qzMoL`+5+><3FYO66e){!GLQ4lkUDSL8a=u#Ni7hYfY$yf`wHwt$K2&Qil5#_ z(>zo?0xn#k9-a$*uzeX<_l{0fPa#u)?g)K)@QyoL7f5D!o5^*UefsV5@@1b`Xe_m+ z2WTD0GB>oZ9cZ|b{?)*}-GZ1>d|utS`b$Q;onHw5dgK`u@WCW2lMGEbE6)7A(BvKglZEL$INk(|@3x?>Om67t zai^>ys3S2lHT%*;@{}-8pf}g)YjnoyNvmG^Q@95en^axt<9o!&$3tg~i8pU)4Jvvz zS_$y6JgKy%WUe#z^pR_s=BNz^26+d^_|IAyT2zsCT5Q4@ADx>~hijJ7^J*-mJCb?Q z&bvRYcDLM!SS`Ei*Vhm89+hdOKAWufg(v<^pH9;0DsHcyTSoJ&vW%ONMRG$BC$&N^QT~bH`bK9w zSk+F;U_qS{dul-3gxXb2zCl^Mase~<8C zwvccGTu5^C+krP_@0ZE;jz4l5XC+f{_RnW;uV+B0GOVcn`$s<3n8*HO4*-$1AyPMz z^<(ZQp6sgU9F5Wmo-CDk zse@M-XE@#JXEBA#%Suvdr`_+%)pHbT&kBJU$UE1Mos4fDQ%F-shZ)_jQ+T;2^NdbE z^-Lwt&tn>Q*C-LDE6BvLV6+;@`9Pgc4<%53Mm!*`!YGd-4oV`5TW%=taVE*M7S>y$ z)8#6zN&$858~!d(ci)xoC1`1^=au zbEVouG0TrNA`Aa|VJ}Bc!8ku#_5DZlV`0#o&R(Fgl=9Z$P{Qj4w~bX)Zo(Ekpkej# z70Aa8ST>Hs>TmsGG5yW*Kb6_Ozeub;5XtxPmJIhfNoLkuRGj5{=N-{&H9NCXQL~S& zG;ePC+WsnA0OTa=q_{Ywe-*6UF0I_uA8c4@cQkzUU6a@6--(Q=Q&CuU?8)u7+fYqLNq%|0q0lqyMQr59Ca;_H>M5@#2e+C&PM*2;hH80Go_c8O z#6L}*+a}5~9sfBX!L}Hu(eU^6m=6dtAFE>9ov-)Vv^y%q+8>I&)2=$3X$C)8r!Z&+ zlgi)V#PCaP1@p%R@h6{m@R-<|9UZJep9q7Pc9BK@Vr}DALxyC zG|O$XlrWe-R9|z)V=!FaO`a{seBkJ1W220RtGuyti?Kgy_GyvoOX{)KKUw*F3Amwy zw43=bz8BvkKhw4Iy=4Ajb2A^lZDtLDP>SL0X-w_5l4W%iuW!D&W~udmV+gxQJM6aa zI^Ke#6^WExbbhs;m5&NmRVgg}BXochrm+YirizWKIx;~B)Qwp|&Ytcgd_F-cU=9ll z+x){au-GX~OKrQ^U4^Kqo+Z#&Z}94?9re5ISFbMTeina1iBb7&bxcP` zndz>qxCW>V+^%S5n2oMlzwBzY4E%r~!1t9juY3Mx-e%qbNPF40BzgEY=gaBLRfAee zwGK~g7afDTXQNQ>=2s*0+~cozhYbSZd;N|Bx6X4g z;wa_KU+fcvrFQDfs(Z3FbAJ3LMypnJ+#nNQ(uLZ zxw%>hy}((1AoH{_1lKmzGfyO0Pe~NMk)O^#Ws}6jsRL9=o-PjI+F~m@F8-VlYaM4u z&8x6k>X$yc{a)#4HmF8s*y)KAs#edBOAQdEDi5V52^FRP!cxtguk}>{_qUQS_-$ zf1-hGx%DHrufuYqF`+j_a%Cj4^Wk)gm&I_n|tUc-_aXz*W-0m989+oC-9xiwS3) z*ReWT79rvHwxJDwfD1I&xWX#Ab+Go!SyiH$i4nZamaXl^L~uV_H}-ULiDb|k#ILYr zNFHlvdte;Fqq>x+0LmDiAk!aPF0xNylWsGyCLakhQViev4XSNI!YMrno0C^k8J^q? 
zG@m_QPAPo-Re2E2T47WiU3b$;S9W}aAPt0C`zbxmN9k{2{Vn4=&R2ir4k!!_oxonA zYF1V&WG`ry`#un7@E3gfWCSGpWI&+$?Y+ID6_f&9d;9Y1n;W{dXESBwC#c)#n3yKO zWBp{|-U)9;7sSQFboynoII%!CT%R)7A*4zDcJ1mmvLr0Lm3+o{^*1FrnVpxD`11dn z`_7=IqOM&;s)zMYdT%1VO8_AtO+h+H4x9L&bV#9D8ee8d3THb*S1tE$>{ zZE>K3jjQ3a&(nO?{%WfD-rD=>>g!wC-mHH1{dw!u?LC%ZtP5@Hy_5!vTT~(`0+5xRfBbC6gbVOx_)xLX%5;OJG@_)_R5(M z#FuMt9oMsr@zkjmNxtP-q)XQST#?y=qGRl!vy_e6irWemIKt+1z7J*HMVCno78NMA5RfKo;nuRWMbrdBSRL zdA&K}bJp{n79LWj+-1+3&7j|b8O(W&E>9aWx<9H&NMQ`cH56=v$6_4_K_P7j#4yJ7 zjSf4P*Pm8D^Fs}nHROHO!^sNkf5k7yXQ{@YPxQj;H-!1@7(!^7L<$zfq#swjR4otQ zKBGMwVnndu!MpI+rk#x{UFMrbiM_SMjL01~u>L2xHmF7C`MaGt%yQnN)C^dq^KXB0^1#1ym1A$`WqHyj_s|*x zCmYX&DxK)xg%jqKtbM~wqmtm>_j!fR%iwZNQ1SbLa|@XAtCRj*6nis44x zp;Aq~#m|Z&x=>v&_k4<1YWM+re}8^hmZFF8O=g$-n#J_AIsd#KpxQN=W>M z4&u2sCUU9TZ1x3MqS`|6t)0$G>fp^BduKWDkLsdP8ajqGLK)KQJ@>SsIX>9s8|Wpz zXyDL4QDyEbaSP=-d2eLQe5@E+y^M`Vg^`el;7{hhwU!u#>4pWLlow|HRlgV-id_#9 zp&y1tIEYIs&>Uu+6_C9B0q+;rY2+d~J4^${3J%2=<5Q!W?uyM5{yzsh} z_afEW;v2C~Snztx{O0*&MJ>CCNYeCmXwb$D9@Vrc!)$s@M{8-sACmsOwyP@{-op zb3ebi4qO+mbIw`9ZKLL*^2sCzfBX4XCCu&Y7SPHkH(&(z+Z-5DJG0oVD7^Irg?RJ% zv&OE?eIO%SEdC-_EYJ8YE16c~(E@N^q4VqW1V84dKhS~}Zsd2`0YQxmm|`lUQZ$2E zWk?H1o7R0|Ah*P1A8}Mw_b$r!w6@=>8qHyKheg_^-f=E|(RpiUVi#2cKc~)darf6Y zaC%~^bgN=$?sym~k@7ClyVmO0lcA4`Px_2%5v!qm&60p%xt;K&FI8C}yq4)wC(rxE zY&S@(ul584S?SS8`HmzpXNtcSS8so=If z>n>>*PKQ~nPm!GE^^hGpTS^Cu3waI-vTs0M7V?Z+0ki8%h3e4YKmOmeeB1!o!;}e` zS;y1I0+#Iy6$oXfyQ@*7y~n|{1tW<;ryg;TOvjuixB8}30B#3fKNK6Lp2`dFkD*I# z_0~38Us&Jk$>!DQO^SO)1bu318K2=>PvWcf^q$MB>PRXt{Ygs`J4-T0get2*U zh4yGYbq;KvS=~%EEa%kOU+c(Gqz;6Cy&I0*+&giRn7s|;m)IR#+LTlfT86oI&0YJr ziyufq_2DfNXwz+la^b%}%1%rsJ@cJ~JhhMDeU>=+6%~yiX0e4ts}6)3cF8yIYM$(7 zqlqiCt(pH(w~%Hj5v=2-4o@jwV!y}-Pten+nI{&&L1H-XvFyZ?$GAUZPfN}7WlK!- zm=vQkS$@$zD*3HJ;Zb9k#xw!_*4sBoPI_h94`A10`F6!ft^!x*I1RA65MpStf_1uNwFs@%R)Z|X~> z5FO;=AYkvESWg-4WfY`2+Y$Gpp!FH^Y$PR}+n;9d#-+hT_WeoK_0_8jS1Y6z3GCDC zE3=jqd8-PY%eNZf&-7)DUSZEa-7!5|bqG0ht7mftDYY5MHU?(rzI#4HkV(mcD(h5L 
z6je<*MU`dOFDtOn+1Sz)y}l7__~T-l>m2soM3MW+VBlfA@aa4Ij$-@Ie#|NE@ZV_o zL@w^kZ!wh;&N|wDJaYGe%s|avy30m`dZYfIZQujr!gy#NoPS1m_lup>Vm69pFu058 z+4m?SL$2AKJlU@9^ShJYH{Ms|E5* zAl}OZ@pxZw;aI(%hOPwO9VM zOx$QNIQxQmL;2b4v`lQmO^KboD$|fVePJ8cW%cWUVwohoh<*4bbqKqoSGJ#ycaO%p z{rXdK`;JkWSm6i>A^R;0^A1qN-`nv=4L#i7cb+o5I{-h0{V1oZKmK zDcQEjZ`C+kF0iazR!JJ^nyeB}JW@ljO3iZbt&@NpeXwrkE$?%!6<{!T$#`lvKuo!z z>+A&)wxqyWo6+FmFaAB~Jg$dSs#aV655Jl8 zxT4VRlHgflPh4Fna^Kx65lV_4HD%aK`@Z1MG~X?3yZlC~$U_1l&*)n1a@ zMyuQiS?9v#uC)bD%hNv)kVI{Hq~?kY`zY8JxWi$6K<}-J_hFtv$dy>w0{CiEA8oRs zl%)_eh)?yZ5)10af70owQTwdZ{=>gBJrLmM-u8f)x7X8K;r;mzF|)MXSj}!Sx=a7+ zJaRwr=ZVq^2MGeFQ7@KSYOaK#wHB+?VDE2S?zGR?Pukoj3GrKHQ}NkQVoJ}G9U73| z-u+pSvGsVbhunR+bCDb~j^k)%f8z2bo<(NrMI0lti4eH`?b);J#?l9EFw>)iH#ny@ zS^F{J4CeFZ+_z_|B|Vqq6s-om+cAeGUM!Qy7D$Be69X!TsYnf?U9n?Ydio6bS`{30 zv@0_Uvu+hZSfoBr!}d+MV{LO5;wk0uw%o=RRD6&g~WZw?9(G6B<% zw;%J3W#hD)Tw6=eyshSblS<>oAw|R3>YF6%Z;JEbOQX8=kH1wc~q42Mz}P5^X-?B`oRN#$#A*c0k&1a z*9&)t4{&#?pAPJqK(4kMGv}@7%A9^ms*10A;J}5w^RtUh!IRlfbNcWs^AJw^!(f2X zYuLOb2|n(WQyRk~fo>p&H93=!;74nR~>F6?ZEkPI*#Y-II@c zK~1~)oBL3ksw+0;%*{&mDs3YZlQlZMH|XtGb^Q@5!C3fBn9t0E{hQ^wI3MhXi$`)s zmvT5(_suN*TzS4nC$SdOL$7P7sVnWP5p^T<^y#OQ-!tkZdQLdieW-Edb3+4+eS4!v zlmp*YHFmV@LcC(uk#iFPZ{aUE{0rp?anG2`q2|dKQJ}lNd*7|bf zRR1B>(rl=0R(!R%Jl39sVnA>I-UNm}%Yvzj+uX-t?AVhy3N;q53CRl-!Ud2^ZAr>O z6>zYkgxDcY8qhF4pQQ-ZTEvfc^y1@xaQ=1u3uS}J?k$K3hg#Z$O8M+S8!9Bs{eDIA zb~D-3MxF1Xx@t%#&g|x(N3IZ}fE~=Dg4lFYLgqf#GNKx>hk>P}5udTuVXp7*u_4+W z+OB5;h#?;vwogws@?=+rBIGId#N3FNZNp!n>1-q(9A=9f1#=wnp7I1L&ZcC_M-OmR z&Q=}8r1O#3QQyGQCU-eYUHOsj3?OMc>IScy@h;M>cmiWLvl|;aS z&GvFv7aqla9lfirIhiMq&y=}}Vz7$dl}}m5@A-o^a{bRyd(EhXzM+mYbk08`YI~uv z5|5VZ!d&p9zGYZ__gHLY?R;Tu75Gj5Xj}rSrYgrQ<w z)s}78x?RV%<0%co%5D|Qswe-?BX&o?8nttu!U@KhU6A66PUv|m4otB38wuALxa;bq z7T>vY(kGfgXrgz0ity}WMFhDWRH6psk2aoeSJm4E(M4D*qT(c~f^d7-QGwc1Fk#Pe zp5Nb}Jued3z!uAoRGxDqr-nJAhd2o4Fnp;ah%lwQRWk&PC9Ko(xc-Evl3n;+Pxt9E zJuHC}^UTL1slXw&aw0w7lNn=kD=RCv#K4<`!)z|Q6|m4m_+$vSz6e63Twz-O7TeT? 
z7lHxBGAw*zz`L&2^`5~fW|-5j!oFU<@$-{yd`ss+l*lPRQ5yZ}WayPWVJiSOtH`j@ z=nNbafEi%GKWV)>-^M?&L$n9hTY}&YP;Pz*Ggnn}^ZXSdIWmBqy=xA0!%R3Vw)#Qk zsS_h(0fk~8n!9IHSNgE_o?X}YeFQ0kg|0g$yt`W`7+arzq5vY4t3lNiKDhXer3<-Z zci>n8fG)sy39v)6#p_o#a~; z+!g?CMM6!6`*0HHQuL>@qx|cg_@^lDq&rj$0c*)>RayklA>5k=uq@Q;@B0lsJUrYI z;fmL1I{BnjHV|o|8cQ%0!vYi#M>xyq|8VGT@Zkt>*ZTTAuTNo>R7}Fl` z*Pwf7)j9xQ=M{K~8R`hgQ2@8%*F{2OcJFh&f7u*Nk*OHkiND-XY#JZe^ena|gxhA@ z{5`w&2_o!RCV%sl><-Ic^O`cVegdftqTflNwltAmGG}Y49rH|zA?=fwk><6ulnika z)lN!hETR?o^OJh3j`xAy_aw0&Oq4VK_{~|)65N=gNGg=gZAMRiwV1DoI6@AuqLzOk zLZXk;G5{HcCGd4Rwk}qrb3Wr&#FkE_QMKFpXxeO(r?q0plE@q&g5AUN=HTE-F>Peo zZ5y^btFgGT7m^9E4@?qo=S4!#G%ltxI@c5>*q9~W#)+(>49f5A!s~&c^v^JPD~Tsh zW(a5ty+*`JLHLvfr4=XF#TZ3xKGgP-CME~Q)w(6C7gr3Tg6SK`UNuv2K46}A*xBDu zybd&n+C||BoH)t7MS?rce@3(-<AbT&Vh`F8^K4T8f?QsWqOv1V9H7U3 z3iD!m?39En-^dT`Z0ZAuSUB~)V6pvlmrd?(BC>tnj)V?H5jmy~%zZ=17B^0U_k3Y~ z+eX(`#L@{v>NcyB(xLOl-mGH$qL0q;le0$W}LLidVkax+x+2^0{`kv-XYO3#PN5+>g0p!C$eU$X~ z?gb#j>fg9U5Ated&qvd6U!vL-A-C2;S#+A$?*fNh#ic09>X1av@S>=&bft-~o5enn zU;9kOJ+P&vr5BT7dHc^yINy!Tiy6n*muTgnYd09WPE{!xI8}{gNYX!-}fe=ir^(pS7W?=`vWkGp zF_`O|@O1zeQW^HSN;klo!y+$ePsX#IhLMGwf(t^_OY(^zAzj0-he~9zzQAydw~)zt z)c8H(asoEgC>RF~Oe-0Cqn_ot(in(+v@8fCxc5Y?3)OFm?WWX{A_64%t2B9 ziqwvT5P{h>r*xvE7>rNtD%&S}VPpiYzq}n1zGhcwB$o0S4u@O4 zL^2$(pDOQ2MMhEW$JqaS%U$@AUd75tKkQ>9AM&{hK#B*SdMV;0Dk}3SZx9&ASaG*O z3u;GSt%c-}WD7Tl;~|q4{TLHT<^Gbf`&35I78zdfkfJ0sWBe|>d~4jhhmUSp;;s0# zH1&HAv3X5Gefc^2)xoF8UQ)gb`<2ATcZBkWcM~mde`Gu4pN%_SGTy}a$RT?#2 zax{shuZGubLGiDV%>+-Ss}cIS0G+VGFZDv?^S+}O1l0yHEWZd(#x?{X!x=gC2E{1& zE?uN)nCOMWt_MQ(^njExfF3NldgW*&l>)KsWOLMi0CO*VE16quA#fvigW4lFiS_Oc zIWqd237n44RRGgmFNddfb9+ST4A?cAiEcc;AuXgap2is)QBalO#W~h52N1V4(D{o| zjph_i09sIV(kK{)iP0qe=6}}Bwj-PF)l(34GpOvd7ZS`dI2>*G;(H`jE$hb?^{Tvj zFQoGF80!+CxIyLTgO5k3%mWtQf0|<--hbbhjD{x-Z$YFkR3Th-IUuK6aS~Ie*<7d0 zps*Xz8&x4&`5ws(5W)~T!NfH9h|!y+^&RQ<67Eb-EsOxDKMnrYLA#Bg5H*QB7B8!k zZRofv2)5npOsa4vPQYD$zM=3yVUkkD4V%7bF{QjbV`7guw$-Q+jHcvmIuQ#|$d}@* 
zogo>~DaO$)3Wm(5eifnYjAm#@QT9lm5QpBVApkQ3=%inHOZo=d>%gLWKJ)^yT$Ao) zJ8>%k*xumK?o}Y4C@En^-rLJ@TPVmNw6`}Zj`>J;7#2lp>K936$d;b{_Pe9QNQ%;i(V&htcvtb#xn>% z8~EgKK*N6eqt1)dJ8BWw@-jT`A#E)aK~Y!86;8@^OhZDh^b|#Ov%GAhB+yvhESmqY zK$${!WIT1IR0JF-L73shF=(C1p4L&``(?IUS%QEzED(vr1-=G<>X=oZ>~J3>nsSE{OO5 zS|z2T@5i!HO6IRkZIL*A`m!NHUFOWi?Ygn%8VCe zH~6x=d2{K4Q+g;R<1B5;Xsh|@bqH%_-T1Pclpx`paE9(19HJ%#u{LsU$BB&Y@*Wt zu`O8)B<}VFEhCb;M3Wrmbz|XsnH}eF02o55!H+WeFiFu9VZUx22{_1OB3F*uz`&8i z(mIBbRQvJ1m>}6Wp@3@&CK9em(go%+7K}nbFv0KXR84fq=J^LO{+?Tw1{|l0^!?9l z$SKh@0;hs0YE^OSuYq;3RhcnKoS13^SoqW5n}y5{xo{UDC;RnBI0Rf}b4FUZ1FW=bFq$x?DOEQA|7x5St{6 zV3)q}^(3Zq>A*#xL7hhW6X_kM&~(7tZ%`r@;vumZ(aT_TsHozn<UpyUGHV)PI|!Xq{os?`r0qQI!GKw-K^efzmv>RukO13%P*`YGy*5el+2 zrtTQhNn4g+$>5JlXzJ2ffP*^9EA$;>9s?vIe(rOqi1gly_h6#*+C`Bj1hA??${fc| ziRiM)k#J1F#mt8U-wBnFdR+E8$wYbBh6?%%0$fTbV+M)W;od#@dCu&C8IVLN(w%9b zy?pMlm8!;x9mrao4ia6mQ;J%#L;GBjCVqVtiuKUs_i7H<~B?KktwKh7h8M zxshD|4IxC!(f)5-;s3!P4Kmk%IT5e(zafi%-rIi&;yQduu#o>l5ZC{8Fadsj!--~* U#?JuwlZfz9SJhUjQML*HAFbh`oB#j- literal 0 HcmV?d00001 diff --git a/docs/source/pruning_details.md b/docs/source/pruning_details.md index 2c6909c8ebf..48e1df7398f 100644 --- a/docs/source/pruning_details.md +++ b/docs/source/pruning_details.md @@ -286,35 +286,9 @@ Regularization is a technique that discourages learning a more complex model and ## Pruning Examples - - -We validate the pruning technique on typical models across various domains (including CV, NLP, and Recommendation System) and the examples are listed below. - - - - - -
ModelDatasetPruning AlgorithmFramework
- - - - - - - - - - - - - - - - - - +We validate the pruning technique on typical models across various domains (including CV and NLP). diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md index fb34c70c6ed..5b6941f32cc 100644 --- a/neural_compressor/pruner/README.md +++ b/neural_compressor/pruner/README.md @@ -7,27 +7,27 @@ Pruning ->>>[Neural Network Pruning](#neural-network-pruning) + - [Neural Network Pruning](#neural-network-pruning) ->>>[Pruning Patterns](#pruning-patterns) +>- [Pruning Patterns](#pruning-patterns) ->>>[Pruning Criteria](#pruning-criteria) +>- [Pruning Criteria](#pruning-criteria) ->>>[Pruning Schedules](#pruning-schedule) +>- [Pruning Schedules](#pruning-schedule) ->>>[Pruning types](#pruning-type) +>- [Pruning types](#pruning-type) ->>>[Regularization](#regularization) +>- [Regularization](#regularization) @@ -39,9 +39,6 @@ Pruning -4. [Citation](#citation) - - ## Introduction @@ -64,8 +61,8 @@ Neural network pruning is a promising model compression technique that removes t Pruning patterns defines the rules of pruned weights' arrangements in space. INC currently supports unstructured, N:M and NxM patterns. Please note that N:M pattern is applied to input channels while NxM pattern is applied to output ones. [Details](../../docs/source/pruning_details.md#pruning-patterns). @@ -144,36 +141,36 @@ The following section is an example of how to use hooks in user pass-in training ```python -from neural_compressor.pruning import Pruning -from neural_compressor.config import WeightPruningConfig +from neural_compressor.pruning import Pruning, WeightPruningConfig config = WeightPruningConfig( local_configs, # An example of local_configs is shown below. target_sparsity=0.8, start_step=1, end_step=10, pruning_frequency=1 ) -prune = Pruning(config) -prune.model = model -prune.on_train_begin() +prune = Pruning(config) # Pruning constructor. +prune.model = model # Set model object to prune. 
+prune.on_train_begin() # Execute on_train_begin hook before training. for epoch in range(num_train_epochs): - model.train() -    prune.on_epoch_begin(epoch) + model.train() +    prune.on_epoch_begin(epoch) # Execute on_epoch_begin hook before each epoch.     for step, batch in enumerate(train_dataloader): -        prune.on_step_begin(step) +        prune.on_step_begin(step) # Execute on_step_begin hook before each step.         outputs = model(**batch)         loss = outputs.loss         loss.backward() -        prune.on_before_optimizer_step() +        prune.on_before_optimizer_step() #Execute on_before_optimizer_step() hook before optimization.         optimizer.step() - prune.on_after_optimizer_step() + prune.on_after_optimizer_step() #Execute on_after_optimizer_step() hook after optimization.         scheduler.step()  # Update learning rate schedule         model.zero_grad() -        prune.on_step_end() - prune.on_epoch_end() +        prune.on_step_end() # Execute on_step_end hook after each step. + prune.on_epoch_end() # Execute on_epoch_end hook after each epoch. ... ``` ```python -config_dict = [{ +config_dict = [ + { 'target_sparsity': 0.9, # Target sparsity ratio of modules. 'pruning_type': "snip_momentum", # Default pruning type. 'pattern': "4x1", # Default pruning pattern. @@ -187,7 +184,13 @@ config_dict = [{ 'max_sparsity_ratio_per_op': 0.98, # Maximum sparsity ratio of each module. 'sparsity_decay_type': "exp", # Function applied to control pruning rate. 'pruning_op_types': ['Conv', 'Linear'], # Types of op that would be pruned. - }] + }, + { + "op_names": ['layer3.*'], # A list of modules that would be pruned. + 'target_sparsity': 0.7, # Target sparsity ratio of modules. + "pruning_type": "snip_momentum_progressive", # Pruning type for the listed ops. 
+ } + ] ``` @@ -201,6 +204,3 @@ We validate the pruning technique on typical models across various domains (incl Please refer to pruning examples([PyTorch](../../examples/README.md#Pruning-1)) for more information. - - -## Citation \ No newline at end of file From 03ec4317f6ee4b5a6c031c83acc03a5749240cb8 Mon Sep 17 00:00:00 2001 From: "Lu, Yintong" Date: Mon, 12 Dec 2022 14:47:51 +0800 Subject: [PATCH 5/9] prune README v2 Signed-off-by: Lu, Yintong --- neural_compressor/pruner/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md index 5b6941f32cc..1cb279547ed 100644 --- a/neural_compressor/pruner/README.md +++ b/neural_compressor/pruner/README.md @@ -7,27 +7,27 @@ Pruning - - [Neural Network Pruning](#neural-network-pruning) + - [Neural Network Pruning](#neural-network-pruning) ->- [Pruning Patterns](#pruning-patterns) + - [Pruning Patterns](#pruning-patterns) ->- [Pruning Criteria](#pruning-criteria) + - [Pruning Criteria](#pruning-criteria) ->- [Pruning Schedules](#pruning-schedule) + - [Pruning Schedules](#pruning-schedule) ->- [Pruning types](#pruning-type) + - [Pruning types](#pruning-type) ->- [Regularization](#regularization) + - [Regularization](#regularization) From bad260a9945dd72f7cefcc5886c4f2ac1fea3dca Mon Sep 17 00:00:00 2001 From: "Lu, Yintong" Date: Mon, 12 Dec 2022 15:28:48 +0800 Subject: [PATCH 6/9] prune README v2 Signed-off-by: Lu, Yintong --- neural_compressor/pruner/README.md | 38 +++++++++++++++++------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md index 1cb279547ed..3e12694a589 100644 --- a/neural_compressor/pruner/README.md +++ b/neural_compressor/pruner/README.md @@ -74,7 +74,7 @@ Pruning Criteria determines how should the weights of a neural network be scored @@ -117,23 +117,9 @@ Regularization is a technique that discourages learning a 
more complex model and -Neural Compressor `Pruning` API is defined under `neural_compressor.pruning`, which takes a user defined yaml file as input. +Neural Compressor `Pruning` API is defined under `neural_compressor.pruning`, which takes a user-defined config object as input. Users can pass the customized training/evaluation functions to `Pruning` in various scenarios. -In this case, pruning process can be done by pre-defined hooks in Neural Compressor. Users need to place those hooks inside the training function. The pre-defined Neural Compressor hooks are listed below. - - - -``` -on_train_begin() : Execute at the beginning of training phase. -on_epoch_begin(epoch) : Execute at the beginning of each epoch. -on_step_begin(batch) : Execute at the beginning of each batch. -on_step_end() : Execute at the end of each batch. -on_epoch_end() : Execute at the end of each epoch. -on_before_optimizer_step() : Execute before optimization step. -on_after_optimizer_step() : Execute after optimization step. -``` - The following section is an example of how to use hooks in user pass-in training function to perform BERT training. Our pruning API supports multiple pruner objects in a single Pruning object, which means we can apply different pruning configurations for different layers in a model. Since these pruning configurations share the same parameter names, we introduce a global-local configuration structure to initialize a Pruning object. First, we set up a dict-like local_config, which refers to some unique configurations for specific pruners. Afterwards, we pass this local_config dict and common configurations for all pruners (known as "global setting") to Pruning's initialization function. Below is code example for how to utilize our global-local configuration method to initialize a Pruning object. @@ -169,7 +155,7 @@ for epoch in range(num_train_epochs): ``` ```python -config_dict = [ +local_configs = [ { 'target_sparsity': 0.9, # Target sparsity ratio of modules. 
'pruning_type': "snip_momentum", # Default pruning type. @@ -193,6 +179,24 @@ config_dict = [ ] ``` + In the case mentioned above, pruning process can be done by pre-defined hooks in Neural Compressor. Users need to place those hooks inside the training function. The pre-defined Neural Compressor hooks are listed below. + + + +``` +on_train_begin() : Execute at the beginning of training phase. +on_epoch_begin(epoch) : Execute at the beginning of each epoch. +on_step_begin(batch) : Execute at the beginning of each batch. +on_step_end() : Execute at the end of each batch. +on_epoch_end() : Execute at the end of each epoch. +on_before_optimizer_step() : Execute before optimization step. +on_after_optimizer_step() : Execute after optimization step. +``` + + + + + ## Examples From 9c42e60f067d4e632acab44396873cd2938ab4fb Mon Sep 17 00:00:00 2001 From: "Lu, Yintong" Date: Mon, 12 Dec 2022 15:31:21 +0800 Subject: [PATCH 7/9] prune README v2 Signed-off-by: Lu, Yintong --- neural_compressor/pruner/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_compressor/pruner/README.md b/neural_compressor/pruner/README.md index 3e12694a589..f81b47bd0ed 100644 --- a/neural_compressor/pruner/README.md +++ b/neural_compressor/pruner/README.md @@ -74,7 +74,7 @@ Pruning Criteria determines how should the weights of a neural network be scored From ca8d80b8c8c0f796af9062acf937aa55d29081fc Mon Sep 17 00:00:00 2001 From: wenhuach21 Date: Mon, 12 Dec 2022 16:37:44 +0800 Subject: [PATCH 8/9] recover code in experimental Signed-off-by: wenhuach21 --- neural_compressor/conf/config.py | 310 +++++----- neural_compressor/experimental/pruning.py | 9 +- .../experimental/pytorch_pruner/__init__.py | 17 + .../experimental/pytorch_pruner/logger.py | 23 + .../experimental/pytorch_pruner/patterns.py | 574 ++++++++++++++++++ .../pytorch_pruner/prune_utils.py | 221 +++++++ .../experimental/pytorch_pruner/pruner.py | 347 +++++++++++ .../experimental/pytorch_pruner/pruning.py | 
163 +++++ .../experimental/pytorch_pruner/scheduler.py | 164 +++++ neural_compressor/pruner/regs.py | 1 + 10 files changed, 1687 insertions(+), 142 deletions(-) create mode 100644 neural_compressor/experimental/pytorch_pruner/__init__.py create mode 100644 neural_compressor/experimental/pytorch_pruner/logger.py create mode 100644 neural_compressor/experimental/pytorch_pruner/patterns.py create mode 100644 neural_compressor/experimental/pytorch_pruner/prune_utils.py create mode 100644 neural_compressor/experimental/pytorch_pruner/pruner.py create mode 100644 neural_compressor/experimental/pytorch_pruner/pruning.py create mode 100644 neural_compressor/experimental/pytorch_pruner/scheduler.py diff --git a/neural_compressor/conf/config.py b/neural_compressor/conf/config.py index 208f754ddf8..c3f8f4afb6f 100644 --- a/neural_compressor/conf/config.py +++ b/neural_compressor/conf/config.py @@ -20,13 +20,16 @@ from ..adaptor import FRAMEWORKS from ..strategy import STRATEGIES from ..objective import OBJECTIVES +from ..pruner.pruner_legacy import PRUNERS from ..utils import logger from ..version import __version__ import re import copy +import itertools from collections import OrderedDict from .dotdict import DotDict, deep_set -import datetime +import os, datetime + def constructor_register(cls): yaml_key = "!{}".format(cls.__name__) @@ -45,12 +48,13 @@ def constructor(loader, node): ) return cls + @constructor_register class Pruner(): def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, target_sparsity=None, update_frequency=1, method='per_tensor', - prune_type='basic_magnitude',##for pytorch pruning, these values should be None + prune_type='basic_magnitude', ##for pytorch pruning, these values should be None start_step=None, end_step=None, update_frequency_on_step=None, prune_domain=None, sparsity_decay_type=None, pattern="tile_pattern_1x1", names=None, extra_excluded_names=None, parameters=None): @@ -72,9 +76,10 @@ def __init__(self, 
start_epoch=None, end_epoch=None, initial_sparsity=None, # 'now only support {}'.format(PRUNERS.keys()) self.prune_type = prune_type self.method = method - self.names= names + self.names = names self.parameters = parameters + # Schema library has different loading sequence priorities for different # value types. # To make sure the fields under dataloader.transform field of yaml file @@ -85,15 +90,18 @@ def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, yaml.SafeLoader.add_constructor('tag:yaml.org,2002:python/tuple', lambda loader, node: tuple(loader.construct_sequence(node))) + def _valid_accuracy_field(key, scope, error): assert bool( 'relative' in scope['accuracy_criterion']) != bool( 'absolute' in scope['accuracy_criterion']) + def _valid_prune_epoch(key, scope, error): if "start_epoch" in scope[key] and "end_epoch" in scope[key]: assert scope[key]["start_epoch"] <= scope[key]["end_epoch"] + def _valid_prune_sparsity(key, scope, error): if "initial_sparsity" in scope[key] and "target_sparsity" in scope[key]: assert scope[key]["initial_sparsity"] <= scope[key]["target_sparsity"] @@ -102,14 +110,17 @@ def _valid_prune_sparsity(key, scope, error): elif "target_sparsity" in scope[key]: assert scope[key]["target_sparsity"] < 1 + def _valid_multi_objectives(key, scope, error): if 'weight' in scope[key] and scope[key]['weight'] is not None: assert len(scope[key]['objective']) == len(scope[key]['weight']) + def _valid_multi_metrics(key, scope, error): if 'metric' in scope and 'multi_metrics' in scope: assert False + def _valid_metric_length(key, scope, error): metrics = [i for i in scope[key] if i != 'weight' and i != 'higher_is_better'] if 'weight' in scope[key] and scope[key]['weight'] is not None: @@ -117,6 +128,7 @@ def _valid_metric_length(key, scope, error): if 'higher_is_better' in scope[key] and scope[key]['higher_is_better'] is not None: assert len(input_to_list_bool(scope[key]['higher_is_better'])) == len(metrics) + # used for '123.68 
116.78 103.94' style to float list def input_to_list_float(data): if isinstance(data, str): @@ -128,6 +140,7 @@ def input_to_list_float(data): assert isinstance(data, list) return [float(d) for d in data] + def input_to_list_bool(data): if isinstance(data, str): if ',' in data: @@ -141,6 +154,7 @@ def input_to_list_bool(data): assert isinstance(data, list) and all([isinstance(i, bool) for i in data]) return data + def input_int_to_float(data): if isinstance(data, str): # used for '123.68, 116.78, 103.94' style @@ -159,6 +173,7 @@ def input_int_to_float(data): elif isinstance(data, int): return float(data) + def input_to_list_int(data): if isinstance(data, str): return [int(s.strip()) for s in data.split(',')] @@ -169,6 +184,7 @@ def input_to_list_int(data): assert isinstance(data, list) return [int(d) for d in data] + def input_to_list(data): if isinstance(data, str): if ',' in data: @@ -182,6 +198,7 @@ def input_to_list(data): assert isinstance(data, list) return data + def list_to_tuple(data): if isinstance(data, str): return tuple([int(s.strip()) for s in data.split(',')]) @@ -195,6 +212,7 @@ def list_to_tuple(data): else: return tuple([int(s) for s in data]) + def percent_to_float(data): if isinstance(data, str) and re.match(r'-?\d+(\.\d+)?%', data): data = float(data.strip('%')) / 100 @@ -204,6 +222,7 @@ def percent_to_float(data): assert isinstance(data, float), 'This field should be float, int or percent string' return data + ops_schema = Schema({ Optional('weight', default=None): { Optional('granularity'): And( @@ -219,7 +238,7 @@ def percent_to_float(data): Optional('algorithm'): And( list, lambda s: all(i in ['minmax'] for i in s)), - Optional('bit'): And( + Optional('bit'): And( Or(float, list), Use(input_to_list_float), lambda s: all(0.0 < i <= 7.0 for i in s)) @@ -250,7 +269,7 @@ def percent_to_float(data): Optional('precisions', default={'precisions': ['fp32']}): And( Or(str, list), Use(input_to_list), - lambda s: all(i in [ 'fp32', 'bf16'] for i in 
s)), + lambda s: all(i in ['fp32', 'bf16'] for i in s)), Optional('op_wise', default={'weight': {}, 'activation': {}}): { Optional('weight', default=None): { @@ -264,7 +283,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['fp32', 'bf16'] for i in s)), - } + } } }) @@ -273,7 +292,7 @@ def percent_to_float(data): Optional('precisions', default={'precisions': ['fp32']}): And( Or(str, list), Use(input_to_list), - lambda s: all(i in [ 'fp32', 'bf16'] for i in s)), + lambda s: all(i in ['fp32', 'bf16'] for i in s)), Optional('op_wise', default={'weight': {}, 'activation': {}}): { Optional('weight', default=None): { @@ -287,7 +306,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['fp32', 'bf16'] for i in s)), - } + } } }) @@ -303,7 +322,7 @@ def percent_to_float(data): }) transform_schema = Schema({ - Optional('ResizeWithRatio'):{ + Optional('ResizeWithRatio'): { Optional('min_dim'): int, Optional('max_dim'): int, Optional('padding'): bool, @@ -320,7 +339,7 @@ def percent_to_float(data): }, Optional('RandomResizedCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('scale'): And(list, lambda s: all(isinstance(i, float) for i in s)), Optional('ratio'): And(list, lambda s: all(isinstance(i, float) for i in s)), Optional('interpolation'): And( @@ -337,7 +356,7 @@ def percent_to_float(data): 'width': int, 'height': int, 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('interpolation'): And( str, lambda s: s in ['nearest', 'bilinear', 'bicubic']), @@ -353,23 +372,23 @@ def percent_to_float(data): }, Optional('Resize'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('interpolation'): And( str, lambda s: s in ['nearest', 'bilinear', 
'bicubic']), }, Optional('RandomCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)) + And(int, lambda s: s > 0)) }, Optional('Rescale'): Or({}, None), Optional('CenterCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)) + And(int, lambda s: s > 0)) }, Optional('PaddedCenterCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('crop_padding'): And(int, lambda s: s > 0), }, Optional('ToArray'): Or({}, None), @@ -409,7 +428,7 @@ def percent_to_float(data): Optional('mean_value'): And(Or(str, list), Use(input_to_list_float)), Optional('scale'): float, }, - Optional('ResizeWithAspectRatio'):{ + Optional('ResizeWithAspectRatio'): { 'height': And(int, lambda s: s > 0), 'width': And(int, lambda s: s > 0), }, @@ -425,7 +444,7 @@ def percent_to_float(data): }) postprocess_schema = Schema({ - Optional('LabelShift'): int, + Optional('LabelShift'): int, Optional('Collect'): { 'length': int }, @@ -507,7 +526,7 @@ def percent_to_float(data): And(str, Use(input_int_to_float))), Optional('dtype'): And(Or(str, list), Use(input_to_list)), }, - + Optional('dummy'): { 'shape': And(Or(str, list), Use(list_to_tuple)), Optional('low'): Or( @@ -587,8 +606,8 @@ def percent_to_float(data): 'dataset': dataset_schema, Optional('filter'): filter_schema, Optional('transform'): transform_schema, - Optional('shuffle', default = False): And(bool, lambda s: s in [True, False]), - Optional('distributed', default = False): And(bool, lambda s: s in [True, False]), + Optional('shuffle', default=False): And(bool, lambda s: s in [True, False]), + Optional('distributed', default=False): And(bool, lambda s: s in [True, False]), }) configs_schema = Schema({ @@ -621,7 +640,7 @@ def percent_to_float(data): Optional('beta_2', default=0.999): Use(float), Optional('epsilon', default=1e-07): Use(float), 
Optional('amsgrad', default=False): bool - }, + }, }) criterion_schema = Schema({ @@ -683,15 +702,15 @@ def percent_to_float(data): weight_compression_schema = Schema({ Optional('initial_sparsity', default=0): And(float, lambda s: s < 1.0 and s >= 0.0), Optional('target_sparsity', default=0.97): float, - Optional('max_sparsity_ratio_per_layer', default=0.98):float, + Optional('max_sparsity_ratio_per_layer', default=0.98): float, Optional('prune_type', default="basic_magnitude"): str, Optional('start_epoch', default=0): int, Optional('end_epoch', default=4): int, Optional('start_step', default=0): int, Optional('end_step', default=0): int, Optional('update_frequency', default=1.0): float, - Optional('update_frequency_on_step', default=1):int, - Optional('excluded_names', default=[]):list, + Optional('update_frequency_on_step', default=1): int, + Optional('excluded_names', default=[]): list, Optional('prune_domain', default="global"): str, Optional('names', default=[]): list, Optional('extra_excluded_names', default=None): list, @@ -700,7 +719,7 @@ def percent_to_float(data): Optional('pattern', default="tile_pattern_1x1"): str, Optional('pruners'): And(list, \ - lambda s: all(isinstance(i, Pruner) for i in s)) + lambda s: all(isinstance(i, Pruner) for i in s)) }) # weight_compression_pytorch_schema = Schema({},ignore_extra_keys=True) @@ -713,7 +732,7 @@ def percent_to_float(data): }) default_workspace = './nc_workspace/{}/'.format( - datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) COCOmAP_input_order_schema = Schema({ Optional('num_detections'): int, @@ -728,21 +747,21 @@ def percent_to_float(data): 'framework': And(str, lambda s: s in list(FRAMEWORKS.keys()) + ['NA']), Optional('inputs', default=[]): And(Or(str, list), Use(input_to_list)), Optional('outputs', default=[]): And(Or(str, list), Use(input_to_list)), - + }, Optional('version', default=float(__version__.split('.')[0])): And( - Or(float, - 
And(int, Use(input_int_to_float)), - And(str, Use(input_int_to_float))), - lambda s: s == float(__version__.split('.')[0])), + Or(float, + And(int, Use(input_int_to_float)), + And(str, Use(input_int_to_float))), + lambda s: s == float(__version__.split('.')[0])), Optional('device', default='cpu'): And(str, lambda s: s in ['cpu', 'gpu']), Optional('quantization', default={'approach': 'post_training_static_quant', \ 'calibration': {'sampling_size': [100]}, \ 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, 'model_wise': {'weight': {'bit': [7.0]}, 'activation': {}}, 'optimization_level': 1, @@ -764,27 +783,27 @@ def percent_to_float(data): Optional('dataloader', default=None): dataloader_schema }, Optional('recipes', default={'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}): { + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}): { Optional('scale_propagation_max_pooling', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('scale_propagation_concat', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('first_conv_or_matmul_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('last_conv_or_matmul_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda 
s: s in [True, False]), Optional('pre_post_process_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('fast_bias_correction', default=False): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('weight_correction', default=False): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), }, Optional('model_wise', default={'weight': {'bit': [7.0]}, 'activation': {}}): { - Optional('weight', default= {'bit': [7.0]}): { + Optional('weight', default={'bit': [7.0]}): { Optional('granularity', default=None): And( Or(str, list), Use(input_to_list), @@ -802,7 +821,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['minmax'] for i in s)), - Optional('bit', default=[7.0]): And( + Optional('bit', default=[7.0]): And( Or(float, list), Use(input_to_list_float), lambda s: all(0.0 < i <= 7.0 for i in s)) @@ -849,16 +868,16 @@ def percent_to_float(data): Optional('model_conversion'): model_conversion_schema, Optional('tuning', default={ - 'strategy': {'name': 'basic'}, + 'strategy': {'name': 'basic'}, 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True}, 'objective': 'performance', 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False}, 'random_seed': 1978, 'tensorboard': False, 'workspace': {'path': default_workspace}, 'diagnosis': False, - }): { + }): { Optional('strategy', default={'name': 'basic'}): { - 'name': And(str, lambda s: s in STRATEGIES), + 'name': And(str, lambda s: s in STRATEGIES), Optional('sigopt_api_token'): str, Optional('sigopt_project_id'): str, Optional('sigopt_experiment_name', default='nc-tune'): str, @@ -866,7 +885,7 @@ def percent_to_float(data): Optional('latency_weight', default=1.0): float, Optional('confidence_batches', default=2): int, Optional('hawq_v2_loss', default=None): object, - } , + }, Hook('accuracy_criterion', 
handler=_valid_accuracy_field): object, Optional('accuracy_criterion', default={'relative': 0.01}): { Optional('relative'): And(Or(str, float), Use(percent_to_float)), @@ -875,7 +894,7 @@ def percent_to_float(data): }, Optional('objective', default='performance'): And(str, lambda s: s in OBJECTIVES), Hook('multi_objectives', handler=_valid_multi_objectives): object, - Optional('multi_objectives'):{ + Optional('multi_objectives'): { Optional('objective'): And( Or(str, list), Use(input_to_list), lambda s: all(i in OBJECTIVES for i in s)), Optional('weight'): And(Or(str, list), Use(input_to_list_float)), @@ -895,18 +914,18 @@ def percent_to_float(data): Optional('path', default=None): str, Optional('resume'): str }, - Optional('diagnosis', default = { + Optional('diagnosis', default={ 'diagnosis_after_tuning': False, 'op_list': [], 'iteration_list': [1], 'inspect_type': 'activation', 'save_to_disk': True, 'save_path': './nc_workspace/inspect_saved/', - }):{ + }): { Optional('diagnosis_after_tuning', default=False): And(bool, lambda s: s in [True, False]), Optional('op_list', default=[]): And(Or(str, list), Use(input_to_list)), Optional('iteration_list', default=[1]): And(Or(int, list), Use(input_to_list_int)), - Optional('inspect_type', default='all'): And(str, lambda s : s in ['all', 'activation', 'weight']), + Optional('inspect_type', default='all'): And(str, lambda s: s in ['all', 'activation', 'weight']), Optional('save_to_disk', default=True): And(bool, lambda s: s in [True, False]), Optional('save_path', default='./nc_workspace/inspect_saved/'): str, }, @@ -923,8 +942,8 @@ def percent_to_float(data): Optional('mAP'): { Optional('anno_path'): str, Optional('iou_thrs', default=0.5): - Or(And(str, lambda s: s in ['0.5:0.05:0.95']), - And(float, lambda s: s <= 1.0 and s >= 0.0)), + Or(And(str, lambda s: s in ['0.5:0.05:0.95']), + And(float, lambda s: s <= 1.0 and s >= 0.0)), Optional('map_points', default=0): And(int, lambda s: s in [0, 11, 101]) }, 
Optional('COCOmAP'): { @@ -934,10 +953,10 @@ def percent_to_float(data): Optional('COCOmAPv2'): { Optional('anno_path'): str, Optional('map_key', default='DetectionBoxes_Precision/mAP'): str, - Optional('output_index_mapping', default={'num_detections': -1, - 'boxes': 0, - 'scores': 1, - 'classes': 2}): COCOmAP_input_order_schema + Optional('output_index_mapping', default={'num_detections': -1, + 'boxes': 0, + 'scores': 1, + 'classes': 2}): COCOmAP_input_order_schema }, Optional('VOCmAP'): { Optional('anno_path'): str @@ -966,14 +985,14 @@ def percent_to_float(data): Optional('ROC'): { Optional('task'): str }, - }, + }, Optional('metric', default=None): { Optional('topk'): And(int, lambda s: s in [1, 5]), Optional('mAP'): { Optional('anno_path'): str, Optional('iou_thrs', default=0.5): - Or(And(str, lambda s: s in ['0.5:0.05:0.95']), - And(float, lambda s: s <= 1.0 and s >= 0.0)), + Or(And(str, lambda s: s in ['0.5:0.05:0.95']), + And(float, lambda s: s <= 1.0 and s >= 0.0)), Optional('map_points', default=0): And(int, lambda s: s in [0, 11, 101]) }, Optional('COCOmAP'): { @@ -983,10 +1002,10 @@ def percent_to_float(data): Optional('COCOmAPv2'): { Optional('anno_path'): str, Optional('map_key', default='DetectionBoxes_Precision/mAP'): str, - Optional('output_index_mapping', default={'num_detections': -1, - 'boxes': 0, - 'scores': 1, - 'classes': 2}): COCOmAP_input_order_schema + Optional('output_index_mapping', default={'num_detections': -1, + 'boxes': 0, + 'scores': 1, + 'classes': 2}): COCOmAP_input_order_schema }, Optional('VOCmAP'): { Optional('anno_path'): str @@ -1051,7 +1070,7 @@ def percent_to_float(data): Optional("higher_is_better", default=[]): list, Optional("max_trials", default=1): int, Optional("seed", default=42): int, - }, + }, Optional("flash_distillation"): { Optional("knowledge_transfer"): { Optional("block_names", default=[]): list, @@ -1060,7 +1079,7 @@ def percent_to_float(data): Optional("loss_weights", default=[]): list, 
Optional("add_origin_loss", default=[]): list, Optional("train_steps", default=[]): list, - }, + }, Optional("regular_distillation"): { Optional("block_names", default=[]): list, "layer_mappings_for_knowledge_transfer": list, @@ -1068,8 +1087,8 @@ def percent_to_float(data): Optional("loss_weights", default=[]): list, Optional("add_origin_loss", default=[]): list, Optional("train_steps", default=[]): list, - }, }, + }, }, Optional('nas'): { @@ -1081,7 +1100,7 @@ def percent_to_float(data): Optional("higher_is_better", default=None): list, Optional("max_trials", default=None): int, Optional("seed", default=42): int, - }, + }, Optional("dynas"): { Optional("supernet", default=None): str, Optional("metrics", default=None): list, @@ -1090,7 +1109,7 @@ def percent_to_float(data): Optional("results_csv_path", default=None): str, Optional("dataset_path", default=None): str, Optional("batch_size", default=64): int, - }, + }, }, Optional("train"): train_schema @@ -1099,7 +1118,7 @@ def percent_to_float(data): quantization_default_schema = Schema({ Optional('model', default={'name': 'default_model_name', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1108,13 +1127,13 @@ def percent_to_float(data): Optional('quantization', default={'approach': 'post_training_static_quant', \ 'calibration': {'sampling_size': [100]}, 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, 'model_wise': {'weight': {'bit': [7.0]}, 'activation': {}}, - }): dict, + }): dict, Optional('use_bf16', default=False): bool, Optional('optimization_level', 
default=1): int, Optional('tuning', default={ @@ -1131,7 +1150,7 @@ def percent_to_float(data): pruning_default_schema = Schema({ Optional('model', default={'name': 'default_model_name', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1143,9 +1162,9 @@ def percent_to_float(data): 'random_seed': 1978, 'tensorboard': False, 'workspace': {'path': default_workspace}}): dict, - Optional('pruning', default={'approach': {'weight_compression':{'initial_sparsity': 0.0, \ - 'target_sparsity': 0.97, 'start_epoch': 0, \ - 'end_epoch': 4}}}): dict, + Optional('pruning', default={'approach': {'weight_compression': {'initial_sparsity': 0.0, \ + 'target_sparsity': 0.97, 'start_epoch': 0, \ + 'end_epoch': 4}}}): dict, Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict }) @@ -1153,21 +1172,21 @@ def percent_to_float(data): graph_optimization_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, Optional('device', default='cpu'): str, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 
'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('use_bf16', default=False): bool, @@ -1181,27 +1200,27 @@ def percent_to_float(data): Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict, - Optional('graph_optimization', default={'precisions': ['bf16, fp32']}): dict + Optional('graph_optimization', default={'precisions': ['bf16, fp32']}): dict }) mixed_precision_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, Optional('device', default='cpu'): str, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('use_bf16', default=False): bool, @@ -1215,13 +1234,13 @@ def percent_to_float(data): Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict, - Optional('mixed_precision', default={'precisions': ['bf16, fp32']}): dict + Optional('mixed_precision', default={'precisions': ['bf16, fp32']}): dict }) benchmark_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', 
\ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1229,15 +1248,15 @@ def percent_to_float(data): Optional('use_bf16', default=False): bool, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('tuning', default={ 'strategy': {'name': 'basic'}, @@ -1266,18 +1285,19 @@ def percent_to_float(data): 'workspace': {'path': default_workspace}}): dict, Optional('distillation', default={ - 'train': {'start_epoch': 0, 'end_epoch': 10, - 'iteration': 1000, 'frequency': 1, - 'optimizer': {'SGD': {'learning_rate': 0.001}}, - 'criterion': {'KnowledgeDistillationLoss': - {'temperature': 1.0, - 'loss_types': ['CE', 'KL'], - 'loss_weights': [0.5, 0.5]}}}}): dict, - - Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}):dict - + 'train': {'start_epoch': 0, 'end_epoch': 10, + 'iteration': 1000, 'frequency': 1, + 'optimizer': {'SGD': {'learning_rate': 0.001}}, + 'criterion': {'KnowledgeDistillationLoss': + {'temperature': 1.0, + 'loss_types': ['CE', 'KL'], + 'loss_weights': [0.5, 0.5]}}}}): dict, + + Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict + }) + class Conf(object): 
"""config parser. @@ -1285,6 +1305,7 @@ class Conf(object): cfg_fname (string): The path to the configuration file. """ + def __init__(self, cfg_fname): assert cfg_fname is not None self.usr_cfg = DotDict(self._read_cfg(cfg_fname)) @@ -1308,14 +1329,14 @@ def _read_cfg(self, cfg_fname): content).group().split("model")[0] content = re.sub(r'model\s*:', 'version: {}\n\n{}model:'.format( - float(__version__.split('.')[0]), - leading_whitespace - ), + float(__version__.split('.')[0]), + leading_whitespace + ), content) with open(cfg_fname, 'w') as f: f.write(content) - return validated_cfg + return validated_cfg except FileNotFoundError as f: logger.error("{}.".format(f)) raise RuntimeError( @@ -1337,12 +1358,12 @@ def map_pyconfig_to_cfg(self, pythonic_config): 'model.backend': pythonic_config.quantization.backend, 'model.quant_format': pythonic_config.quantization.quant_format, 'quantization.approach': pythonic_config.quantization.approach, - 'quantization.calibration.sampling_size': + 'quantization.calibration.sampling_size': pythonic_config.quantization.calibration_sampling_size, 'quantization.optype_wise': pythonic_config.quantization.op_type_list, 'quantization.op_wise': pythonic_config.quantization.op_name_list, 'tuning.strategy.name': pythonic_config.quantization.strategy, - 'tuning.accuracy_criterion.relative': + 'tuning.accuracy_criterion.relative': pythonic_config.quantization.accuracy_criterion.relative, 'tuning.accuracy_criterion.absolute': pythonic_config.quantization.accuracy_criterion.absolute, @@ -1359,12 +1380,12 @@ def map_pyconfig_to_cfg(self, pythonic_config): if pythonic_config.quantization.strategy_kwargs: st_kwargs = pythonic_config.quantization.strategy_kwargs for st_key in ['sigopt_api_token', 'sigopt_project_id', 'sigopt_experiment_name', \ - 'accuracy_weight', 'latency_weight', 'hawq_v2_loss']: + 'accuracy_weight', 'latency_weight', 'hawq_v2_loss']: if st_key in st_kwargs: - st_val = st_kwargs[st_key] + st_val = st_kwargs[st_key] 
mapping.update({'tuning.strategy.' + st_key: st_val}) - + if pythonic_config.distillation is not None: mapping.update({ 'distillation.train.criterion': pythonic_config.distillation.criterion, @@ -1419,7 +1440,7 @@ def map_pyconfig_to_cfg(self, pythonic_config): target_key = str(pythonic_config.quantization.accuracy_criterion) if target_key not in k and 'accuracy_criterion' in self.usr_cfg.tuning: if target_key in self.usr_cfg.tuning.accuracy_criterion and \ - k.split('.')[-1] in self.usr_cfg.tuning.accuracy_criterion: + k.split('.')[-1] in self.usr_cfg.tuning.accuracy_criterion: self.usr_cfg.tuning.accuracy_criterion.pop(k.split('.')[-1]) continue if v is not None: @@ -1443,11 +1464,11 @@ def _convert_cfg(self, src, dst): for key in src: if key in dst: if isinstance(dst[key], dict) and isinstance(src[key], dict): - if key in ['accuracy_criterion', 'metric', 'dataset', - 'criterion', 'optimizer']: + if key in ['accuracy_criterion', 'metric', 'dataset', + 'criterion', 'optimizer']: # accuracy_criterion can only have one of absolute and relative # others can only have one item - inter_key = src[key].keys() & dst[key].keys()-{'higher_is_better'} + inter_key = src[key].keys() & dst[key].keys() - {'higher_is_better'} if len(inter_key) == 0: dst[key] = {} if key == 'accuracy' and src[key].get('multi_metrics', None): @@ -1463,6 +1484,7 @@ def _convert_cfg(self, src, dst): dst[key] = src[key] return dst + class Quantization_Conf(Conf): """config parser. @@ -1526,6 +1548,7 @@ def modelwise_tune_space(self, model_wise_quant): return self._model_wise_tune_space + class Pruning_Conf(Conf): """config parser. @@ -1544,6 +1567,7 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(pruning_default_schema.validate(dict())) + class Graph_Optimization_Conf(Quantization_Conf): """config parser. 
@@ -1561,6 +1585,7 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(graph_optimization_default_schema.validate(dict())) + class MixedPrecision_Conf(Quantization_Conf): """config parser. @@ -1578,6 +1603,7 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(mixed_precision_default_schema.validate(dict())) + class Benchmark_Conf(Conf): """config parser. @@ -1595,6 +1621,7 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(benchmark_default_schema.validate(dict())) + class Distillation_Conf(Conf): """config parser. @@ -1612,6 +1639,7 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(distillation_default_schema.validate(dict())) + class NASConfig(Conf): """config parser. @@ -1639,11 +1667,12 @@ def __init__(self, approach=None, search_space=None, search_algorithm=None): def validate(self): self.usr_cfg = schema.validate(self.usr_cfg) - + @property def nas(self): return self.usr_cfg.nas + class DefaultConf(DotDict): def __getitem__(self, key): if key not in self: @@ -1653,6 +1682,7 @@ def __getitem__(self, key): __getattr__ = __getitem__ + conf = DefaultConf({}) QuantConf = Quantization_Conf PruningConf = Pruning_Conf diff --git a/neural_compressor/experimental/pruning.py b/neural_compressor/experimental/pruning.py index 9280fa3e9b0..727b437f644 100644 --- a/neural_compressor/experimental/pruning.py +++ b/neural_compressor/experimental/pruning.py @@ -17,7 +17,7 @@ # limitations under the License. 
from .component import Component -from neural_compressor.pruner.pruner_legacy import PRUNERS +from ..pruner.pruner_legacy import PRUNERS from ..utils import logger from ..utils.utility import GLOBAL_STATE, MODE from ..utils.create_obj_from_config import create_dataloader, create_train_func, create_eval_func @@ -210,9 +210,14 @@ def generate_hooks(self): def generate_pruners(self): """Functions that generate pruners and set up self.pruners.""" for name in self.cfg.pruning.approach: - assert name == 'weight_compression', \ + assert name == 'weight_compression' or name == "weight_compression_pytorch", \ 'now we only support weight_compression and weight_compression_pytorch' + if self.cfg.pruning.approach.weight_compression_pytorch != None: + from .pytorch_pruner.pruning import Pruning as PytorchPruning + self.pytorch_pruner = PytorchPruning(self.cfg) + self.pruners.append(self.pytorch_pruner) + if self.cfg.pruning.approach.weight_compression != None: for pruner in self.cfg.pruning.approach.weight_compression.pruners: diff --git a/neural_compressor/experimental/pytorch_pruner/__init__.py b/neural_compressor/experimental/pytorch_pruner/__init__.py new file mode 100644 index 00000000000..359a68d8260 --- /dev/null +++ b/neural_compressor/experimental/pytorch_pruner/__init__.py @@ -0,0 +1,17 @@ +"""PyTorch Pruner module.""" +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/neural_compressor/experimental/pytorch_pruner/logger.py b/neural_compressor/experimental/pytorch_pruner/logger.py new file mode 100644 index 00000000000..fb5c26a035e --- /dev/null +++ b/neural_compressor/experimental/pytorch_pruner/logger.py @@ -0,0 +1,23 @@ +"""logger module.""" +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + from ...utils import logger +except: + import logging + logger = logging.getLogger(__name__) diff --git a/neural_compressor/experimental/pytorch_pruner/patterns.py b/neural_compressor/experimental/pytorch_pruner/patterns.py new file mode 100644 index 00000000000..a12b375cbb4 --- /dev/null +++ b/neural_compressor/experimental/pytorch_pruner/patterns.py @@ -0,0 +1,574 @@ +"""pattern module.""" +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +import torch +from .logger import logger + +PATTERNS = {} + + +def register_pattern(name): + """Class decorator used to register a Pattern subclass to the registry. + + Decorator function used before a Pattern subclasses. + Make sure that this Pattern class can be registered in PATTERNS. + + Args: + cls (class): The class of register. + name: A string. Define the pattern type which will be included in a pruning process. + + Returns: + cls: The class of register. + + """ + + def register(pattern): + PATTERNS[name] = pattern + return pattern + + return register + + +def get_pattern(config): + """Get registered pattern class. + + Get a Pattern object from PATTERNS. + + Args: + config: A config dict object. Contains the pattern information. + + Returns: + A Pattern object. + + Raises: + AssertionError: Currently only support patterns which have been registered in PATTERNS. + """ + name = config.pattern + name = name.split('_')[-1] + if "x" in name: + return PATTERNS["NxM"](config) + if ":" in name: + return PATTERNS["N:M"](config) + assert False, f"currently only support {PATTERNS.keys()}" + + +class Pattern: + """Pruning Pattern. + + Every Pruner object will contain a Pattern object. + It defines the basic pruning unit and how this unit will be pruned during pruning. + + Args: + config: A config dict object. Contains the pattern information. + + Attributes: + pattern: A config dict object. The pattern related part in args config. + is_global: A bool. Whether the pruning take global pruning option. + Global pruning means that all pruning layers are gathered to calculate pruning criteria. + Local pruning, on the contrast, means that pruning layers are to calculate criteria individually. 
+ """ + + def __init__(self, config): + """Initialize.""" + self.pattern = config.pattern + self.is_global = config.prune_domain == "global" + + def get_masks(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): + """Call when new masks for pruning are to be calculated. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. + + """ + if self.is_global: + return self.get_masks_global(scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer) + else: + return self.get_masks_local(scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer) + + def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): + """To be implemented in subclasses.""" + raise NotImplementedError + + def get_mask_single(self, score, exact_sparsity_ratio): + """Obtain a mask for one layer. + + Args: + score: A Tensor. Store the pruning scores of one layer. + exact_sparsity_ratio: A float. After pruning, the layer's sparsity will reach this value. + + Returns: + A Tensor with the identical size as score. a new mask. 
+ """ + flattern_score = torch.flatten(score) + k = int(exact_sparsity_ratio * flattern_score.numel()) + threshold, _ = torch.kthvalue(flattern_score, k) + if not k < 1: + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + else: + mask = torch.ones(score.shape, device=score.device) + return mask + + def get_block_size_dict(self, data): + """To be implemented in subclasses.""" + raise NotImplementedError + + def get_masks_local(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): + """Obtain layers' local masks. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. + """ + masks = {} + if isinstance(self, PatternNxM) and not isinstance(self.block_size, dict): + self.block_size = self.get_block_size_dict(pre_masks) + for key in scores.keys(): + score = {key: scores[key]} + pre_mask = {key: pre_masks[key]} + mask = self.get_masks_global(score, target_sparsity_ratio, pre_mask, max_sparsity_ratio_per_layer) + masks[key] = mask[key] + return masks + + def get_sparsity_ratio(self, pre_masks): + """Calulate the zero elements' ration in pre_masks. + + Args: + pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. + + Returns: + A float. The zero elements' ratio in pre_masks. 
+ """ + zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + pre_mask = pre_masks[key] + zero_cnt += torch.sum(pre_mask == 0.0).data.item() + total_cnt += pre_masks.numel() + return float(zero_cnt) / total_cnt + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map, by masking where weights' are zero. + + Args: + modules: A dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. + """ + pattern_lock_masks = {} + for key in modules.keys(): + weight = modules[key].weight + shape = weight.shape + mask = torch.ones(shape) + mask[weight == 0] = 0.0 + pattern_lock_masks[key] = mask.to(weight.device) + return pattern_lock_masks + + +@register_pattern('NxM') +class PatternNxM(Pattern): + """Pruning Pattern. + + A Pattern class derived from Pattern. In this pattern, the weights in a NxM block will be pruned or kept + during one pruning step. + + Args: + config: A config dict object. Contains the pattern information. + + Attributes: + block_size: A list of two Integers. The height and width of the block. + Please be aware that the vertical direction of a Linear layer's weight in PyTorch refer to output channel. + Because PyTorch's tensor matmul has a hidden transpose operation. + """ + + def __init__(self, config): + """Initialize.""" + super(PatternNxM, self).__init__(config) + pattern = self.pattern.split('_')[-1] + self.N = pattern.split('x')[0] + self.M = pattern.split('x')[1] + if self.N == "channel": ##channel-wise pruning mode + self.block_size = ["channel", int(self.M)] + elif self.M == "channel": ##channel-wise pruning mode + self.block_size = [int(self.N), "channel"] + else: + self.block_size = [int(pattern.split('x')[0]), int(pattern.split('x')[1])] + + def get_block_size_dict(self, data): + """Calulate the zero elements' ration in pre_masks. + + Args: + data: Dict{"layer_name": Tensor}. Store weights or scores. + + Returns: + A dict. 
Dict{"layer_name": [block_size_1, block_size_2]}. + Containing layers' corresponding pruning pattern's block shape. + Please be aware that because in channel-wise pruning, + different layers can have different pruning patterns. + """ + block_sizes_dict = {} + if self.N == "channel" or self.M == "channel": + for key in data.keys(): + if isinstance(data[key], torch.nn.Module): + shape = data[key].weight.shape + else: + shape = data[key].shape + if self.N == "channel": + block_sizes_dict[key] = [shape[0], 1] + else: + block_sizes_dict[key] = [1, shape[1]] + return block_sizes_dict + for key in data.keys(): + block_sizes_dict[key] = self.block_size + return block_sizes_dict + + def get_sparsity_ratio(self, pre_masks): + """Calulate the zero elements' ration in pre_masks. + + Args: + pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. + + Returns: + A float. Calculate the zero elements' ratio in pre_masks. + """ + zero_cnt = 0 + total_cnt = 0 + if isinstance(self.block_size, list): + self.block_size = self.get_block_size_dict(pre_masks) + for key in pre_masks.keys(): + block_size = self.block_size[key] + pre_mask = pre_masks[key] + shape = pre_mask.shape + if len(shape) == 4: + shape = pre_mask.reshape(pre_mask.shape[0], -1).shape + if shape[0] % block_size[0] != 0 or shape[1] % block_size[1] != 0: + logger.warning(f"layer {key} is not support under current pattern, ignoring") + continue + + new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], block_size[1]] + pre_mask = pre_mask.reshape(new_shape) + pre_mask_sum = pre_mask.sum(-1).sum(1) + zero_cnt += torch.sum(pre_mask_sum == 0.0).data.item() + total_cnt += pre_mask_sum.numel() + return float(zero_cnt) / total_cnt + + def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer, + keep_pre_mask=False): + """Generate masks for layers. + + Gather all layer's scores together and calculate a common threshold. 
+ This threshold will be applied for all layers. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. + keep_pre_masks: A bool. If True, keep the masks unchanged. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. + """ + if isinstance(self.block_size, list): + self.block_size = self.get_block_size_dict(scores) + new_scores = {} + not_divided_keys = [] + for key in scores.keys(): + block_size = self.block_size[key] + current_score = scores[key] + if len(current_score.shape) == 4: ##TODO need to verify whether it's ok for transposed conv + current_score = current_score.permute(0, 2, 3, 1) ##cout,k,k,cin + current_score = current_score.reshape(current_score.shape[0], -1) + shape = current_score.shape + if shape[0] % block_size[0] != 0 or shape[1] % block_size[1] != 0: ## only consider input channel + not_divided_keys.append(key) + continue + + new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], + block_size[1]] + current_score = current_score.reshape(new_shape) + current_score_sum = current_score.mean(-1).mean( + 1) ##TODO sum or mean is quite different for per channel pruning + new_scores[key] = current_score_sum + global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) + k = int(target_sparsity_ratio * global_scores.numel()) + masks = {} + if not k < 1: + threshold, _ = torch.kthvalue(global_scores, k) + for key in new_scores.keys(): + block_size = self.block_size[key] + score = new_scores[key] + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + mask = mask.repeat_interleave(block_size[0], 
dim=0).repeat_interleave(block_size[1], dim=-1) + if torch.sum(mask) / mask.numel() < 1.0 - max_sparsity_ratio_per_layer: + ##to prevent some layer not be purned too much + ##this is differnt with our original implementation + masks[key] = self.get_mask_single(new_scores[key], max_sparsity_ratio_per_layer) + masks[key] = masks[key].repeat_interleave(block_size[0], 0).repeat_interleave(block_size[1], -1) + # if pre_masks != {}:##when use one shot, this is not right + # masks[key] = pre_masks[key] + # else: + # masks[key] = mask + else: + masks[key] = mask + # if len(scores[key].shape) == 4: + # ##we need to revert back + # masks[key] = masks[key].reshape(scores[key].shape) + + for key in not_divided_keys: + p = scores[key] + masks[key] = torch.ones(p.shape).to(p.device) + logger.warning(f"{key} shape {scores[key].shape} cannot be divided by {self.pattern}") + + else: + for key in scores.keys(): + p = scores[key] + masks[key] = torch.ones(p.shape).to(p.device) + + for key in masks.keys(): + if len(scores[key].shape) == 4 and len(masks[key].shape) == 2: ## need to permute + mask = masks[key] + mask = mask.reshape(scores[key].shape[0], scores[key].shape[2], scores[key].shape[3], + scores[key].shape[1]) + mask = mask.permute(0, 3, 1, 2) + masks[key] = mask + return masks + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map, by masking where weights' are zero. + + Args: + modules: A dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. 
+ """ + pattern_lock_masks = {} + if isinstance(self.block_size, list): + self.block_size = self.get_block_size_dict(modules) + for key in modules.keys(): + block_size = self.block_size[key] + weight = modules[key].weight + if len(weight.shape) == 4: # conv + weight = weight.permute(0, 2, 3, 1) + weight = weight.reshape(weight.shape[0], -1) + shape = weight.shape + new_shape = [shape[0] // block_size[0], block_size[0], shape[1] // block_size[1], block_size[1]] + p = weight.reshape(new_shape) + p_mag = p.abs() # avoid the scene which sum is zero but weights are not + weight_block_sum = p_mag.sum(-1).sum(1) + mask = torch.ones(weight_block_sum.shape) + mask[weight_block_sum == 0] = 0.0 + mask = mask.repeat_interleave(block_size[0], dim=0).repeat_interleave(block_size[1], dim=-1) + orig_shape = modules[key].weight.shape + if len(orig_shape) == 4: + mask = mask.reshape(orig_shape[0], orig_shape[2], orig_shape[3], orig_shape[1]) + mask = mask.permute(0, 3, 1, 2) + pattern_lock_masks[key] = mask.to(weight.device) + return pattern_lock_masks + + +@register_pattern('N:M') +class PatternNInM(Pattern): + """Pruning Pattern. + + A Pattern class derived from Pattern. In this pattern, N out of every M continuous weights will be pruned. + For more info of this pattern, please refer to + https://github.com/intel/neural-compressor/blob/master/docs/pruning.md + + Args: + config: A config dict object. Contains the pattern information. + + Attributes: + N: The number of elements to be prune in a weight sequence. + M: The size of the weight sequence. + + """ + + def __init__(self, config): + """Initialize.""" + super(PatternNInM, self).__init__(config) + pattern = self.pattern.split('_')[-1] + self.N = int(pattern.split(':')[0]) + self.M = int(pattern.split(':')[1]) ##m is bigger + + def get_sparsity_ratio(self, pre_masks): + """Calulate the zero elements' ration in pre_masks. + + Args: + pre_masks: Dict{"layer_name": Tensor}. The masks generated after the last pruning step. 
+ + Returns: + A float. Calculate the zero elements' ratio in pre_masks. + """ + ##simply use elemwise sparsity + non_zero_cnt = 0 + total_cnt = 0 + for key in pre_masks.keys(): + non_zero_cnt += (torch.sum(pre_masks[key])).data.item() + total_cnt += pre_masks[key].numel() + return 1.0 - float(non_zero_cnt) / total_cnt + + def get_masks_global(self, scores, target_sparsity_ratio, pre_masks, max_sparsity_ratio_per_layer): + """Generate masks for layers. + + Gather all layer's scores together and calculate a common threshold. + This threshold will be applied for all layers. + + Args: + scores: A dict{“layer_name”: Tensor}. Store the pruning scores of weights. + target_sparsity_ratio: A float. After pruning, the model's sparsity will reach this value. + pre_masks: A dict{"layer_name": Tensor}. The masks generated after the last pruning step. + max_sparsity_ratio_per_layer: A float. The maximum sparsity that one layer can reach. + + Returns: + A dict with the identical size as pre_masks. Update the 0/1 values in it. 
+ """ + N = self.N + M = self.M + target_sparsity_ratio = target_sparsity_ratio / (float(N / M)) ##recover sparsity for block wise + all_nm_masks = {} + new_scores = {} + not_divided_keys = [] + for key in scores.keys(): + current_score = scores[key] + shape = current_score.shape + if shape[1] % M != 0: + not_divided_keys.append(key) + continue + if len(current_score.shape) == 4: ##TODO need to verify whether it's ok for transposed conv + current_score = current_score.permute(0, 2, 3, 1) ##cout,k,k,cin + current_score = current_score.reshape(current_score.shape[0], -1) + shape = current_score.shape + new_shape = [shape[0], shape[1] // M, M] + current_score_new = current_score.reshape(new_shape) + + threshold, _ = torch.kthvalue(current_score_new, N, dim=2) + threshold = threshold.unsqueeze(-1) + + threshold = threshold.expand(shape[0], shape[1] // M, M) + threshold = threshold.reshape((shape[0], shape[1])) + + one = torch.tensor([1.]).to(current_score.device) + zero = torch.tensor([0.]).to(current_score.device) + mask = torch.where(current_score <= threshold, zero, one) + current_score_new = current_score_new.reshape((shape[0], shape[1])) + ##to get the sum of N scores in each block with M + current_score_new = current_score_new * (1.0 - mask) + current_score_new = current_score_new.reshape(shape[0], shape[1] // M, M) + score_sum = torch.mean(current_score_new, dim=-1) + all_nm_masks[key] = mask + new_scores[key] = score_sum + + global_scores = torch.cat([torch.flatten(v) for v in new_scores.values()]) + k = int(target_sparsity_ratio * global_scores.numel()) + masks = {} + if not k < 1: + threshold, _ = torch.kthvalue(global_scores, k) + for key in new_scores.keys(): + score = new_scores[key] + zero = torch.tensor([0.]).to(score.device) + one = torch.tensor([1.]).to(score.device) + mask = torch.where(score <= threshold, zero, one) + mask = mask.repeat_interleave(M, dim=-1) + ## both zero will be zero + mask = (mask + all_nm_masks[key]) + mask = torch.where(mask <= 
0, zero, one) + if torch.sum(mask) / mask.numel() < 1.0 - max_sparsity_ratio_per_layer: + ##trick, to prevent some layer not be purned too much + masks[key] = self.get_mask_single(new_scores[key], max_sparsity_ratio_per_layer) + masks[key] = masks[key].repeat_interleave(M, dim=-1) + ## both zero will be zero + masks[key] = (masks[key] + all_nm_masks[key]) + masks[key] = torch.where(masks[key] <= 0, zero, one) + else: + masks[key] = mask + for key in not_divided_keys: + p = scores[key] + masks[key] = torch.ones(p.shape).to(p.device) + logger.warning(f"{key} shape {scores[key].shape} cannot be divided by {self.pattern}") + + else: + for key in scores.keys(): + p = scores[key] + masks[key] = torch.ones(p.shape).to(p.device) + for key in masks.keys(): + if len(scores[key].shape) == 4 and len(masks[key].shape) == 2: ## need to permute + mask = masks[key] + mask = mask.reshape(scores[key].shape[0], scores[key].shape[2], scores[key].shape[3], + scores[key].shape[1]) + mask = mask.permute(0, 3, 1, 2) + masks[key] = mask + + return masks + + def get_pattern_lock_masks(self, modules): + """Obtain masks from original weight map, by masking where weights' are zero. + + Args: + modules: A dict{“layer_name”: Tensor}. Store weights. + + Returns: + A dict with the identical size as modules, containing pattern lock masks. 
+ """ + pattern_lock_masks = {} + N, M = self.N, self.M + for key in modules.keys(): + weight = modules[key].weight + if len(weight.shape) == 4: # conv + weight = weight.permute(0, 2, 3, 1) + weight = weight.reshape(weight.shape[0], -1) + shape = weight.shape + ##TODO need to check whether it can be divisible later + new_shape = [shape[0], shape[1] // M, M] + weight_new = weight.reshape(new_shape) + mask1 = torch.ones(weight_new.shape) + mask2 = torch.ones(weight_new.shape) + nonzeros = torch.count_nonzero(weight_new, dim=-1) + zeros = M - nonzeros + mask1[weight_new == 0] = 0.0 + mask2[zeros >= N] = 0.0 + mask3 = mask1 + mask2 # zero in mask3 means its block has been completely pruned. + zero = torch.tensor([0.]).to(weight.device) + one = torch.tensor([1.]).to(weight.device) + mask = torch.where(mask3 == 0, zero, one) + mask = mask.reshape(shape) + orig_shape = modules[key].weight.shape + if len(orig_shape) == 4: + mask = mask.reshape(orig_shape[0], orig_shape[2], orig_shape[3], orig_shape[1]) + mask = mask.permute(0, 3, 1, 2) + + pattern_lock_masks[key] = mask.to(weight.device) + return pattern_lock_masks diff --git a/neural_compressor/experimental/pytorch_pruner/prune_utils.py b/neural_compressor/experimental/pytorch_pruner/prune_utils.py new file mode 100644 index 00000000000..2c4223f3576 --- /dev/null +++ b/neural_compressor/experimental/pytorch_pruner/prune_utils.py @@ -0,0 +1,221 @@ +"""prune utils.""" +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
def check_config(prune_config):
    """Validate that the key-values in a pruner config dict are legal.

    Args:
        prune_config: A config dict object. Contains Pruning parameters and configurations.

    Returns:
        None if everything is correct.

    Raises:
        AssertionError: When any configured value is outside its legal range.
    """
    # Fixed copy-pasted / wrong assertion messages so failures point at the real field.
    assert prune_config['start_step'] >= 0, "start_step should be greater than or equal to 0"
    assert prune_config['end_step'] >= -1, "end_step should be greater than or equal to -1"
    assert prune_config['end_step'] >= prune_config['start_step'], \
        "end_step should be greater than start_step"
    assert prune_config['target_sparsity'] >= 0 and prune_config['target_sparsity'] < 1.0, \
        "target_sparsity should be in range [0,1)"
    assert prune_config['update_frequency_on_step'] > 0, "update_frequency_on_step should be greater than 0"
    assert prune_config['max_sparsity_ratio_per_layer'] >= 0 and prune_config['max_sparsity_ratio_per_layer'] < 1, \
        "max_sparsity_ratio_per_layer should be in range [0,1)"
    assert prune_config['prune_domain'] == "global" or prune_config['prune_domain'] == "local", \
        "only support 'global' and 'local' prune domain"
    if "x" in prune_config["pattern"]:
        pattern = prune_config["pattern"].split('_')[-1].split('x')
        if pattern[0] == "channel" or pattern[1] == "channel":
            pass
        else:
            try:
                N = int(pattern[0])
                M = int(pattern[1])
            except (ValueError, TypeError):  # narrowed from bare except
                assert False, "N or M can't convert to int"
            assert N > 0, "N should be greater than 0"
            assert M > 0, "M should be greater than 0"
    if ":" in prune_config["pattern"]:
        pattern = prune_config["pattern"].split('_')[-1].split(':')
        try:
            N = int(pattern[0])
            M = int(pattern[1])
        except (ValueError, TypeError):  # narrowed from bare except
            assert False, "N or M can't convert to int"
        assert N > 0, "N should be greater than 0"
        assert M > N, "M should be greater than N"
        # N:M pruning can remove at most N out of every M weights, so cap the
        # reachable sparsity accordingly.
        max_ratio = float(N) / M
        assert prune_config['target_sparsity'] <= max_ratio, \
            "in N:M pattern, the max sparsity is N/M={}".format(max_ratio)
        prune_config['max_sparsity_ratio_per_layer'] = min(max_ratio, prune_config['max_sparsity_ratio_per_layer'])


def reset_non_value_to_default(obj, key, default):
    """Return obj's value for key, falling back to default when absent or None.

    Works for both dict-like and attribute-style configuration objects.

    Args:
        obj: A dict or an object carrying configuration values.
        key: A string. The key (or attribute name) to look up.
        default: The value returned when key is missing or explicitly None.

    Returns:
        The stored value, or default.
    """
    if isinstance(obj, dict):
        if key not in obj.keys() or obj[key] is None:
            return default
        return obj[key]
    if not hasattr(obj, key) or getattr(obj, key) is None:
        return default
    return getattr(obj, key)


def process_and_check_config(val):
    """Convert an initial configuration object to per-pruner configurations.

    Global values act as defaults; every entry under 'pruners' may override them.
    Each resulting pruner config is validated with check_config.

    Args:
        val: A dict directly read from a config file.

    Returns:
        A list of DotDict objects, one regularized config per pruner.
    """
    val = val["pruning"]['approach']['weight_compression_pytorch']
    start_step = reset_non_value_to_default(val, "start_step", 0)
    end_step = reset_non_value_to_default(val, "end_step", 0)
    excluded_names = reset_non_value_to_default(val, "excluded_names", [])
    prune_layer_type = reset_non_value_to_default(val, "prune_layer_type", ['Conv2d', 'Linear'])
    target_sparsity = reset_non_value_to_default(val, "target_sparsity", 0.0)  ## be careful with this val
    update_frequency_on_step = int(reset_non_value_to_default(val, "update_frequency_on_step", 1))
    prune_domain = reset_non_value_to_default(val, "prune_domain", "global")
    prune_type = reset_non_value_to_default(val, "prune_type", "snip_momentum")
    sparsity_decay_type = reset_non_value_to_default(val, "sparsity_decay_type", "exp")
    max_sparsity_ratio_per_layer = reset_non_value_to_default(val, "max_sparsity_ratio_per_layer", 0.98)
    names = reset_non_value_to_default(val, "names", [])
    extra_excluded_names = reset_non_value_to_default(val, "extra_excluded_names", [])
    pattern = reset_non_value_to_default(val, "pattern", "tile_pattern_4x1")

    pruners_info = []
    for info in val['pruners']:
        pruner = {}
        pruner['start_step'] = reset_non_value_to_default(info, 'start_step', start_step)
        pruner['end_step'] = reset_non_value_to_default(info, 'end_step', end_step)
        pruner['excluded_names'] = reset_non_value_to_default(info, 'excluded_names', excluded_names)
        pruner['prune_layer_type'] = reset_non_value_to_default(info, 'prune_layer_type', prune_layer_type)
        pruner['target_sparsity'] = reset_non_value_to_default(info, 'target_sparsity', target_sparsity)
        pruner['update_frequency_on_step'] = reset_non_value_to_default(info, 'update_frequency_on_step',
                                                                        update_frequency_on_step)
        pruner['prune_domain'] = reset_non_value_to_default(info, 'prune_domain', prune_domain)
        pruner['prune_type'] = reset_non_value_to_default(info, 'prune_type', prune_type)
        pruner['sparsity_decay_type'] = reset_non_value_to_default(info, 'sparsity_decay_type', sparsity_decay_type)
        pruner['max_sparsity_ratio_per_layer'] = reset_non_value_to_default(info, 'max_sparsity_ratio_per_layer',
                                                                            max_sparsity_ratio_per_layer)
        pruner['names'] = reset_non_value_to_default(info, 'names', names)
        pruner['extra_excluded_names'] = reset_non_value_to_default(info, 'extra_excluded_names',
                                                                    extra_excluded_names)
        pruner['pattern'] = reset_non_value_to_default(info, 'pattern', pattern)
        check_config(pruner)
        pruner_info = DotDict(pruner)
        pruners_info.append(pruner_info)
    return pruners_info


def process_config(config):
    """Obtain a regularized list of pruner configs from a file path or DotDict.

    Args:
        config: A string (path to a yaml configuration file) or a DotDict.

    Returns:
        A list of DotDict objects, one per pruner (see process_and_check_config).

    Raises:
        RuntimeError: When the yaml file is missing or malformed.
    """
    if isinstance(config, str):
        try:
            with open(config, 'r') as f:
                content = f.read()
            try:
                from .schema_check import schema
            except ImportError:
                from ...conf.config import schema

            val = yaml.safe_load(content)
            schema.validate(val)
        except FileNotFoundError as f:
            logger.error("{}.".format(f))
            raise RuntimeError(
                "The yaml file does not exist. Please check the file name or path."
            )
        except Exception as e:
            logger.error("{}.".format(e))
            raise RuntimeError(
                "The yaml file format is not correct. Please refer to document."
            )

    elif isinstance(config, DotDict):
        val = config
    else:
        assert False, f"not supported type {config}"

    return process_and_check_config(val)


def parse_to_prune(model, config):
    """Keep target pruned layers.

    Args:
        model: A torch.nn.Module. The model whose layers are matched.
        config: A config dict with 'names' (regex list) and 'prune_layer_type'.

    Returns:
        A dict {"layer_name": module} with the layers selected for pruning.
    """
    modules = {}
    # An empty/None name list means "match everything".
    if config["names"] is None or config["names"] == []:
        config["names"] = [".*"]
    for raw in config["names"]:
        try:
            pattern = re.compile(raw)
        except re.error:  # narrowed from bare except
            assert False, f"regular expression match does not support {raw}"
        for name, module in filter(lambda t: pattern.search(t[0]), model.named_modules()):
            if type(module).__name__ in config["prune_layer_type"]:
                modules[name] = module
    return modules


def parse_not_to_prune(modules, config):
    """Drop layers that the config excludes from pruning.

    Args:
        modules: A dict {"layer_name": module} of candidate layers.
        config: A config dict with 'extra_excluded_names' and 'excluded_names'.

    Returns:
        A dict containing only the layers that are not excluded.
    """
    # Build a fresh list instead of extend()-ing config["extra_excluded_names"]:
    # the original mutated the config in place, growing it on every call.
    exclude_names = list(config["extra_excluded_names"]) + list(config["excluded_names"])

    patterns = [re.compile(s) for s in exclude_names]
    if len(patterns) <= 0:
        return modules
    new_module = {}
    for name in modules.keys():
        if any(p.search(name) for p in patterns):
            continue
        new_module[name] = modules[name]
    return new_module
import torch
from .patterns import get_pattern
from .scheduler import get_scheduler

from .logger import logger

# Registry mapping a 'prune_type' string to its Pruner subclass.
PRUNERS = {}


def register_pruners(name):
    """Class decorator to register a Pruner subclass to the registry.

    Decorator function used before a Pruner subclass.
    Make sure that the Pruner class decorated by this function can be registered in PRUNERS.

    Args:
        name: A string. Define the pruner type ('prune_type' config value).

    Returns:
        The decorator, which stores the class in PRUNERS and returns it unchanged.
    """

    def register(pruner):
        PRUNERS[name] = pruner
        return pruner

    return register


def get_pruner(modules, config):
    """Get registered pruner class.

    Get a Pruner object from PRUNERS.

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Returns:
        A Pruner object.

    Raises:
        AssertionError: Currently only supports pruners which have been registered in PRUNERS.
    """
    name = config["prune_type"]
    if name not in PRUNERS.keys():
        assert False, f"does not support {name}, currently only support {PRUNERS.keys()}"
    return PRUNERS[name](modules, config)


class Pruner:
    """Pruning Pruner.

    The class which executes the pruning process.
    1. Defines pruning functions called at step begin/end, epoch begin/end.
    2. Defines the pruning criteria (scores), provided by subclasses via update_scores.

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Attributes:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.
        masks: A dict {"module_name": Tensor}. Store the masks for modules' weights.
        scores: A dict {"module_name": Tensor}. Store the score for modules' weights,
            which are used to decide pruning parts with a criterion.
        pattern: A Pattern object. Defined in ./patterns.py
        scheduler: A scheduler object. Defined in ./scheduler.py
        current_sparsity_ratio: A float. Current model's sparsity ratio, initialized as zero.
        global_step: An integer. The total steps the model has run.
        start_step: An integer. When to trigger the pruning process.
        end_step: An integer. When to end the pruning process.
        update_frequency_on_step: An integer. The pruning frequency, which is valid when iterative
            pruning is enabled.
        target_sparsity_ratio: A float. The final sparsity after pruning.
        max_sparsity_ratio_per_layer: A float. Sparsity ratio maximum for every module.
    """

    def __init__(self, modules, config):
        """Initialize."""
        self.modules = modules
        self.config = config
        self.masks = {}
        self.scores = {}
        self.reg = None  ##TODO need to add reg
        self.pattern = get_pattern(config)
        self.scheduler = get_scheduler(config)
        self.current_sparsity_ratio = 0.0
        self._init()

    def _init(self):
        """Auxiliary function for initializing."""
        self.global_step = -1
        self.start_step = self.config['start_step']
        self.end_step = self.config['end_step']
        self.update_frequency_on_step = self.config['update_frequency_on_step']
        ##this is different with original code
        self.total_prune_cnt = (self.end_step - self.start_step + 1) \
                               // self.update_frequency_on_step
        self.completed_pruned_cnt = 0
        self.masks = {}
        # Masks start as all-ones (nothing pruned) on each module's own device.
        for key in self.modules.keys():
            module = self.modules[key]
            self.masks[key] = torch.ones(module.weight.shape).to(module.weight.device)  ##TODO support bias or others

        self.target_sparsity_ratio = self.config['target_sparsity']

        self.max_sparsity_ratio_per_layer = self.config['max_sparsity_ratio_per_layer']

    def on_epoch_begin(self, epoch):
        """Functions called in the beginning of each epoch."""
        pass

    def mask_weights(self):
        """Functions called when masks are applied on corresponding modules' weights.

        Weights are multiplied with masks. This is the formal pruning process.
        """
        with torch.no_grad():
            for key in self.modules.keys():
                module = self.modules[key]
                module.weight.data = module.weight.data * self.masks[key]

    def on_step_begin(self, local_step):
        """Functions called on the beginning of each step.

        Judge if the current step should execute a pruning process.
        If so, use scores and criteria to update the masks and prune the model.
        Otherwise, simply train the model with its original structure.
        """
        self.global_step += 1
        if not self.check_is_pruned_step(self.global_step):
            return

        # Target already reached: skip further pruning.
        if self.current_sparsity_ratio > self.target_sparsity_ratio:
            return

        current_target_sparsity_ratio = self.scheduler.update_sparsity_ratio(self.target_sparsity_ratio,
                                                                             self.completed_pruned_cnt,
                                                                             self.total_prune_cnt, self.masks)
        logger.info(f"current target ratio is {current_target_sparsity_ratio}")
        self.update_scores()
        self.completed_pruned_cnt += 1
        # Gradient-based pruners have no scores before the first backward pass.
        if self.scores == {}:
            return
        self.masks = self.pattern.get_masks(self.scores, current_target_sparsity_ratio, self.masks,
                                            self.max_sparsity_ratio_per_layer)
        self.mask_weights()

        self.current_sparsity_ratio = self.pattern.get_sparsity_ratio(self.masks)
        logger.info(f"current sparsity ratio is {self.current_sparsity_ratio}")

    def on_step_end(self):
        """Functions called in the end of each step."""
        pass

    def on_epoch_end(self):
        """Functions called in the end of each epoch."""
        pass

    def on_before_optimizer_step(self):
        """Functions called before the optimizer.step()."""
        pass

    def on_after_optimizer_step(self):
        """Functions called after the optimizer.step().

        Prune the model after optimization.
        """
        self.mask_weights()

    def on_train_begin(self):
        """Functions called in the beginning of training."""
        pass

    def on_train_end(self):
        """Functions called in the end of each training."""
        pass

    def on_before_eval(self):
        """Functions called in the beginning of evaluation."""
        pass

    def on_after_eval(self):
        """Functions called in the end of evaluation."""
        pass

    def check_is_pruned_step(self, step):
        """Decide whether the current step should execute a pruning process."""
        if step < self.start_step or step > self.end_step:
            return False
        if int(step - self.start_step) % self.update_frequency_on_step == 0:
            return True
        return False

    def update_scores(self):
        """Update self.scores. Overridden by subclasses implementing a criterion."""
        pass


@register_pruners('magnitude')
class MagnitudePruner(Pruner):
    """Pruning Pruner.

    A Pruner class derived from Pruner. In this pruner, the scores are calculated based on weights.

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Attributes:
        Inherit from parent class Pruner.
    """

    def __init__(self, modules, config):
        """Initialize."""
        super(MagnitudePruner, self).__init__(modules, config)
        self.scores = {}

    def update_scores(self):
        """Update self.scores with the current weight values.

        NOTE(review): scores are the raw (signed) weights, not their absolute
        values -- confirm the pattern handles the sign, otherwise large negative
        weights rank lowest and are pruned first.
        """
        with torch.no_grad():
            for key in self.modules.keys():
                p = self.modules[key].weight.data
                self.scores[key] = p


@register_pruners('snip')
class SnipPruner(Pruner):
    """Pruning Pruner.

    A Pruner class derived from Pruner. In this pruner, the scores are calculated based on SNIP.
    Please refer to SNIP: Single-shot Network Pruning based on Connection Sensitivity
    (https://arxiv.org/abs/1810.02340)

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Attributes:
        Inherit from parent class Pruner.
    """

    def __init__(self, modules, config):
        """Initialize."""
        super(SnipPruner, self).__init__(modules, config)
        assert self.config.end_step > 0, "gradient based criteria does not work on step 0"
        self.scores = {}

    def on_after_optimizer_step(self):
        """Functions called after the optimizer.step().

        Prune the model after optimization and update the scores based on weights and gradients.
        """
        self.mask_weights()
        with torch.no_grad():
            for key in self.modules.keys():
                p = self.modules[key].weight
                # SNIP sensitivity: |weight * gradient|.
                self.scores[key] = torch.abs(p * p.grad)


@register_pruners('snip_momentum')
class SnipMomentumPruner(Pruner):
    """Pruning Pruner.

    A Pruner class derived from Pruner. In this pruner, the scores are calculated based on SNIP.
    Moreover, the score map is updated with a momentum-like process.

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Attributes:
        Inherit from parent class Pruner.
    """

    def __init__(self, modules, config):
        """Initialize."""
        super(SnipMomentumPruner, self).__init__(modules, config)
        assert self.config.end_step > 0, "gradient based criteria does not work on step 0"
        # self.scores = {}
        for key in modules.keys():
            p = modules[key].weight
            self.scores[key] = torch.zeros(p.shape).to(p.device)

    def on_after_optimizer_step(self):
        """Functions called after the optimizer.step().

        Prune the model after optimization and update the scores based on weights and gradients.
        """
        self.mask_weights()
        with torch.no_grad():
            for key in self.modules.keys():
                p = self.modules[key].weight
                # Exponential moving accumulation of SNIP sensitivity.
                self.scores[key] *= 0.9  ##magic number: momentum coefficient
                self.scores[key] += 1.0 * torch.abs(p * p.grad)


@register_pruners('pattern_lock')
class PatternLockPruner(Pruner):
    """Pruning Pruner.

    A Pruner class derived from Pruner. In this pruner, the original model's sparsity pattern
    is fixed while training.
    This pruner is useful when you want to train a sparse model without changing its original structure.

    Args:
        modules: A dict {"module_name": Tensor}. Store the pruning modules' weights.
        config: A config dict object. Contains the pruner information.

    Attributes:
        Inherit from parent class Pruner.
    """

    def __init__(self, modules, config):
        """Initialize."""
        super(PatternLockPruner, self).__init__(modules, config)
        assert self.config.end_step == self.config.start_step, "pattern_lock pruner only supports one shot mode"

    def on_step_begin(self, local_step):
        """Functions called on the beginning of each step."""
        self.global_step += 1
        if not self.check_is_pruned_step(self.global_step):
            return
        self.masks = self.pattern.get_pattern_lock_masks(self.modules)

    def on_after_optimizer_step(self):
        """Functions called after the optimizer.step()."""
        self.mask_weights()
import torch.nn

from .prune_utils import process_config, parse_to_prune, parse_not_to_prune
from .pruner import get_pruner
from .logger import logger


class Pruning:
    """Pruning.

    The main class that users will use in code to do pruning.
    Contains at least one Pruner object.

    Args:
        config: a string. The path to a config file. For config file template, please refer to
            https://github.com/intel/neural-compressor/tree/master/examples/pytorch/nlp/huggingface_models/text-classification/pruning/pytorch_pruner/eager/

    Attributes:
        model: The model object to prune.
        config_file_path: A string. The path to a config file.
        pruners: A list. A list of Pruner objects.
        pruner_info: A config dict object. Contains pruners' information.
    """

    def __init__(self, config):
        """Initialize."""
        self.model = None
        self.config_file_path = config
        self.pruners = []
        self.pruner_info = process_config(self.config_file_path)

    def update_items_for_all_pruners(self, **kwargs):
        """Add user-defined arguments to the original configurations.

        The original config of pruning is read from a file.
        However, users can still modify configurations by passing key-value arguments in this function.
        Please note that only keys already present in the current configuration are applied.
        """
        for item in self.pruner_info:
            for key in kwargs:
                if key in item.keys():
                    item[key] = kwargs[key]

    def get_sparsity_ratio(self):
        """Calculate the sparsity of the pruned modules/layers.

        Returns:
            Three floats.
            elementwise_over_matmul_gemm_conv refers to zero elements' ratio in pruning layers.
            elementwise_over_all refers to zero elements' ratio in all layers in the model.
            blockwise_over_matmul_gemm_conv refers to all-zero blocks' ratio in pruning layers.
        """
        pattern_sparsity_cnt = 0
        element_sparsity_cnt = 0
        for pruner in self.pruners:
            modules = pruner.modules
            sparsity_ratio = pruner.pattern.get_sparsity_ratio(pruner.masks)
            cnt = 0
            for key in modules.keys():
                cnt += modules[key].weight.numel()
            pattern_sparsity_cnt += int(cnt * sparsity_ratio)
            for key in pruner.masks.keys():
                element_sparsity_cnt += torch.sum(pruner.masks[key] == 0).data.item()

        linear_conv_cnt = 0
        param_cnt = 0
        for name, module in self.model.named_modules():
            if type(module).__name__ in ["Linear"] or "Conv" in type(module).__name__:
                linear_conv_cnt += module.weight.numel()

        for n, param in self.model.named_parameters():
            param_cnt += param.numel()
        # NOTE(review): raises ZeroDivisionError when the model has no Linear/Conv
        # layers -- confirm callers only invoke this on prunable models.
        blockwise_over_matmul_gemm_conv = float(pattern_sparsity_cnt) / linear_conv_cnt
        elementwise_over_matmul_gemm_conv = float(element_sparsity_cnt) / linear_conv_cnt
        elementwise_over_all = float(
            element_sparsity_cnt) / param_cnt

        return elementwise_over_matmul_gemm_conv, elementwise_over_all, blockwise_over_matmul_gemm_conv

    def _generate_pruners(self):
        """Obtain Pruner objects, one per entry in pruner_info."""
        assert isinstance(self.model, torch.nn.Module)

        for info in self.pruner_info:
            # Select the layers to prune, then drop the explicitly excluded ones.
            modules = parse_to_prune(self.model, info)
            modules = parse_not_to_prune(modules, info)
            if modules == {}:
                logger.warning("one pruner hooks no layers, please have a check")

            self.pruners.append(get_pruner(modules, info))
            info['modules'] = [key for key in modules.keys()]
            info['len_of_modules'] = len(info['modules'])
            logger.info(info)

    def on_train_begin(self):
        """Functions called in the beginning of the training process.

        Before training, ensure that pruners are generated.
        """
        self._generate_pruners()  ##TODO is there a better place to place this

    def on_epoch_begin(self, epoch):
        """Functions called in the beginning of every epoch."""
        for pruner in self.pruners:
            pruner.on_epoch_begin(epoch)

    def on_step_begin(self, local_step):
        """Functions called in the beginning of every step."""
        for pruner in self.pruners:
            pruner.on_step_begin(local_step)

    def on_before_optimizer_step(self):
        """Functions called before optimizer.step()."""
        for pruner in self.pruners:
            pruner.on_before_optimizer_step()

    def on_step_end(self):
        """Functions called in the end of every step."""
        for pruner in self.pruners:
            pruner.on_step_end()

    def on_epoch_end(self):
        """Functions called in the end of every epoch."""
        for pruner in self.pruners:
            pruner.on_epoch_end()

    def on_train_end(self):
        """Functions called in the end of training."""
        for pruner in self.pruners:
            pruner.on_train_end()

    def on_before_eval(self):
        """Functions called in the beginning of evaluation."""
        for pruner in self.pruners:
            pruner.on_before_eval()

    def on_after_eval(self):
        """Functions called in the end of evaluation."""
        for pruner in self.pruners:
            pruner.on_after_eval()

    def on_after_optimizer_step(self):
        """Functions called after optimizer.step()."""
        for pruner in self.pruners:
            pruner.on_after_optimizer_step()
import math

# Registry mapping a scheduler-type string to its Scheduler subclass.
SCHEDULERS = {}


def register_scheduler(name):
    """Class decorator that registers a Scheduler subclass under ``name``.

    Args:
        name: A string. The scheduler type the class is registered under.

    Returns:
        The decorator, which stores the class in SCHEDULERS and returns it unchanged.
    """

    def _store(cls):
        SCHEDULERS[name] = cls
        return cls

    return _store


def get_scheduler(config):
    """Build the scheduler that matches the pruning config.

    A config whose start and end steps coincide describes a single pruning
    event ("oneshot"); anything else prunes gradually ("iterative").

    Args:
        config: A config dict object. Contains the scheduler information.

    Returns:
        A Scheduler object.
    """
    kind = "oneshot" if config.start_step == config.end_step else "iterative"
    return SCHEDULERS[kind](config)


class Scheduler:
    """Base class describing how the sparsity ratio evolves during pruning.

    Two flavours exist:
    1. iterative: prune the model from dense to the target sparsity gradually.
    2. oneshot: prune the model in a single step and reach the target sparsity.

    Attributes:
        config: A config dict object. Contains the scheduler information.
    """

    def __init__(self, config):
        """Store the scheduler configuration."""
        self.config = config

    def update_sparsity_ratio(self, aggressive_ratio, current_prune_step, total_prune_steps, masks):
        """To be implemented in subclasses."""
        raise NotImplementedError


@register_scheduler('oneshot')
class OneshotScheduler(Scheduler):
    """Scheduler that prunes the model to the target sparsity at once.

    Attributes:
        Inherit from parent class Scheduler.
    """

    def __init__(self, config):
        """Initialize via the base class."""
        super(OneshotScheduler, self).__init__(config)

    def update_sparsity_ratio(self, aggressive_ratio, current_prune_step, total_prune_steps, masks):
        """Return the requested ratio unchanged -- one shot means no schedule."""
        return aggressive_ratio


@register_scheduler('iterative')
class IterativeScheduler(Scheduler):
    """Scheduler that walks the model from dense to the target sparsity in steps.

    Attributes:
        Inherit from parent class Scheduler.
    """

    def __init__(self, config):
        """Initialize via the base class."""
        super(IterativeScheduler, self).__init__(config)

    def update_sparsity_ratio(self, target_ratio, current_prune_step, total_prune_steps, masks):
        """Compute the sparsity the model should reach after the next pruning step.

        Args:
            target_ratio: A float. The final target sparsity ratio.
            current_prune_step: An integer. The current pruning step.
            total_prune_steps: An integer. The total steps included in the pruning progress.
            masks: A dict {"module_name": Tensor}. The masks for modules' weights (unused here).

        Returns:
            A float. The target sparsity ratio the model will reach after the next pruning step.
        """
        # Never schedule beyond the per-layer cap.
        capped = min(self.config.max_sparsity_ratio_per_layer, target_ratio)
        progress = float(current_prune_step) / total_prune_steps
        decay = self.config.sparsity_decay_type

        if decay == "cos":
            scheduled = capped * (1.0 - math.cos(progress * (math.pi / 2)))
        elif decay == "exp":
            # Shrink the dense fraction by a constant factor each step.
            dense_step_ratio = (1.0 - capped) ** (1 / total_prune_steps)
            scheduled = 1.0 - dense_step_ratio ** current_prune_step
        elif decay == "linear":
            scheduled = capped * float(current_prune_step) / total_prune_steps
        elif decay == "cube":
            scheduled = capped * (progress ** 3)
        else:
            assert False, "{} is not supported".format(decay)

        return min(target_ratio, scheduled)
30f5f8eb2dca8bac903c94de42bae5efd4476c45..d453622ed5a71955d2c8ca0db6dfbc1f0b0bd4eb 100644 GIT binary patch literal 60008 zcma&NbwHC}_&01LpdwPzN{F^*NDQPLFc>nrr5mKXH)=3s zbU)+!d!IL+zurH#-8=Vv?sJ{bxz5$+o2I(LLlQcY8#it|R8st)edES$KEidw{dD@Z|3@P+ztZymzWu+STw+%;`M>-8Uyb!_-&Fpe7yZ{5n#|_@|9<;*<39)+ z-~VSwW$h=XkhcGW4&@dVNcI0ooX`JH;@Sn4@d&dEWj?b;u7<#nFJYCzrNt)ERNgSlI-s5OFI9; zQczSFA2Z-0=4oRZbXbQ}=k=D-Fcq)AT8lI=$&%D5 zJ`sG89B{l8?s0zXZtv=vTv*7(`SPXaUIP2c$;tArPi|hGTnvL4%TD7N^1L(nymMJ( zj;2FCoK?X#w*E6=eM1D=~Vw4e+?%`(>aD8v)mpvJU4<6 zm|b^R_EU2XdGO#-cYm4)MkDxiEz-cmYTEi!6M{8nYB(QeLm+ zHsk<`t&7nZ8cyH{6xowZ1sdmiAmy=dH@lL@k1H<+YPPs zabHd;APYY7cmSVr(1>rh>ksJpsin5zYc6X&UhN#nzYSJ}19UYzbKrjbgXr(v9RHN9 z)ewoJ(Mnpa?woe%&@Zc`kc9v(lGFj^cdbg^4|TMSXSk`jjw{M5yYp0%(N#?s+ZGxF zy=Oa3{8EuOk_>!uQI){R&5pdsz_iuG4EYIPPAADcFIXaKnu*hZ*t$-67-`m&r4&m4 zh2oY6jO2c=-msz3Oo10n(+{SLy^^W5APwtt+zw5g0CBD%heJuV59J;`i&J|0Gb`)e z=H_ND0-^Ht>sR!@rB*>HDP9GtfB2oYw$KOM9z6?KEY@2BwDm*%AaSfv#`U3K5?#+3m^*r_L*MR!+U4mF*2R22$`SQQ|l52O(3D6v1Gr@EA>#n zQn$2LgR`taQ{U8Io??;l#WI(>v5^t8kjh74V^Grq&0RSx2@+9V_7j1cYg!lQqY~QI zHZ?_(8n7kAF^Pm8g>D2z}a`tvXc4Cl73N##n1aeXCA%$E?MaGcH@^t?0ALN1;T4$Z*yBG$c*Em8mYib za*y&KnUmGh9<%DTe2g!;n?w&Y(y1I~3hmmrHj?S*!LZMJ=t=%#hB;ydU7=q82R- zgv?IUa_WT^8B z06=IrOM5l(EmA!r6?)ns%vrRVkX}3D5RJ^Q5WLJ9-{!~WrOtwllMVrsx2ni|K+QMIO^>Ob}+nJ`wtipEQx%|*+#Ci$bnx)PB0pW_4U=5Cv zIBr`w4rj*sNHBE5^0+?!ovQotzNqDq5Q<2rOU$$)x4fA!%iz*TAC&+nExo9^Na&FY}tlG5Q}>d}SK3iL~pend(l ziEF}xN6}p~u1zaXKR<7-@juGoONIIZphz7D0QG540 z>DKCNs3=({sBc=p_Lm>54Ve9{(>f7aflRq^vH)MFCicHENpjlBP1GAl-0?gl)dJd7 zeP_H;cxGv7>Gx1ELn(V(7~&QKjsNjJ;Mv=%vYG|+_h z%q*j|=L8$#Fp*Lpq^zM)!m6hl14h-|%4#J_VScZ_B|QH5;M1SCL6ex}`$k2W)#91u z1AP_qez(!;ggiHRE=mu$mz#4pBaiLQ-vqe2inUrVMm zz?*Hg?kgL6nWt^;tPbM0hwKWhqv3w3V5h%f54Yn1(!2;f7JPs(>n&a3prjUgnf<1C z?alj6Z&KJTp<=9>byadAGUwEgQz67stz7!-+%qLTHEFoV28tfAlNrJ?Cn1j#kv;ra zJ;5Euafw$|E^{8v@Gkorw_V7naCyG>Q=Ljr^C4VI?~ov!o@BllyWBhzFgIY~ripFi z+CZO*o_9#k-Ne{8T+GB^-d$^|I+TODVri9*d?yE^SpWOy$8Fss%LOh;JkY8S;S!W> 
zGu95L`K-DBM4C>D8DL_3RAbH#MXU~e2vSSH24%iIaO|j_vJJ5R;(fhGd42?`d`FAk z7+6XEz+I`hiK`$Q%vQ1CVpY6bnL7>~y*KlB8a}zy{{yxLOG~41buP>GeZgLQsqE!9 zOy{B;3D(CWPIFz|bms$)8rtKtDY!g3Xtkvj*ov1@50010ztTfCrFg^i<^G2@X>VWR*$*;_Bs?$wZjub z+2Gc}#L_kELCG`iYfbbO$h%tJ)MPgAvl4yX-zZE%Pe_df{EHIzDVExkO8s@U&J_0r z33%Rr=b}BnF+kSoIGQBAHHU)h-3FvQ^);!Xt&l4yEp=gZ+7n_h#XonEgCA00?qs`n zfj4NR=R3q9;&ZBjtDkqnM zUYXK861W=8W0RKfeNcWcciEjm`=@1LNkSXlrtAG$Xx~^y+f;2IY2GjVJaX z03kL;srUH9nVpWdWFg#3mRGrMM9kmVacF}B6^#e(L?=_WnMzlGtEKT!eScv;=A|>C zJ#Eb?-rGG#%kepKawh8O=}>KrhFo13GGH%W{yqAp+!6mIn&;JV znYQUj)~!pEt^M4wIY`J@q@7;0%HSV!Uk+tFo-ltS_4HIIWRp(}Ek#&$dnp$-wGF?Y zbR10a&GDQk$1Ceyq^f6KeM&%>jBcR*0kb8rL3H=$EmR7-lIb(vz`#JkSFe61U@17% z*YAi?NBav`8{C$qB*ovdFL2#Xey}4TX6Ucif8~tzCax+7dNghlzhB5YJSqH?g7C<$ zj0;Uv$EjN|xudXXmwWqqS5yb1obeyn`$#*{ZxUwuc>K)5h57=2B4GIE>Wb|5?L(AR z5=hB*@MpmOZf6v${0oA?Ut;BPS}i>hlz*DODU)RAs{=d}gH|751+t}5iS22n0zTjy z-WM+HO^@?*M&}mzfc{F9klMZO)R1dX*>5IdR=)ZdiY{cM3yvs8il9G^rStuSWk}^{ zyyKDkTuzy!kC;`!P?Vx1pgH6m!@c1L?nQNAGXMjx$F;BbzT=UGl8gI6_FTL0J?q4S zja$<*JN0#<7HHX0tD9gtnr*(MT2t-n5l$cp*h1^$lr}amvmd*9h(nj?7p~jcFtl4=!M=IPvuP{A) z;siOigKYViG$2FBpfe3)6TB18rzcOkuV(MV;T?&A`3-kGwY7B!*Y$GYLRNo->Tpsh!m)|IhS% z)k&m^Pb1uOCHOM~@CC-noZSUhr_iEu7Q=m$S>rVGBmp0C zrGK)EX40>wXm|hE(Cfb+iq_rlGR1i66S}O6Ac_FLV$%P1s?0$mB1mn=9qDzy0Hs+a z5Xjz`LO@J8ETb3liyg43ZZYK2gNu9(0SxiNH3q{1Uq79xA?T|lNFnuTtz*Ii-&{`aCQuM#|(_QcI1`{V*}6d(F?fW4v?BK=(z3I|7;F<1{lKkBrZ=b7EFd zW%$SpSFCqLCFj*Y6YX~{ANJ;EMmgN#7bsW+DZjtV#8E^Jm-DP6kFv& z4o*La1!DFf#cpfsLzXe~p{a^3?$h)lt_8V8dP}WE?e%TQZ7vza?W=eW$mph~Qgy>&tm%DDQd{`aHV;DnV8 zEv%>ffAgAFn*xAzxe7Odf#Sw*WN{(L^?JB!yB(2dCNLukq9srS*(!*@5wjjG1+@?N z$U`;KD;ZezoekQ3p*aV=@wAYwf23K*qX$@#irAXZ5;Y`XQ3nR4=U?XfJlFV@-{s+L zp=rj^RQCEh-LLIp5R_zhVXFK&2X=f4XlrE3gE=gVXUo@{8ChJ6B z)+}Yx_dXX`BC*AdsMK2fPl;h`V(sgYfntbyKuo4+MgHf&CGqEcjj^Jj!XX)S0!Iu? 
zCOAmzwm)HBx<~sDIcGOO-N`iW@Lw7`e8IG#u+OJSOkt!sr>DGY?D}d4O=ZUsKAZlS z1Ce=?(LWi4z>$LXg7~vK(xDOdB3;X|s{LLCbA71T*rM}RGEk0!Z^KXTkn(|IR3v-T zv78Ut_a$>0jCY^|Fs2sn7!>OTBI{vJdb_)I#pTgnw{}Qfo1o#uFUEOyUK(ifz#!YS2ixP%+&q{Ul*IfGu>lHRjs%Lr33NXV_i5rO-Wg6%jL;pnBeaT+l3Fg9?n(&Z)LbncQH)H4_$38V8Zd7^J#_Xc#e{mqf zR#L4mOa&|3n)p^(dW_oA`kWJN*K)Wwpmy-8soHqMZysgFG2OAOf@*Sm`$&VS3>f&a zbLBj&7os)Gmlnxyc!>QlV;G?Ii2AUrG{KXcImGS#Zw-*KPvp}bs?VyTb5Fz1?0iI2 zMKyMo%k(OWQ4Q<4_)hJS>s`7z@$IXOE%kW!wgi~b|J;iwbdGv{1OhNwp}QBKv&~1BjJz!U zJuf7W^StQb?dG%h>Q{K9&jooS8N^lu3x=JU)bnh#7b8!)WMjrTbQHE%9?8adIe<*H zqw0Wr@nHo6>Y5(rZBo=yax{#9?Otv$zagllNbS8`kGYpTiuRAL?h2=rU_6`s2I z?gaXWJ#ay;Har6C{ntDbKf>YNFXaH-raDEMH1Nsk!MH$b*Bxe3=JLQqlkAGRpLHuL z!M!o{t%JU#Ou?WiX{y;SJL{wJ9Q?w0r*(M!wub54(`$FotZY69UIT`tpska-LBMGBcph* zoT+y~aaSE1z=KT1798C5+Dn%~bSEd=PA?$GOXg+zr zo+}T_dA$?}kHMadS-uppAZ-JgFT}Ol&f?m&V!?7aBxb0tBikEAR7B zx3zDc!U&F0=cUhI+59tv=-GfkkI(YJz^wGe(;2rNS3@q88`I@OrEbL7grg9ddIhwG zhTw|!Y^9VvFMSeF>w!sS=yKMs_U=H*aL`*abvgh2W|U9gGjy=ORln8mJhA(yP=;e> zb5=~LyLhPcUxYw&sPCE-O`V7Ts9$>7)?jy~xUIe*Uh-5_m1Q0)i`&~XJC{Z6sQ@#X zm+y!A+WE|mH{=%y5%bl=&eS6N8uFd;HDVZ~TpZzKK;H;8WmDKG@~pZ{B$jd1^SL`w zbLLdDWZXn{lj9g%Cwli0S{TdPt1i>U!n52$nlemEhlXzLp?J6Vhc6Z1G3(ft>q{V8Q!&sNI~4_D-8eh9J5-OgH|d|(d~6X1~W ztmQd#&vEYFOkQez9aI04(|GAVyGA`rcbH=J+$836wOfwm%OM3~V(&etF0%LUwkaE(s_LJ~%#n}2{Vs3Z;02bVMV3A) zN}@sS32l=@sM;X-Ow!hK+NV-a@K0YUxN^j#CvHzQBwbVwRoZ|Y8D$v-mW=O>3AsD# z6PiF)72Vw{L7TNjpu&(mR9obENs4Ilg%T6o31kPJK=nSDAykU{Ubg zM(3{ac*9*(X>-kBD1uK&80b&E;5UESHj^M z>o8SBW4g->irTGn1ODf$oXJ=D4lBojPxU|CO-;M1(GGwNcVlg3A}{ZkKhAe!gNOP5 zP5ltFB2cnD_I>3vzLia|Idtu;3KZRS+DG0SG;7B#7^AG!A2-|R>8i!;c!D0u#0yNkI1oA~d^hr&GuewJ=f-8&P}@iwVvS)o;5c6#XJtW#`x z*QPcOJyvO_47!E}7ezMSn^X(OEVu_pT?y3UTTL{}vcCCHpF#SYKW1kZLCFpPHE zH%7Gdmqu^DCkg4`Em_#82%R4fxiByI-&j_N?c#nfG&Pf`h@HDiQ8O14ZO$ynjwDS@(>(l%pRjDxxx5U z>**W2g1V7y=ywW;k-*QLyTt4a6plMg`Za^Ute-6;hev9BF0Y z`!mWR#CmD?LK~wDaqDa3!$wC?udB?Og4up7K9(H3#Ei0DbCId46z3VKA7-m&@d*9T zqdy%5GD;G+yzY@Vx&qO*y2@Y>32U2vOf!FT8MJaQFyo6Hgi+aex>i?Ph;Q&$Mp?Qo 
ze8#h25A`no05_i{7hM##^3>bg`?U6%jsC>8mnBg&ywZJ8<<-)xzmZ--3(UUP5tQ>; zq7Hvb^*kLox*5eT$;n++B;@jR-(r(kOmmVwx#V83-%P{A{9pd~+a=c!^QuU_plxT2 z{BdVttUdr!%fPicG+{GFAQCg0;{cpNx5j=kX!4zGov0hx^U1QO_GRIyk<_WG4Z?@1N889xf_fbG-(0dO|TR z&;zggk3Mc80P7s+ayk|AAN5k6n&d4-<8cy3yN@$o*u+G>W0`e%{6bO1+xvh(d^)HBdJib>KWSEaG#VwzY7cnD5dKO7jzsx8ZR9)t4S{|*g-Zfl5a-0h)JYKh*aSfzvO(ag!5mLFd<3ghs2%V7; z)T$;p-5Zv0VJ|1XtN!M5b#CsO9b>T@AjK=Ci3qeC&$XfOW9kyj~zVE!={EfpQ^PrhaN3& zX|}O9)(-S^ypJYUGEsC9)BXTqEa`|Lm7vVZ%`G>{nDy7~-$-dnu@spOahSC|icwK3 zmRi2`ysXx=$~Z8v;Zo$s`RPPce8@%^@h1r>LE#`DEd+nkTNA}+Ow$yZVnLe4+h5dAw1e>s$$uYk6uM@Idc6D@&BJ3G*xf5W*Y--wODNlW}UXlW_%}@F^PFcl>NvVTy24JC)6DNw zmXYcVW`*%b$HPXhVPl$fSJh|UgtR6oP*|+q9B< zZAk>7RzmBmT@_P}#iTZcNLHq7B!`$ZM%MwF#J`j00 zEPs7R#&tdgnSX*`T>)^}?esZ~`dAa>X0+IJ%Fttc*FA;y%G|0A(T{|-hnZq=Kni^R z4_i+so1S_NcpVtiSG~BGw^%rm`JGof6Q3Q$4v(6Ua#)Yv8d5=Z|x-l z^>v+&g2dbf_b)!xqrT)=?6$#*H~J4_BHL;GnY2C2xpPc$^C~}cEW97~2H!uC7Qgg~YvU)i3qiSyuR$H`mXH?w*%~V6C}alw@UK z*R{nKGSguVut`f7qYt3Dz1Gj?oS;zkKUn*my z0I#!RSK6L$o13+_76DKcl*vbHTrz{Z%KioYUjd+5^9mSc0W6{apn^`f*rG7Z(9o{M69NHhh$AzBAn zNmTiucVO$0n6LQpzzx0d&r9w`m58xxfLj4X=#S=blWH&TYnyDWB#3ugiWT27Ap9Sh z&f5unD3=;{?&EQ$9p5F|?Lpjn`L(_qJ!InCKkT7*ZEVQbTkaxcVc)nfaT>DCxQN8O z*3;Mjs`@)V#j?}-B1`{d=$Cu1FQ|5|$ye3D7jCQ0N6=o6lfQi<=H%zEs<#oLb5u^t zBs+L|D5HF}XlUZTMjj>+PKh(APxq04ls+yjSKEG%pB*^<_?KeFpS=X7fcxqbbr8-R z^m6{!J6}KlDr*^+a$As>l997n7x25tLV!vn;%vX-TD|$ZD1_^ZkSXR@tu54g15iEw z=%M0^>#b}$JH@5O7;58hG z<~{SMm8x`pJFHTRo{nBT`t*ENU~7;|qGGdm<1iQB9}p&SWJ-1+YV{kAa(|mKPtLsM zYrLdh&;eVFHILPAVc`Ss%tHfH^zb1Etaw<3C9+aM7_4IN9bUhu{?GGweQAOS*hRw7 z<=QCQaNZ`Q2RWF7DC@u6>0Z8@NPlQxbS+_m{L5H0Eb$|mftC5t3b#A4G@8hHoohqR z`2>0I;+E+AqjbBL6Q{1*{GSPOWNwn`S@RkOl675WlUROESL53H^LjrpO-^#xut%RN zc@u%=0kdV^gVr~F6OY>@8Y&cxKD^lv<8d?1U1+Z!h}k7ygo)JAqI+ruK2xDUQ11A%?k& z`SKr8!Txv;MH-;O)u_KpXIlbshE6F~8~!_4_|5<2b0ds2&3uL{!F?27Eh{hAl>zc3 zA{QSi$c%U@qc6Q-NfS7$+h8x0pN}@jU9%52LqthRIp%UoCLEa39TxOJbuPiteK0Zl zC&6C3s_~QCoO*cHFo6Dr*wjSNyzAzh7ygs zk+JW-`teu;{wua;v!TM_Vs3(}NjlLeJM&&-6G!S#3WeW-o{`>8Rkeip3RsuH*Iprn 
zP$4zb;yr#!$nxpdXxN(s__k+=i+Sw(bTK?^&h*vdt{FU9{)s)k81t~XOy>pe+Tw1` zd*mxO!W;kdDl*R5MqsJl7qY>Ctgk1Ssh`@Zuj}8G=rHGZmD@N=A|IvYtp& z>Bmc08mWdF{YXh(Vqew1uI>a5q0gV{(+95|I%;TR>_q;bN*T2H}OXcS8ur-rZH3fLP><>$e7XQ z#f&){6G^n>;G!gp@#tUT*Gth`>6`bfR7bk(BoT?7a8<61G>UbxD<_0h4tpNO#*EDl zCgsbvOwLbn^E>H)jtURFMTc-^GldJxg28RySCPj^TfQ&h9$0@kZcC&Xs#CS4W zV|9|sl-g8p0V9r+%-)iO7wI*Pjw3Z6@FN|PJB8m|$GiLkbaE&Q+!wC?Oj%kePPSn# zjuw)OZk3$d>E)cCp@dIF!e=JTT%a7nX;RDUph)ApPfuG)B3=wBOzg;EptrLlZRY!`p9^^W}e!V^g?+nMLF0c&G6 zy+JU$g`hm6@ofZzwNsq~J!QelV7(aMR<9BUy!FHkPQUWj*eCVr8eqGplR21&J>%Zp z<%f77jHNyyR{WJ;rZP*N%W-BO@OPV}v;0|TA7YF_bf~ZXaDPAl=RujA+85I5y$*WU zo-EPD0o1n8@SMzx%qz)VrV%=ote4)qc)hT9RCF{&tE}VEC%_tnatAzSO8Qo_`clDE z8EWG#)X{JFspH6JWuZf{3qczReA>dfJRw&VH|AfcYvQIcv6Ju9S^A5Vri5#lP%Zk; zfJ^^jw0aL7Hs%K`I`z&bPdmy>?(GMFDLgtC?$`0THe6IPbn$reW*&bv4qvMrS5#U1 z*u}cm*dmFiECLo-BQX@Dh8xhvtZ%akw-$8iwXVTiv%}0i@W(vD=&=IFqKMQHiK{+HVG?o^aa?KgEwDv_8)%l!aL{TclgB-9NT7k{%+9*GK}mDFJj}qlNE>sntK+Ur_t9I3NRS~ zI8Ufv`TA3RVAW;X1o&a5qA|$zo~If`M~X(?;Xx>)Y*LYp1go<5e;R1nn$(3(Z5e{3 z{ob5cSy6{bWMzu)$vuAZ2o#h#g?4XuY3`X7%t)j-T{SdYVp0I+&1o*t=}VWD%GN)e zJ0*5n>H@en@UqW?e4T)Kjvrd7Rw2N|l$)#s8CPPs&)j?1YQSZ!@zp?mFk|RQB9xWQ zhEOK~P0yzD{Rr)J2k!Wfv4(pT9+3aYd;BK<{v$Fn>>W`N5d%CP&*029S^xfPno}#( zj8Ga)8Y@4)8DG+{n9e-%4?`ufi+LXSNWx@Et#_7fP)I_mWPcF%$m*Mem#A#}UQVp^ z6Ae9>Bni&2PmU+88J1$nu`d;2cJF40PpO5?yNcPW>|qa6i?PNtkGw6#Vr@gQ67H0x z`+C!k6G#>LchR#=EtXTkMRcIU1SpfzV9B_MUATWH#bggDVGx7fSW|D1`p`j^9u*;} zle5Ek|4332>$Z&MoJv`R?r@(~(Xr}31Tt#{nQRt}DeTD3y-vJ;>z=#dFV&3)HJ*Y; zC#x!^MrCki`OX)t1htfr3|LFnmB6ts80qKjBw*%~+}s(P;eTKcFqTJRTda!%$a1a} zm;^mJa%FH9>85$IN0uakz#i#c5uxfmHcq+JicLPdp-rCw>v4!k%aF>07$ZMdC-?}le(YLhDx zau>hSvz5t7*Ss!PNiit*CIxvijqm_*^=C%+J!le3^?Q!YK=KBs=Z8@klvF#bf{nWP zWn!cu$-5Ad;xdUi7_4~V68C|%dsT|nwch;d9MM#NaX6z5T;S4h$&V|MwrrptstgXS zl+Bpaeg3c?_&k&3scN-)249CapkJYgPJ-{^;=-J>hYEm8|r#`nP8lcZG?$W&!5>!%X9aMM}|>9@j-{!gJ;^dc~I(F!4b7Lo3~G zaI#o9biaY4L3^;yv0~uD+_>E()?=$o8Cl{S7U$ni2yNbr-#hsbdc&H8UabYeER4|ztsKySv>4^;P 
zgyhOZ<@b(F^=BCkX9Y!}BLaK2RQ3Z!am;++j^u8C(>c>ZGa2#$0qfwK;F!!i;6z4# z&qv^K9N$Pag?-Fdw|Xaw(`u(B;yQk)5rMEX97=GiyhQbp24k|>m3V;Q(}EAGCXX|I zLv+^TGwY4nu8w_5&o6S?@!;5`4`b^q2o0*<*I3fL`a*$hUMaKZ-$db%t@Lc;8du-= z^F4~vp@c)S&D5q#r+AMDLHmuTiQt83+)IjBLn|6Kp$YDV-~!D{r40um7q*AMrh|ae zq8Xj6!4`#;`uid;ne;*}%zha(9xb<=0;1`aa!PjUHQCb0bh{S2bae;l`F*!5Cic@N z3g+Q(xQel{aUb2qy9l3i`~kMv?5nMU%WOS6)mSmUZTiW{1Uw3JB}tuDk((LpFMopn z`lO&}TeJIouAQ7UncD!l-bwlXRnh8;>3f`n%JJdY?=kX!7`Y~m!p35j1|k_T!_x(B zw!{osUb6Z1vC$emxI~td)jc`w7SqH_sVWuJEnsw+iuv}tlK|Ans? zmN5tI$46S{p!D0zT?;3vxq(-vLKZt@W_*yuEiN_5xiOJjLZ|1TSFBa)ia}A4!6qhil+5wC8_#gkvoeG|9W0DxZc67qd(A~D zs>-{O7X6%mtNyL&BSvw`gd_+gI{_4<*8ZjZ>F4@Up~_xVe;7X6)&sKx_q__;G_=FQohr>l3+VeZ8!=+~IYu ziZbt3d%1L>V)6ol!kqpi+~_m@RnDTa43Rp*#QKw6Ws*7Q50jKV{hNZ)Bvw9f*PEGd zb_po;7emgd8b5pLuY`~MnK?^dQ+80#Q&sH`^}g-;YRPc*N6UO5Nl0)xk3lCHVz<@4 zt&~VtvX^haXUK9Wftr-9>3LW9U)S z{zJk!8qVaM#>WlFfGdf?weHFg>)|O0r?YRLN)z)VqR0nE$3-f;5r8Ok-5{n*Mkev};u7~9`co!@^{63{6`=bTw?!K#G9LlNe+*6D6MFNeXOmHRa82)@&l`nH z@Ke9x%fVJL{Eo(ImnHXJVu)TBdbGUJTq;S{JG|Z=9J8Vexv!2xWY+L<4tcLIPLr(351 z-TcTzBdS+VPKYf)VP{1?>HJ6axxaq(e7;R&wa^%7F4EX}L?})rUyLv)3S)JVl4?L5 zye&{h;?am6j$Jjx0m-9su588YJl#{Gd+eh#b*$vbOpfZ(_qTQqNg29#XPp^{~MW|n(0AAWMXW77uFU8w#OZ7n0-WPY=;_8LQyqfJJg zQeM^)?F^Fy=AqM-*kcS>_Yz+iStBG=;jy>6{ewz!H>p`IMz_1j z5$UCKV|$w!WCf>zD8|>+qx3U?Y4GiI$qr@sTQzK<#orLFd+!><sD74>eTj3k#kRNoxSY5x+S@XCj^mCkXfx(LjHNU;Dnl6d z6w)1_|KWDD=#k1G0^kvEnMa8-&MKZg` zqdJWGDr1;x{I>ljMVzLoZtRU{$UuimiMQM0zU)XQ7ekV_nleV@fc@#0Ik(IR+hzFR z+ZgG<-=R5LiQDD5@5L8ood0v9`8sfkOC5OU(d6OQRE|vR+Ej(Ti%SyWG{?r< z?L@Z$##cy3Cup>PB&M^MGE4Vz7uuN^{8HwI-PJO4;coDGNK30o#y}F0SvS;~NF^_; z2FT2wH@ubjii|@&{qNCWuw)5JLH7?R13^;!d3jMgo@suXZKmE`)V!~^m+Pbnj~?yRb2OI>#XNbB)Z8Z zIwX@SDofR9EG*RSe0R&pJgYw?QrMT$94zXuh=p#x4mdOlD>-2|g-H%4>vaBj7FxfO z2X|Ez+@s>u+zqY%qIPMi38X=;%?#Sud+2FvVDUU>t8 z%ACLQh;h;rh_G~ud&c3;xX`L#Jkt#0QhVFd3?rh%ZK-o7Pu~lZFenl8^Q;T1{?iW_ z9-U?rM&^nY5K4u}S-k9_DsE(u$=g$dXGK0dql)qU~=@!U2j?b)TivEebO$^l=P?!*T 
z^HLKDfl5SDzFKq4A!@rZ$6OzmM{{)Y@NR97X5o$MGK2#Ck2H;cH~$go{zMcbMh13! zDp`brPs9t(ku0wlv}y&L$V$3Kp!HJrR0J`=IwuwAu{X{;k+J_)lO^u(@S(D|T5+zG z!RA4*u0X&zgH-8f=7P)0HX2rvm;Y~UgW@H`M*&rrsO{Lch=T<^SrO> zUcFcn{t&kCUf<`Yg`*=PsD{0}1R%TnX>UlapO>q-FzLWw8NE?);>Qt|n^tk&93_^T zFIm_4R(P9xOo7{YX4)ErQ~jXvAyhCZ{U3FscPKd_-)V&+-Sal+}kfTJ3oXbG7B0RU5w5I?TEj56#C8JWBtc70sw zWhd=#z}7xdk4w{mS5kmR|K0_^Eq3!f>9)1Dt|{+BHRsdqQI`gj$G2KrU-(ht^n4xB zANBKETEEB@nMBAOKLwS3g5EuSJvQcj+8t02?<+pztNxtPVvx*oH=~d)Q1YwjUPPn_ zsk@X)=ksN9cl(oZmIg;X0DvRpeU7*}$Z_pfi_*K&_a`bTaCw>fo4_fhv#iK9d4uJ1 zik=;PCw|BSDjh7AzT6D;{!)$xw}54SO6{r^vej>BsInSfabBrh+thjfJINtve48)c zMD91Il{@;orGiUOga1+aoE}%*LcutG%KO+OrbBVNcQCD@FCm)i9##HR-4ZX%0WBf> z?@7R`E6}CDq?ZPqVVnSWUppJLwFfp`wa9*MTYMC1x-u}ehg=rF3^>@-v$w~NmWEk& zuHX44So;Nfezj3?_cVgGaddPzD^oY=zL;^}w%ebm1EbJ1l$(2d8!0K;#<(Fp2Brh~Fp&&E z=a#J72OB>>d4Y2m&X-&|em0v)2?jrZc-1}N9QL_5eF*#Bu49lMLh^D4&RJ5FwEsOU ztsuqVme6NwD=%3NTHV-{0>sR%u6U<1($|85*RXh}rXW3XuUG}BmuEnEz2xpG5Cn5iKZ`@vm z`dd8X>JzcL`($1BU?S$}Y|4)x!?8{p8l5u3dHMN<4p=(x8@bB;EXLEUL-b?RjHz3diej)FNk?EPEhm8DnavG3rf}rm?KF%%oK2)qwemOGo zdY{k2M!ymjYpixdaXA87Dt^X?lvVC3s0=6Pxr@za6cvwWfoE-8yhg!_j<{KT28pw!k_wLxK&|Z4E=SQNhEF_zRjG5qSx>UGs_;nRm2z zgBO<>M5;(Z;%olh`I|cJXYb@PvFmZUC3eBq<+ChqSyrYfP?24z-@bPX3b$Uj9QkOS z!@@<`H&wx6`a(Nq^{IR5lX=&7LWy#@%f4Ha0x-L{>Ww%nO3#tCii0dbCXGT`zHMph5Mv+59*<_{Uj#Z7Tm{NM_(2@ z>x-E^+IM?>Ac(vd$3%N_pX%IKI?T>P-aZ)ra2he08%iB>?@P)gzuYZFp5`>MSeobm zsJwpO_~XJ&cKGbu;zsD>oA=dO*6)KTI7;=H7zZR`J32eP&7YCSyb@Gczu&zQzp4wvd;|S(|`Rt7~f&rgwyI5e@Vj1$*5|T*+e|_(SL68uOgTw8UUR(^Oyo z7kB4*W)sJ^-eC%-OywtRDIM`5Ul=r&SxQe?nD?T>7L)H7MKD3tNgh4+rTcuFdFe>$ z$$I)*lb5QWGy6mYB0j2(k#AWAE`L+ovilwrqwM>?So-#OrrZDjZMNAQw>eFQjUhQx z4iR(SQjruwW)l@TH>t%o$1x0vLMRr>XzjL@Aj<<7|Ax9;Ef|2?kj zwd-}gPS4l#dc89qmay?3XY*SGS5In=@FK-_UWq9wfx}mvQrH`6y`&NG{skX5dVU>P z;Zkp@{#b{%n20MEdQ!em?NIfLsMdc|5pIA~bGT=~q)?l!nA!JwG>dl!KI!>|Ivi`eH|L?DNc?m_ zC&braCm(B*3uZY5&6EqeLve4fK0i`!SLmkx3id&Ody{{ex`4G7!X_`P+5?j((#fVB z(2L?M{O5#ugpDQ@RnIOHT9-y&+g@}C{}2I6`@H)2{UfA)ryfnA;H|n2E=#KZRmpX4 
zTGYo1ff0`o-ex+tspdnT3-a_cP1Au?b(@RJ zDvxYjoeVOB0)Zg1uoSvc@Xe1DZeq!EDM7dgPESwYt>BF?8gJw*WT8@FtS~TC9`ZjT zhFs@p=>@&)seivaBN3`NI8JU(m{Upq2HvZr2=z}P9!BiIngRyvoYU_Pfx{UW9;R+z zcDF(WrDgL7i?Ep4?k756YYZ!zp=)p_k0idmHMbDSNdGk#dOSTSYeAXLd~S^09q28G zkx74HGpkrFXzGeN3F($K-*wM!IXM9G;O;?DIUqZ;G8MxuU zlPjcg1_-URY;`hq>WCEbtMnFkvd2!-*8UOmupn_!ZnCdmg8U)5GOL=5vxF@jNwPfG z9aDD01;nsER+|gPNb+2gG%<`6g{IJuG(+vlSOG!{q*cPH;2t4tvId~Hy*7qr?>jY7 z8{Upf%QIidFSVLkdaH3o@k2(trHI5eiTN zrU2xXH_UpX{-Jg!XGGE2_&~*+wVqoI1tOOHAPpCK8S&+;Zno*-ps-nkY7>c%nmu~R zplE}+TdE@)kJ8wU&=O4T;Eu{{bUlA{~>d)gj`` zX(YfoYrcyA67gC zG5EZ}u<>_9Sl_J$IB_uWTpLP@!{q}rWQ=rtwcQPXgPQUzpb&k-NiCtz^KQSD;_hus zT{*(wSf5NPa{eU2KuJ=@pkf#}QrO)UY>%W`3#GGY*F|qtF1eGmR-lpW?xv(EvS0Bv zob_TD5JXL2T8VVxx+9>e+=5sOK}>AM`Xhe6{z&|D?v1G3DTKqX@VIpW1YpwGr5SoU z=V=*dA`H{YIefe72+M-HB+^)C7gvZTbVRz{@+g2>+5(AQV+iTUlzHr-BDZ8<9L|{h zgv7vpB4dOqty|+1uij2*HlmfddBweIc54E-0sYGef?SG*!})!it)>1SCff0aR~ed$ zb=7jK=bfy(<$EZHgon3Lyk$3h;>&Tm$-y&H*U5c@_7GqSJk3k%hy0xvU5`fw_HYur z>i%5v`Zh+Cx*=|<`QH1kcb8S$3Wt~91-cX{PEsR<1~hqNa=H&+ldk?l{MTOHDoxgdRhj*8PUHwKJLOl4rs*am{e#EuZWWn2lY}SetNMiA0 zshU;}K(&M!8&iipACwWgkD?jZJ?;o+_!BfGiZ?f=ohKB=sk>0R<5TiaTMaB>_j|?* za%F!)fC_Z)Qi1y3RLk7+;~9GH7z4NniBZdv*4+Zje)fo)sA@m}Naam4JVep!jv**%^A#J)aN8mO@Qx)R@%Gb*AR> zgc;e@=~&acRtv(c;IC=Zm}a#m_PAXn85XAybN-T8%U!%D9D&IdwH@Yl? 
z-2N#TB=cJH4Q71(60!t*WF=-M$H~BE8|%)6hR&xk@#}||a-Ae6j{N7z4V#TRJI6C+ z>w-diKCQ}O>YwEaU6+j3{Py!ASdgh~6Rn9Q<7B(aqbPfr8*?m}ub!arz##e0k%!2( z;rqAtsgpT~bnF#yCb1A{&G}cpVgXq$PRfn0*)~(nz6Cb4gt3w9Q}qlr-?twPO9GM< zd}WWHy~>!-b)BdVbcOVrek=>AC8)g)8@sBV4=&5FUdq)@p(s?(%1uju?H=M+3lg&; zWEsd^*^c;tPm^~E=N9ZX?s+Otm|HkK{~r7mpXMe`l*C9a%S(JHOtF{M)uWZ$rZ&R!6B$dg30|R_JdjLqp4X;!Nu|sKY%}6V zRdld1_MmF&HK|R<9rX(NmSM0JY)8L&&L=69t+DRop39G+KZDihj)4L>nqmIqFxSu8`h?~V-_pBWRfte~s;X3W-xTnhr9r>=| zI$4(`?sSkRz33<`HR^lty@Qi~JUz2OG0ykXvl6YT5oO#E*FT{7-`+rhg_!k}njYi( zEK+|}_mcl8-y!RSM~M6zd1vEu)%#q@$nA|qPTJs$3wK_BusA`2$zmo}t_@Ir3upVq zTmjm-1WbFjGB_@EVQVt$6*hO82nT%~^%{s=_*wiPO9YfeA$Y4Nx|6VIv@(`40Y<4k zPJ)5*v-p?YdS>RsD>NkXvb!cK(E~puv||S}00aUS3qnsTJwPss9QA>@BL2qt)8HNF z)!y_h_0?C(=B$Yo`&ECvIC$`F7FNsx{DU~$_xF5yD|}Pt9&h!L!+xhtVVTb?6~??! zdBYxLiE0a637Ew@jtZ|hvaI0f&V&rWrt<%o!XNqi9ne0xFSC?G;!seU^oC2#RPhPu zi`j~tn)!W*Z2s^0``R|Yx`wuf%qE(N=+?3@9W24oFsqZ$jhRin{is25yVX^M_virP z*k#qR+>f3Ii62(~^^)l@r>R8ij0XlZ;acq(9`$|KEXJ&`Q*$od!8Z4BNu0HAWOv7Es&zc#y^)nc@9iIDA+Ssz zATaWBnXgV41)PN|1v~Q%B)z6*#1?SR@M<#SLQ|fuW>yq4#D$Zc3!LI07b4b6?4o7? 
zn8dTOzXzUf=f@`PsXk_%t?CnwR(Cg&$!JNS_HbtYq%-|ff=dr%5?&s{hJ6M z%XwvO!Yn@@Ajox+?<87MxZjYt9~*dcG_9+x=9% zv@T2I*cQU`PRE!LD=l}*$b#JA)cA4(aQpT3uJKl_JNpU;9d*Hm8jE=Aa-~MIPKJm) zr>|qLk#OgQcwWbcuj87Z6}qNNLTSwyp_8K`3U-iz~dY`8Z+6xTGxbr zZ1fQDRRSEJd_q@oF|xEir=ddeY%EY*6XnmXgd){ea?c_z^UPocR`zPY(fS4qE48q- z6OFUpN{2CKJCM$T+sU)omhLhl9=_;eQH5h+~|}cw&LtA@lm`Q*VNTE^+VfF-GQ`4$n1)oagVnFc(z=< z^Bw8YjTOn6n-H`CiDV7;ktGzL=kbP&t52;p^Wwj2bT$C?p!5l~UIqNljlY#R>rqk+ z33x8^6YgH>lmiBjE6r9)-4 zE5GxQ3LuQ3=#<>Djcl1AKbcwZdgjoqQYH)-msn!HTyNpalM_Sjcc(yxSoH#Y&(*%{ z4e-u1=^2@f5+{mwlW>c6@0$^g-$uC47xyhL!)2+D z65#U09Y%8{&w5qaXXuS>&!iO{$^eIihH#38f8;2Dh}XN7|73VZNm1m^az~KPSPt^` z6__2=OU`IB+Bi?~`o7nS=jQ$h|FgTcoDekykUNvA+yU2SrSjhmB0;XPuifgitC7(+ z_6W;l;~~QvL&%`uh9-qS+m|c3=C6Ubs}(DY>gg&^0VlKHKB{0@OkRtrQh$H#R8eL} z<}rE&zxbn?kOkS(QL0}tYwKhEpOL61yYu7RUVwZNmZn9Ce%n2W{k?QZNc6$R+TKu@1bASgdfGv$t zf*UFB&FrH7*v-=vf9_X8$UtnXNy^T`w8pf1hMdFJ5N9KKNN_VuaU*|#yD=)k1ma`z>3 z-e#QRFV^1aZzvn;S^5jeIXL;>btdQF?9& zYd&eCeE&FEw_mUAxl{mLF`j5e#lN&nc6T7!cqu-GH<{sca|+OWOV@VHfk!F3@$UFs zTHk$Lu}8dt>+L(w;V`i^sp_?g0^7P(E2Wn{nuXP^HraDT)IM-{^M8qz?3xLyJ9oX= z#vRNxTx1q|9P1=FpXn)YLu|BZT31W+bfM4pN_TXd(W@U@p8c4MT9tF|$!|Tau&`K~ zMV?$wWwvlnxcbFMeY*5I>ISJx7PGtoF2Hx>Skuq|*B1tN80a1Df=J?~B-<@ub6X$K zTbyIS{yRJWhpy@HaDwDP_^%#hVY~Pw7B(u}xs$t5fhof`=0D}$C78oX7Wn!dWIg z5rm>B6sO+#k?iW(QvKOnRm8p#GxfLm=A){qU!1d;(WVHkY5a)FJssJV#foAZ9-E+| zb_+lAf6X-;fs*<6vo#t#+3dYS%sOcoc3vSST>x8E&MM8@IuSYbOaSu?3HeQ?sTwz! 
zb-dUfTDD=8jmIauhXma(BAK(k?ZKL3R7#N*X?J+j)!JFvbzL#a*fM0+EHZBRb~SP>u|SbGECAIbBdJ1bCqe<{?+YZOK@8}l0hhDr9e=wxl{ zm^YI$Njn!z%IgEF`Iz#YWDXXyRw3xlXq5p(@km?7-}x&l_vMaa-;lq?%%CRPMO#Sc zNn80*)_Qbkx>JX@Qs=dhtP(9&MJH-=WM?L6mELOvz2s&y`r%scMJfJ~$AH8p&DeM$ zy)cHJU;hmzfKVtD_=?zH;Ia?F-`Kr6IY0Xxll){rer%vk=$4$jeEWxY+CT zzbYH~G1~~x5G%X-gd&;8BtJ8rXG5>`yYa%}yY}p|>2mSTt`aIw2CjB%@ooe(vE%F^ zQ5y!#h^Urbdv>mqvQ{n8QIv2VD69Jcuhc5FmKY*igcu|%HUQz)QJtYgB>->h%yZ!0 z%=2uw|-CBL_Elg=n;h|RwMs@!c{u2i;#dXCETBsGa}7CPls{U z<%k^~MnNzja)7CxdpTVOvRX|bw7~SOU-fU^esPfMkI4o_wx;vW|Mo915#GJUs1w=I zayO86!lS^_&W0pef!Yb(VCxGk7mYQmKPpBxMy44ZXw)fQSXNmW`QDb-)?hY?@O4B% zk+@%CPvM=ctl8%~>gN_IOI)K=y7~Wv_$sLp)g~_@q6(DUdEoY)JO6GMWIxXFlm%h@ zJkI}4joGro$>)!#b`$0L)08U>RXnm8}g!jZ6(6 zP8>pTUL<^GyCxZ6>i^NkgM5`V=cFomiXp&HTwq^G78j0R986CFw`fRA0#Mj&C%Z{8 zfWuY;Z3i;~RB{L{A*$2HLD&c7UM7||zJ$u&sW%Td=D)**{L{74T138ij@D@%F77&5 zW07#vn+A@U)ps@`>_zZ@b+ziy8S|ozpx0>q6qP7=%vY&Auy9z-9F{^uZOL)+4ldu) zp;q!=iD{XP>j%~^s8Pyvqt^J2bTA!S)7nnBUlg&Rn^bdoi=gDghw$T`!p4ZFO{zUj z2xyC%+`|KNPF#uZKEmX>KTWgK-I;YriozGg33{;xg1vuH>TZd21fe}SIihQ`l$nF< zWPI(M!Gdo4?Vtj`gL@uM9Z3743wzM-*CvqNyoFx3kkX`_dp)k5I~3$--#!rW0tl;X z)I&jrGR}>CUmAVQy_=M>l?OO|cvnYJ=_GX^f)@up-R8P4db;gK9p>8$EPQu3f}`MT zz+^POLu#J$;{2lq-@M;^i(g~8UWdGha>UVk{4y%n6Z*cS2e7OnU@l-$;4RM3o>*%N zjox;&Ou>$T*j24oZaSSaFOm+F;mjL*ayb)sx!XZzOLxGE2|l|Zn@kpO>AkDWF|pKs zSbi{Tf1scIzXwq{-y$C(9c z_|0Gfo|jy12qN+R#q;+5op%?p>wHbXr1nHAU26{Z;M9MhIZ77muj4)2THV{(_;s(x zECM3~ABrDp%$bk6hF`>!@Jm92{yo;5o?qzF2t5kyJ{Z?(7_O!fF?VjBIgckX!Mf~x zsd{v-leh1kXD9_fiE;&?e8ByGbDFT-wMHtCqW%Ma6mcx`a?^ofKU(Ouug+EzC@t_V zBi}A)m+3F08_!wd^ua(CrIOs49+CyyZd2gM(iIke6F^GR7a~^e&RUyHB~LvTVB>W+ z?s|qGewb0rR{|d^?q^7Ele%O`vs~{HYk>1o5RdPOVyo=89f|^lfc4usl{qESli!APHcxw-ygU1JDlY0SobR*!7vwfDzB-JsJ1b1UG_Fu=%D!xW}0J*;Nmk-mn zbPXs15eCHGL*fFCD#lX0jeJdSFwtef3bl2km3MTyq*W3{(W4@=3*e3M zdiwa7WDAiz+^H4N{vg%7CL?wa3DjAdWRr0NQRTV3`}Wgc=P=h@3_}k`w3PpmgQa}? 
zsC9`VvPRroId$R+=3L46E33GItOot%O%Skz zAyt1Kv#hj`oq^XpPrW$tZbd8N95@+>7+fY0-K`l@iHyeiJs{GK*VsNVBSAizlxa-$c`VpL7?w_3Zp zRqU$yMl0XeqipP`vZtt}8A#|mx#$TnI0*p(rVED`B0!0o8(_xlJ>h{bQx8`y-9Ppq zI{35E+pv+p=7RwQ)ZQ6l&ex`wM^QCN?lytqE=O(#N)>e_!2YWhpeQYAfyZJhwyw-oV}I~LBYZbAK5;{=L9CDcT+k;Vww_1Q{x zg;1^)>fGxpb+vz9;?9dM{&(q#lm2gkaqC{{B-!z)eANis^^1N@tV>~_K!Lamt1r6H zrl}B$kD-^2H<~?lShKZXs6u-y8Qka^m+CGQMO+DBZ_CP8npncekP)Yn&j=3C5?I}b z(+p*U5NvEh!j?IED#e{eL95rj%9@AOollxxsMiI(al)f@Tg*X z@d6yCxUSb9Q6h6sJwr#AU}|R)>aesAmDq3Qo*<{5TWYIo29W zL1wb7ke<>A$6PDBM8xU%S{JSQ0=#`zXmr0UJE8*GW_!0JiNL%=N{p$Voy22FyOcIb zuIc~PG+y6^#<6?b0NVW8-$$^JJOp~;G{7Ey>2)Y|eJGqi1o!bFUXZ#_lT;P#DTaoOQ#PxZQciC2k&!Wb~Q=)ea9%Ykp}N~Q6G^-6yo@2@{6 zn=Q9-^SJqTE7@qJEB9M);$xAQU?f%h{9^A%&{3IuI$KG|pV}=5*-(~~3@BW0Nsf!p z@!ZY_uD$|elCZU8DtMr=T%LXV$Im2Lqo=m&h|F!i^aJ_ZMr%-s=A;q1WqBcUjdGfs zu(IG5IIt3i)H1~T2z8Wen2)(3%e}Tl$m36OrPZ zS#+3J_=pb6lVjvlHnQ-Ru-<oZyXW*H9m3ZJd2&86pIR5l9nG1PpaVW!A= zMv68GC~#L-l~@(XXX;wgYA#u4Dgh~&zrrm|vn>1y+py>>PT61m)I;KFN=YvFaoKR6 zUFGgcz@X%<8%?qn-H4ljNnm)AuQME}pzCzK*qjh;vS9OID|#FYJ$c!%=h%=-1pwIW znElpg z9?z|W1XEugn5nDy`+Qws@OaZVvT3tKKffv+VwGq=gD zGx2Tc<@!twfy8s8sW&pjfu*vZJ4`W=9>xFaOXnS?b2Bg9W8;&P4_z{{EO0th^klh5 zd6nrU|66JQ)Wqqa1Fh6IAJZQ=Ba$bU$WAmr7xDw{a6qW>GOmL)j;Yvsin(9p@at%% zuiNye-Kk*B_uc9I44M^NXAypYK-j!ha$4MSt3}pnJ)@oP*nI+NHQKB4sljZVIfaf` z*4lyASyj+{=0S2PcrjgT^Wr2bA*?bh^v&msT)E}rj}TwfGuY?au*UPu_?7R0+(|~_ zT!(wd<;qMqF{>D7STFw+KyFFccqLP+7)u7d{(+Q4{ z_5L34emfZoC0h5+*b&eNwP`W?QU zXAT8U)7$ym)3)WvM=ZC?=nBD$1b?Lf#*MfHu;fvr9`DH|uCs=gil_XoY_ zO7;D!m$C_n2iyPWaNpAO2WxQe7hAZ`R++|F ziKIA*XigiL=}e+1br1rh3ltanF{OErC#_a3xvJT(MW}U;EFTlG5(Gqn$`A%VD-L^T z#G%^1RpCst7(wWcZ;wcl2}688=Hnais1%$kqx3M8RX27)$O^kNkDvuH<+qx&b8vQK-e`=@9< zuPB;yw1%?PDV&F~J}Y{d$>EdoDi?93MHFezBeJkQpg%(O3WY5kr zpWbmmc9?riH3J&EEdeR(rHSNW=_R-iEvLbgf#a54M{eD{z{Gmxy1e^{Ofw!8+zQRbFE0s?M>6F-OPC86Q zPsL3!V&qYgh136ocR+w9Popj8F6NL5@0o{+K@2%A+6v(NbTlXc*(V2wmmCNWZ~p6$ zqaM=|1@&ji$xfOM&Adz{4r?NA*x&R7>4 zC%>g%KcM$E>r>gUsr7xg7aDeg)iW9uDhj3`&U`%yl8^S#Ek2HB8zsBz_u1ahQ?7|t 
zID-o>6Y6&`6Uzbu>7tdEuA2@&s-)8 z{Fi*l?bFB?Mgi|pwS4%vNCyM@Oyx9_A!cP&G9GxB5 z`^AfFV~N^smyWA`zgRKztK$TfHA1CPJuk-mCB@n`JwfgQHIr0d%XH1TJwHJ=(Xe8gD5cIRD-%=H1epTP6% z%9Ad7yB$AbEoC!!UYvRF-~D*U?bMxb=5JLdcpn6`dEqSvZs8_cU3M}=CPEq>u0T>9 ze-`|Ga+oE4RJ_hdrR*BbkW1mf$*Z@Z+nO~sZV1ilRjaX#* ze8*4MsHk$!vSCXw>YTa-V{Y> z?xgSgz4d+4q?K#V-#;Vfs_6N_V6x6Edbb`MG06v(+-fwNDtuVbB&}1AllkXdK|VnB zDDVToAl2US!=xE{$C)THqv_!dZU=D{5Y5ri++&~*BP2s<&Qg_nUwx&jjmgKeJF%S& z#VtYCl=hP8Ti~5T@-oh+5$4LoPkeN3_tbUN81osDI9i#Za_zQYTTKm48J)BP>^g09 zVE>i^&9&{{1mPPiXJiwW27RgT^xkbZj$7LA7wn(aRjxU$`Ei#4=Ig_f99g^WwJpnF zyjy-&z8>4(dH+Vhgx##}_XOy8zNDJq^;FnQTEr$!`$VsQ-k2Cr{HpXOKt@lh4M{JD z(uen?TsYf@m-+tppmv-Y*?t{Lqr9K^PDVT>3i!t*I?mHXn^Tm_2sA3Fe9NjyfDyVT zCkSy(VnJ8R!ZPjmN3T39S$50pYU_!Ws;q@A>y1xdz7sWvJ?~hj$TQOal-%7X70OCd(10R;+#RC?9%v&&E~$d-gaYaZX9!_y8hZ z0#DF@^~S8_x)iz&kmL)I(m2OfW7EdN{YWjoQW8{v1EZ~c6R(D(P4-F{?2lqk=}Vp$iXE2LMm_(CLcxXUdOhvX3U#%E%>J*6-Cb zcqVTYPs^n*IA_NCO7-B6a4KZ04!;cLz_ib_(R z-I=m;z_zXXDZ(br2(I+n-HMq?u9@hTsD$kiJM`J5y^+I>6^nNHSjq)cuWwM6Jv1RX|+b1dn-gBR( zDn^BIfuI-V29{`G1B?Mm>R_t^@{yBhG{t)2j;zQ>69$)vWP0&{tjmse6Ah+Y@8Z<{ zg9384AXVG}KXwE{tEqfE2`WU^$|XtMQamI~tSAJ5CZYk1Oh-*HsBU(A_79VDJ+FmR z)G{}PK*+&W zqxOVa#g`Zcm+6JUjbuIVpU&E9eyN znZVAv8~;^ix)bwr4Yu~+JhaE-s9j=6tJ3(j>)o+m)M_9bYiCs4Y@B`YCt&rs^M)!i zu8q&Q0M|41NZLNCwqRoEGoll*;FqRcNk-jSHXoacOd|x-Ycp&g`4}uV-_-o zK&b=*Q+z~zC30PlTLJxMAD&9+Olcyp>RxF*dg*3g``CXpBQ#I|;m!QUv|kc`vjXT5 zUaVO%%k63YSb4k!J_yjb|B}c*oP$t8*Z&S`OUxaPk~&_xMQt>zgmB(g)uM642Kl`2 zHI9vn+ZQ6r{S)FR%}p47Ba*S#g)$7N+FBp1%Tj+727!3Cfa?6F?Z8$oMnUCTJ7u}W z`z6_kgZIa}jtw9GyCSh=_V-`EE^n*5eJu8&fyKvDDu2%zA;CJ>Q1VP!PLHXKAjGo* z$sQ-ju$5u{N0HSmGmFvEMuY zKeoeG^wujkjeoD8*qMNezp&rH%L6)8PAFrrhS%#Ay}~(ULy@a0y)#DmxsjW9tJ;yU znyv2c0gA9{-w|Gv)v2Iw{6-We+V)7Y`7r(6xGzWs2F(h>MQ$qz) z;+K=_;YKpGeWg_$>^V>VHd;KGrIFR~c!uS5Hn|tldJy$+Y(-FC6&({-fZW z#|3n`_X^Q=s~3m4I_{f6%kEfl7kVmx3NW>;Ct>xce%j|HDZ8?z5nKyZZ9DY63|B&) zCnX;`r<4pFAY~3XjT1b|2OaTt9hJCD{WhvDKPMx)PsC2w5*WoJ*cV_#*6ny-p7&fOAb^7OlPgNZGo+sQ 
zuO5-jre`MP@L3%sy)1{!zH$130RCT*xk(KAUe?68f_FdA_3nI=zS~y&>@z6Cf(WuQ zTX$jL`Bna5GchRPPBsaH)xNB%C-tc9+_Ssc!{3u5sU&wWpDdga10FTF*_;*T%VHaK zLOTb2=fAj z#n#Eaz2%n77#|GO^#N6G6T?W7zFASA(1j{LJvS@+uO3tu&WY#~WpN^WFEv>5$ zhz4KF;UG7|J<1Bh1V*T+znAm|xLIdfz1RP}60qM!#eo!fy*IA$rt8 z13bfF>O$l7yh!RTDX3rfm&S8rPx!j`CHE`m_+2N3JD4nN$r|(-s2$|m;Fg+SDoVvW zNbN`(KyGW@xj&Gdfsti1d;z>VHBOELpQGH7tzv8c{5v$Wb7?{L$vpyVW0RPric}-E zV3qM{0w51v)09PLw)uPjJR2!(U8oEQVQipOM~WO-7q1%ZDO1t+k{9dR6t+rI*Yh@P zGB=AU;M=~BZ62kZ;UZf4_b5>hb?T;d`##fn!1lwlJ~qEQbe%dfc5Yk0|AF8aLw9%X zm@h+pPr?U^S#AjVkjOa~;X$#gP$fe(H>Bgmz2tAr7H5(KWU^gr1UIbc27|C0iXiVrye*k2BVt)!~fif z;w4Yp?${BwR=~Tz2@Beo+OI&&^`Tc)AGfHumwc7Oz>>;{U20Ww+CM61{+SAUar1NK zaqkt!23&T8 zpfHFifjyS0ceww6vT>xGoQJso;@{42J|JE%{MW|_RXmy98Z3D`$7L)u_SxTu?fDT} z?i4>N+_5f-R5El$z2}8VYQ^QRk#`AQbkQq;9Lgpwck^$5pKT}f?N9e~~VXC)ia^R`BQ z$#eCF;kC_<>VW8A02PpJlN{Yg+KlV(T0Yrm)Wi|>wGgf==zth@di!^<07G#Op&)g1 z)iDUrE}XQgTa~ulqp0!Y$2pdf`$1l4yiU2^;GSyTP9MOTo2|;q%J07ORilGZ4s=TV!POLra!_mMsVe@te0hUA*-l)d8LioKbRmu#e-x(~j40ruc%~TK5G3 zo<6L)+0Z~xgNyY}zDW0+K&%g+d~8+}k){yY*g>UVa0J*>v+=Qw;6ORH0h79i6k=+L z@S77EJwj!j#M$(3!M|wvsP?;hI7zMdibwc?#201ymR1$t@}EZ-S^Ob07##mTc8}@| zi!0t;@l$dsF~!q)s%m~Ou`9Fh+sP$`j{Y`U;RB)KBZ?a>{TnZq&UW-&0G3BDZLbh> z@LQ9Ct0ZwU>^|@t9?^rlPCKvj9=J{>D{vQdmDEhIA%L4fkLL7{QQ?@2I{zHyrC@B( zybFfl0eGt1brfL^t-!4M%3fA4Q1Kb6XTm*AID$4qRa$|45v=$A|_O{6CFrmO|)kxSj zyK|1;i_M<(V=}%z*mv@oZzrTfKJ@liD_MIre!QlU=y&6569it>#_JI(#<_owGVz^s zRu45r*yuSa{X*;tu@l?K+VMEw$-!mL(exdo#ps>{{a@7H1fFl&wg(El$z$mzcBVt5 zE|eCdA{vgx19Em^q%Q7xACZ810mjfs2spsF851bZ9@Q^B*aKFpRuKykqPa*?%OrvePPDoB~?9?3V8OO20-GDvZ zZ5^d8jeiJoNd~P{P7;2U?dRi@x5pmvmzUO?H+aB=UJcJsxLG6SW?1}R)N5cJnl;XP zj}ZWD-{(Lhy*@t$wgsLS#Lj_xiMd|POLCoMUimo+x)Kxm0I7ZVA09kLMloe13LrFy z$NscdWm;^34gxq=g$6VE>HHm$jajaxb@n{3r;vRSrui z4m&I3qz!#oVn5l(p}0tLq@r7K*hYQYS*c|!1SQzJZ8bk+c%F|O|4MKWn{{w*O}-ac zOmmiR?<2_l9;x|yr(N-WP=R9m@qyg;GWF!Qk$s!39;z+yhaN>Kk}+TNzO{bh_*zDt zz>gtL(Yv_eo_(elN2@-bBy4c}_Bsc-Z#wk(+jOWR|W2 z@hqqS8nOE&yy22srJ31ns!?7WdggFAYWE7+vW*4;Q>&?mz(J9`*p^*aN(KM>P>b1> 
zbS&-jra5B}QO01LRb2g$Kq^c?;T9AMwM zDJeqO0{_%=K%%``4TxUddXA-E6Fr21h&I7nR!bi8$L7GZ#Qzd%8RxBgGp)6CEnwWn zRjxC)DS+2?5fP8Bl~3>7{YJ|?=I_A-c~_rp=TCaxIdwAx%Um6iKeeJao_oZut7C}2RjVmFI5+!9y+Dy|8}f`U@RSwoLhEHnPFG=?R3VOiJSxB0ChmBjTr20K zM+puZ(&=! z)j4~CRI2!Em+j2FKgZWLKOtzNmS`?}VgH9#%8!c2xZvGY@IvfDcAbi60_o+MZ~4O# z=lJ|7wg(EX@7qV@6u>*~4-LyMo9mH2Rq3~l47GZG8WAl}(3yEeoLc*a!Cgev7rb6y z=#N}w7u{l*2!d%x9`2#*?7&$vDF9d4uLiT9Q+^4}Im75n=q-mVA>yvu?xMR`*^;S) z?`~ggRETZHeHC1wWIkifc2EXIbMOobZy#@ESXudVFlrly&ekrN-tG;9;`uAPKo>CreUsRVHR%Jb> zn`q8fT>EXD309nQ`gKlIk9lMP%IH_KJVZ6{nT||*Qx8oail*Xl|84fEs!(shYG3~L2Ng_}D`bDcq3jN0WL!^4xz0~y&J9>MroQ)C0s4qp240?NBz0jdo3wr^1xpp2SHV)nifx3jz(-xjML=N_0>gn( zI&qY4(IJ3o9ki}MiD+*rG870r1uTTx7@#z%WfI}XL&y|g4g(2+O}2}V^+-VDfU}Je z5MU#=rzDzUB=HXkBsc;s(wPa)6dgigUC(@77cr!Y^9frQRr&ahA^S|_S2(_?v=~s% zlAu)!A;XzxKO!h%eVfPqX;1PC$%>o|w#G9A0>&vajE7ZdONkmNIUP|2{ObR5PJE0? zh`KWXf~iJJQDQj#@ia9$Z1DHV3!5yqZFQ>xo?oi0?Bb zRpNSDEVCgu$Q)}UZ1*FUoiekN{;9D1v~Qh4gONTiwMzAI^G$o0`h%XIf^-jt@VCFs zrw+#abqBXFMlu02GY~XbkO46;LxvD0C)66Y^jjjzCMDCJMeM+`2#P)=N}l$D>LP2uK#k?&bXscMn5h`UOCQa5g@@ zKt4uvjFC8Jvqh^$Ha51af_!zrD@D|qcg$NKTTDc8ni5Rwyn$IVufoD2^9k<6i(%Rr zFapa6ogn#zIs~));j;{Pg-v`v8Zk_~bL1zQD)_T!*o=a6SU)ic=?I@EN`rs!Icy=Y z)j=Hr1xBPihz4-gcz%jI9|@7E?&HYGH1Z$?J6)ec%;L37vA82XIw3#Prd;|mWCY`5 zvNZLJmlUbBM;G#e!#E+ zf^I54{S1>(@aKD^&Venx1p_pgereXt?z4MB044F}4P^|KHJ9OKeZO7wyA!#_2z;}1 zbSJ$uxj? 
zh3X#v_6t-*j)aNg2;J>hXGHrDesl|Y9(wEI?_I@Hzy9<6z?|dF02)fdojyFaX{Vhd z#<2zW+C9WtA6l0z*iuMbm}Y^id+#6^CcSr5_WXS;vLm?0i0gK*qF@2l#OD~?c<7)y zPj``U^poZBV&Fr2u;8X;APGOw_eRGy3#mDGlIQ@~Xhp~XUeh~-NhyB|0YA8g5lnfL zBpi#MCFDFB%`|{gy=MfSjsx@p+%5jLn`&S%Yvgu9}`n zwm3pUB@o1ddw`q27P~~J@c=X}(l&k|fLs7|sfHx|?BkBVkL=iQjDQcp2FV)lsISM; zjB)&o&H6|QA*xV_fB!lv4dn6OAU!%s3Q*u?dDxcfMpz(2@;#Jw!L_a_G zUu)WM`J&lL6%QDS`))n9HDKhDbYJT?Fq3K6Ee6&72+n+1(&52Ebm~gEz)94LcZHQ* zK1K=*KO$!hC3I~Bv4+c%f=-SGHjvsw6&>G*uEb1;5Hy4S>k9yudke7}xb%7fok{tl z`%T)p(^eH@%i$g?ZQv|YlnT@ps~6PJD~iQkVkU&|)Wx-*kp{W?Xv*p(woCU_iRVfY z=_PbNu)P=21ma>E4^UMThAWd9ybd5^`VYtZ&;6G`sxyWacO zmC!j2lQ>usM3i5i{?jdy-cU~qK{BFk!I3u1+3*|6{z}k%;-E~W;%w-fix_MR#}VFN z;d>JQy%qf~=TPAFaZ`t0+2g-+asLM930?)byYgS)#!hNA+wOfWbOVn zZ@pWCPl`i{Iy)4ebBV^j(C6|NGwWSSd4QziS6(5yD`+5ZS<~$tF0`-O4sYpt+&Ofnl*Vn*-}!Uhn@rs;MPR1e=F%yOK@Al5Q(NCPthq!e9UccUpj zjMpNC^Tu(%uqgf6(t6NRX4rxpL#rkr^|ffhawWZ=0h|2a%zQv2U%U@mZ^erBKn-?vzWXnFs6(!74|SQzYb zVik)=YJ>ZJ!G0>-pB4U-q!En)qP^^!2#s5$QqYKVy+q;|R#Yv$+|M{2>xQ!kMC7y8 z{9E2xy|@TekB{Sq?-mf)_SSGu>br{@Qrj$nh&A!WD$3{Zo3NzMd+Ut6>Ew(c7mml? 
zl(I__Dxk6MYFovJ^ob)VD_{b{`;i(H5;Y+sgA&Q_)O8L?kA={Q+l7IOfsFz|_u6mC zCJVMvT`|$5Bm!o+jrtN;?H{id^U45a$`uwxS&3%lLWB>DJMo0l=joo;``kM!@M}2d z8e)^mXC$hk94M~<08He}8p#DpVnELS0zm-dWAWAqwlB2wGwQk#H@yDnJp`mpx0ep= z7;rd>mB`0gb$P#WzJ4{L#iZ4$1l^-W2A>f~G5`VUmN7Jjo(~*hzoH2fmCc`h<2{0j z41HzSpojsL{fs8sACwW;=P%GUWbcXRzVD3zYgC`->ttLzeSHua|9q36=tU#G9kGl+ zU8isgI@`$o_1vXdsd_v^B#;-4`KMJBYmNRWDDzc=)lMh$Jj@%rE3m3HNZ6 z{*I#Zz0oMH7lSp0SE3ScH=)TgX5>P4mf(rfu!p76AyHTYoQv7>N$WH2+Z_(WW>EP$ z^TlKXS>Ig~vn~E|oMaU}-9;jRUW0-OO}kMr89ZSc0(2wbVjxGSNc^zKgbD78ETy;H zK<1sFV_@BBD7#PpbP6?lqm%aHxZ4JTW74}3zeFH0hWn~Bm0(W8$C1g zDgAu zO}-Z5oWf-GxZRFKmInwEZaiPPVbi^p-yx({Fq4pp-nO^`btz=)6z zaq3j6oa9dE)&S63yb>a?pE`A}cc12kgyOOjsKQ^nImP@cCHp^11o}nFFO$lWcfX_Z zu$aJAapJ;a_d~y>xnQY0Efe%yv=m1XKF29X&lgs_zWzj@7&FMkmXA(O zck_#$BSLgmNLZK?Bo7QaNLw1r>w?UKIU>Wssum=}h)q^JkIyj}T44$tNq^pV(})jU zuw>irYJ~S6UvhDFLz{jY1}t*pL-s>J`Ez%5<}`{R7219t{{?Pzgt06*>~8&u_X4d( z;|S&fM*i6?M~E$7U*_m#^o|j)#Q%kVz~(5GM!4;_L^!@jkqJ!%E;_)B`)aB1YwzAy zF`Nk3K3V)=iO}L4|A5ay_VbJ~7*aC#3s%Nbb4NFeNNMlMVPIW8p%my&0U@~0L4^S> z^N1G+(a>jPzmFqoQYJ8P4Iy)7iX+_N_JAf+kQ^lR8&|s-{a>T1rc`PTQ1Bc0QxN|y z45X<_Y4=&=))!3xs|Kx?(ogDZH1$}Gh91LRSAfeld?|D@yZ=72GGuDRM6b0gr%m$z zpoP{Mj?*eBtu9G+(@Z|cY<|k=vebf(4GHv|z``|T3x(~!sG|k`B9jU9hYy`PwjcDX zAQ4zVAEStv=q^$lfo_BzB1?)33P{QePRQUad?=W(Z%}fjL7^S2B%|CvAF%MC5MqZd zS^ciberMTXc4=&65Wqi(DtWy^=+l7WeG9Y{z@R>=TLH7~}sVFS>s`EVB}K zp7%%$>dfDVUL%Nv%6DC$egi1`@$*=3LI}Z*<RF5@RH|f^%Al*n^zVe+Mb!WC|EO6{xt+nNtf||0y`C>L$61sDKGjvU(;VCS%@5LaQ1ML~kbV75kpGj`{#~ z1;d5KqOHlA)SJ?@<BF+BzY+*(ux~bDHP+FxU zm&SOWNVY~50sr_>w3j^o9$x?YEe$2&NEX?Jo@QDcO~piman;}vA?!qsNj8tkC~d## zxkV|8EPPI@V1#xbOv%SB1lv0|Iv>GmIf8x>hOfW~6O|?vfy`&R^t^KX zp^16~R#Ngj+yo7gku9Ss4`1d2UVj1@bqk6D5`RLHGKqI0Xyi~MRTnrejN?p!pG@C~ zGN}AT&|S#}L>x>gJh6lUc0;~EAidP*wkehz)r$*ngco?V1`%CE0U2^3%gE|gV+!fS zmdu^LkU~=xk?M!>0nEM|+Z(P=iU8FWLysM4*IF+DiPisS@#Mk-kikC?F-07S-)JM{ z9J}ffDofegMIZtpHYs=Y+*=W%Fvazx@yX7K5GfZZFTT^#v5J`t=!x!DecnYT%v)cd z#vF1CAvTCzVzvB0KH^2-^B!uzYz8R?D$X-Sa<%u3adzYyb(V_>6xYr}vSBQRHm>XL 
zhrR_DJ+B_)_KE+C3$KZP#vYzX4w=PZs1zb;`SB$UZx(lrqJotz!i177a}Cf6N66q# z#Y?i;wAO)pgd@2?a9u3kmrGi!K!jk!y0EH;?sSgW^ zO@c-_q|WS>$6`?Dzdjp)#?_#UauXf3)Y4rL6HDhWWTXi|no9%wj1s0IuaHE^LKSl_ zP>=j;XmoT2k+MZClI$MWLuQ0#jcgZ#b|QJ3OGXb08DCOpAjP5`JN`Vb>1K8$ zCK4V@Y%4+aDj z8x&(1s;?#k@RKIb_$KDDWz^stsy<$)=*CW;BCMF`SUF$Qs3dhYSq)Gu4f6x6q>Anu zRif(DW16Y}WZX6Z@t;2h`E_lo0?9Mr#0*-iA)%m|C83fw|XkSg1vF%-9T?YUH ztv|b5baZ9$t|zV9k9&_{79a76Cihfc#>ZReMkczeg811v1SmKG!aPl-(p?VmK$Ebf z6wg}ubMDx!2O+xcVX{)>yTEtq1Wm=9>1i%$Cz&7hEsW+ap_0^-^UD-cIHE?WWZB6Y z&-j06UBpA07O*Jn z3~OIkbg>C1;^oW5?(v}2hI8-i?BCTG9A#1QNX)hSSz9W0_(ACQ2L0~?G9H$7gIy8A z6|8|O-Ra@M9KPD`-Q+w*CMNywvO_bi4TD9h1vjI27gTlks33j;c6!vIfuRpVShQV3 zzc<-m_$x1WM|GX*bay`ptnVHNb{Pv6z=rmDDb%RjRw&K?o1QGM+^p=6_86mUb{5<0 z?u(k7nqvX5%m)V{OTrD^nAZly4pAqsB&ce5*>Grp?;H=QvOUpZ?TK0N99RNa zYqm6%lzzHDe@ap6R7`4x7a&(G@aR|I2*J>|dL981N;xul-4Y{_$TFge4jl>#$bw8N zO8n#xs0Gv=4?!Pl4O4^aOTcTc3YUk9vf1tuNU}#CcnfC-<1)B!*k{lGrhUM{k#mv^ z-qwu!B&hgFaGZ{~LdI{&ZP1~O>LJDjY{<_fxe8#6Gi#!)eoAHTJLP=^g&;w@d}m8m zDqGc{3aJ=L!Oi5n)h|DXB@1Q~DbKCZy;OJn zRAjB*Orqi9Av?;@OfP6PgnLPtEI+MKJYH)PZ)<0h>V0SMB<8S%Ev=Z9olGCgl0+|Z zbt~uh60Otld1#iIWpNpYQ`qw647mh8zp(TRJ-q;5fKryb-1U@)nzM>Vc|a*q2B9XG zyDXAUG^wGga89bgaiz|JsZ}ryCkD}JpR+0%#CtpF220^LWl%mV{1ba~Jp(`q7Bjx> z84hrEXyASNQxMne*Be8;@@qp`zv$IwSy>2@%i2(*amp-^pPscPl3z}_lWc5%u1KP^ zvQQ0)zU6h(6n1C;iO-(-UT3QH@x^Jd@V>=2iaAtJVgH*z#HmH1Q0VWofiKN;E5E#7z@*=GgN~blw(=VEwx*I7l#p z)dVKzIe)v_O%IyCbt>tRGH#}bK=JW;=V>L}iN#eH@o@@A*=Yas6W>xYn9@$prPEXK z&{k5(jW8%186ldJa#+DaLTdAceoMT=uq2pCFRGieDXapwS}I&l6X2;Q+$G7Xszy=3 z%_`cvidEWeQWsG3=nR}v-jQC#)14x2YH{9aUVfH(Y@!X%QUm$(qIO1-a9@#BN)3#Z zGYdBCclR*<8(l`0-Lmu>Si48b+2OZ0Q6!7}=rkc=7v=%f$_D3pu?tvfw!!Iszrgi9w$pheTs1aU_44@F97B-;t5AwGy@}uE zpchO=YqE3qy}q7MKN{pLw6SVp=B*o0cb+bFw*;uKcG6rrdm$qes(MtQIzLr|r(CrU zJaxyH49AOd%B@#j(ko;^qsnxQ4)L+&Bu#heF5GU3jPa#tC^6DbMY66q+JpAZ)rBQ* zk8fq=3-X%O1ztKx!`AsykTXjT{BItT+@bV4O;>nomkU&q$ys1sf1QP8RJWJX-1K0y zwTF$hw;vhHvl(Dj|1XyUZG269Ze9wSLz!4u4^n~ zkkYu=ILji9jBgyx8dX;r?TGaO5{r|@GkfZE3~64AIe@kc(KyBOo3UR^+)IogYezBL 
z8&W}aTcVfW5#iF#VO^+oA$CwUKxW&1T}0K?SgM2PfLl2^WdC9dDL7+U(}t~**Dgn@ zaQYXEWmU;#sN7N6GfUPnPP+a(!!I}eOjVo;=hm%5J_d6S*RP470tA$q^`-B6b?&Zm z%94rOGoyw09!vq{&n<-|25-8^^U9pRUtEsQyL10^d=ra4Jt0g|Q4H=L7IGU+wH=M? zA5J#~8?MsKxRtTcEwxF;yoe93=8+U}{CJLY}Lr-jmY z79lFY7~pM7#z6HX@H67c&LaTzZqrdMp?{piZ``sGJ@HAq2_4y4WMXa5bj}DJoq2s# z8>C96v~P6H`S@@&ycXTQKgf)Qjug2UTnmuwl&>{X*SQUS6o3XVI4ZMeILKHW3CWQmQ{`TKH>+FL*pm8bGvzi&@hNroQx89hOJ~;v51*2n$}jhS>;x6e zht|IoEC0B)(P*o0sc0R}jJ-%UPM{ebot`S2HK=1=Pc4Ef-G_M*=1*n6T*>c{e(zLc z6eypGBph{EYiEd&X9902zbgA192EDX%*7=un$L{Ol^5uqwLSzo!lLV7)m)b zklxcvCfG4VQf&Dl&`PQI>D$O4qgjO16>hSIto_YBg+{Sc8NfgFq21zB=}0+2ayM3- zP(RU;DG^c2Ha;;7U4w;e99~uVA)gC<`1OSVEy>OZ-$g)F_odfh5pOld)dvfmL+naA z|6U6MoQ2Elqo>jN9}#!T*;<47!zA=F7FCMa zYR{>-lT&23Z0OPVJgL;XuXLrQ6K36!t>FdVoX0=#Gf~|La74CJXTp)M|H6$Yi3RiC z1VAF`6J>D13}fIjWCd}SWBN9^C$;;m`R@Obkk{KC&&GDJi!$@Dq{@Fy&*NHAKQRb7 ziOrkU*BIlZA3bSvzB}zE)H5Z=V>SR+BUR*Se3iBC;gpG#9rp%Ldg!VY?@<-Bs*)B( zCSyY%y0~1?BQWR-N>SBa{MotRs7>nE1#b#6dG3_4`*~1UuoOPOoV}3^C}vL1G)@nh zR!!9%hERmYJbENeB|FmY)XlB^<9E@(Kq8!Sc7a4y-=4GF6pGRu7FtDNTKIH;+u z0!QJo$k~avn0XZ>lNzdTN$w5M4aI$C7yvks%Q4!4^3tp^L8HAd8b}ky@(18;3ax9DF|6Xj#U?h`fYXH)y5)#5m;c{UyVP8h#G-} z&Sq3Vh5LB9PWJd)F(Nv#K%|t4eiO>7sPqh6d-&P;=jk7!!VbbOFB59lmoYl=rj&| zYBP~Qn>A3LbK7&M`273NW07)i_h&lJIyWy%W}POFgK9PITWPPG(#tlj#|u=M3O36U>8*|qOGy-(P1H9?a{wz#?~rcu z_%c{0bYLKth9$@Mwn^c#S}vUvZopnaU3{)sdyqxj16Hhddxug|=M#ZNGU$^(Pep%3 zemS=~iV#4$8raI?Ml^0^Oj}*5vtb^So|Cqabs4v4gTzvhSLQ9T>T^}rH$j0dJ^bUL zMrB=V)x}^zy2bBavUVGGB(Sz}sZ;)l>ik(4H>SO@AcXe&Wp=w(JRT^$u_0_rf7u1w z#`gdlYLCk^GOcmB-B(dl5S@-}QP)@Xc6K?Xt&Yw5THRNFQq6gG;xqn56+Rd~Hat1W z)6D;YZRRXy{!pf*iQ1cCobGb6#Nlh~?tTBLZiB0&wSS+?OhvkC$;EH0{lg!c+xe=o zV?ckU9_hkfj4<6Mo$DEyHF?o0u0f; zTXt%L;~%A5S9E>lC+X4T+}9k{BoQ3=O8)UOOQu9hgJVoBn7BFQFxb*5_0=2be|8?Ti1NBs#*#;LK6{=)12 z>eG9-7r#v;?4;s6w5XQZIY2a7_nfy%-3&VM>a~2-M$i76sy+F{Mnz1PtQ=MN%J=4Yw@eSRvdQJAJ-LCKUvs<*@$9; z0B(npoiC4V`BGVPb;pfpe*ua>r|bSYEku}-iVDL?*{7tJ?72B3PrC`O9nuzo)NP}S zpB&r`5-^TbEdsuKMhj`j#m&}&3Mh-^LvmmhzQVJcNDfG|@PDcY$uIds!t}(K=E1!A 
zZAj5llze!o06dlDdaTsOnDM|1^zyIG5#CdlVbmuo{^RdEsUDm{mG&uD*mgZ6LzdJ9 zwUT2aF# zoL5fQp$+Ofbkr1v3O+oH`;!YohnxFP1RM?!>)(Bdm6n?;^to5wB#qqgP)cZ!0;Ocn z-r9J|S6LUoo#b0m=jMr%A|hz=xa6qMQZ5aT@s^a#x@b5H3Oak;j%@C44DQc|Pg&<66ke0r32x@6<$sBA8n`HjZ2knh%0}6)v(s!8-rxK6CU1(5 zN6!FScf<$vG+3zEjoEq>tIM88ty|LJKhGY92I0}siUY^1M6|HZeJ$E(I3T)}3N1s` zQUVWcpRckuPuoK?M^Oax*hr8g2Z2!6uJ8=*IGe-bmtWEbmOe3F+2fK#_XLyi`0bdP zqL3t`1O_gLGGJ(e#5_`?bsyPr?+$to4SpM8aXA^wRgS1KEXnSgnI3R1Xn|HH>Wi+w zA2Vks#hyYg4Uju7qYFPvBgxX6%MEG|`(=I0kYyfx-6VhE(QdJf!ApD2c#LGwl^xT* zJ(5^bb5>UQj)N@>QX#dhq*TLb(j;%mli^CuY&f2atS{OQlGWiidp&*#DW`|F7l?IH z9IUmaZ?X1dVsjXRZ@|lmPhae>j75fIUR|U~&dYJ`i{nl)nCWdVs$)0~7Z2d=GdEb! z;H!A7+UM^_9a&KI(eio>>scg+au>Q^7Z2rEKi!IzjYU`d+II|hB7svJXQZUzlxs`Z z_Klac%05GuWw7hFJ#dqny-iTZS_XnAnJ-Qy!&;sOah?yC>Ps?Qh*Y6*wP5@F5`Gui zB{d%f=kJ8qzNMg&+7aP69bq;0pL2c1hO~>jn>Ap~<7P8Sd>SR%P4{J9qjd; z_D$uF_yi5U>|wmyzGR-%oaK9h;cBZcuDc#up6D%U$Dc$Dq$gLtEelWn`}dPno=9O8 z`ipa3S$X-Gx-XzPlq=LoQr$Gnl)2&NhN5w$49!$FQ58*-T)1P0cO02Ngc}Ma9lQR) zN%fl>`ZWLy_4T8$19^FW#&1XecD#-{Psb0+*4g+H1fW# z`ejT(F2&_)N{6sc16HeroJ2dR(*W1&*dZ1_W0s8_eWvU%$ORq8>GvOuWs)2a z5+ts*xW)=&{lZvW5~kvJSsC@^!pV#mbI8$acXm@wTo((>w=`1Q@X7&pi!1By6JfLW z_^PT6UG2p+E9e6aE$vOrl|4uQ;r{-=I9;^8SEbYO?R{K+F8iZlBDVS|^k~GWMkO3N zX1ZjzCR%gzR~+pG|C!`^_BXKYFk#|q`)mB3F|TQ?>h?C?@Zz%e@N1@R$tCmO@>U_= zt)E#}m934R8P;-L1?}EHMtzlcp#)*-_*n1n-prLdiEDg{A%o53F4L1sb=ep{1N>F` zjc$VY0Rg&k+0qifneJ;ysmiUj8>&KG@1wn`+f03N;JuT2vL@H|SmW_Ps=3Q* zNNs)HlF*z2=I@_BrDu0Xi_diLjwDP`8}?%M?{MZPa5A=|-4*;IXJ?8PX_tyyuL#^d zkaCEZwZC_}wOx(1fK!fUJd8Ne6*zbAMEqJZ8!kNbY&T}hbiv@YPd1k;Gl}(l?glEC zhc{c3C+mFNGbDcFkWG7A<7z34Dn91D?6%>d)vH(t4cWMoTt^4zUAX%lF+OKRdu$RW%z(Z9b~kJHF?!` zX+7rY$-DPG>ZH1Kl#gO2W`%D8ordaTYt)8_MgHR|b%)>bw^VcS+s0Zq^l%@S#?{Fd zkoc|nOwn1N)ZD0cFgq(dj&oFM+0bW89J*m^<+Z4|a^0F{^TT~QP0|)i%|@pw)hQaA z>Np$yvWl)EkhX@|xQv2RSs88g#qYk>NiXdmp9?x{Rtm6z6=+j7N#Xk`%LY|)JDz*Pp);f{R>)K{jlZYA}$8WeIE1Ww~#Rk3ZM5Y_;}2Ia?VB& z(3at9>Ke^{8gb#6gzby@NZy5nnO_~2D>;cL)dVg23YqEXoZ?L63AJ>fM<=$16juX`% 
z55&}Sx#(HzNNum&WqS5_ayZ{Ed%jRrX;xb>xw$tp4GWYoB(&<%CXEpB|B5qU8V3B$ z@L;`wi-yW~@0;O%F~k|x*jhW)kemFGsFWAp1r4*TBwNJ}zj66b?!GltyzKrr&Th+`iBh{qbi_Ou>`8{bq&obv* zCJr4hv1$#`^tM^G?+X<&RpNU>8y**5R{Uc8v(!&Z!J2rxgaiURb@{lCA=US_O4Yv9RoTU=}3ZhfIVqtor zD_QpA1v!bR+PRYxGYy5xMjM}fQqHt)fEdJ&Cs>$+y}eN0D6nywi(|O>`rc?vj=%-Z z{7`z|J6zhV_^2xjakzc?8%utUO5+qu!QL)j-J{9iK#mW6pE@xT%T}=H@Kl`K7ppkE z4$Msa_KV#mu;D_kknn%vk=Nsg3Yiy^EjZVxHP8X#cnAZ$w(2;v-#i^Gw$L{A=w6T^_Olg-nC#zd9c5MFp{~ zmXpAF6L2LJUuGkdXA=!#w@LRl8HVI)UVPKn*VloM4G6^|@#wVV=jUg7a?WMrQZ#f? z>84;d&$jmDy@?QT+iw-{+z%=_`5|s;CcayFjULNvZOYW~^l}DND0B?I@>&4vIax^J zT>#IJvTr;bDEiYZ^r2pg4LQ9Z!+8G4&{(GZ4^(AH|D!T@s=A-r*^0dH-*D2Dq*~40 zpkJb$LkMx7+N6TUUiUcAsyKH>?V;pbIFL_*`Al z3p<}sRbg_SqhW6sW1K*f|LnVRV?T}m?XLy6h`hXBZh)JVgJSUmS%8n0B>t^eVsoRbE7jPf7QOO;FdVC`!`rJ z21?G#>c?xTI_X%0r>EbWvDLS4I@2@&ktqA;nw^QZ0;Wf&_KksIbQC%=v^M@;(@}Ku z551)BXtzWATKlFRa>~-S?gbjXsKNN|J-;k|2opY6Jjg4Ts*uZvrO*Yf!vQ2ol|dOhz{i}EG%&? z?eT)N)Qp=dN?P=BXFn2hj?)vL+_RkO1S^kf8JC2~vR2xn7PW>Q{D))ZA_ZEH8sE(| zd~$h-8dO)*r(i@&FLVoadVNJLK$yM1&CzKWL<~nJc8%w^U-au%^XF>#zUBcx;pkrl zFEXYw8){F}JLMHM$s5db|2cM@RRQEd=;3;mj*hNGZ~9ZEnn&>t-Oa-N{Y7bd0j8DO z6^<+!GjCJq<^HKD5TbNtYNB1s8NU7d{Uy-ccp#qx(y>5F_}_Zer`8o;hcHc&Rbo;= zcDeF7!hIUXM>87Cy}iWssxFN=Z_y;P?bq5{06iNS&q<6aDjNbRyt_7Uk2}PLo>;kv z@bdG`LC)~z&Eg}%3)ogY9vc?m&o6e*X##CWj59GI2f5~1H zq`$KhCKp}=P0rjVo_uajpBMhHNs_wuVsQE0X1gkD>s@9^=AG1L^IZq06Skssw1*3F zUca&<9qmkHVgL7Zk&Q{0ozD!!?17Wut%tVDIYh}%!}R2oE7)o(D;)c9$mz{;=c>xr z{J3K-S$ekSHv;VQ5>S*8YH-);Kow4!o^9A4fAx5B8g9?D9T~rbhOR$zqGK)r(}H3K zeQ|wxS2{D*!@bNrIPMb^x49olQtvm%f|1;33qXU{6YGsNYCFN`!rH zP+|URot?=YnO5#$kaxZ_I4_{d>hRF6x2!+lF`9`ZjJY3Z zZhsR!mjZw}co{M#ahw*Mdf&a=YnFOi+Fj%Gg`z+B8C=|G{1195ZfJ4Z{zn>Lb74Z= zdrU@MQ>jq)Wc==$=Kt+pqwxw7K8w;ELwU6sSlQZ6Z&Q=R(55?$N33}3{Y!oK{a;K0 z9P*R*_)ur5WxG#s<+5jV>K&d--%fbJOKUBPjaRUyFsAN#1KmG9y&uif!m{B%NpFZ9 zial1~6~ucEbFa@Bw&B|@B5wQd5LUSupRV}D;7si^8B33oXzTCZ4Z64M(@!AVbl6Ka zVRn3;R4RXv^wjWG>+saI#bG#!aczz+Yvm=105;`x74l}m{)M>Y{RGf;uLVMxl0@_f zbQgCX^o_-bNTW3qCA#M(iOD!2 
zBp|SDn@%96h=5+JIvkGa*Au;d_maa!Q#I8GN^Q@MKoslnVyw9&t_w$1R;XWY(9#YZ z#(L`%{e)Is!+-v8YsoABXSGz-+BzcF-4;q5X@ce*htB(sTYDFH5WrnuJqIIlQ-aXH z4pr-qzMoL`+5+><3FYO66e){!GLQ4lkUDSL8a=u#Ni7hYfY$yf`wHwt$K2&Qil5#_ z(>zo?0xn#k9-a$*uzeX<_l{0fPa#u)?g)K)@QyoL7f5D!o5^*UefsV5@@1b`Xe_m+ z2WTD0GB>oZ9cZ|b{?)*}-GZ1>d|utS`b$Q;onHw5dgK`u@WCW2lMGEbE6)7A(BvKglZEL$INk(|@3x?>Om67t zai^>ys3S2lHT%*;@{}-8pf}g)YjnoyNvmG^Q@95en^axt<9o!&$3tg~i8pU)4Jvvz zS_$y6JgKy%WUe#z^pR_s=BNz^26+d^_|IAyT2zsCT5Q4@ADx>~hijJ7^J*-mJCb?Q z&bvRYcDLM!SS`Ei*Vhm89+hdOKAWufg(v<^pH9;0DsHcyTSoJ&vW%ONMRG$BC$&N^QT~bH`bK9w zSk+F;U_qS{dul-3gxXb2zCl^Mase~<8C zwvccGTu5^C+krP_@0ZE;jz4l5XC+f{_RnW;uV+B0GOVcn`$s<3n8*HO4*-$1AyPMz z^<(ZQp6sgU9F5Wmo-CDk zse@M-XE@#JXEBA#%Suvdr`_+%)pHbT&kBJU$UE1Mos4fDQ%F-shZ)_jQ+T;2^NdbE z^-Lwt&tn>Q*C-LDE6BvLV6+;@`9Pgc4<%53Mm!*`!YGd-4oV`5TW%=taVE*M7S>y$ z)8#6zN&$858~!d(ci)xoC1`1^=au zbEVouG0TrNA`Aa|VJ}Bc!8ku#_5DZlV`0#o&R(Fgl=9Z$P{Qj4w~bX)Zo(Ekpkej# z70Aa8ST>Hs>TmsGG5yW*Kb6_Ozeub;5XtxPmJIhfNoLkuRGj5{=N-{&H9NCXQL~S& zG;ePC+WsnA0OTa=q_{Ywe-*6UF0I_uA8c4@cQkzUU6a@6--(Q=Q&CuU?8)u7+fYqLNq%|0q0lqyMQr59Ca;_H>M5@#2e+C&PM*2;hH80Go_c8O z#6L}*+a}5~9sfBX!L}Hu(eU^6m=6dtAFE>9ov-)Vv^y%q+8>I&)2=$3X$C)8r!Z&+ zlgi)V#PCaP1@p%R@h6{m@R-<|9UZJep9q7Pc9BK@Vr}DALxyC zG|O$XlrWe-R9|z)V=!FaO`a{seBkJ1W220RtGuyti?Kgy_GyvoOX{)KKUw*F3Amwy zw43=bz8BvkKhw4Iy=4Ajb2A^lZDtLDP>SL0X-w_5l4W%iuW!D&W~udmV+gxQJM6aa zI^Ke#6^WExbbhs;m5&NmRVgg}BXochrm+YirizWKIx;~B)Qwp|&Ytcgd_F-cU=9ll z+x){au-GX~OKrQ^U4^Kqo+Z#&Z}94?9re5ISFbMTeina1iBb7&bxcP` zndz>qxCW>V+^%S5n2oMlzwBzY4E%r~!1t9juY3Mx-e%qbNPF40BzgEY=gaBLRfAee zwGK~g7afDTXQNQ>=2s*0+~cozhYbSZd;N|Bx6X4g z;wa_KU+fcvrFQDfs(Z3FbAJ3LMypnJ+#nNQ(uLZ zxw%>hy}((1AoH{_1lKmzGfyO0Pe~NMk)O^#Ws}6jsRL9=o-PjI+F~m@F8-VlYaM4u z&8x6k>X$yc{a)#4HmF8s*y)KAs#edBOAQdEDi5V52^FRP!cxtguk}>{_qUQS_-$ zf1-hGx%DHrufuYqF`+j_a%Cj4^Wk)gm&I_n|tUc-_aXz*W-0m989+oC-9xiwS3) z*ReWT79rvHwxJDwfD1I&xWX#Ab+Go!SyiH$i4nZamaXl^L~uV_H}-ULiDb|k#ILYr zNFHlvdte;Fqq>x+0LmDiAk!aPF0xNylWsGyCLakhQViev4XSNI!YMrno0C^k8J^q? 
zG@m_QPAPo-Re2E2T47WiU3b$;S9W}aAPt0C`zbxmN9k{2{Vn4=&R2ir4k!!_oxonA zYF1V&WG`ry`#un7@E3gfWCSGpWI&+$?Y+ID6_f&9d;9Y1n;W{dXESBwC#c)#n3yKO zWBp{|-U)9;7sSQFboynoII%!CT%R)7A*4zDcJ1mmvLr0Lm3+o{^*1FrnVpxD`11dn z`_7=IqOM&;s)zMYdT%1VO8_AtO+h+H4x9L&bV#9D8ee8d3THb*S1tE$>{ zZE>K3jjQ3a&(nO?{%WfD-rD=>>g!wC-mHH1{dw!u?LC%ZtP5@Hy_5!vTT~(`0+5xRfBbC6gbVOx_)xLX%5;OJG@_)_R5(M z#FuMt9oMsr@zkjmNxtP-q)XQST#?y=qGRl!vy_e6irWemIKt+1z7J*HMVCno78NMA5RfKo;nuRWMbrdBSRL zdA&K}bJp{n79LWj+-1+3&7j|b8O(W&E>9aWx<9H&NMQ`cH56=v$6_4_K_P7j#4yJ7 zjSf4P*Pm8D^Fs}nHROHO!^sNkf5k7yXQ{@YPxQj;H-!1@7(!^7L<$zfq#swjR4otQ zKBGMwVnndu!MpI+rk#x{UFMrbiM_SMjL01~u>L2xHmF7C`MaGt%yQnN)C^dq^KXB0^1#1ym1A$`WqHyj_s|*x zCmYX&DxK)xg%jqKtbM~wqmtm>_j!fR%iwZNQ1SbLa|@XAtCRj*6nis44x zp;Aq~#m|Z&x=>v&_k4<1YWM+re}8^hmZFF8O=g$-n#J_AIsd#KpxQN=W>M z4&u2sCUU9TZ1x3MqS`|6t)0$G>fp^BduKWDkLsdP8ajqGLK)KQJ@>SsIX>9s8|Wpz zXyDL4QDyEbaSP=-d2eLQe5@E+y^M`Vg^`el;7{hhwU!u#>4pWLlow|HRlgV-id_#9 zp&y1tIEYIs&>Uu+6_C9B0q+;rY2+d~J4^${3J%2=<5Q!W?uyM5{yzsh} z_afEW;v2C~Snztx{O0*&MJ>CCNYeCmXwb$D9@Vrc!)$s@M{8-sACmsOwyP@{-op zb3ebi4qO+mbIw`9ZKLL*^2sCzfBX4XCCu&Y7SPHkH(&(z+Z-5DJG0oVD7^Irg?RJ% zv&OE?eIO%SEdC-_EYJ8YE16c~(E@N^q4VqW1V84dKhS~}Zsd2`0YQxmm|`lUQZ$2E zWk?H1o7R0|Ah*P1A8}Mw_b$r!w6@=>8qHyKheg_^-f=E|(RpiUVi#2cKc~)darf6Y zaC%~^bgN=$?sym~k@7ClyVmO0lcA4`Px_2%5v!qm&60p%xt;K&FI8C}yq4)wC(rxE zY&S@(ul584S?SS8`HmzpXNtcSS8so=If z>n>>*PKQ~nPm!GE^^hGpTS^Cu3waI-vTs0M7V?Z+0ki8%h3e4YKmOmeeB1!o!;}e` zS;y1I0+#Iy6$oXfyQ@*7y~n|{1tW<;ryg;TOvjuixB8}30B#3fKNK6Lp2`dFkD*I# z_0~38Us&Jk$>!DQO^SO)1bu318K2=>PvWcf^q$MB>PRXt{Ygs`J4-T0get2*U zh4yGYbq;KvS=~%EEa%kOU+c(Gqz;6Cy&I0*+&giRn7s|;m)IR#+LTlfT86oI&0YJr ziyufq_2DfNXwz+la^b%}%1%rsJ@cJ~JhhMDeU>=+6%~yiX0e4ts}6)3cF8yIYM$(7 zqlqiCt(pH(w~%Hj5v=2-4o@jwV!y}-Pten+nI{&&L1H-XvFyZ?$GAUZPfN}7WlK!- zm=vQkS$@$zD*3HJ;Zb9k#xw!_*4sBoPI_h94`A10`F6!ft^!x*I1RA65MpStf_1uNwFs@%R)Z|X~> z5FO;=AYkvESWg-4WfY`2+Y$Gpp!FH^Y$PR}+n;9d#-+hT_WeoK_0_8jS1Y6z3GCDC zE3=jqd8-PY%eNZf&-7)DUSZEa-7!5|bqG0ht7mftDYY5MHU?(rzI#4HkV(mcD(h5L 
z6je<*MU`dOFDtOn+1Sz)y}l7__~T-l>m2soM3MW+VBlfA@aa4Ij$-@Ie#|NE@ZV_o zL@w^kZ!wh;&N|wDJaYGe%s|avy30m`dZYfIZQujr!gy#NoPS1m_lup>Vm69pFu058 z+4m?SL$2AKJlU@9^ShJYH{Ms|E5* zAl}OZ@pxZw;aI(%hOPwO9VM zOx$QNIQxQmL;2b4v`lQmO^KboD$|fVePJ8cW%cWUVwohoh<*4bbqKqoSGJ#ycaO%p z{rXdK`;JkWSm6i>A^R;0^A1qN-`nv=4L#i7cb+o5I{-h0{V1oZKmK zDcQEjZ`C+kF0iazR!JJ^nyeB}JW@ljO3iZbt&@NpeXwrkE$?%!6<{!T$#`lvKuo!z z>+A&)wxqyWo6+FmFaAB~Jg$dSs#aV655Jl8 zxT4VRlHgflPh4Fna^Kx65lV_4HD%aK`@Z1MG~X?3yZlC~$U_1l&*)n1a@ zMyuQiS?9v#uC)bD%hNv)kVI{Hq~?kY`zY8JxWi$6K<}-J_hFtv$dy>w0{CiEA8oRs zl%)_eh)?yZ5)10af70owQTwdZ{=>gBJrLmM-u8f)x7X8K;r;mzF|)MXSj}!Sx=a7+ zJaRwr=ZVq^2MGeFQ7@KSYOaK#wHB+?VDE2S?zGR?Pukoj3GrKHQ}NkQVoJ}G9U73| z-u+pSvGsVbhunR+bCDb~j^k)%f8z2bo<(NrMI0lti4eH`?b);J#?l9EFw>)iH#ny@ zS^F{J4CeFZ+_z_|B|Vqq6s-om+cAeGUM!Qy7D$Be69X!TsYnf?U9n?Ydio6bS`{30 zv@0_Uvu+hZSfoBr!}d+MV{LO5;wk0uw%o=RRD6&g~WZw?9(G6B<% zw;%J3W#hD)Tw6=eyshSblS<>oAw|R3>YF6%Z;JEbOQX8=kH1wc~q42Mz}P5^X-?B`oRN#$#A*c0k&1a z*9&)t4{&#?pAPJqK(4kMGv}@7%A9^ms*10A;J}5w^RtUh!IRlfbNcWs^AJw^!(f2X zYuLOb2|n(WQyRk~fo>p&H93=!;74nR~>F6?ZEkPI*#Y-II@c zK~1~)oBL3ksw+0;%*{&mDs3YZlQlZMH|XtGb^Q@5!C3fBn9t0E{hQ^wI3MhXi$`)s zmvT5(_suN*TzS4nC$SdOL$7P7sVnWP5p^T<^y#OQ-!tkZdQLdieW-Edb3+4+eS4!v zlmp*YHFmV@LcC(uk#iFPZ{aUE{0rp?anG2`q2|dKQJ}lNd*7|bf zRR1B>(rl=0R(!R%Jl39sVnA>I-UNm}%Yvzj+uX-t?AVhy3N;q53CRl-!Ud2^ZAr>O z6>zYkgxDcY8qhF4pQQ-ZTEvfc^y1@xaQ=1u3uS}J?k$K3hg#Z$O8M+S8!9Bs{eDIA zb~D-3MxF1Xx@t%#&g|x(N3IZ}fE~=Dg4lFYLgqf#GNKx>hk>P}5udTuVXp7*u_4+W z+OB5;h#?;vwogws@?=+rBIGId#N3FNZNp!n>1-q(9A=9f1#=wnp7I1L&ZcC_M-OmR z&Q=}8r1O#3QQyGQCU-eYUHOsj3?OMc>IScy@h;M>cmiWLvl|;aS z&GvFv7aqla9lfirIhiMq&y=}}Vz7$dl}}m5@A-o^a{bRyd(EhXzM+mYbk08`YI~uv z5|5VZ!d&p9zGYZ__gHLY?R;Tu75Gj5Xj}rSrYgrQ<w z)s}78x?RV%<0%co%5D|Qswe-?BX&o?8nttu!U@KhU6A66PUv|m4otB38wuALxa;bq z7T>vY(kGfgXrgz0ity}WMFhDWRH6psk2aoeSJm4E(M4D*qT(c~f^d7-QGwc1Fk#Pe zp5Nb}Jued3z!uAoRGxDqr-nJAhd2o4Fnp;ah%lwQRWk&PC9Ko(xc-Evl3n;+Pxt9E zJuHC}^UTL1slXw&aw0w7lNn=kD=RCv#K4<`!)z|Q6|m4m_+$vSz6e63Twz-O7TeT? 
z7lHxBGAw*zz`L&2^`5~fW|-5j!oFU<@$-{yd`ss+l*lPRQ5yZ}WayPWVJiSOtH`j@ z=nNbafEi%GKWV)>-^M?&L$n9hTY}&YP;Pz*Ggnn}^ZXSdIWmBqy=xA0!%R3Vw)#Qk zsS_h(0fk~8n!9IHSNgE_o?X}YeFQ0kg|0g$yt`W`7+arzq5vY4t3lNiKDhXer3<-Z zci>n8fG)sy39v)6#p_o#a~; z+!g?CMM6!6`*0HHQuL>@qx|cg_@^lDq&rj$0c*)>RayklA>5k=uq@Q;@B0lsJUrYI z;fmL1I{BnjHV|o|8cQ%0!vYi#M>xyq|8VGT@Zkt>*ZTTAuTNo>R7}Fl` z*Pwf7)j9xQ=M{K~8R`hgQ2@8%*F{2OcJFh&f7u*Nk*OHkiND-XY#JZe^ena|gxhA@ z{5`w&2_o!RCV%sl><-Ic^O`cVegdftqTflNwltAmGG}Y49rH|zA?=fwk><6ulnika z)lN!hETR?o^OJh3j`xAy_aw0&Oq4VK_{~|)65N=gNGg=gZAMRiwV1DoI6@AuqLzOk zLZXk;G5{HcCGd4Rwk}qrb3Wr&#FkE_QMKFpXxeO(r?q0plE@q&g5AUN=HTE-F>Peo zZ5y^btFgGT7m^9E4@?qo=S4!#G%ltxI@c5>*q9~W#)+(>49f5A!s~&c^v^JPD~Tsh zW(a5ty+*`JLHLvfr4=XF#TZ3xKGgP-CME~Q)w(6C7gr3Tg6SK`UNuv2K46}A*xBDu zybd&n+C||BoH)t7MS?rce@3(-<AbT&Vh`F8^K4T8f?QsWqOv1V9H7U3 z3iD!m?39En-^dT`Z0ZAuSUB~)V6pvlmrd?(BC>tnj)V?H5jmy~%zZ=17B^0U_k3Y~ z+eX(`#L@{v>NcyB(xLOl-mGH$qL0q;le0$W}LLidVkax+x+2^0{`kv-XYO3#PN5+>g0p!C$eU$X~ z?gb#j>fg9U5Ated&qvd6U!vL-A-C2;S#+A$?*fNh#ic09>X1av@S>=&bft-~o5enn zU;9kOJ+P&vr5BT7dHc^yINy!Tiy6n*muTgnYd09WPE{!xI8}{gNYX!-}fe=ir^(pS7W?=`vWkGp zF_`O|@O1zeQW^HSN;klo!y+$ePsX#IhLMGwf(t^_OY(^zAzj0-he~9zzQAydw~)zt z)c8H(asoEgC>RF~Oe-0Cqn_ot(in(+v@8fCxc5Y?3)OFm?WWX{A_64%t2B9 ziqwvT5P{h>r*xvE7>rNtD%&S}VPpiYzq}n1zGhcwB$o0S4u@O4 zL^2$(pDOQ2MMhEW$JqaS%U$@AUd75tKkQ>9AM&{hK#B*SdMV;0Dk}3SZx9&ASaG*O z3u;GSt%c-}WD7Tl;~|q4{TLHT<^Gbf`&35I78zdfkfJ0sWBe|>d~4jhhmUSp;;s0# zH1&HAv3X5Gefc^2)xoF8UQ)gb`<2ATcZBkWcM~mde`Gu4pN%_SGTy}a$RT?#2 zax{shuZGubLGiDV%>+-Ss}cIS0G+VGFZDv?^S+}O1l0yHEWZd(#x?{X!x=gC2E{1& zE?uN)nCOMWt_MQ(^njExfF3NldgW*&l>)KsWOLMi0CO*VE16quA#fvigW4lFiS_Oc zIWqd237n44RRGgmFNddfb9+ST4A?cAiEcc;AuXgap2is)QBalO#W~h52N1V4(D{o| zjph_i09sIV(kK{)iP0qe=6}}Bwj-PF)l(34GpOvd7ZS`dI2>*G;(H`jE$hb?^{Tvj zFQoGF80!+CxIyLTgO5k3%mWtQf0|<--hbbhjD{x-Z$YFkR3Th-IUuK6aS~Ie*<7d0 zps*Xz8&x4&`5ws(5W)~T!NfH9h|!y+^&RQ<67Eb-EsOxDKMnrYLA#Bg5H*QB7B8!k zZRofv2)5npOsa4vPQYD$zM=3yVUkkD4V%7bF{QjbV`7guw$-Q+jHcvmIuQ#|$d}@* 
zogo>~DaO$)3Wm(5eifnYjAm#@QT9lm5QpBVApkQ3=%inHOZo=d>%gLWKJ)^yT$Ao) zJ8>%k*xumK?o}Y4C@En^-rLJ@TPVmNw6`}Zj`>J;7#2lp>K936$d;b{_Pe9QNQ%;i(V&htcvtb#xn>% z8~EgKK*N6eqt1)dJ8BWw@-jT`A#E)aK~Y!86;8@^OhZDh^b|#Ov%GAhB+yvhESmqY zK$${!WIT1IR0JF-L73shF=(C1p4L&``(?IUS%QEzED(vr1-=G<>X=oZ>~J3>nsSE{OO5 zS|z2T@5i!HO6IRkZIL*A`m!NHUFOWi?Ygn%8VCe zH~6x=d2{K4Q+g;R<1B5;Xsh|@bqH%_-T1Pclpx`paE9(19HJ%#u{LsU$BB&Y@*Wt zu`O8)B<}VFEhCb;M3Wrmbz|XsnH}eF02o55!H+WeFiFu9VZUx22{_1OB3F*uz`&8i z(mIBbRQvJ1m>}6Wp@3@&CK9em(go%+7K}nbFv0KXR84fq=J^LO{+?Tw1{|l0^!?9l z$SKh@0;hs0YE^OSuYq;3RhcnKoS13^SoqW5n}y5{xo{UDC;RnBI0Rf}b4FUZ1FW=bFq$x?DOEQA|7x5St{6 zV3)q}^(3Zq>A*#xL7hhW6X_kM&~(7tZ%`r@;vumZ(aT_TsHozn<UpyUGHV)PI|!Xq{os?`r0qQI!GKw-K^efzmv>RukO13%P*`YGy*5el+2 zrtTQhNn4g+$>5JlXzJ2ffP*^9EA$;>9s?vIe(rOqi1gly_h6#*+C`Bj1hA??${fc| ziRiM)k#J1F#mt8U-wBnFdR+E8$wYbBh6?%%0$fTbV+M)W;od#@dCu&C8IVLN(w%9b zy?pMlm8!;x9mrao4ia6mQ;J%#L;GBjCVqVtiuKUs_i7H<~B?KktwKh7h8M zxshD|4IxC!(f)5-;s3!P4Kmk%IT5e(zafi%-rIi&;yQduu#o>l5ZC{8Fadsj!--~* U#?JuwlZfz9SJhUjQML*HAFbh`oB#j- literal 37938 zcma%j2UwHc(yfK22#6pc2tibe^o~?fY0?CxNe5|CqXY~Pq9D?H??{*4L8?gaQUe6( zMF=%?62g6h-?`^I=RfD(|9Mb}5Z*m|X4b4Vdp7>h)fC9C(qFxB;R2bG;#18F7YJ|{ zE?m4tLInH|F%)G8{B!Z0ro!V3Mg6x|ffoc;vZ}HdE|i9nVoeEw*H@er_1;~$Kyx4e z=c1D4z3mGZB0H3x%D#MWw9$Y;GLAxLlHp(APvExhSvvaF(8N9ah-~`^PSy4ChhdYY z!KWuL%{$_}#jdUnUQDLEL2^au!>4=q?wJx?@)jexZW2OtCG5M%$i)ukYt&bIgo$m9 zq<7cdw<;|MdQ%pr1Xj|M%5n zKk{^cnSXo@_%3ljR@`6TiGR;;=7b}vzrES}|NZ99RyLeksx;$m2^TK0_uD2;Nc(C+ z372&e5xep01-hjyHS1+PUQ0c(zGpLyF!v(~OsPx^gmg$$h4-)Boo!$lIx#yDgvi1rP|^7rm6ba?wR zxv`JefX(e-<#ZhpL^l3AFSP2WD{XbGCM)moryqoWp7f=Y(>l%Jtd+$-$73;cGwt0f z@SIMN^zdeAsT#HccY)0+YOlT9`#rerHI_esC2aPJMi2)U)e7s@<2Gl}suYCEj_)7n zR&DtF)AlK$v0`ITHuoCaS3_fP+s)=+(P_FVkKMkx*KLNbzivuyf4|(E5h_iOTRO8_ zaXm0Uv!=JVM?4>$MIQL`REEAEk$y@OaT4krWlVeAQXGh#Ls+~z9kg`jfh|&;?`50? 
z7ZsdMEs5k{Yi8@Ajpf8=lfhAp_kI18D=`jd8V>qpOB37ghrq-4QzO~b58gVk{@Kj@ z;zoKqWL2T@CR8w%m{j{09oMweOvgS(jIM3u;V+}Bq4&v#%AC8r>!0qoR-!6x5ud4U z9nQevVjm+BVj&Ak9jevb8}vd){pkmkp_9IyUe4Zs9NG!`CA%_o?q!cfiquIqT+Q`( z8946yM1B=vpyPXy8V{s$^sUwKq9s|+MdZ_SLsl9Nr%4?e_QyQ+R{FTeX$pSP>K6;h zCuG_mhp|Be-}qIbR1-yfkWA3U@a~{JMFiUVZQW-#q8Y1K50CO-*D?<7l-<~n8PY)e z0-JOf?o6Dl{Et}s{MCLwROwTpel_{+Qf#fsTy}@IkG@gCfV5hw2bVh9vHSJsbLZ|( z+7=V)7(p$XJOp*tuhJNt8F&~z;2UD4=n1Xo@Mc%5Ff@MGCp^a)Pa%VKvX?(uG>3{5 z8hR7W=#5CvYhbSH`5k~(*^gV~{x!rqL0uyPJ01Dn`8oq?CcXMKEOGEY@8(`I-jKe| zP>-(Et{|nRZgzZ(wn&~~cfBD6JjS%Wb{-F^)+@(5py6!L7ew7G=qD0o{>UulSR%~9 z1}QnCxRA(Q8MM7--Po+eqYZPqN{aBClljgq7yd_%b%Iu3O7FNHZO@rZ*F#Nxc=g|^ zI!$C-d4EGNGSr1T#>ueZ7SOjJzRlG6vBPq{j_sFfze7XqH__asON@cAr0QsI0sr-y zjf6+=8Q7tZ@pGo?aNgq=AxhP@uGyuZ zp0VL_b$_qpe`sOz{lpNpDR$pzeTUjO>{GMOUly+t{lm}y#FS3ZM6TU@Xx$R)BClb+bj|i8 zExmb^?^}&V@UKzFGMQx~Y4<@kc<>c`v9;_ZxeLV!J&; zQrLNWwz)kH^BN(BgisOBD94heSTe56<{$6}0TCa7glBRtg|g$V8)W=vN_MK;SoB$~PS2Go)Z^DFqtl*K^w*KYpH^Av#RRf!#D1tJJn3f&t^ z<&W9fmI@=Qjus;1bb3G`!95@lY)|7%t4=w_fL1?-mChm~C@NZj6l9CY*C{4PsM+QL zTT_Aex#JFeAsk$Iy>#Y zWnZ8n-L2>P_drI;43U-pCu7yW`ZobcMzp&!K*0&_?{@dzDtI{bJ|ihA_U#MYgLB9? 
zeQzJL_axO|$I0$i!t1EP?tC3jD==~&Kz;Aa#NuTWcxg?^gcC0guM7vxCq5aFcAf(PSrP zygt$T9nH-3nE0|l**c>-7kj6}U#`{d?2e2ZwHw}B{#0yk!e5ChDL_EV68|?#+2!Z_ zn-IouDK4=C3Qo|~Mrd0(@IYIO940O1^BgTPsn5a`Mu)a9|9YBHpuK5hyf#<_pV13)i)(Y{?ogkNn9@FecR58%y~=;Hiy6a>uHeD7kw3^DF~rs(rGCL$|tA z-XlDWeV%B9VG7h~97FrcyX50JBp`8?1%}1?)jS3ah4%8_^6e1x9>r6`U!G z_MQWXG+M#+=eIq)JMcgt=EI6pOBTOLLx*WE0}&V-;#5#UZ}k~-)o-0=f1K+K%vGcT zD%gyOm7+|viVPMQXs|ngNUaG3*KC}O={u5b*6;R@-CC*=e>qqrd_?{nLnpuS;-i zKUEKusKXFc_JSCAkZzq(*(D%pJi~sKsJ=u| z;_4YsciVq^lAVgcT%BbJ3nN+M8xR{b|`u=Qn?iS54_H+@w>Cr z!$XfUDUnc-TfnZ3*d&y=!1{y{pFoeS>oh_IHePub1H=Q*J-qiptDp7GiNwD}0o!3K zsQ8jbXYl}%3cFFy_R0|mf#=06e=>GCveefD=u z{|om+->PHnXQVN$1M%-1DR65;9s2m6Pkn0g@!W)Rd*r&DIjzUr<*ahqSaDUVv7+K8 z+=z#C;@xjK}8 z(q}_=U;Sljm^&!{GF2ew?@X(RuYUqJi1Cb;wmoZx2eT+A!@dzJ zVg@;9^aZ`pky(^^W6Kgq!n|HI@TOAICci z5bytDNX|ArJT-pzbXL7zYQzoGdyHv0p@#XKGv3do!Z>LdE*b9*+?)PnQ^RaXu#B|Z zciS-gY9Q%j-~4YgqlP^#%qpo#^k`vA`lp-30(PQ!zmyL8$M1BQysa6R?`%mrYXbJh z`;{)LPT8CGq70IOc(|8-0|Gu+D2B-Xfrfvl!`+eY^IKI&2U61GZ@blmXB!fSmD4!f z!r#){ayD`UZ-H2`#p$5@`fVe~s8(|fxBe?fTSO5zpn19WX4294Gk1~P3qe#R1oq4Cz@+u(BhyOPyk5BVt zX_$CieEb#C6E>?2GTb&pbu}J-MN< z7hmY^vJU{0Q!ahJsv-_ka^&hG;J~xShl5&j{<7(x2)$`1@ zx(fg7aX|JKTekh#&l6iCnAJNwBYVDXHOAZ?A+rs)(LAj=mNR<*^s9iUFv@S3e*myD z@IUIb(6`hHNAAncQN zA8S?ajuk=4-u|kE6bicsm)q5CPR=UBy3_uDP{S^CF851H#z8QM`*vz$eLh6~TOs*K z&b2>^X%`8V(Vw@QXL z%==fJt1ltYcj5G!Ih_vySVT5YVM?8K42eL_?3+GM>1N?ckjnr_ZR@c3IOP{9_g%xhK^RDVm%O z?WR?Qy5B5ilMe?Wi*$+%ZW#c*fpczGocC`5@!#kgG3IcKf)qFUE{3pa_fWK=ot+mj zO6!k!TmL0d_Z%B!Gs4<;~7Z#=?(nJ6WsQ<6@ zhX0u=HdzuX5GMoo)cq)Pj2>-NmBn-~`Vr*2}G|LQMObB4pTJWFyN^qM`Cu9iM(y@e1N zV+So|)<@Nm|8bbaX3gE+1<*3rRov-JTWr=9`3d&B7FoS!A1ZCV4u`Uo85~8##s-Vj zlf=F>i+W9VTmUKu?SG@$o$9OpGUK%nTitNSlsd7aYWO=<^I)`wTDLwM-n_ZpK?M*Y zz6S4}ob?;s_*eO)LcXo>DLX;6c4PuiN;3}l9|4-F4bV(2|5-D2wa&P2HPCuehFyit z>BkbEEf?Jl%}^n>`L%Z&#Q9jEI{%#0=6O}C3>8fy9Fp}_MP5IRbU z>!HPP9h{T>4crGjh8~-pEhsCS_lR7{;L_&lW{&_Pg+&Afhv~`%g7XdQytinZevLG{ 
zx@cM%co+Y*uhnOYI^7@*yleh0_rsIBS#h)5b1kMzB?GCI>_;2wg-JCU!uKY~yPtT+5vZa&5CbW&LS>wFh|Da2&E!2FVYmW9 zpsnE>{*SK4q}}G!H*-e4Z6$D$U=On*pwCBg_r%()&^%uV8n z89?n`v5E9uQuvU5EnRHT1-6hRKZ+jWDz8! zghbyNU?s|}x?zt{Y3z*`WD^6LUz~zNcMW?o`~lZHO1=RBY|M}4>3L(|A!f~WnHY|1BWz~Fm5pDS6$@cw6g5OY1kXR^U9tQtv-Y~G?2#u+jkD>{ z=9Q+0R9M6S4++jf1*UMT!->l#EGA2t$!#c$X?{|;SRYMILnXM9>}uq&AQs|kjBM@n z#U=#&6FL90T-(0Y)0(<-f+{AibKN>f`jiamub(Y$5}*>_Y4UV2-FAmJ@KLXxt(mW& zW6#QyXC^{VGi<_mR;#c4Uvbs^nT(;n*5K``MRRPU6cu(U*2+uf6zerh(XUH~?YE*K zIy>Q|@o8_GXm?z1X}rCVUFvyyGCN*s-eB-F`Cg@M;^MbiFVY`5<#9C+52xOFzw3X< z_NSu_j(n(_IJsX#Y>c+O{MQEG1IikE6X4zRHgiwKNk-q_I(I#~Y`CBoIckIhlDjG; z?^gOvn!TFo&7naQcbp_w!;BLDG+jbKGE9*j==w?JP_Q##dWr(_N)%)w6KNZMIm0VHXm#tTiB9fc#Wse)J}e5nciaLayQ&WyHUMW5s);h%1-f&`W;^q3&4Ap_{sG$Vm)If*l{(PfKHY-L zRM{68d=sbv8vou>?gs?KN1QwahrUx}SUEFvK-{?7l?wx+Z9Ws&jAyIe zV(_Psr80}2Yp zo^om-Yi4k)A4KM3urp^EcuSi7zN^GI9pJ+|61 z&m>BCjPV9x(++!_W#LV$*@^Wo%`Gg$pW7NZi)w!C5>hStQh+u$M!8>!?yO95S5Mu* zu3O2VHh!t%2F{LU<`}Q!3##*OBa(m5`4<&EDQTww!hsAGBnC!!&x?ADTI7Og*&N(U zYIk)msX7x!&AyOf7gY-u1V2B?Y06R6K?@11*Of3P+uO_WSN$44uJly%NMxO= zQ7|Y+b=V{v1-3WX7JXw4avwwFe5|Rl67?7tA=~(JlM3F1UE;;qGEi33=$o?jz*s(o z(mJmWs-!rj8TGazhwX))VNFrt%xb+Lp-HP$Se2_Y=8N|v71K!Hn!oY)ONW;!B!3=u zcvDEz;l>Dao5)_q87`XHt#q;NQb>nCqOxaGN&*f(c(4o&?8)tQW5bUU#NP3$K*|A< zG8yTd#sZEk{Xn7xIohhWUwjlk> zQZ~R|+t|b070hS32R}s_`(i37McK`rJAsuQob_$Zo-6`IZ++_w-NWnKR86_o{%u7q zXw;X~aI<^cagt!)LWWg(+)}zUjrAkzT#RNM9YA_Zx#L7ryHfhsZS|4}@jt(}Z7& zbF~t3G>&(RG*<27*`d1R&eJnBVcLG0*-3)rkp@m#*f&AbGgqs{3~AFjS6q|J#^h`Z zh0L|;O2Yn!`+h|1UIOrAnKRGReMHOY1mb`{RWZ$9MDSS>X{gmU&{*~95K69zKgwjb zAl=$TR`X)d5*-^Sngad5eJtvFuHOBIF@jv#x7AMnrBnNHYrh25Mf(qKyNdy%gKPb5 zpBtt-9cfDLbg|vAPA~jfRqj{LV-%&vnY1f89|W@p{aU{+S6a2q0;J6qCEV}YB9-BZ zfYM*zCqF6+Z9>8`*^m(@ZnTO4?qOpHq?+c`!tJKLA!S(MYI~ziCWHzu^a|M@YjbT3 zTE0=AzO$BJ{5f=4KSvZ<=jyl(oYw7!ykQcO?^bH52)Rl^UR4JnIw+Off*-Yj4yuSw z>*DDy!y-W|+2mlx8|53|$AbNABrf{pi(hk6*6i(>3~FjW-ZD1{FxsoV$S~x=f)6ex z@K|)KPG2mTjeCLmG-d9^fsk+G2gEZDjIU{hpv>wkh$d@6mfPiwL(!4;; 
za9Ue;jZ97$0KD198TqvWeIOJ2nfX#{HWysRy|#v`NSZ_D!s6=aiW=gK6E~z{pgZ4# z169t#q$7OM{M59QsX%QzH4e0aey~0M5J+lt^X~VFErgM~{aqU09R63Av#=WSgdCe< zyAs7YPfHOCd`5cKKQ5@lB^)O zHJs*fvNQL5#ffw`51EyX9Z56hnSugf!z#9HIM4lj<9*t~pt_m`TZ01$*WWeIg{I-a zq0`SVPo@!3~hcA1nprr?NzRp<#6|3DExZe>)y?N(ZUFjnx4>*GV z!De%A<1~$Yn^fl{B2gu#hFH&Rreb)~UuHJ?DsqKQ=2*!Qb>V3s#9@6o(UDv&MREcI zp<$Y-Zgb#o9G8D`vcwCYKxTOU&RtZ!n{C<=eqZseJe>c+tj$uC0- z&dW-Aftc;E+-k*lXu-223(EX+uv(4?E82@^HH>cq8UBpcJH2?G=_)re=8g0_Z=5?v zU~f(XOo86pXMKEJ-YV631M{?p4TnDiBs-kzf8e?>KfB0lR4+C~anWV5Gklu&veElF zAJ3KVx=#XlO-|=&n|^O-P;9o51xxrB$OZ_F^KO`(d|OE&1xO;(X0`Poi(_&@E%OB= z<9$>G_%%mUcpF&}h<)}7KgO^pUNABjkOlybiAxGX9vI8?e)J)^`KN*2T>KcBXB&?MAOg8UZHpU3_A5!pB;Fs*`Fd;RYww4eG7@a=!S)6f z8XC%EFIRq6^X-2e&fI3pcDvkWID0zAIX885BQL+`s*&gM(lqbHm-1J?%3Ks(1(*kd zU`DCTk`wULll=zni7(_pi3l9d6nLi+{((#mU*!_6Z48|}W&{fKxTrEH_awan6{MWB zYU!KHVB4Y>UbMGbR>Us1^3@yeBGMv?d}b$yLWH_j2@eJuQ{xnVUY7ghsnc zvbbUbkz9g5-<3H#9&m5E_C#u3iR_xOF8EPHIyD2noc@UYuem zTH0_lTJCi4wV_`7pK{o+^u>CLfmUvTiFyMCc_FkZJJD!FJ9y+{;qQK3N<@8~YRB1O zo?=~#l*iz_GFF9rCM9=YGQ(ky?;-*OuFKZBY^=r7E+`vM?L+1`h-~YHjwGY5XinKL zkKg3fCcujocYTE?KaGKzh964XMd$!eDV8fSomMdN*maR0y&Sqg=vszyD~z(=Rq=Mr*Su%dZY7 z6Bi?Zsnq6j0*7>8Ys-RU9-T*Fm-1kZj;O^vQ`9O7qQQD@o-qDpFpMQdO$~d|Rp+70 zK#CL)^=TS znXP)s03w(V%@+je0vZuo>hg*J4+yK>WjO1EA}-W9RZMzPKq9wbWEd6f{=BMKM`@yl zSZi-TkDcK#&2F8l%h_d~cR+_}ftk)}C1fF%Tc1V}XxcOw%fCY}c5SaxN-!;4ifWn? z?m~X~Vi)EHh~pMvb472Tj9v5(Qr+iw6?7OrmzWzP`x$!Rz)Gi(cw! 
zY0?G__0gxLUraO)OqzLczW)TLCK2-mYk@;Wg9Ko0?Sco({`EhNN>~(iPMlSBV)hjx zoh`Kf=V8`*^8z-}9vef9BC)V+pp4fhAG_B@O53zK z%m?&0`ZjdLOH_%rGZk(HoO}}aDMBYyz!ONrP5;drMvdlXDEaaLUqV>_kgM^rFhZ#SiS#uW?dU=lQ`b z(n2SZPdWg>nORB8rsn)=mOJAN=&P@IB?bmZ9>x#(*|qD2-}Vk$l!KD-J60w ztC>FjAW3+3U91NSabc1 zj=#o-KKXobZ9-9jrWGf?Q0iFZvTQ~JI^Dwg^2?i; zfZ28_Gne?BJ-{%iU!XfSv38N07^4GKQ5p!%Sf`%ESvHTlh|t1WU>T?-BeQTE%9~LdbN?Yj(kpZ9IFq#K3%$nR_((!l;-> zGLeOq@es>)^!dr5(;0p)qEc|6bLj0#R;WNY8X@%-+V#QBX~wDl?~7y>aHPLLUP zJ#6iMt~JH^OlULuOZRwebY_iCR363+;KvR1FK=OHM> zjm&IzYKa0DI+_X739owS5@vWjK3=%=a-@SRr4Mm%B((~WCUtQTx7bpAxe;kYYYk|h zLe?|-M^tTkDS<=x0stx&z#3Tope>IT2hc~ED4!}@8zmi=Cpwlp#7mBOMFmf+l<_ZL zF>oqJgLx@zz{Mn5?TuWo{oSFt8O1r_!kZRZYvXKXf?CLPd_GBdfaQh%GYmM2WIR1INwN*K}%@Uh#7U8Qg_wjn@yCy8y zwXhJ2EeyDqdfktNMcWgHKC2MleoBF18M$u9lS$j~o13a6I1eVUaVfag7VTIjq?KgR zz~1+GEO1Ear!!c)QaAYkXt0>uf4d4mGC+fev4cd0dX+MdYRedEBiu~1o?A@yJRYlw z^{r?$x({72(q8+G0t1XEBlqrs|GG+FD~S5E+x&Kpdg^A=lI6?!`Ir?R#7gVB&xyVY)4q-}Xoh1*Lxz&~V@ zZ>MWweZLYk10-IkYXc6m@PLV8Ge|XHbyf0Bh^et^4%9|!_Kdg|ANqsvCSWzNH6 z8eJ{L0Be|y(RWzQ2^=h0O|wY5>@Z%^tn3P?8`JNF0kp;*gUTRt{`oS-p$1!f8|^Dg zg8UZpPTUMC={V-+TDJ^15(%3guql2YN_P1u0^fm2Xvikz8!|#Un&#Rt<3&p6I$xh4+LW&la{8!)VnXC zVJ zfO$EwI8jHVI1%Fy9-H4*+?t{;56)htv;(d>L|-$P}o z6*R8H^i4PaD!tex`(yOdY1s_GXy$BbEJ`c%*jk6$b&bDtws9thN>Ij-n|*2`X?noy zq+7cg%Lg3|fmop*KME$w1t!ct!}Cd|2&_d-kc>MI5XF3{VV;aE&n^`rc#9-x461hG zXUpUgu=$B10fJ&qvyb9L&(S!Im}%u&MY4z~(w8p^S<;RmM7I zS`D?j)kOvRFT9*BiVZ6abY4lVr4`jEBw!0KD>q%WzXf!g1)cSyUrrzv=8d$JYv~5Z zbt*s8w#5Ujk3-H-ttnZE#C_Qdb0(`gJ4*_pilyV_zbyFc<~}oc0q2%qs$IpM-Q3@N zLTes~PQTB9>wH`PrRU`fc|SxnBt+Ty$xu7>h4!Tz9ePmiM_QCM4|QV5EkiBpP&&Gd z-Pczl^YAY7rFE1Pw?)C}dPf(H3w3Aa92s8@c0`thzIzGLc=ObFFdv;>VL#u?NQ?4Q z2JYHGOB-Jj4@GZ`MUTXu#RhvX{EP_0~U?G97PNl z<>a$)9QZHuRZ_uCSOPxQ^q#OfTf-C7^PXO^KQJ&Xne+Kr=q_I4Jh$$(3^+1I#J^lDubw^l zuA9a)$*#vbt??2(V-V9ke;GhL?LzJtP8b>LwY?W42YR2OK(r=F>DVKpwL@!jB{sEG zCmN+6Y--6(G^X6&)N0EeXG>^)sgAuENFPc3;|q!N1JrY+wUTop2DGn! 
zY^)DB4qk|UIhOu0v7$^m#vbLOk)8f}N9ktVOtxWKa`h&3W=uM(t0}-HP>ewi&=@wY zhEbl|&BP7H#MfzS@Ac;ni1Dc8NGwT8 z6FYpH9((}re)!Xg_eT_A8 z>vCr#c*1OXg~=dllHUd4Ol|P!Ra)2N&cpVbk-8}tqC41VO1M)cX&a`Lsu##X(o;zB z1QuxyZ^D^cp5^>jte!6n)7?^fK%pbaWKhu6G$dr0w3O8=XTy|NGocX$4k%|a1I9{% z>Vx@Sdt~;vHlhD{1YLDFjE2@ox=ukr1f!-6P zNcb@rBMZ<<4S-Y9d~DQPwp1`ue3vv|&|9Mm_)T)~S^;m+WgvL7Q2<}2t1jwAh3177 zt$!{OnhEj-w>P9OuA*oT$3YF+)l7ZE0Gu*G@?WIt019XEXKSjrVc0LAE;MlO{QS;s zy}kC5@(8^!-QbRaOqE-yubG4kD^fnJgnm2%6v!d(g>U+a_u|S3IB0CD$-8E1>TX<eaRO-e~DT;i0TG!_F^u4ABJZ|d1^#&dX)dUL3?MIF%z6=jdTD-YNCoTTtIx&*;24YR$SPE@^x+Xm6 z&%1|`v(cV6OXcjJ3e+5C4@r-=&!! z!fDaK1rGP7Mqoq+rw=G6II{G=v1{)2rJ zRj1+jz@>|{2|QSNz5t?JuIBmE!|7?7JJWN&2%RRPetc|ypwMTB1eOsLgybMs)#a=3P$wAD08Nk{RgxRJT zuL=^F3=m9fFj;t7TgF%`&;^PqZWBX;VvAIEJH*0@daYD}DSVurQ&n z@C;IB1=tU{VvMfHM?uFFy&C)s%HFX!o7vF)ezt}5;7c3tNK+GXVW!P@n+bOl*oG3G z0;!vvX&9AlT3YNU7FfqF1x3H!LJaCo)3|nU^mj_A6@*+?#roIlf*3@HqWcg4XNapP zO3KV8SnbA+J;nQ2G%~V4C0~M01|00n~A!*C!h@ zS^*3cicT?+X)da>Rh%9*tbgylbhycA78Iz_DBaFdhy~3N5isG@AF+sqi7xpQu`|W; zEaU?hZbS`)CWkU5Spy7GgD|CbJxJIF|5_w?SpMUI>uzdX4a{Nsi%Gvl`Ke(fTzc)Fd&*RH>=o_eb+17EL26`{@&B|2%Y>qbfQJksLUd=G1-2{USNgS_|Ta;o!+ zv0oqwDrgfH6r@`2LhR-iADd`iECXCrydphFB4NJyW!6I#M>cp5ETZ<_g4<1l-eQV2 z3#{@gN%iE%ODZ^UJe zVk-KYG2#nGG*hS!MhTY@ro*AM%}76(nTPc}FX3iDgp8~yAPeT2Iv!r2VlD=Th8jV9 zMim7~cn|>Te_$*bLM>5kWn=rFz|} zNO!c4e2}JKK;`OcwAe67bsB&3Fxcyg@$qf)ChKag#v<-v&n>EBygxt1Ek6e{s zKxkF5!QWxRA7}X-eupihrWq~5UvFLwA?<#|_x5tst@L1ofNqEB?d$B6EPhYwo&q@7MADKJ>1CcWbm+ZtL;Im&1fHPs|$mD=&;H7E^GeVjyT z{!73llG>_*v^xz@ulBlU@%Mn-9*uT*$ohrzTlkY{7YFj&Vm~n!0*X2@)vUjPvUShq z*;F}04jtGsNq4H;)1o~%PRW`h5EgZ@NoBs?ZztWqiQ4XhQH_)7QEa8?H2%b)c)!K) z${~cO*O1BrxJOiZ^{&@-+V>7&t-7?V55l&|)H7OPWerygxMD!G4Zw}2z3%T-cs1nu z^pyG2b}%YXNt$sKa7fMy)0?2vW9T8#iczfq7eYv80}-Xf7&p>t93gg>N&Q(;p)SAs zoQcTSL`D&w8yHsMJGK=Z%l?)or&gMZIwCqcui|U`ZISI?IaOaoYyrOvV1n1%*+Qh( z7<}TOIYy0l47F;YJEu;6N+j4O4w!n1#861>J$EeLp$vva)`xSo2eRUBI1!sze&68N z74HyZR(yrSR{AVo(Ui1k|D}f;XqzSFt^X;JM^6$qc){Xq8D(dZ{t02H7c_FvvOOwu 
zIxORZ(Ygq!Vhdv@Uhz(|h`dxEvQiZOeNq>)usmR5roQMT%_>|{4R~+I6=ePCH1r)g z!HPk;jz^-<*UMuatQ`I9-tIXkqdLYL8-_CAdA#(WK4Sxyd)FsAS?QkRf$JYU$ddZi z=jah;l2j{@$01uQ0CFjs7n;6Tpj12LA$TQ$<}gmNx>38jDk#vs^;V$N7@|YTVEr!6 zZ}kxXa^uMtQBfd|ZP^mw`hpWfVAL34@&LM>M6HOqgt?9JC%~Tf4;T1N8=vYC4Y4xU zU5xN>tlOyCJ|npxDmG3#2b_F?JWKn$zTNt!=zdOr_?RA!@z_Qw;EX#ngs!B?jJk*UeBm5zBkm*g)tDSLqd<*a;4VEO4q+fyXZ zGyf+GAL=7p_4d-8Z?6Q_Z+NIYE@=OP*L^6#NTA6#Q7f<(3SR3L!*?VmN_VMqcH8Y$ zhk<&m^b;5r8EiBXRsx6WMvfp*tlS6wtFlyZekaPC2##a=glK=#;=DW0$C;Ho7I}Qa zsWX{8kLEn%YuuG@?LJO$`Dg>BDj^}kwx5;$MiKc5NGt>M;&URO?iO2)Cz}V=F#ru} zgClqCPKniYgx_u#Fc5iS+x`@$ds9hTmJUBA*oy(iuK+Cq0+`m>zP_va2yPEqO}t`I zpd{n>$4(kwca2xHdToG#QKa+Wne{j!`v7SAEnWDnKc!a*m#Ytq#5}1LkcgrR9Cll% zU|qvAN*l8&@7jv{tM}WaFj8NO5nX_;%_dgG%9W@oHG-4Tv(4Ufj)TnFL_>3r2iBAy z1(27GnJ(~LpnR4nV!s9&Ky=B=ri}2?r`6Py*H3g@Vp|Gq@e()w^Lrk_~iVqb{T@E`s>17>kf zqrU^!84s(1h$V9HMs5cqS7djUj}B zEMqzF$s{751vnfX#*2mD)Qbv_6j8V9UGTrf#A!yul|GUg=Jmz!Hzmo?rRQVJZIf(M z7cGp}!xfmWR4eS3OGaLzMo##iDl9>P-(iAd9IsH%I3I^ZEF7g%TKq_r%rdiBaHQdn zZe#Nh?k1w)vcEtt$CMhh5dEDUAGXK&GYoa7g9yj5Ok;`@Pa3cY2kV{wGQPERA&Qa0 z2Vgt^N2Z^Pc$C$`5T)Ip7PZ?zWcFJ2|Qs8{DaS3koK0@()>Q?45QP5_RzdAX46#&l4RA1g!LsSwmy)|gC4z4TX1^mj3U z`QvaBI$92At37p88%-Wd*|Y1UR*0VX10|Wx2mP#Vz_G7(nNd_oIyso8fqh9SJUvLi zU|H@QXL;K{Ixk&7m*^s18pgZUb7XJ^9sxXnIksdocXb70_BBbhXIyJdRpb&A{CSKKg%rAPC+g3O7AW2iIw zWD&SHK}u}V6PWX%?D+_r$dT6ZGZT8icaZU|6zGeB)8<%5tZq`@ft2crFkF)ueE?>E z(2tH-Nf+uCYmIl;=;j1Nd;A7E?rR4Ezr6=QRw~F{!-r|ywPl^}Up&=`+3|I@5O5)1 zD-l!muav{LMYGB9Q&Nz5Ci*_~Jnz}STf6wi6x#09n9`8_6gf=%iNB2aea0HaoJI27 zLjJb^Z9c4OfAYbJcds+BzC7Z-VmK4y%t704ED0;-3{dvIV~@aJ4AvFsq1{&FoV3-` zZnlm+e3`b#CiqLw-nbt=q!=U#E@RVr`N|Xz>plL)Yocr9FK(p*1V}>ipq;^S-TrH+ z$Q7Yh@TiFE=2QvpY=;#2dHryp{*D;NYa)kAXjR7`DL7FQRv#qB`D{8+lP(8fsC^;e zjB|@gA&<|1CiipsC(Sj^7AZTd&zKDL#y~^SJ5Pm8Z;stGRJv6v{)M~x4u1Ac%?%6= ztw*=|mcgV*@rz8lY{L@X3Uu8l$Gg0n3l($j@##JcWs;giuDxhO8Q91Myp2>4dZZzm zPvt60%cXlASM_$Od%P^-Go${~qwn|&YQoxzE2}xFoA!(!np70SLM2uo=|P|Qpm!wN ze=Xv5uo&#b*|GRy#wAz!dYbvGi1U%ZKy|%T=Ff%FT#|-t!=99OD`;0~S8uDcEm{%c 
znZZnB(21S~Sj0hoAH1gNBgXk8CQvi201&pDavV#AyOtedgRQBZf&1R#_y#|#;lnre zx8#5xoR3T`|Dq+Ci!E-7h{Mp9|%21#iF0RgE+Dc#b|;>-tifBSvi`}=u*oN@MGICQh%dFGt= zy!*bcwaIk{Q2(kI^eoM*+$Qo6a5Zom`nU$wq2{lj*R;lCQcUpI!@6mnHi!XLb^_6( zPPCS!W=J!U0E9mv8Vo|07kLVgripeUK1S)gekdz(r2k^#a`@wTcBbZNY+Rqh*|c-1 zzAk1nHZ_}{f^qVaDg3If5*~;!CM$+G71B;psRw!Z^KIJ;ZG|ycdTihknAf?Kgfo*T z%hN5swIW`eHya2lf`xf4A$;4ohH}e|H1W*>H{+{WoJT{7APPK^(hYfJo=oBhh>;Doppj0H3Z~gCRrAL}TTtwQ>B3jr zm5J3e#z@|Sm|%t@llL-lf&LXunyMgedBX_a>c$j#qs2#}w3R;`kdi@~3JCn@|+{tM-h!al0xOi4)b7L#RP1Y;t3%$zk` zTG&duzByVYnLWt2E77dFcb&#gs%*AjO3QKNBEw8J+ox1@6qE8`pqhr0GQIGPt3XkX zUFGgt3pV+Dk3%NqXVCaqeA^GdcB1t6!XR&z*XooM|(SIXXC)=X$pW zHORQ$3buh)4?37vLbY9aib-l@r+lT1yyk%Ro$z3dCkXD-gQW2cF->3P54#=acv56> za?S|Q#D8h?M_yJv*JG1N&N;~p2Mbx}(9iGpoenA9R#7|uB{El7wO+TgIIm{f>$J&G zA^eETcm%=bl-|GPdo^j9uXaz~I{4Y@HBJQAhh3mVf#7*yp_lgdy*ErH@)?fKeZASa zkYt5aBZEc=YCj9X2F@X8#+JTWkNP*M`<}EBpwxVAreE?d%4nbMJG$~zQx}2xCRr$m zxCLCgCFc`y%S`D&24gx@olT_sRdFTWZYM+A?ZW3n|>`p&dDABFKaC>O{;5=fE;t37S&QzSqh#h-<* z9%rbtspbIO?_7;L#!HzCNyQ2vTMwo+s(zKGJ4BCngOAB`H8s3)ZH~l0&;WQZ*BB5r zw{(*9o9H5JGyHg4g!x7*jA$I9Zr<9)ReA*q(nXc$vD^ydy(eK^Y|+xaXFfrb)$C6F z_-Z|q7u^U7$&VSt*0lRP*yls3r|o+K*rY$lH?rgpX70Jyu3(*|naCH+V0o%TqLlcc zcCeY5WS3`AjB1vsKYOScp&}|zEy~^wl>b-#2m1iot~zHLstZcRY*L6VWoX5z62K|b z`Lkm9k<9|A!^#C9e&765cT8|NGduGn)argJiE-5%z`!q8aCOqaZQ!9m-4A=jC!gp; z)#AG=b>&9-Nt!rgr?Q09TM#IAb7f#m`ydWnt zuzKkZng^!K4kz4k6Cc_SxEgiun;l&!tt6|4*$aF=)=*unrr-12QepC*_C}U{IzpR- zD~S~3r5m0vMeV1f7LJqyfc8sG)Kfh@+LmaOTXE4AXb8oKiR>p#UBE5tqxUVUu(Eq3 zAEyzh7k=d+My7Yz+l;zDH%kn7oMble7}50wkcB=q-!^l9wDK);j_xDz;YsQM?(OLt z=1W9LOe&mfQZ1zluv1KDTeP*XvWgX~@F|NDTQB~BGwm&G9RALg@O z`^-$(U7?J_EQ-2%p~ryg>NdP1{5JVC(SwJZ{+2>+4d#?=nlAOd>~>NlPM|B+sg`&W z6jSheH4BU+nEcv}W{tFPaOL;+9s%9q>Sb24u)RMeH8{low8;vuqH;dfx`?3f5RHt9sv^lRN*o@#;v;07F z&!KUcG}y7`2GytoI)!hGiK~kHeS9E*mC17Uu43*8Xl6NDFHE3r(G(z`?o!SL7(F}s zZ^{>QD;>4{!}=I^`y1a~wRK!;B5N4m*!A4fEpIoUU!CT2encF^7_VDtw^d5qaxQ>n 
zw#478$65@W#%++3$fOVE&)PSUQoTf1=+l?UE|X<)(kDwnpLTM@#-5lYYR|oGeH0CLMu*L$^Y!S3H61VFx#U*WzilU{Flm zJeC&-XEmSArIqG9ZQx*v8^DI8#aua>b2Gjw3~z*_#Aa3gD(GC3YWxA3>z@W*W%gjT ziXUJM9}Sw05b1Zjbp~$KLEZz3mRt{UdTyaRCHTG_s^93$Ov0VW#`E(j%Cxq7Btc!E z1YOn%{yb>!zBiTH zON3epyNU?D#3*hsSwGYkVo#bdi7S~cYk<&b>7?LkKX)sLTb~%0C42`AK&+tPM3d&0 z5*!6BMqP}G2}_n8ib{#jz|emWW3Iw8v=yfp-k(dZgYc1t&3V!l;_>v?@_BacLn=L3 zG|UlI+i3IZoV|h#Z5uJt(YJU=HPZE)7_h>Rcyq86OU8Tqf32SyV|kDCe;k9pVY4>wH!y@$7SSY2~XkJ1+pZT`1svq zl_3encxHAu+a`;crgt+byB;NGI_v<{AXAtK#l0r|g-%cDeN zI!Df=dyB!hq>0vsbwxZ3C@L402bTo|*Gw0202-7H)Ar4L%`wWvI~@6mP9bJ38%*3( zqoq*AyPB?0g+cl<&)rBKzuo$U`)>>V$7qgsTgH<69oT6B97A{QbCpe1&)Y7~Hs_QS zUl>6c7GP?4Opq3&4;>f>L80ncV?_%EwY}>X)zQ{RT3=ogn!ekTB=~f!-KU7_wf&>G z%Li?EFMlZpr%>GI%QlwHw7Z^d2FyabI890^bM<`q%s zVLB?k#r(Rmyw!F@+8{qV%agec*e{KYL&h2N;xiR3| z)XyXjQSY=Z0#d7e*68J1B?B8ZZj~#Y1{is^%yEF^LBkPuy%f?qMJ6`q4dbDmT zJ&Gf_nZ?g6tAtb|ZqrZaXLc$|W|C-nLfWPi9w~9su>FME2fx2Rg}E||d2t(zHboQn zTdjZT1M(l_a_=h)G}6z7>o`BV;KqoLMS{-@9FgbfQ!UnDG-0w3>$U#Tf_h!1YF7Ou z^^0$AWBuL4xXRmDmr%V6kH6syo}0AIXK6W|s5K5|eUAsVtv=k`-O59a9^8pf%A*b6 zf|0(E@y3q5ET>im@<%wLhu;aEoapbz<{Lv3=qK~bN1;IkCLoEmY}xvXfJL+G&D=Q& zu>U|&aS_i?N?eVC`ct)tC*1m&gEcztzwz(kI?-Ab9e9?q70+H~y(`?TjpY~*&JkiplkDFqKhqK?u({?dIG+14;{H?{zv{uo^`t^&v_C-+_aYv&`?}c%aan( zp8U*}E6UV51zLRILfEow+-YSwS;)a>ps8MhDV2(&%MumJ0r=+3n)4u6RB_;_TA=8Y zIsX}mRhs}g*?Yv)hcCv0r@shrG<(^FoSLpDx}4HhSBqWA~L zSIRpG3{pRAjPm$0U*M&=_5})Wg*TsYxSUcDC!e>$! 
z@O!JKs+m~bjS@cjm$WK)-4?Hv6732=#4iUbc zvD5d0gW6rKIE?krsQ0bHQ0JbkJM7P=J84h0UQ5Xi!pcl}*=Bc!{J`*Q-U7b)Bq&pC ztr$@Yy~qcx%~gK+AjR>tmL-q&(R>)!bs@_mV($LbcdQb=rS~7qw}vq~)n07)U?|#O zdey6BNML~}277j}bB75T83BrHRgae^9Y7JW-KmIP6~Bh4Jk@b9UDL^6`rE-2CTwm>0ax0a>Kyow>nS) zjYE{p??uE7T>LngE!F@TY`Sy8U*M!skWuoXT!qu#gg^WC4n>LHeN~b7c;~45mkQU7 zp@9A=fL9))K06`^cm4o6G7}b$Ccr%|u}pAYZyoD@QMh~G#)D;og?WluXW^Fz z$jBWNDK+3GA;mFj)D~Yse-LOU(95O*vwL?9Vj!EA0h&mau>0+LqM=Djv~8Gi;w1^h zRwE5nw}ABsU9-(xF}|?tY&n<;#En7fU3xaq(H7O1aeaE9mTfhf-69dDGk82FC5o2| zy0JM>O9d^|;Ju6&?_}#%@&XAe6nT$d^8nP-ceXZ~&5XM%9~KvYtjgxjhvm{?qs4aQ zp$Gurd0>d2e0Zb6jajlSZV}|^g&y^##j@p)>3ARr$ zR|1yo15lL*0Yv>n9N+6dQcG#QHlh&s9uIRitnu478o4e8P&LRRv4=lAH(i=&((E^9 znhZm(LSyW~BKsMPG7ihj03~O;U+uf|;EyH$*MUYN!=RTOqze(4OQQzi{1n)h$++MU z;KMvECE}o6e2CLe$;h?`^zZtb!xywmU3}&n(0Y``bil6|W}tJbitQ$XJ3m?5h?F&~S-Tb~EbGllpo+$0zaaMNBO`8Ey;Q(gp22swjA36@ zRbg{JbMdryD$G6?qypuS zFOz3D*u)B|VfkX5aDO6Fn7CiMeRhqNi;lj*S5e2M6vKE%Qsz>wXLSdQ`o|SJS+#m+ z-C1Sy5_wMREgIiIWpOv_s++fDZ@`B&tM+M**Jv7+#dMd-%C~ewA@T7I$>5juIGQlh z?`5nDptTgxng#9CEPR4J`P7IV(kcR#6B2(vqRgg@4orS47JoG4H!^?EY_;I)#5+TZ@6I*G6;H`YdUp7wk0iX00Q&_P?Tp+D=tW_mHg>4a?Z<>B|T8N#J z0N%;Wo;h1jMDm@lJkUsp87eeL|p3Vl25WcSsD!L{|QNVIe!{}IG}@U;DxUy_y)*y`y({L z1OIhJs@6rz+sGEWD4zf}kktN{ej(Jh* z7uPQxV{r)}V{vYa)G6xnTB36qwt8zaw>#H&9VHa&( zbDF3=_;h z@6Io4^eA>ElV7%C=yK{ZSsHRxMsG8T2N#l^MfK~tcjrWVB5GwerG{Hb=4A|LNz#DR zpsfGHXo;vk70lPz$W>k{pN`btrwX@yy~L>(a1p(I3eNFxc%W2cb7Vp8R3W|#DF<)u zs-W$UD_>(P426!wzs!sSNT-Xa;0;{GySCUh(q+iW<{Cn?9}ri;aYxjm1*hqP>;C3^ zjVRb^H_+XOPuR&G=G4Daz~)oLAByHV{!sxM!iK*0q~iB7{ir>5EoZ8Y*JqP~qN4-b zmnZ(iEp&j*v_*U>v#BjSPP8HgxINDV3N~`e`3^1@AOc+Oi7_)^maMSsGc00R(f*g; zqA@bslsbc&RC{uwC`+n~JKaZYdYjjTmh0<3tEm$)L1B&P(^$pxW4gqQy9Qv=(S@e` z)x$}4bENcJTO9Q`&Q_dFB-_0uA!FSurpU$;#Kc6gPZ5A;d9_s>OLY!2x8U_2-+r96 zc`eFcjqGQH#Q;I!*xu1apel2rH>}8io>k80l>8OFRSd!`EzULXwYJ~`lz^rB*>Has z%l4dF&~M;aDRO;dvjt#E?{D#GSG_A#?ZeYuS& 
z&j?%9=PfQ%Wp@@ME{K+k%vJC^9W0j}kj;)Pab&yL?A3q$1`}Or9Cnz6ebfhSS}RSqXnj)xvm5~BAU(H%*uYtQ4ml+jL^Jq@_3jLD|EZHKhm8`QME3ajx-=L+w9j&Nk~InskPQN-bksqg{72P> z!tVAbx3f6z5L!q~Ofcr@sM{KdU?>jq*;{Phq=*&1RdfnQeR&xP6t>Ll^fEbjXt|C9{6UfT2XF^xmEOfOwoZ-VpVfN zinx}pLCnEtfNSD5Bw>o`N5ises1(hp!*>|k!nerZ zbQlmB$z8D;@>j8mLhl#*v00mVsd)`76-B;12PhEdyf0GW1)_=q#|OP#lK1^BrGlxK zr+e5!(YT5q*Qa(m_{J&6gla%`(H)2S#q}2;P0~<%n7Dk{=~}w^8Vp=98o${!Mp|79 zU3~inPT?-0{(~^m!hu1f;z;U4;3PlDYTC5UPPK+r$Hn^o-syYpjW~55do}fRXV}k^ zW><-5*!1PpO>EiYL|HpBX*f2h)D>E?d;Se+u3L>0VCiMl`mp@hZog)K<*?PKUd?W| zfyd70wa0;`3l_SfU^2}a^j)LpX%sCj;rf%a$YoDHsq%u)$}-3;YB*)fS~uUrm@x$a z<=4QuLL0;Vzp#XWEx&k$UPOtL%X((rpe~cbqlseWpxla`pwG70Wh7EmB$7wEy~om@ z93awx{Jk2F!f*Ns2(%FA6PXCuW<7p!Q#d>{D1AG+JgY~#H~tj8;=0xCZo7KzI~M*~ z+Ddf7JC^YbWnpLrj2RXS@rZr>`l7p6EHxpil|zBZ0l&+7flzDhtw5z;3^P`jsm#(e zUw=u{0WW5P%r{uB85D@%?4h@8Pk(Rp07EW$B=Oj7I}7>jxFKRxO*exZAp{dK@yg7%!ggaM+PbK)R8m}e6Roj ze!jp?%Y2RTH;D`coPT!8-!K7%*~17phtGo|-b&t(dRYjWLnt5oEsOli4WntT3L8Ye z^?1$T`T%@Nsz&|^qvSLZ3gE?2JKuI{y;BE|O(2-jv$t51tTDvUM0xxNp$NnW@cQ~1 zvgs2%Di7a;SA;}+MQdrY>Qh)%EUhHCP+MdnB!OXn1FUX8z23wj1Sj2}G&asL6ItQK zeLnC$O)jj_lF_UZ;PbGTezv99Lr+bn4ExHa5}_WoDIAVWYkSD@QpH0+qeOv*aeray zkGM&?H2&7lkK{MWx!Yp7l43?Qp${iQjj;0FK+R8V)p-rnd?8O33in?RT+DS1 zJL23~AChE3eU3T&K1_euZ9*^Y3?#!IaGR_LLfj^%SAbBgDxofGBG&nD>CVR^%R@{jQ63L+|tUro`~*<*mOgrO!a7?F>FH0<+lw{_l> zEyH@=FFMYjtkX}vpx5<_d?$G2y7f@_+{-}7&*M)S)j_lpFWg{J{E zm*+Z|!-1z{PoAosM6K)Ay1H0=V6Y4~k6Whqm0`eGAXj9`^6)#gcbtD? 
z?D6V_56#!6Pe?!pPO$fXUV+CinI+jYpXplMjWGbfs-IO7K#%{UVrUpawk!=_V^+&P zzXG_3Bq_k+!#^lw$3xg9yO^C?8tq8%@Z>n#vM`?ifJJ_;HU?TGIzpN$AduqNb@jhg zE4YF!jh}Af=U(Xay>(wD0HS!&O8-~g)UaxDZr&Jb&3OtawZIz`ulH}M`3@`C{TQRt z4obTUe!p!=m{NGm?r<#%rLl`oW_h8eWdEso!2h5zLnJL{aj(#*2gE&ZYAD#_Yh_A~ zD7*MeV+;MMv4;SZTJ-jv0Ue?dFoT_wZ@XwR{`T#Xt-S!zf%b;ksGt=%2NmpqzQ(i>8b)({x!*JJ|RB*C{6{)w9e z0LfeZ6mjbuXCB^hhHLPWXX>|pi5SkG>IAeL5cWP6HtF{@4;ykE`1(#JJAU<%p-zoF zpVPI%HoH)-dqgOxK!oq_Vmw6Xp2vnt`#Ux{w7WQ9cZuj`S;wej^#M(Z%J%IhpTqCT zz|JTAVdrN??Z*jvjpjojkE?v6Q&1ZoJ{0Nnj!pGjDN+BGfcCQanOrvw3BgvGcfPfz z2@VV_b8&BM>!PAC`G1FBhoo8TM4I3mChmFcPPDwW0IWR0Nz?Y1DslM2|BL19gm{JW zy8eZqknIo-rjBDu;0Yhd`!nW@U|8L+Bac;bFY5`T<}IGJGSK~&|JIaskivOwJzd*8 zJkfD!TtJ*?kqB=hFF8E@qUx_xof7}!R6he$vHId%z;dxV|AF@! z`0&?MyzF=jfbYp$TS`8iml^-~=1$`{Kzj86^VRup^|f9w?H(qjYe5hGi(%wM7i0Mb z`-T*ey0ZV*WB);zK10Pllze1x-Y7(L!gDZ31;|g}PlsCpF`dzdxxW(THX23^fc!yb zj)i3^&LPXAi|s|`-x)`KZ!!s;DVsC>uV=!49q#&{PTUMYzB0Vl9>q>r`+C^qgcKv9 z4&4eMV~sWtu}Z;(%k+f5DNa`ZiQ?q<0~)?YhK1WOU!`Zh4TjEk`wBlPy2UhnvU^>mriWSn)aOMUx_|Pl09fJf zEJKms{{GJm6tSOo8{8azoLIfNlF##0@E0SBNv)C3f2^%{rv(tMq>V#_E2#m$><0BJYZ~&|-#A=O92P;>fn44A zd~i;=;GF7D&dGZ<^=YEevB9e8tSH*?g!N>|UvKKKTb;qpAkO7abpVmCjQ%8FnZ^ff z5=$?DFNXc6@}!i##*$bFFc8#YS$$PX6SW0zRF^xWO}6ni%O~`|1L70z*@L>oIH!!Q z?Kc}c-dnf!@&3~JKebo-jL@Z6*VrYM=O(pc+9qEjPTX|rKiu?&K`t)mQ|{{bhAp-) z`*XE_dU+xiP#${TF4P;4;t+?PNkC%8z6IAFh%GJ$Y?#*ew^%ryu%rBssGl&g%$NOQ zVrlz#trZ}A(|h~dxFv6|*J0&_euau}M4u12a(AOns8UR7cX$4UDh1M0Zot#=AZasA z>p!p6S(HVve64)*UDiqHsLnz6D!Ka))epFnirNDpR*COfIhMQ&Vn5~Grj9XsFI7&k z_5Lr?6zqQ_P5GYC-}U$W$JijSyx>}Q2d%^21t_+RZGIMf0L`Y|0B5y zWCz}wyL;}3zdJtd{R2?qq(k&w_euU;q5ZV&Ycg|STv-dTEx>FGnRd%nGsIzWhH#V?3F*gvk{ z8RoqT2Q(>eV41@T{n?sCvDUqTfXgk@?8KjdK<_W&JIxHKW7QrsQRQ&A$?v_FBKiGX z*OTY!2-xeb;^+@u{^>)3&Gq`F-}L@e1{UKYSrU zyjtdeSu)6E9L(neupS-mJOWA0oDtwHj@b1uBpT%EXa4j{f0D2q{UZqr2zU>G0E2*+ z#&y18&^gvDysu3+_1{+sNLvox6#%q&FB&g14<$9Q%*BuBR#3h$-muP88mmCJGW`rZ z3Xg9#{0kY&Z}*>w&NQK#7I?(ysLq7yVKwjH*5l9Jr4)4)lB3avf_p#!b`o|dZwovf 
z#?2#ciAYA6^$UifyTYP=Dx+{lJ1le5jpL~x%8=|`ayCJFyJ$l{mA7RH(o=doK};HV6Fk*k}%#~EbiK`Lkc}i0F4cr zJz`$#cut0MyltI}fT$v24vQdwK4Bf$rh|NdslV7@l7F(n2v%D$47(N~cL+j3r|)Dk8K22&Y#mmuF;6Tf z0|1REvs&K8_Fw!kCjnRJ>0I)?6MC4S6Lw8o|I2{V6xh*!2tumf7@9Y7)-~70KIWZo zsa<^GPk*s2p5p^4s|wL)h9fmaevB#?fZ8|@m5uY2|BJUJ>)H2po|s?6E%KOEz};d- zw<5ODWX-4g`m=&j03h2inhLOH31sb}599)joBY3{zbYPwxbWOf@I$|Tj{9fY2e|B{ zGya_K8Xs$e7bxRs<5w85b8m{URyKiOvlsMDF7y@Uciw$k`z%q^TxoSUQR^-;nNlqc z0#K6?p#J3@nTZ1T66^wvzSgz>&NlpU3jT(GzxHbv&1er+(fY$u*tgQKwB06FMUYtm zsT{G??4!v6q}dJnV37tH{)JVw!M|?uq=G6a^9|rVI7l zFk6hg%*D{%y%DJFjgqE!NvE3g|Vx-juLhtF1 zGoA5*N=45erokpk!(jW(9jKn=#?*BHxNP;Iy65={_J;eosIlQHJH}#dFyIipjRV}Y z$NtCk&qT19XvbxGS&AFdKgWrFW`V%ggcrg3UPQdnID)bxZ zh@kdANw?+gDMvDZ@X#Vor0!GKx`})(Lorcuq_3!f3#ImF3c(HS4Uh4oMTr8c_qXYO zr6LzM|E^xaTa^6xjE`wZU$pGXg?_2M%I|&nhZq%ZP7T@W`@brZGd!PZ_~?ZKlGs__4$d7(oUQ{Y}n%pkuzX^!tc zj@j5h6h}`P?s2~BrT?X9Y5zz-IDg@UQXaM0gA!!A#Bm@48F39!;qclt?9QX&c#gY# zrk!zv6(n~~SYcY1Q?`LD0yKqu)G?;!etw>-sq5;jzRo4UtuZM$%yiJG&xQv`!~cu+ z<)<>fdWnRNOpl9wcPfgsvHyKQ2m?Y}M4p5WaJPbd=s(El3nxQRD?55tG)5VwOs4Zx z{OOp(ED2Z8O#=I6fpxR?*el58#st5|J#4w1$pN^cLH3ExjUboZ%(Z4(^^0a)7{|~w zo|}DlG4Hb{)$)Jvzf4~Iq>t&cb2Sy+4o(xVNEv7NeU&R&)B=cHW)! 
zG$GPDGzoL*o`h|ju*IMylm!c5VjK9b`~Y(Fe&TA>ajfD5!$E#&aTs7rzY1kT5*%jU z7yyvBI(KZ3f!FKg<~Nal@xDNYbrcxZ>ym&o{#)Nz{6(q9*Z_#g5J|HSrg>xQsmBzm*^{4SW)dVzcf z=$XGkOopY_{Lx_IWCe(DPfAM567G$&fPVeFSK|pk zO-vJiDf2G}OX5!t8sutz639TgI0G;QQMWaqPm`@##Gp}Zrep>InKp|owR7s4pwUqE z&hwHH8sxV_$k$Fv51yCqoxkD96vP0&0sZadkGbmrFzl~?T}tvWn^64yy-W9eE`r&> zU!R0NF5vV3fB66MhDBfE8?{kc)vn#rt{cZJk8hXSL|HpeQW)w+vFW0RX-|$Z^Kncu zobvcE@pvO211fe4mY46HKMhXS=w39xc#16*7P;+_>G4t`wsrNEnswGC5vUKQj0if~ zDP!noM-0+1sKXZ2$S`dqM^CKLH83PN$0EmZ!;%Geh(+Z%4gP?yg>8d9p$!@YQ61<&AW@5A(w#ccVm%Tx4fs* zjn?p+iI<29?1{8;9L(_L8)gq?kfY1Ad_S2%G0dhWy0gnKxpyNF(I4BpNgTJC7A^S| zEy2i%tcw>v$MpdJm2{C<@O1TXwCD7!Ukv@bsj)ygbW0D?OCKFCJ6yZem^cdSpfg#H zR*&q^DV0X}6o{*QF&r8D1lberO z&^@kvmax$t@(4XWokT6P=Fx0_*On3`oFP9=(^?_yT-fiO8CiagV?joQr7`j?p6mfW z;s(bh>`1$wk3UutJ$W=K_Xyue9vR8iNjeo3S=m(AXyBATFd5){G-)nN?Jg4KomcA4S z@s20Mga$8`CD}@EgiK4J+0@vXx!p$|aHIB6#EoSog4~#{Z5}he?urf6A+@DH(54TF z3JThq-tr3l>A88<;*y;mw?+@d_AjNiUevh`*;X#9Q=jl3ryqj$pubN{eng0>=0 z8$&PYi%XObGnp+jqnbd!G;ERBK`xT0i`b*E!lol0!)Oyq;j>&%EbN=ofg0$3WRdN+ za9f?C8Np_6^m>hlKwU8fk(Q0_*)HH5rFssd^E)E#7`9Q?q3YpDLEvo2s_73R(^z$C z=|kX&{7WNiWy_94sG%Q9&ljWYC%Q@*Pgd(PGA=nU-)0)-4kNXf$Lfi_cOki-E$yOu zIQ{6fO_OvS`5B6bSA_zK>n=(jSv_dFTyEc(bf$IN9!IW$dzlfLR4%$4K^0yJ0g!tm?y@#$D(?$f?}f(3wqFMg>SUfQ?FU{+-&c{ONouwW5l%7 z;M{G?J**<@gf3E)lRq5cu@EGs^7%+Yd_fEbZNayE0JMkV*5*zp=B2lt|c}}2jvb1u)Rjy zRVFpm{wx2*sPfGrlY0b^ml5NWbv2a7XSt!=WU?CZk|+K<*OL*R!Y8uBEq$kQOZ{d- zim4{rX2y7enZ5TKb80R90oP?b+0kf+Aa2{^gOokCpwdNoXY1if1_L+V`|&!3yCfEg zDUNA-^+&9%1m?Xk$E%;C5Jb_)`678o6*)Sffik6Ih1xv`c)NKmEPCsX;UdA)I5a?(mRq@wOSdt@{4il z$$(4;9>Xf={FWypDLRb9u6GxVZU?u=PA>VfU@Erc9UZ%D{9+Z+VGdS9BRoZ42l{9c zHL~u;*;TnE;?|@tXijTEnM~6d_L1?P%`}8$S&n4EdXJ<|3ZIvcRF1A3!39@_>pkZ@ zlm=g3<1N@WHCw5o+VcWS#H_HnuGicZ$XXb@GXCC7i7hNRj7XG1B85r4{fmc+kb)cj4);qpXU z)Pqpb(PgE&Xvr~wl#vivVc;Q)>t>JfTxxo!P*TReFCCJ`k>=2!H|e1i-DWfc6&-C7)6RM%orRH-!iA%ZD=DV>Cz6Irf|m4m3lgzKXl#h)&d7*?jj|WL=6DVuXq%tW7%TWX9~mA9eBQryQA-FW<<` z38HN;&|JLzAPShy__Q{dXnq|}@CX0SinD=E>fCJ^;=}UZZoS*=Tw-Yit}BaY!UtTI 
zX|*sc9(M;tB-&y;o%D1HBN5@X1V3MY6Kk3Y2v$cP(2t2#4gu>blrV=%`aK1p2&1v! zHYvgcEo;1n;iiW+&{+(Pvde3uMPQTpfoo}N7RM=7y7F`TOz$rsuGI(4vPb`z*zQOo zDO`xD(x6ImFzsOPz+nUWh<>lmg`0^`^AN98QCYaxIb=K5e>v={TJNW47X1}?-8X^9 zW_pXRKh&qD=XKMFZ*BJ3F(yN?QJu?(1G1~vd(VlkSfQ=Y3pp*JAJrwB{G5 zPw42lMDp^QQZ-m6(&x;jao@JyndjTKy`!+%r$Z;7Ud|__J3d78lFe!;^l5L_y<-YB zZ-zzrvYhGnPNcQ&;I)H#`inak7<&VFFUR4`%iow#0!NV&1gm8Uxb{WIkeJaVfTjK; z)u8TLM3mCm>b>rgIdihtaE`7*MPdepH&10^%%1fIC`eae*w*)9#P$CxCBCtGBj+HZ>NGb$0a5*Fzc*C=CLo&2U-e~A$!`o{+PFGS)_tt;#FYWMrk5DBa{1zL@FwKV)t4<2IBGrAG>%E-j zLW1g}@@V#qvmSTd{WkX&FRs95+gzo5UNe%CG;1CaK94$5o`q$J^oCM_YarW>|dY9Q;?>@|Oe zB`=7_vOc==g=KzTYO4s)9Fjru4cq=fKiD+4ku6V(%)r&MK_%y=8BZ`QOCng+dktaJ zHbWZBX|w;zkY zr=@k6+L_zcov`a^XHvsC*tHHUxY0`}{ONw1V1o3>BlxU={z!bY{beNlOIIObd6=+9 zobND=knE&Mk}=6A`aj^Tz4JAAuHDm?-Z@XK z@<|7qsuE7e@OR=#kGnbToa5ANe4kt_i^~O^qA{QQQoEfcBf7V!W)wEd1A1}{_?q9# zs#T=$Ng+y792=>KSk1N2DT`04%_!`#?E}Sg#|zT|%E8g?RP}cbH0el3(XyO2=gY z&bFPa;@J>02bG!({f%9Uj4UdT5MGCe7Y7JoPZLHe^4W>t2wJm8R2S?;1CCg9nYl%t zxAs^iKUjQuBkdDiF4eX0o2~Z66s4&rDk_7b`36P0<+4fx8W_&qc~9qg=_~-UXvE4FR3FKSAzfnQv>yQ< z0bz!%Tykl^eOK^t!=IlhVN9F1q9=0w>hP7B0er&q5-m+{25WM;qR7JXl^dSqZ$Z|F z!BgHdHOIT3M8mY9k;&ZrBrD|e3Ehzl*@=l|*!2XFS*zivFh0qKX| m-vghb`hVkxgRbu$pJr7ROvQWNfjkBNgGnen$h~La{(k@yy%L51 diff --git a/neural_compressor/conf/config.py b/neural_compressor/conf/config.py index c3f8f4afb6f..de0105e7f84 100644 --- a/neural_compressor/conf/config.py +++ b/neural_compressor/conf/config.py @@ -30,7 +30,6 @@ from .dotdict import DotDict, deep_set import os, datetime - def constructor_register(cls): yaml_key = "!{}".format(cls.__name__) @@ -48,13 +47,12 @@ def constructor(loader, node): ) return cls - @constructor_register class Pruner(): def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, target_sparsity=None, update_frequency=1, method='per_tensor', - prune_type='basic_magnitude', ##for pytorch pruning, these values should be None + prune_type='basic_magnitude',##for pytorch pruning, these values should be 
None start_step=None, end_step=None, update_frequency_on_step=None, prune_domain=None, sparsity_decay_type=None, pattern="tile_pattern_1x1", names=None, extra_excluded_names=None, parameters=None): @@ -76,10 +74,9 @@ def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, # 'now only support {}'.format(PRUNERS.keys()) self.prune_type = prune_type self.method = method - self.names = names + self.names= names self.parameters = parameters - # Schema library has different loading sequence priorities for different # value types. # To make sure the fields under dataloader.transform field of yaml file @@ -90,18 +87,15 @@ def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, yaml.SafeLoader.add_constructor('tag:yaml.org,2002:python/tuple', lambda loader, node: tuple(loader.construct_sequence(node))) - def _valid_accuracy_field(key, scope, error): assert bool( 'relative' in scope['accuracy_criterion']) != bool( 'absolute' in scope['accuracy_criterion']) - def _valid_prune_epoch(key, scope, error): if "start_epoch" in scope[key] and "end_epoch" in scope[key]: assert scope[key]["start_epoch"] <= scope[key]["end_epoch"] - def _valid_prune_sparsity(key, scope, error): if "initial_sparsity" in scope[key] and "target_sparsity" in scope[key]: assert scope[key]["initial_sparsity"] <= scope[key]["target_sparsity"] @@ -110,17 +104,14 @@ def _valid_prune_sparsity(key, scope, error): elif "target_sparsity" in scope[key]: assert scope[key]["target_sparsity"] < 1 - def _valid_multi_objectives(key, scope, error): if 'weight' in scope[key] and scope[key]['weight'] is not None: assert len(scope[key]['objective']) == len(scope[key]['weight']) - def _valid_multi_metrics(key, scope, error): if 'metric' in scope and 'multi_metrics' in scope: assert False - def _valid_metric_length(key, scope, error): metrics = [i for i in scope[key] if i != 'weight' and i != 'higher_is_better'] if 'weight' in scope[key] and scope[key]['weight'] is not None: @@ -128,7 
+119,6 @@ def _valid_metric_length(key, scope, error): if 'higher_is_better' in scope[key] and scope[key]['higher_is_better'] is not None: assert len(input_to_list_bool(scope[key]['higher_is_better'])) == len(metrics) - # used for '123.68 116.78 103.94' style to float list def input_to_list_float(data): if isinstance(data, str): @@ -140,7 +130,6 @@ def input_to_list_float(data): assert isinstance(data, list) return [float(d) for d in data] - def input_to_list_bool(data): if isinstance(data, str): if ',' in data: @@ -154,7 +143,6 @@ def input_to_list_bool(data): assert isinstance(data, list) and all([isinstance(i, bool) for i in data]) return data - def input_int_to_float(data): if isinstance(data, str): # used for '123.68, 116.78, 103.94' style @@ -173,7 +161,6 @@ def input_int_to_float(data): elif isinstance(data, int): return float(data) - def input_to_list_int(data): if isinstance(data, str): return [int(s.strip()) for s in data.split(',')] @@ -184,7 +171,6 @@ def input_to_list_int(data): assert isinstance(data, list) return [int(d) for d in data] - def input_to_list(data): if isinstance(data, str): if ',' in data: @@ -198,7 +184,6 @@ def input_to_list(data): assert isinstance(data, list) return data - def list_to_tuple(data): if isinstance(data, str): return tuple([int(s.strip()) for s in data.split(',')]) @@ -212,7 +197,6 @@ def list_to_tuple(data): else: return tuple([int(s) for s in data]) - def percent_to_float(data): if isinstance(data, str) and re.match(r'-?\d+(\.\d+)?%', data): data = float(data.strip('%')) / 100 @@ -222,7 +206,6 @@ def percent_to_float(data): assert isinstance(data, float), 'This field should be float, int or percent string' return data - ops_schema = Schema({ Optional('weight', default=None): { Optional('granularity'): And( @@ -238,7 +221,7 @@ def percent_to_float(data): Optional('algorithm'): And( list, lambda s: all(i in ['minmax'] for i in s)), - Optional('bit'): And( + Optional('bit'): And( Or(float, list), 
Use(input_to_list_float), lambda s: all(0.0 < i <= 7.0 for i in s)) @@ -269,7 +252,7 @@ def percent_to_float(data): Optional('precisions', default={'precisions': ['fp32']}): And( Or(str, list), Use(input_to_list), - lambda s: all(i in ['fp32', 'bf16'] for i in s)), + lambda s: all(i in [ 'fp32', 'bf16'] for i in s)), Optional('op_wise', default={'weight': {}, 'activation': {}}): { Optional('weight', default=None): { @@ -283,7 +266,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['fp32', 'bf16'] for i in s)), - } + } } }) @@ -292,7 +275,7 @@ def percent_to_float(data): Optional('precisions', default={'precisions': ['fp32']}): And( Or(str, list), Use(input_to_list), - lambda s: all(i in ['fp32', 'bf16'] for i in s)), + lambda s: all(i in [ 'fp32', 'bf16'] for i in s)), Optional('op_wise', default={'weight': {}, 'activation': {}}): { Optional('weight', default=None): { @@ -306,7 +289,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['fp32', 'bf16'] for i in s)), - } + } } }) @@ -322,7 +305,7 @@ def percent_to_float(data): }) transform_schema = Schema({ - Optional('ResizeWithRatio'): { + Optional('ResizeWithRatio'):{ Optional('min_dim'): int, Optional('max_dim'): int, Optional('padding'): bool, @@ -339,7 +322,7 @@ def percent_to_float(data): }, Optional('RandomResizedCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('scale'): And(list, lambda s: all(isinstance(i, float) for i in s)), Optional('ratio'): And(list, lambda s: all(isinstance(i, float) for i in s)), Optional('interpolation'): And( @@ -356,7 +339,7 @@ def percent_to_float(data): 'width': int, 'height': int, 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('interpolation'): And( str, lambda s: s in ['nearest', 'bilinear', 'bicubic']), @@ -372,23 +355,23 
@@ def percent_to_float(data): }, Optional('Resize'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('interpolation'): And( str, lambda s: s in ['nearest', 'bilinear', 'bicubic']), }, Optional('RandomCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)) + And(int, lambda s: s > 0)) }, Optional('Rescale'): Or({}, None), Optional('CenterCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)) + And(int, lambda s: s > 0)) }, Optional('PaddedCenterCrop'): { 'size': Or(And(list, lambda s: all(isinstance(i, int) for i in s)), - And(int, lambda s: s > 0)), + And(int, lambda s: s > 0)), Optional('crop_padding'): And(int, lambda s: s > 0), }, Optional('ToArray'): Or({}, None), @@ -428,7 +411,7 @@ def percent_to_float(data): Optional('mean_value'): And(Or(str, list), Use(input_to_list_float)), Optional('scale'): float, }, - Optional('ResizeWithAspectRatio'): { + Optional('ResizeWithAspectRatio'):{ 'height': And(int, lambda s: s > 0), 'width': And(int, lambda s: s > 0), }, @@ -444,7 +427,7 @@ def percent_to_float(data): }) postprocess_schema = Schema({ - Optional('LabelShift'): int, + Optional('LabelShift'): int, Optional('Collect'): { 'length': int }, @@ -526,7 +509,7 @@ def percent_to_float(data): And(str, Use(input_int_to_float))), Optional('dtype'): And(Or(str, list), Use(input_to_list)), }, - + Optional('dummy'): { 'shape': And(Or(str, list), Use(list_to_tuple)), Optional('low'): Or( @@ -606,8 +589,8 @@ def percent_to_float(data): 'dataset': dataset_schema, Optional('filter'): filter_schema, Optional('transform'): transform_schema, - Optional('shuffle', default=False): And(bool, lambda s: s in [True, False]), - Optional('distributed', default=False): And(bool, lambda s: s in [True, False]), + Optional('shuffle', default = False): And(bool, lambda s: s in [True, False]), + 
Optional('distributed', default = False): And(bool, lambda s: s in [True, False]), }) configs_schema = Schema({ @@ -640,7 +623,7 @@ def percent_to_float(data): Optional('beta_2', default=0.999): Use(float), Optional('epsilon', default=1e-07): Use(float), Optional('amsgrad', default=False): bool - }, + }, }) criterion_schema = Schema({ @@ -702,15 +685,15 @@ def percent_to_float(data): weight_compression_schema = Schema({ Optional('initial_sparsity', default=0): And(float, lambda s: s < 1.0 and s >= 0.0), Optional('target_sparsity', default=0.97): float, - Optional('max_sparsity_ratio_per_layer', default=0.98): float, + Optional('max_sparsity_ratio_per_layer', default=0.98):float, Optional('prune_type', default="basic_magnitude"): str, Optional('start_epoch', default=0): int, Optional('end_epoch', default=4): int, Optional('start_step', default=0): int, Optional('end_step', default=0): int, Optional('update_frequency', default=1.0): float, - Optional('update_frequency_on_step', default=1): int, - Optional('excluded_names', default=[]): list, + Optional('update_frequency_on_step', default=1):int, + Optional('excluded_names', default=[]):list, Optional('prune_domain', default="global"): str, Optional('names', default=[]): list, Optional('extra_excluded_names', default=None): list, @@ -719,7 +702,7 @@ def percent_to_float(data): Optional('pattern', default="tile_pattern_1x1"): str, Optional('pruners'): And(list, \ - lambda s: all(isinstance(i, Pruner) for i in s)) + lambda s: all(isinstance(i, Pruner) for i in s)) }) # weight_compression_pytorch_schema = Schema({},ignore_extra_keys=True) @@ -732,7 +715,7 @@ def percent_to_float(data): }) default_workspace = './nc_workspace/{}/'.format( - datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) COCOmAP_input_order_schema = Schema({ Optional('num_detections'): int, @@ -747,21 +730,21 @@ def percent_to_float(data): 'framework': And(str, lambda s: s in 
list(FRAMEWORKS.keys()) + ['NA']), Optional('inputs', default=[]): And(Or(str, list), Use(input_to_list)), Optional('outputs', default=[]): And(Or(str, list), Use(input_to_list)), - + }, Optional('version', default=float(__version__.split('.')[0])): And( - Or(float, - And(int, Use(input_int_to_float)), - And(str, Use(input_int_to_float))), - lambda s: s == float(__version__.split('.')[0])), + Or(float, + And(int, Use(input_int_to_float)), + And(str, Use(input_int_to_float))), + lambda s: s == float(__version__.split('.')[0])), Optional('device', default='cpu'): And(str, lambda s: s in ['cpu', 'gpu']), Optional('quantization', default={'approach': 'post_training_static_quant', \ 'calibration': {'sampling_size': [100]}, \ 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, 'model_wise': {'weight': {'bit': [7.0]}, 'activation': {}}, 'optimization_level': 1, @@ -783,27 +766,27 @@ def percent_to_float(data): Optional('dataloader', default=None): dataloader_schema }, Optional('recipes', default={'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}): { + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}): { Optional('scale_propagation_max_pooling', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('scale_propagation_concat', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), 
Optional('first_conv_or_matmul_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('last_conv_or_matmul_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('pre_post_process_quantization', default=True): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('fast_bias_correction', default=False): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), Optional('weight_correction', default=False): - And(bool, lambda s: s in [True, False]), + And(bool, lambda s: s in [True, False]), }, Optional('model_wise', default={'weight': {'bit': [7.0]}, 'activation': {}}): { - Optional('weight', default={'bit': [7.0]}): { + Optional('weight', default= {'bit': [7.0]}): { Optional('granularity', default=None): And( Or(str, list), Use(input_to_list), @@ -821,7 +804,7 @@ def percent_to_float(data): Or(str, list), Use(input_to_list), lambda s: all(i in ['minmax'] for i in s)), - Optional('bit', default=[7.0]): And( + Optional('bit', default=[7.0]): And( Or(float, list), Use(input_to_list_float), lambda s: all(0.0 < i <= 7.0 for i in s)) @@ -868,16 +851,16 @@ def percent_to_float(data): Optional('model_conversion'): model_conversion_schema, Optional('tuning', default={ - 'strategy': {'name': 'basic'}, + 'strategy': {'name': 'basic'}, 'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True}, 'objective': 'performance', 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False}, 'random_seed': 1978, 'tensorboard': False, 'workspace': {'path': default_workspace}, 'diagnosis': False, - }): { + }): { Optional('strategy', default={'name': 'basic'}): { - 'name': And(str, lambda s: s in STRATEGIES), + 'name': And(str, lambda s: s in STRATEGIES), Optional('sigopt_api_token'): str, Optional('sigopt_project_id'): str, Optional('sigopt_experiment_name', 
default='nc-tune'): str, @@ -885,7 +868,7 @@ def percent_to_float(data): Optional('latency_weight', default=1.0): float, Optional('confidence_batches', default=2): int, Optional('hawq_v2_loss', default=None): object, - }, + } , Hook('accuracy_criterion', handler=_valid_accuracy_field): object, Optional('accuracy_criterion', default={'relative': 0.01}): { Optional('relative'): And(Or(str, float), Use(percent_to_float)), @@ -894,7 +877,7 @@ def percent_to_float(data): }, Optional('objective', default='performance'): And(str, lambda s: s in OBJECTIVES), Hook('multi_objectives', handler=_valid_multi_objectives): object, - Optional('multi_objectives'): { + Optional('multi_objectives'):{ Optional('objective'): And( Or(str, list), Use(input_to_list), lambda s: all(i in OBJECTIVES for i in s)), Optional('weight'): And(Or(str, list), Use(input_to_list_float)), @@ -914,18 +897,18 @@ def percent_to_float(data): Optional('path', default=None): str, Optional('resume'): str }, - Optional('diagnosis', default={ + Optional('diagnosis', default = { 'diagnosis_after_tuning': False, 'op_list': [], 'iteration_list': [1], 'inspect_type': 'activation', 'save_to_disk': True, 'save_path': './nc_workspace/inspect_saved/', - }): { + }):{ Optional('diagnosis_after_tuning', default=False): And(bool, lambda s: s in [True, False]), Optional('op_list', default=[]): And(Or(str, list), Use(input_to_list)), Optional('iteration_list', default=[1]): And(Or(int, list), Use(input_to_list_int)), - Optional('inspect_type', default='all'): And(str, lambda s: s in ['all', 'activation', 'weight']), + Optional('inspect_type', default='all'): And(str, lambda s : s in ['all', 'activation', 'weight']), Optional('save_to_disk', default=True): And(bool, lambda s: s in [True, False]), Optional('save_path', default='./nc_workspace/inspect_saved/'): str, }, @@ -942,8 +925,8 @@ def percent_to_float(data): Optional('mAP'): { Optional('anno_path'): str, Optional('iou_thrs', default=0.5): - Or(And(str, lambda s: s in 
['0.5:0.05:0.95']), - And(float, lambda s: s <= 1.0 and s >= 0.0)), + Or(And(str, lambda s: s in ['0.5:0.05:0.95']), + And(float, lambda s: s <= 1.0 and s >= 0.0)), Optional('map_points', default=0): And(int, lambda s: s in [0, 11, 101]) }, Optional('COCOmAP'): { @@ -953,10 +936,10 @@ def percent_to_float(data): Optional('COCOmAPv2'): { Optional('anno_path'): str, Optional('map_key', default='DetectionBoxes_Precision/mAP'): str, - Optional('output_index_mapping', default={'num_detections': -1, - 'boxes': 0, - 'scores': 1, - 'classes': 2}): COCOmAP_input_order_schema + Optional('output_index_mapping', default={'num_detections': -1, + 'boxes': 0, + 'scores': 1, + 'classes': 2}): COCOmAP_input_order_schema }, Optional('VOCmAP'): { Optional('anno_path'): str @@ -985,14 +968,14 @@ def percent_to_float(data): Optional('ROC'): { Optional('task'): str }, - }, + }, Optional('metric', default=None): { Optional('topk'): And(int, lambda s: s in [1, 5]), Optional('mAP'): { Optional('anno_path'): str, Optional('iou_thrs', default=0.5): - Or(And(str, lambda s: s in ['0.5:0.05:0.95']), - And(float, lambda s: s <= 1.0 and s >= 0.0)), + Or(And(str, lambda s: s in ['0.5:0.05:0.95']), + And(float, lambda s: s <= 1.0 and s >= 0.0)), Optional('map_points', default=0): And(int, lambda s: s in [0, 11, 101]) }, Optional('COCOmAP'): { @@ -1002,10 +985,10 @@ def percent_to_float(data): Optional('COCOmAPv2'): { Optional('anno_path'): str, Optional('map_key', default='DetectionBoxes_Precision/mAP'): str, - Optional('output_index_mapping', default={'num_detections': -1, - 'boxes': 0, - 'scores': 1, - 'classes': 2}): COCOmAP_input_order_schema + Optional('output_index_mapping', default={'num_detections': -1, + 'boxes': 0, + 'scores': 1, + 'classes': 2}): COCOmAP_input_order_schema }, Optional('VOCmAP'): { Optional('anno_path'): str @@ -1070,7 +1053,7 @@ def percent_to_float(data): Optional("higher_is_better", default=[]): list, Optional("max_trials", default=1): int, Optional("seed", 
default=42): int, - }, + }, Optional("flash_distillation"): { Optional("knowledge_transfer"): { Optional("block_names", default=[]): list, @@ -1079,7 +1062,7 @@ def percent_to_float(data): Optional("loss_weights", default=[]): list, Optional("add_origin_loss", default=[]): list, Optional("train_steps", default=[]): list, - }, + }, Optional("regular_distillation"): { Optional("block_names", default=[]): list, "layer_mappings_for_knowledge_transfer": list, @@ -1087,8 +1070,8 @@ def percent_to_float(data): Optional("loss_weights", default=[]): list, Optional("add_origin_loss", default=[]): list, Optional("train_steps", default=[]): list, + }, }, - }, }, Optional('nas'): { @@ -1100,7 +1083,7 @@ def percent_to_float(data): Optional("higher_is_better", default=None): list, Optional("max_trials", default=None): int, Optional("seed", default=42): int, - }, + }, Optional("dynas"): { Optional("supernet", default=None): str, Optional("metrics", default=None): list, @@ -1109,7 +1092,7 @@ def percent_to_float(data): Optional("results_csv_path", default=None): str, Optional("dataset_path", default=None): str, Optional("batch_size", default=64): int, - }, + }, }, Optional("train"): train_schema @@ -1118,7 +1101,7 @@ def percent_to_float(data): quantization_default_schema = Schema({ Optional('model', default={'name': 'default_model_name', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1127,13 +1110,13 @@ def percent_to_float(data): Optional('quantization', default={'approach': 'post_training_static_quant', \ 'calibration': {'sampling_size': [100]}, 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 
'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, 'model_wise': {'weight': {'bit': [7.0]}, 'activation': {}}, - }): dict, + }): dict, Optional('use_bf16', default=False): bool, Optional('optimization_level', default=1): int, Optional('tuning', default={ @@ -1150,7 +1133,7 @@ def percent_to_float(data): pruning_default_schema = Schema({ Optional('model', default={'name': 'default_model_name', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1162,9 +1145,9 @@ def percent_to_float(data): 'random_seed': 1978, 'tensorboard': False, 'workspace': {'path': default_workspace}}): dict, - Optional('pruning', default={'approach': {'weight_compression': {'initial_sparsity': 0.0, \ - 'target_sparsity': 0.97, 'start_epoch': 0, \ - 'end_epoch': 4}}}): dict, + Optional('pruning', default={'approach': {'weight_compression':{'initial_sparsity': 0.0, \ + 'target_sparsity': 0.97, 'start_epoch': 0, \ + 'end_epoch': 4}}}): dict, Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict }) @@ -1172,21 +1155,21 @@ def percent_to_float(data): graph_optimization_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, Optional('device', default='cpu'): str, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 
'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('use_bf16', default=False): bool, @@ -1200,27 +1183,27 @@ def percent_to_float(data): Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict, - Optional('graph_optimization', default={'precisions': ['bf16, fp32']}): dict + Optional('graph_optimization', default={'precisions': ['bf16, fp32']}): dict }) mixed_precision_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, Optional('device', default='cpu'): str, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('use_bf16', default=False): bool, @@ -1234,13 +1217,13 @@ def percent_to_float(data): Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict, - 
Optional('mixed_precision', default={'precisions': ['bf16, fp32']}): dict + Optional('mixed_precision', default={'precisions': ['bf16, fp32']}): dict }) benchmark_default_schema = Schema({ Optional('model', default={'name': 'resnet50', \ 'framework': 'NA', \ - 'inputs': [], 'outputs': []}): dict, + 'inputs': [], 'outputs': []}): dict, Optional('version', default=float(__version__.split('.')[0])): str, @@ -1248,15 +1231,15 @@ def percent_to_float(data): Optional('use_bf16', default=False): bool, - Optional('quantization', default={'approach': 'post_training_static_quant', - 'calibration': {'sampling_size': [100]}, - 'recipes': {'scale_propagation_max_pooling': True, - 'scale_propagation_concat': True, - 'first_conv_or_matmul_quantization': True, - 'last_conv_or_matmul_quantization': True, - 'pre_post_process_quantization': True}, - 'model_wise': {'weight': {'bit': [7.0]}, - 'activation': {}}}): dict, + Optional('quantization', default={'approach': 'post_training_static_quant', + 'calibration': {'sampling_size': [100]}, + 'recipes': {'scale_propagation_max_pooling': True, + 'scale_propagation_concat': True, + 'first_conv_or_matmul_quantization': True, + 'last_conv_or_matmul_quantization': True, + 'pre_post_process_quantization': True}, + 'model_wise': {'weight': {'bit': [7.0]}, + 'activation': {}}}): dict, Optional('tuning', default={ 'strategy': {'name': 'basic'}, @@ -1285,19 +1268,18 @@ def percent_to_float(data): 'workspace': {'path': default_workspace}}): dict, Optional('distillation', default={ - 'train': {'start_epoch': 0, 'end_epoch': 10, - 'iteration': 1000, 'frequency': 1, - 'optimizer': {'SGD': {'learning_rate': 0.001}}, - 'criterion': {'KnowledgeDistillationLoss': - {'temperature': 1.0, - 'loss_types': ['CE', 'KL'], - 'loss_weights': [0.5, 0.5]}}}}): dict, - - Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}): dict - + 'train': {'start_epoch': 0, 'end_epoch': 10, + 'iteration': 1000, 'frequency': 1, + 'optimizer': {'SGD': 
{'learning_rate': 0.001}}, + 'criterion': {'KnowledgeDistillationLoss': + {'temperature': 1.0, + 'loss_types': ['CE', 'KL'], + 'loss_weights': [0.5, 0.5]}}}}): dict, + + Optional('evaluation', default={'accuracy': {'metric': {'topk': 1}}}):dict + }) - class Conf(object): """config parser. @@ -1305,7 +1287,6 @@ class Conf(object): cfg_fname (string): The path to the configuration file. """ - def __init__(self, cfg_fname): assert cfg_fname is not None self.usr_cfg = DotDict(self._read_cfg(cfg_fname)) @@ -1329,14 +1310,14 @@ def _read_cfg(self, cfg_fname): content).group().split("model")[0] content = re.sub(r'model\s*:', 'version: {}\n\n{}model:'.format( - float(__version__.split('.')[0]), - leading_whitespace - ), + float(__version__.split('.')[0]), + leading_whitespace + ), content) with open(cfg_fname, 'w') as f: f.write(content) - return validated_cfg + return validated_cfg except FileNotFoundError as f: logger.error("{}.".format(f)) raise RuntimeError( @@ -1358,12 +1339,12 @@ def map_pyconfig_to_cfg(self, pythonic_config): 'model.backend': pythonic_config.quantization.backend, 'model.quant_format': pythonic_config.quantization.quant_format, 'quantization.approach': pythonic_config.quantization.approach, - 'quantization.calibration.sampling_size': + 'quantization.calibration.sampling_size': pythonic_config.quantization.calibration_sampling_size, 'quantization.optype_wise': pythonic_config.quantization.op_type_list, 'quantization.op_wise': pythonic_config.quantization.op_name_list, 'tuning.strategy.name': pythonic_config.quantization.strategy, - 'tuning.accuracy_criterion.relative': + 'tuning.accuracy_criterion.relative': pythonic_config.quantization.accuracy_criterion.relative, 'tuning.accuracy_criterion.absolute': pythonic_config.quantization.accuracy_criterion.absolute, @@ -1380,12 +1361,12 @@ def map_pyconfig_to_cfg(self, pythonic_config): if pythonic_config.quantization.strategy_kwargs: st_kwargs = pythonic_config.quantization.strategy_kwargs for st_key in 
['sigopt_api_token', 'sigopt_project_id', 'sigopt_experiment_name', \ - 'accuracy_weight', 'latency_weight', 'hawq_v2_loss']: + 'accuracy_weight', 'latency_weight', 'hawq_v2_loss']: if st_key in st_kwargs: - st_val = st_kwargs[st_key] + st_val = st_kwargs[st_key] mapping.update({'tuning.strategy.' + st_key: st_val}) - + if pythonic_config.distillation is not None: mapping.update({ 'distillation.train.criterion': pythonic_config.distillation.criterion, @@ -1440,7 +1421,7 @@ def map_pyconfig_to_cfg(self, pythonic_config): target_key = str(pythonic_config.quantization.accuracy_criterion) if target_key not in k and 'accuracy_criterion' in self.usr_cfg.tuning: if target_key in self.usr_cfg.tuning.accuracy_criterion and \ - k.split('.')[-1] in self.usr_cfg.tuning.accuracy_criterion: + k.split('.')[-1] in self.usr_cfg.tuning.accuracy_criterion: self.usr_cfg.tuning.accuracy_criterion.pop(k.split('.')[-1]) continue if v is not None: @@ -1464,11 +1445,11 @@ def _convert_cfg(self, src, dst): for key in src: if key in dst: if isinstance(dst[key], dict) and isinstance(src[key], dict): - if key in ['accuracy_criterion', 'metric', 'dataset', - 'criterion', 'optimizer']: + if key in ['accuracy_criterion', 'metric', 'dataset', + 'criterion', 'optimizer']: # accuracy_criterion can only have one of absolute and relative # others can only have one item - inter_key = src[key].keys() & dst[key].keys() - {'higher_is_better'} + inter_key = src[key].keys() & dst[key].keys()-{'higher_is_better'} if len(inter_key) == 0: dst[key] = {} if key == 'accuracy' and src[key].get('multi_metrics', None): @@ -1484,7 +1465,6 @@ def _convert_cfg(self, src, dst): dst[key] = src[key] return dst - class Quantization_Conf(Conf): """config parser. @@ -1548,7 +1528,6 @@ def modelwise_tune_space(self, model_wise_quant): return self._model_wise_tune_space - class Pruning_Conf(Conf): """config parser. 
@@ -1567,7 +1546,6 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(pruning_default_schema.validate(dict())) - class Graph_Optimization_Conf(Quantization_Conf): """config parser. @@ -1585,7 +1563,6 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(graph_optimization_default_schema.validate(dict())) - class MixedPrecision_Conf(Quantization_Conf): """config parser. @@ -1603,7 +1580,6 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(mixed_precision_default_schema.validate(dict())) - class Benchmark_Conf(Conf): """config parser. @@ -1621,7 +1597,6 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(benchmark_default_schema.validate(dict())) - class Distillation_Conf(Conf): """config parser. @@ -1639,7 +1614,6 @@ def __init__(self, cfg=None): else: self.usr_cfg = DotDict(distillation_default_schema.validate(dict())) - class NASConfig(Conf): """config parser. @@ -1667,12 +1641,11 @@ def __init__(self, approach=None, search_space=None, search_algorithm=None): def validate(self): self.usr_cfg = schema.validate(self.usr_cfg) - + @property def nas(self): return self.usr_cfg.nas - class DefaultConf(DotDict): def __getitem__(self, key): if key not in self: @@ -1682,7 +1655,6 @@ def __getitem__(self, key): __getattr__ = __getitem__ - conf = DefaultConf({}) QuantConf = Quantization_Conf PruningConf = Pruning_Conf
ModelDatasetPruning AlgorithmFramework