From bf88cf87d90d806f6104b45a7c41bba1e0e84654 Mon Sep 17 00:00:00 2001 From: Caleb Princewill Nwokocha <47554663+calebnwokocha@users.noreply.github.com> Date: Tue, 22 Oct 2024 06:09:38 -0500 Subject: [PATCH 1/6] Add files via upload --- ...v_llama.cpp \302\267 Discussion #9965.pdf" | Bin 0 -> 735147 bytes w64devkit.txt | 394 ++++++++++++++++++ 2 files changed, 394 insertions(+) create mode 100644 "How to edit help result of llama.cpp_ \302\267 ggerganov_llama.cpp \302\267 Discussion #9965.pdf" create mode 100644 w64devkit.txt diff --git "a/How to edit help result of llama.cpp_ \302\267 ggerganov_llama.cpp \302\267 Discussion #9965.pdf" "b/How to edit help result of llama.cpp_ \302\267 ggerganov_llama.cpp \302\267 Discussion #9965.pdf" new file mode 100644 index 0000000000000000000000000000000000000000..85e3abce9df052cb81dd342143cfb2d726034a11 GIT binary patch literal 735147 zcma&N1ymhDw=Q^acQ`m9xXZx_5Infc!JXi4A-EGfxVyW%2MO-(7BonJ5F||JzxS>C zX4acovsU%#?&@9Db-H)$+TXW#)2c{HvvRU=qtecwo$R7=QgTo_n%bZW39)NfyV+S# z3Q0>zOLK5=^K)?U0((9_4h{$hu;>1-^MA*@JizwvT;RXz|5Nu*{$ITWPzT8Trw#D) zua1uk*#6fwpxuAufxdZp|N9td^RFE*C$LHW*B&AT?Ekg))5et zuyS#ypI_~N^@<9V+^Fp8 zVw7$!?iT;&zWD#AFQ6ATJ`TwLSx(^pF2@Psi;CV_~m^qpQV^*+m_~>Ru2@&8D5utQ-0~Es^)ho+4zFTe(gdf_w5XA5$ z7?Yd~!rx|;Efjy|vHJ_0!V2{b8Yaco! 
zI@LA(Lf=v_Wkt!Ro^0Ii$6T3NdgyW)961+x>#;4qaLmg%k(g}Gqahq6?UO}bY7 z-}Gn{lBEKvyXH3b-7!)w6Ygv-WWNeqLQT$u=0&z0vxw^^eiK>lux)pby6(P@JsbC@ zV!KG`dUG@N;e33srPySBF5caFv;)~QyebMVCDd?U5o!?Dj=5+-kgWAq^7cj^bpT{QGTQw$;?TQoxGBuv$fi$$9tR@DD=Sl47H z>SZ7C**-##m$K^kl*Ohi$EBY8SwT(3CF^yzPB@{z6Z64`u<7H6nb_Lrp3U~xOt|{b zaWn{o_$6BU-zzI7syn`_yVL&A)p=}gxUHCLy4iDsrhZW>u^`*>_qxp4arFEU`cqj5 zoSB)b0!`9U)!PtJTN|$=OMyU7$4sna`gEuH7Jn(Cm%s{-YApP^z4O8Ii3i3mYi^CJ zbNcD=E89{zbM$6{*gIIyH?o-Qi~@sY{M#x98)`;tvl_GR-4(snEv2Eeu7wr9rQQAY zE<}GcC;YUubVRG#tr<@ENe3K#&Wqt%m?HnVgXxK!)-A4;o5sgLfIc2VU3V~`>QaT@ zNI+Gh&(E}rCHo+OilffDYh~e+sAW0JT;&}qMbBqzA>Axli+^$e+bv$Ejq2_aC_Oem z_Ml6=od?crR;x5L;i~ACx8SO{po^KWc;l(_ahRNIKbA4he!|*A_YSjh$zcwPAy=qn z$yG)M&K@zj;a6bG?BC~@jDF{!)PD3)=oeSc31{cHrIYTRL#OkEHL<4--9`&atNolA zxhHWJFS$2s@SLUA%7ja);-Rz6$mV0Y^Zr3v|HTY{m0YJ(v_FmOnubI8#{Pnm7rGhS zW|})xyUA_ z9Nj5ZIUO5;54F2`F|dg_rCi9(HaT*Dv?f@akXPy2`yvoO!>|(?3eHHu@~XKF|LBm5OQd z#5)`-C}P60fiQfZ`Qs$*f@kx*1c$&6wTN~kr2bawBTLmZ@j65916ehEU0n;5>3Rm{ z3B@wh10GsWu9ZEbJGi8QDKFmvC0q;pqKhQg0kwb=52~fhua~f~!Y3kK`D!b%F&8TW z-yPdC7u$ojyzXMF8~dVy|J9qDgVDuP<9G}C^)l$*=S_$0hn=5}!M%^XS5GhXFp<^? 
z?V;8Lsg2Czs1;+4#D_XO$;1IV11kl;WUYyvpRg`BiMw1qHOl;b6g0FvOlODGwTYyv z;f5}#uTm;jI&6hpd8RdnmN?T;A8g9VQkH(*BGmP_?7=&9rYe!zjzmK%7?(>OVFcVgkOw|X76-AIR>_&q$xP~| z>oA8}%zn2=4ACjoe81ZQV^%tHj^~g$V&btL!gdHXhtLd`*p zco?H0)IMVLsq>)`r#dOo)FyN!gBwn&RQLYHW+zV6QL$PsB+I^}RV%nZv;cRYKDUgiX1 zN2RQlRRZ*}8MDMUXJ3D(Bc<$cu`>5n?0ApZ5RFJ4goacYdx-M{xdMX?W-y^(WWQY^ z%o*y36c903W|j8>YzZs|Zj7CZBYz3ZT7|`l&{_>UbnCYy1Y4v`t?}10Lx#O`dR(CY z%)i}F0sn5o0Ij(pri;VCBzs&EJ(?6juwZFf^ZVZX!n&HbQL|n>9Y5WqF4FgY&9)u&Fwn!U?l63k#)BdHk>Q${Q7txjQpYWXvBc(heI=XGOJe7tLQ34ixc zxCvPC_{DH;CP$SqQrJ1BA-i@b4Mt1mA-m=fI4baq21fxl7Y`&y=`Dwet}X_oQVcNC zWR}%~L2iXowH#DgV-tcb4g})R2OI%oJdiD)t%24{+Pu(igF+iiBE>rQ(Lvkzo^x83 zR`+HR=NW-V;;;o5cK4}s+OR>&iSyG&k*)UzXV~2Z|KB_3REvqoW5FZtwsVqB9qT1o zR`1B60Jp;N8pPwbIx^PU0>_Kat*G2grd1ybmb%p#?m z_3$MNgA@?b1}}rq5wB&rtA{8#Pb2|LT}2pNffII9C(x9gyv{8~F2|qrel=HQ)S2y5@i}C^ z=UWpyrVe4pLXLM1rUnKs?~6j7q_8rdRn`Ij(QGTOkI!;7I|R(S%=&1X5Qkz#UdMvI zo+5od-Up?!ZLJnVhw>B4$j3hkm_=6E&-^L4$GOxWf>pDWlGe5r*lC>WPYuEKO&| z?uI1ObF9#Ez~UmA{(gLei7c4oM9IU$%g5f{#@$!|-okwz*NSFf`k)C~*}KQ_s>HW! ze-sQPwhQs@DG#gulU2`?{Jv=+71jX7?CD)aJ!{<|Zh-2{wpLWJ-HVJT*EOmawZSO! z4yMcla-Hc`o&rA=0lGZGAW_!J z7nZCHP@f>*4U0Q|1v4eCJR;#ZowEkw4UgdYg67O*4>Xpx5^&~qW>flMTCPk4>{!b; zCTU_KSSR#oTEqT{A`6E#K`v`pkoKq02L!w@=ap z9jow?!+8At^(s8xU`PL;lQ1+#s>U|*(r{p@zW_wef+mD-{2k^}KI6i5!mnqANRs<_ z(^Q?N3t=SJ^=nUB?o}e4@B5co!s*q&x?55&_?d8{44~eDdTvv_^SS87It{7nm(p=t zrPO6}PLgSjrCgS3Ap^r-X6q_|irQuG^AkS&q78Z;ZhHZx@qbfeOYDDOaVfuT z`FWe;+H4m@w)S>?4%;tYt$jycf#TX5#ZyjdbwAUgZiK}H=Xgl~~)nR{9X;44h zuV!(4#qv<2(FGMV0$iQwbxM=teez(mijbRGe>dm+_=gAfi6^IB>!9i|lSFdKyQe;< zwDQoZLg#c`I4h{6#!h9HZZU-)(~`B5tK33WoxmzJO*K|EzveqqE~CV@^R6X)`!}Oicb7}8QG`iQl`fT zXLk&YR(3n*+aH^bFx(*sGO&>*CArn3W^@4M#>Vl0&#kYsdv4?-N3y1!0qZn$d{H}Q zl+7cWqSpz9h}4sev*oX@vz{4o9)U?qJ-YsC!q1Z38fomB(KP$ovrg6d`GMn3RpB+) zY`_bDb8!^o)?Xd4O1+CQ7CH#|Tc^PRIOCLx%tYUUQp!_YjftD65|!*0zTA?)s2Lr@ z>VJXe6TTfbwbPAZiZV1=`pydVr{dMc;4NIMN=$Epa5b1~_9+-s_p=~t#0_%qHtfm! 
zeS_|BWF0jS(+Q7i61nQ?^9BzVxa!_1`M_R#De9K1vhv!{SdxE@usZp6{76n(1h~ux zY0d)@j1tByEGnf=q8r*?@K8Q3>+~EB?SSr`N7zKh8q;CeKdjP`vT=VM zz;LD|>-p(qToVczmZcMo>PP9azd}Yh1j5X(+LkjwU*FlgbV}LGpz<))=nJvnVWJ{* zLv|yiBUxapGfKHoFc1?A$F*$Xonk}y7@Vj(T(YBJf5X0sHIOG85%S)M{OK3F+RJEl z(#&yRY6t^^nVsVAI?J>BYLD}jYKhE_qbX!#bgCf;c&GI8%;NKNKpEhI6f`vKQWm#0 z6-BM)yJ707N=5JMmx#3Cl^Z>e!nSjZHgoZ5KCRzVM9`v9Am{(|hq^hxv(r1}W4}!& ze4I@B3_=u-OSrLh?@5kGaq6h8MwZ2lUb%MO zluZ$I@KJia_7sN0CQ|*T4I`I}Q4qjW4^qXg|$h1!?Kh1zVQcNe?9xlnA0uNMRG*~rzRrZNQm`aB?W})cZC`Lfz8FGpL-+G}0_aVn~8ja@|^a%g8KNN#Zccbb_~1YI6wMiT(>f;3^iHjJk=A-4cZ2g?#u?~hk!6RaK%tljs4O6uV?a6mmHp*&n1Ej)!hvS2Kj?2UkS zn$h+a3&ewoe6u4`bsg3Vf%?AKVN)`7jms{5pZ_SID(r9cPxfBDE2Ib{aZ$N&))nM+ z4tWRFRWwrMZn|1w`O4IUwEiDMwStl1nA5$HI>s!U(bY%k6`x8-*jyMn6CTkrKN}w| zH61K9Wpxb5u(2t2YhaEtoly)Ro`Vopvbus#Wb;dLCFb`f<_Z`)f?z&7{r;zMaqPP9 zf;`Rz4qE9UE3Z~7J(5kBImpW6g5pGa>dBrLOuCwftC|F*O!D*Na&u#Ha}nBgd;|Xe zYIaU8Hwmtx<*8$0CE~=GOQ9|K14k4fyVK&E1^GkxMgSk~RKk!QJ%`1H3>4T$)23Z? z#L5}$bOvz&nngkZp9%suwjw;k;B@AOph}+mq4>+Nks+jt4v;5Zg=^rpXiY6wBJ7}~ zHJNtn8qXld*xZ#=fp3f{0>YY;Vo8S7)r&dGaT zwEw!ezW_V3^XQvtQ%s61x`0xpy<6UrJWSX17fy^X0?Ob5b00YfDhhRksSEAr4%nu; z+_qK^B$6)T`F=~%qc0R8L`JJ!yPJ6{6s#JOf@!T=EM4}s{v?`KicGE_fFj*g7yd(r zEoZ#l=(p{Pk5YQ>pH9N(fN7|zU=$-k^x2rGJM z*v@h>nXJy7-I$#8lUuH%o72p7$j-q*S=Bl957ZMiu-o0Avu?`jlU%CI*)+X*Sf0+W zY+z_J;QFRzZ8!$`wsNL?!E54(2)b~rys#;CV_E3!%O^i^7-$`>pzUXUTZvVS#{UA@ z1z{4Q%Y?qT?3U>c%vOd-SC5wAq&Jy1M;9j5X{fbm4RM)n_vPW7Cc(x;SE?gNla#A6 zbCpSw5XYHTiAfP1hZcpFXb0Ap*t3KpoGw@|;vEDn0W1@sNfneUAWjaqfFMJrQcbR- zBK=AGK;{aP!=-7Z^dBED8r8uw>5|{PC)MgV3qYqqF`ypiXEvB~eqpbG%K-$Djm?r6 z6fN^~eyMn40D^nrv`&WUlcjJxD( zeP9i=hs~R~6BLwU%0gZWU-wEnf!pEIdq4pSe5ErxX|Jm8I`dC#{^na84VyI(TEuoE zpS|T)qdjteLQkw91umR@FJk%8YsNDlW*hzq))KH(G=Q_yyM=b0qf~4*Dm3VkXH@>< z#P$OMEADumI|UJ%P0GvaQ^*&J_R~^!wb#cD21*|GiXDrb%U>?;>-N2$dge1Z^W=48 z2xr{Sw{=9#?6zW6aS+PUhyeO@!tq#!l`(2;Oe876n}Panw0!_D2;T~#>f1ksL(~S9 za>T~~q+JxXII*`7i#pa34lqV)?8$GYWYGk10_Sv`^Z$MryqA8sMBi>?_W4nBl`W#` 
zw;k^Lq&Uh4*e=Qm#GhNPqgcyb*k*%Yv5F7B5kALHmGK*mJk;Oo!i27lkx3)e_!xX= zX8#UPPMRE$^PXQo04PG#aASsj&j5;{{3xcb?VyM@0}M{Q4nLs&Q!}5;BwKI({sMut zm-KLK-=^De5T5*UrSJuEon#xP#{!neLU0?PZ$X{Zn*b@D_o43`VQa5p88kMA`UKFX z(o;MYQr^v?rAPYQ8&ER>tY5K)Eg!;0IBJ= zr`UGKl~~el{WMWTJoDjOz7G3rS(|rrpJ`G+4*dJTt8_Mp5$hH>26J%(FL%M*Na8a^ z8&wyQoHuQ#R>eYkGC}EOqr^hNi!>4zkmtNfmU-I*1JOumHgXSTv|xfEvx-f~6?M$l zPQ^C4VU-(oNDC;=pSb9i6bB8hLC^n?)o0d-)+OnseDCfkuVn=8Z>93M>u0H2*GTIR z+jRURogw9&GBuNLHTsO1Ak1fgmvSD98v1(F2Yr1=8|BhzZ|1APa0VzDk>=$6v^Si} z_vO{ig+Tzloh;(lIrqJ3dQh7nq;sW4S>(LVzI=pZzB==mbK;ixqii3jOuiNK7`x;Q z>iD}~(QcRxyEdR~RVk}_a~P`YbH+|^-ogS?wsiL8+A=*eN4gA8_btrljsjjfdaSOb z9Xw8)^wK_4;j|ohHY;%xRX>Rj7Vq(d*5?cj9TepOf>9zpvS+qmM77hWz~dt33M*ps zl9c?|pC5#PV){&}ab+-*lb!#$7i|p=sV5;&gfl=23WyzR}WZY`lLee~unz=N2jls#`D&a*jo2jfsNl3JYq;qQz0_dd;vBXiEE?raU-x45%cVaSn_`cXFT%~oW*)hHN~?tl>4>bir!c0=4qov+pt(HiknlGNlC}xyAp(= zQfeD>Ra7Gu%hHFFu{?m1g5{V$uR5ip+nB)DNmox4C6X?f29tDRw^bnIfEe`Xr#Si? z#fth(X4~v^_d=DF(_pIWKmiAGKCre*aq4J~*r!j(Ds2Vf6WE0z(1rsw@?+HH^KXWf zm~fvgy=elYwbkShOE#P<>44RfyD^0bjBnhAI*!5ECBqb3ts`~nyoV?sqhN~p3HoYr)pD*OppDSyt^? 
zQPwkXdd@q)kR7zBp4udJfGQLpKz?Y#qkLkpyoaXNE8o?Dl^X^n0VqgH!xa3)q*)|* zNo0g+_@6lTC0(4YRnI_8&dC6#%>1lmVDjzo2VyS2f?BQLY~T~_AZ%<{-f#`Wn^%U{ zfWo&h^-sUaZr0uVkOZ2-?=J+Krs)wGw?=Q%nzFn`OO}?68TCH(lrYr-B_m-F;B6sbz2HkmK(&q3yT* zH|}ydceyKXjc0BXJ(~Qy?DE9!*xDo@hGRuFPQ5`-5WJ@cYsrq^OY`h!bt&$PgxaG+ zFIH2cYO)jsQ<_9uiJ$k7@}*S$7pFleV$4iY@&M11mIzp<&;`35z^1#CMyfn4AzD28 zo0)Dd$3KGRox%ydv4v|ANO;tkTVU&;QrVbE7`D==#4?yn%l6Bk$u!E`=gcy$aFZQc zgX%VxB0=22O-wpkg-Hk2VtaF>4ak0C$8$!9Q6n@iplfYh>pV0AapR6Bxo8Aae7w)# zj_!#tWx??sZ3XXoHENBD`Y}!C)(I#}2jv@PV!eQG=dW1NFI`OVsrPPWErb`mQf zK#Vn8974;3N7!sGrUA^?sjs@QhV?)WnRX)k)%Fo)ps%bHy~2zY7foZ=w(XHOv~k_5 zG1p@k%lI5JfdK`#=rq)Ipu38WpH-ZI7jH2p(QhZdDup=gr$lt;rw0bWWY_!CTAWE@ zT1DC?b<~W<(>oa)4b&mai zUZ2-bGy+|FF1*~I#9Tl-D1PSv8hvwb8An;23F%kQ`HF_UcXg*8)!yR z8xpVLZ!!B=BP1RW1LIKqSj65eCR!wAsJ;vSW|@K`;lA$b5PbDRR#$DkppZZ*Aa@Q1 z(~F)Q_1iLSG|$-(Vqy?mfY?aJom#ha(E>67Q=tC$(lcm~P6Tm0FZ?dimw-7^yuVnJ zEw7qmbB&MZ(;J|R*>WE;2R<1b*ECEv zA4yIEPFB`AX)w`K^P6Hb(u%13=_n%DZoZ1ob;RrQI?AU;*z}@isEF%%v@ji5J6hjP zKD5Jb<*7}G>re_P*GkK>E=1VGQ-_cw49{225R_>}5=ALX)U#E8`KS`f;+-A?TR2yi z&=(EcDU0zJr2n_D208$Znu_?0*JlE%w&k<#8w!Ob4p^{J34AQ(u+<>dpsEh4O>yRS zsS%@Y%E)7=rEnzyc)B-4p2QKNv`{$hl(byH<{2)Pj>&FmBfG{wR@_Yi@gM=6K)7|LeD{F(83gpv^oAUH>{Um zRKi~MY_#F5v8XRIcG`5lZEIK)sh&z55IS4-2B-`4sm ze!u5I7p!wJXOo_)86KHZqRaIPKT_OfkA|Pa8&X>{-BCQ*SQ;sbqwrapS7H%Ld`GHq4tzHTA{dvzRVEwqG9p|q z=|cc7Gy6U>`}&ZDp7sJ69+OKRCnX)%hyJ;rC^GO7S+QEkpJT*9BHKJ~(&v#p2 z*|ZiTs+6qkQ`+^C%@iBzrOQioIJYX&Pq-)CNsiW<%3doqc~%R^9Fqm~Xj&}mi}l`d zTIG;K6kBENn#nMN%gaa-^7~1TfA>QT|$tANejr{9v{VYy1mo%+{iW_VfXK^*yY$4>V;uZ5~T;lVxy z&pudB7s6yjPBbg*-mhB{JplCiT;Ovbg7h;6`3ePV#eK%#b7kvNBB64}pPPkSSny@z zd)J=bas7bceRxMjLD*?f$JnpM!}c=Te5%$0ZbBl`1S0YrpIfK<)xuCI+4m_lgsH$o z=kM9^v2!p4JeL0N-;vbR3xS~J!2!99;%2TF3%|f8|GoN`$dLi1oG+7;@{jd2`E?BE z2jy_(<$!E(V&d;%>6dxWk=|zyGQ}vCZl9L&k^|{MO)Fv1H2dnGK(VPj0e&4SPXM?M zFyEeqmUV?NmiSKLf-7+L=YhOiPV}SNoSvew&w;8M`6lt2rD2-Dy2#XP{2j@ z082s@V(o?Yyx~Z8t}HlHqAJ8=3zZ1z&?-Zb#t#$ZDsnn> z2~I33;|{7I@oRGsE2CKq5SOM9#wm_y8np-K*Am!88*7J{vf!s351PdDR1grki!*45 
zQGkRDZ(XEOc2X&rt~~hC=tj`@xAI`2Z8V)yTW_sPc}}FHvt>$mZSl#vr6Y`_9G2u0 z_-ay#(bWo7Ef?}zt2J&8Txv8p&^M7l&>q`BwK`+`=ibEfT)uTbk&4h4T5@@;r8y~< zAB}b3Zj|4NoRD?tkvyk!sur|7e-E6^=KT9W)hApEgkVXt(z$+PCdz}zqBTy9=SR8~ zLbvmrGLF{=A{lt8A4MZaxdH$U4NNc@-6DV3FK;iDV^Niql-i+R?DQfL7F%u;S>TKK>CyZ=R;5xq|MUTVpp}MO4c;$8tqR6`pSG?X~PU zQ~8u=T4PFKDCUfGd8I7pq7-vrb2&ZWG=T#vKGYmSM}{4?+kapEtTwuo&z_1Rr@o>A zx?eD7DuI=@O!&mO#eFUcKraCEh?~szPe5_)Lqp{>cEtehOG0hm)TJ?&;04ryv)2%? zT9-%>+4)vGeE?>E-G93C6#EQN2On}GCr5=_ z6i3wj>@<1hTn(7ZSeO|Y1%in(Q?RoWG5<1kAzi@(Q34+`s-0X2Kn`3%2YSrvjp4<- zX@E0sR#rgU?R^;lcb=j7>?yABMD-gpEC+4g;Z|a6sA|ole0|A~U|qXe=1q@N_$xC-L)+!5#L8m*zgBz;=fK>7 z>>}gusI-Xn^O4fr*1)L6(x>hcsmDi(Q@4~1m+PP1hJ5lEb8%f0;Iq%oYyr5bkxg%g zc?t42R;jn6?hIciSWhJf>w$M-=tsalV9WF;7tRMmsnw^F;yA z$FdCrDHshfCSR6A;fB8+`g1e!A3{eXi|W+r3tZ;_*YfMsVaP{v3^@V#FKir)H!4S* z11n0!h4L3CT$7l3KTL4NWeL%udjJ~xxTWYE@HEplSlYacm3J#i65i^{>63TpAQMn3 zRj63Jx?mweQvrH$8nL_xAQ%Kz#lfDSB9sNq#Bb#yo^CZ*m@+pl@bfWHo8LUOO~py*~xzQ z+y9qhAD;-4@7qp<<&&L}uW-^je|e<+6g3Skbo%3Hb%=i`kxwfs3Z}ZAqVBtW!XQF0 z#sP@oi&)f6Vu~g%q0#puh(R;)q}MT#qRx;@Pli75UsWIm227g2X4Bf+4=-GYw>ZWu zk>I@Yuq_0$wg^0V4m2t-QPgl3vhx`XxV{eH;FbF47~}W9sMXrwxELMsGgL~CE;2ok z5EGum7Kb1NV{78J7}E0ESa{R&z#yInRdXVw^cP$zOGDc(~}2 z6z$Ngdys&Uf4v(;b6vdGxkSpeeT5S@F%b<+){XJ@3MwvhI(x+>i!`5zSw#LGAen^o zRRtpZ%Xj^**NN9@xLdy4Avb1l!meaxEa5-DlGA|IfEdm39KrkDz@~kbO%VfCy_tnj z>2jw#@(fvIt7h90-69V}br=k}A{2bV3elh?7w&~Lt@c9ApEk=1z~wa!Gvs!ccV8sZ zs?Az2g}mG@ug3;sVm|f?ziQen)K26%9#OvlY5GN=#T)Qe*yP| zoLz|2nU05_6_@A?5AT$a{FpF?=nU{GfV@3_7s=5`Y7=4^1xvzgF?+4zAsy|SoNf*L z>_pu*uS0ntI0c#phaBqny37~BkDi{X%ZagWlpD9i2d zQdSIs;zBfp7-LcrsMpvKT??UR^hmwrkm?i{O1#R1Uv5kq2WqkcOpK4yn6HP21FA&i z!O$;JDwY~QwfCe5ddosf`};KCCo>07!mxc2i|@147=*G`9D-kw2MpS5!eojyZ_I;m zVDKWH0Jh$@M~+kettm;9c%5PW^Sq#a(NXbz7`)pIBgZ?7P64yb%!b53-2)qP@XvT2 zvn|~nT?)&V_Ka_5Ad)_*vlQAI_P1|QdiDbAb?ab9=Yv#1+c-&^HlnBa zJHvt|xWH2R?fF;!QX|B$_N71r#ZbX@Z zg^CI+In`qpfs#)j2ZQpPeh86VnLp_hdRNfSg41gV2g6H95rtwqRCi&`NPq=_@w0?Y 
z=&GX5)*RLhdTH2HXqL=cE7>2L!-S4jT=OeZhXY+cr>3HTAfE4@0#e{4N+ROJF4374U))nm571Ng7IS<$33JzLO@8>{uXNI5ddmKV= zBxAla{>w1A03GtS9!Bi?NtrO){Q41tWS6q>On>(~H6u<){bVyOd}DL+k29iAWe=Kv z;5c|Y8JG(h-nUl4ZoT)h==%WB9(3C|+hGsBc;{$o2+s0Eo$POOj#q#r>c z$Zy-+9Y-aLH|1?#KMKwNeuoftGAcQk3L~qKw}L z%PvKS7e_yw{4#TPBt$cau2ktq)9p46{GONKn!rU#D2g%Out=B}H12=frWGd1k^_PZ zXg+R*7xr@2vFi;DuvleTkY<64dsNWwf+0xUc`?Jim}MVA<18Vd=(gq2Ke>jg6k|{coqK z$Uy;mab;S>Vlwd_2lJhu9Ngx8?@b+L>_pYP&9y&FZZy558d=hh+%1#TO45f~=E}Ew z^SmKKt8BRu(rR3Qhm#)qQ>lH^5H)44-*Ia;q>0=g#04h+9R-jVt*_6_{D+{KQSOMC zo47a>MfqU=sN?9S0P1kh2mekkujTS{>N*tI6$BdED5o_s9v=)hL+W|s9h@nZ&NRM= zgUTXhYKi3snJPK{w5GB6cvs7=4Zw#?b7V4I>QYpk_uVtm= z*#~Q*O%dfCu}?s4hFQW&rD{X;!8AQ&Th}@>m`5D8-CUNMx8!*GyyOGrTG24fzSm3VW0?lG4U4|Hg%DqDE?$G`wiR1r8(UK8a7uFDzNT~d;}!WMn0!ems+;kXKOPILuQbAK z)j;f%g^6!&M#3N>KnpXR{*YHNL9doxcHn3Dt;2_>5`B$|J8$-+iH}pq0{we z25KGv#d6g#;&DCu+;~voBi`%k^-f30Fr=+Ne4(c zNL?G`hCXRNj`2bQ!3*8TB4IiVX3VTI&+tW6xX4E5}a0`L-*WXf?Q8I#VG_+KQ z3JSFQAHt=s558Y4E`E-C!&d?ZlTxx_U(3B#Lj{F6>6;c+wk4>zDr+m95WLy z7890*t}DE|COh)CFYR!~n0W{g%?a_&?-AiML=q@S7u3`rZHk1rvq9<_Ohije z4>Q5W8c_=n%+)wVgPi@jmdT=kC|{S5-uPN z(+7Cqs~Mc(;i+le_r!n1NxP*s*6imo>8)}Gzm?))9W%0YAkyKIfH2>{$e?}HRfHJR zaXXcp+qHY)f8u9iizaN;!>5bF4L75;?3S!dO~+7Mh|I(BZtnX z;sA%mCt)D#wFiB9%ZY5}J@#U?+rIM5g`@L5ehX>AL+Bl0q0hpO~|w4!DrvN z)!XH0Yp}DW{p6^B%2jij&7iW(y8QU){%m9f>^O}z-UfIxaR()D#)QC0E)0|~FN#1K zAXX7=NXbZe>5xG+6G?89@AJdW_N(8okvFxkFPD6;e}89Uy)mq+eUED^`DfV%5``Af z1aRNb$zg=-2!0>Bzu|k)keR(E~* zlkzxMa=J&y;HOE5?!hbL1&3HD_x=c<839Nn&ifMxm|h^I4OD9k=sSKdsE$DpweNf=1N8@%t#;jnwgIeWectg10vpX=B?>zi-1!z^Cxsbm)d~Jfp2XT#HB@d z8YmlP)d=)#u#XM*l_ zP^4b*XaxO;S~+XfRCK1f*e$LZ3=zPj8w(K<>&m#VkvL6PX-dFT=<6q1uY}aoG@drt zZeUvF4@tC?+JRO8IQiWZ2Q4XM`6BRbO$zZrKgl(NHX4uJ%=)IN5yxWu#80{a+RlK4 z`H1hLWGG*sV2d#ZJ1sdEPajI;+JT)+y+w3@f)E*1T4oeip{? 
z8FgkVzf)e|+i))LJFKHYVQNj{wMLgv999{Qt|qMPFCideoPIeM-Td9I?oiB{z1CE6 zmafsgTo}14gA-9bVS6Uwc7F)Sn#$f4dqsVqkc^$)#g14le9aGkp>)Jz!X!c~LwV;; z1tjm^sR>m0I3H?%P6y(?g$)hVjSzrNi#Le(M=ZACQWsYqLZo=#_C6BA`!7L59{pW> z3W)RJYsq7!eKp^M>7R#*ftOqgp$`AE-IqUy<$9i$xgzbYE%(M2hkqMCz8CiQyF45> zGV1nzxMy|I*_3b9z+|4@i~m&ettQehz3CH?ecEFGX-~IMb9?*EQ+P9=u67P0{*UH5 z1qaVgySMcl(a{B63$E!1w4ZqF?<}KE`(OTqK6!q;+`wg!!eC9#mf+)u{JhP2I>Cc? zjg9_X!0=w=hnJV7oQjLSsCdzK0`hG?SNsR^&rBufYjNs$U}F~Fm+O(U&fj3f_Lq;$ zm7qpqsXstSjQ-0FyM8D4z{y~0N?1DMubZzw{xs^b8$I7XF0%Ljby4qoBQq_>z@QN7 z#4;+QdsvxQTyIs^*l+W7VtV!GtI+R<>+H7BA=s8oo@JPU?`x>P__(o<5w zC9`!}?QlqlWyHa4DDt(Eh_G!)blGIV8ZmH9Kq^~l!u3^=WW5f3Fl+0%W${RUE=B94 zV#gJW1J^g(Sq|dNgtezWW+2d=0dLf8K|NDrQ;NY+dH3^A&}bz4eGCV8rWWo5)Y%>RT43F&{%4dfJrxAyFhwF_wvA3Pfb~SKRgthAQ;##adJAV>}fp=BtiqJ`o{9SpMY-PhK8HR-cZNQeLg8wjATKwqJkjBd;hEZ zi(me)pI`Pzv@f@7yZv6)0k{fqF^=0JpP%ke)?1agdyQTmp4M{xd;Fg6|BQ{jY*TFM z8u(t_91haQU3L)u(8MfNWA|)VRLrolNG&K+UN5p+=J^xdQZaC5;E}JdDF$R5x8%(+uzZUVgJBrR466g3&=Ww4%g2l zjs$;=3n?NO_7c37@ldm~9Eh_(0C(G&?VkXv*z>RHWw(i*&#e&QeY>9(y-=-ZvrpEt zf3Q-|n55G-=kZcT;(q>L05?I%zO1g^TU)!gx_a;Bi~Fx$K3ISK;Q6y9t%hG!aXvja z0U8is-+>zqSDR}uHB=Wj)?K(>Uotw-VAS&O-f`W(H~#3+64v%VdFG~?hI*i(Ao9!^ zXg2_~f>E$5A{YVI|GTdPc77kU3#9C~A%5S5`XebDdnowW;RDBG_Z|t~b-?E+@&~kXo=(Zv$a&C% zppuNpMZ;17S_;vqO3H%#fJ(~35wB5&n28^!as@gSA651>VzYO2fDLb<=rC(7I+SQN zP>hvX^)j;#t#w6U#a3#k`+tYt&xV# zMalSQgAKhkENW+Mnw9$Z>t9H+LNy<&Vkqlx%^#p@Rn$(kFk1kA>P6^Q*^3?+_+(+g zq5&yt543!yU9QNisD(`GqHlBR=@B-HQ`jfS_Uthm| z^#1+Y=H}`jf4qM8?)kfS&)>gaU0c0BHS6LCN9(UwFq+H6;?c#MV~-v!J$-uj*|VQs zym+v(a_`BL+m9dLdHwp)=H_$cFs$60^q7X&eMRL3hjUKEX}HOkqZP ze#+S&&$f5hkGZs~t3SVb_3-s;^lpFueGNXspMS2udiBsfW|1iPa*fDnSGvXwzuce4 zBK+ygm%qMPySKV>`^DPbmoI*P`QpyTq|{a8L7KS3~cO>h2as zXUo-*o@S>_aqo`x$)oAt9!@`ddh6My5p;>UJreF9whNdl>4;o3Di;ldJu4Lqqg@iP%7t8kl#d?ak9PBM zBo!ryy}vkfy{ znz2T)9T*Gg{TjBWOYK4JtESl+wx+1HA52t|s07p1KOq3BUBo4T3DRiisiRwVLGX#g zq3Q95b57s_`ssri$zd4?tzqf$=z{vpk*JKgl>Fq>!qk)Jj-SXrmiI$iO>>zbAz}w#KTKnm$(d 
z)VT52Ulvza?>&8b_vzESYisw`*B^fTxCWZ`&p%(UuRmT}`*maE$>!z?`hf4z`+vLf z+shX}y;!@u^8D`V%AMD*e_dO6eCP7M!^MuTX+3)%w?JoUwjCntR6_wCm{|L{Mw3m8k^1Oc|2L9y8Oy5XL> z(cbH%nM=&=L33hR?YxmrxJ$-u!L5+hC*===GawZVlig<&V$4`fU9`jLNSBm1M2+cI zi}2_ewCk%0?eKzUZDAYMgy(3vZ2ECG3cF6YO=v?yD>UOP2^FdFeP$)rv1+GPgQve5 zoK}s)qDC_coCddD??88Tc8l6(P{N!6Dv9bX#FIi2Mi5maK8$4*a}QBZcmIydcI7{5>v^R|W|($3FXXQ!;VYG$Tw&Siq!7oj(x zn{4$(B(&3}GU>(8r3D|&pc6uPz=RWD~TM8YOn z0-ZZ>n*gAH>kSnsu%Q9up;sxUZ(AL|?hNUk5*>u=q zS^FP!EFH7MRul|qQ#e70k}ajciG^qZu4;Lj#Yn`$m_8Wf0@JkmjWdJRg%+tDZ?TH@ zr%ov@1s~7vNW{LxaQ~xW0g3)(1C)JQircPKOAKw1Lod&*8Y zoO2?rFe9z-bYW@M)n-N?r*B}Sqq)0bgwty<$?x8oc=CAp*|R&ZUZDnojg4pT-aX&k zd=98YGWFe4N~6AC`ThMfL|3>)jD7k3{p!ZXvo~*^Zf-vR@ z+3WfG`r}ux9=>?-%j)Wb=g;r1u08;UZ)`ky_wM=IjYk`A9==)sW#i2+?>C>J7sW5M zzW#V|!E?R6=0e4V=I(}J8CPkyn9Lfp%VKhx=cXM`p4Qe1(;e5-CpVuzonL*v^kQ}S<%>J(ukO8ic@KZO-+sC?XL8yqx;nSD zmiR;;iaeQ6P<*Dm{&IC&V_kE3{*R~kNAHKBwb0Uwn|y=53)=IQ-?#tV{SUuGyNQk^ zrLL~)RlwN6&RVE*LAwEAEPu3{8nngkguYr>AW+fZ0`Vwh31mXFO)ZRw(J2IK8Al;z zig|<3xq!?-6&J>&7&Ri;eFj;tT8T*`MG-F*->8w;O=^$B0OPRS4lUVKg)q=&!YL+; z+F?;4ufPUvO?q5G!=9>5j~T1POvxG^oG(;V+9X zUp`z}x&P$x(vv5*U%Ys*zW#W9{V_oDXCFQw?YH^% zS8Ba`{NeoyB(mPVeGU@$^Lo=Z(tiB-a%1D!t5=9)Z{9q?(syl}efsC0D1q={^9d{f z{^sWM)zt@b8K<+S(cv^ceR3De(N+A$Ucgtw#s>N>0HXj52hhf^UqAZrVRf5B3ygj9 z=Fy86KfhSJ_hRkd#+!%lHXd%i{SBE6AJC_M`}Qf+_1?Te{_~qRk2W?QzghofZSDT* z%I(*$?!8(6<;B|F^;h@aZ9IIp@!-w+FYh)UV!^+%xw-o0&C|tOGcBV73Fiy;C&l{4 zL`0+}RyCH(wUXOQlMn9CudLjE`t-KRtSGsfADnQ|HztGvES_@C1iJkWJP)vqgB9a2iR`SkLC5^{k;0+qEQIMqMDb4g-R+ap-GGOltCLD zsQBo=GdQt$ydf&P$hoL(0hXMnR|=4gAVb{>X1ui`nGHyKpkVJsJX{4`?34L{P{V?| z7K0QdE2T)0XRB~ns5U6?mV%Z^k9FK-9(S82J*Jrnv}^6`gcH`>q-$={HI0qj3AYXJ zYlC(cLwh)Gm8(qj{1q9pJvDB`iquT`v53s3>2b^4lw)?%F*oJF)U~11!I%{fJwR{3 z+ZtwptQYtRv|9?l09cT+Q{zUUG)gbHjX-`kFJpubg`MRw)0n7yCwHjvX`KHQYDGalM-C>}!wVsc# z3H#;2SQSX8!?05ZQvuNeB6r(lHU?9HXa45~9Zw)K6&MTMqeRg9#RWqrX>@8-!Nt68 zZokf|?;N^u_I%n87fxSmywciP*Ee{>Z8zTkd2Vg(!N$gu_4UVaLA|<)d{+W7bnX26 z+Q*NukjDG)eg!VoaR_Q<^WD?+^@khlKdrw)m*J2o0GAc3%{8zW{=@qH!x}IaOo`3S 
z)%WjLfB&#XuIRmM{rC6JK5RaD|Lzff6-xIm&bwzO?d$80uq1v>ykGn#pTj%xisT2Y ztM?#<_2$i^_wQH8^TU_O?;qCQzC|DB#fzU`zqmPf*+Tr_;pu@hz zE1)L|AhI>u+ejCvWYaFZ(>FD4i#O7Rcm3{#AQrVLtpo)IP@oGE{z_`ML|6t5+Hx8o zOj^C5+Y2dPsdwO^j7!ax)l$C&PlBdnVJT}t?g}3>oQH-J;<>D-cBA0~SHVpMyOpX6 zS=0`z*5lOC0E@N=hh4(QoR$fm3?^cW`hT#$DzKH+oShz~N;kxmo-x;4N4Ix!yE_af{N(cCf2o4mO+sTmPY+qy{YkM@H`CD&fNH=|q;>*g_oM0k2?v4_LxPTkM4kvQD$QjJ zho|S<3pd@P+`fkE6(Tvy?a7io8}3lc423ykDUl1AL8-ar6@|#NP)23=$W93!nLyQufb3Uw?SNLZl`byjUbZ z{;>Jvw{IS7ym<(KMeE)3&39DKFW?&9FdX3T1^j)4 zHTLiCpKZSV6^mFzS0L@Hmp`w+{`Gg9qd*qN>)&SQJ~RAF!~YfV{nvh9qncOrZpalhR2QRogSX}dku8W5fLJ9v9e@W* zc*`5<;*X*P7gV)iM_LJgh?Kh2at`WF#qyHs<-|E6imSLZ24qHih2yv^kXbF~Xcbgb z7;3-85HRU^WG8NmUSd!SDZwmeD#YluAnggS2%#q9r(;?ng@vhT7<1{zoF)*ZP#U9> zUC8^j!J6{er#yBjpTUF#Xye606xAdgiwbR+vGm?6n8Y>VHhP>ohgIvaXzXT<6X$!$ z&bebwlgDA4pxVbknp&sE&E8GK^zgJEr*6V+oWM@SxC?koZSai~WnlbT$6W@u-QaQP z#+(MSqm9Lgj2Dw$LU9%CwEO=t_TEpDl~=atU+`YscsFj0W*DI!ArMXo;gK{T7~!3; z0)!E?uo6NTAta0tG;}vjdsEe2-n;bPXQj_d?|q*1KFf47Ge5uBd!O@FcA=TM5nohf zWo2bm7r{FFti23m6zDZ#t#Vl@gZx;+RkdOPKFXIBGHCKrhyi1v)na*yFqS;Ch^u_6 z;kFZDFuohlV5|Wj(q%o5m}UL~xcw)J6}i^Qfitnq%A!7&cvrzW%aRAaImhl0YyCF^ zEt_UfY5fN!b_xohk(K(Xi9q<>{u2}=_2}>4rX1!+fB!a#5Qv*4Kqh2ntSoJ~ZvMrs zw?2K>9nakLlScqoZ@&MhKY#RR_dfA&w?Fao2cEj~h1Z`R9XpbYJ3F0XI_Wj*rDlUT z5qB2yzI+Z2$3-|Uf}5Z;qbmu2x?OH_qfF4jkc#L6rXN7+lE>IiE8c3xnvEo34oU@T zbjwfYzvc>B8yi)EEV>8pb^+OX-E6xBOlGf}-soolzP8FR7Ivc5vcJsvn)N8)Ff7-A zwV(->hypsq+5Y?2O{6b4+FQ|egcyTv2bBi>0^Ra=+Syh!)ovv_?QFM`A&P(|XS&v$ z+p5sdVnhlFCx|p)boANH5+hIgy=8 zWOK6?^snyy>aV|i@!^}W@8*U(Z=x`0)&uUD^@_n3 zMMOUp13=au&bk-Ri}?cC!**PZ*&s!o!-@`c4vS`k1UXXms!0vxuwKIa5hk6CGR7Hb z6NI{KI+vY4&Q*=gq9pqRVXWOkt+i^q(1U|ht1!nxl%m0IQCW-%QdZb4DqeGI)$q1l zpr+yQHv*~^6;>kBA5DRgwkr9eQU*3#iF`pKnT3Vd-01QO?!1JB7vp0INLge7drYz} znr7i!Y!LxwL)$*tZI*!Iufh9|eJeqVW+kp*f+t|ZRKPu`)cPMV)_fj^FqjWBIM5D+ z4?sQE36*=#jD~fcJoBfIsSfYe&kw)8_b4&3cfYusrG~OvJ=PFS1MqM69iwK_A3t(8 zjjz9Y`7x00@4oM@C+~RhnL8hU{(;AS{m?_tKk%D3p84wNXC9Zn)q;vGckXP1pA~U_BN!ph;oMmsf&2XmO|XcuS-k0n^;WkXYc&(ycC6n`Y_7vhxZaKQ 
z+QC}ISFZ*-tti}vQ}NJIhsd+FcKaj`}tg|upsZuUE!T&tM^ zs$Rm z4cqNPqmkL#Y7Ab6Xt0k(oTF2x{`$qAo_p!Bo9_Q93vB$+&#t@t#>+3i@$x}Fmf309 z96NHH@9LtcScbGT$D%Il1o8$)`L@J~Q}mI>UBcL<*)viR#uUhA>3?V3g^jTJlg3vz z!NTk+nS%uHl_}{mY+)1Le(@KGz{ zo5hTLuz0VV)HZO%)G`B-1``kkMT|blD%r0W07ajc%6oT*XH+_oU2+J{N^eejEQr=S_jzT;Y2Nm>D{8R z7{L6cl!D{{ba%@Wa0!!Pl!SbN%(G%t6o?nb7bWv63!_353_&Hxw+KkVh5!s|fIVQJ z&R7CO9nh0((4DqMkO7Dw13*sL;HWV;f}yN5&`y|xNFey(fk4=zzaR|t1g0<#u!AG3 z@C?rwuonoCFFVk6kd>teC)J+M=JX$&UijVq6O?U2fhWIu?*k}^!3_NC?)~{yx7|$c zo@?*E{bx@-_1K&5y!6R_swyBx_mxk-diswa-1Xu!KYje3pFI4tM}B?(-b0_rq%-3a z$7kkFYxE+mK{PsfeCW(UvsqEd2T;FCYnp7mo~&0RwQ8td4T5H%94VKhv}f;ji~T-K ztsCn|rLJ@R9@py?dfj}xou+vbM#@g2*Gcv|Nd%DWMnBW|kJal5yeZCi zIt4ntUa8Y5&`)nR)7?(K+b#6q%}bjbKmu&xjtoz!+bz%?`g$+Z>m^3wFAAj zZ@m-fx7S*AXRYjLRNbw*zgY`(TJf!o0x5FYC`iUJT2A$Pg>E<3X=mE)Y^#-NHWRg4 zyjqP_D$zwFiqy?&H@j! zv0nN5>p{x8`=9^W-P)|S+qrr@>2MkM9scZJAOE)-eh$I5m*0H#W!GGE)%D-~+5I=X z_S>gf;?~~3y>sx3KOfl#$y!5)J{vu;cl^{><0lV{pZbag#uChueqeFt^xXJK)=N7- z2^hODeQIH9cwy?q{3PtZV3b`PBX|U(fgEy3aDoMvV1DuxzTuI@X^cY@O;IqH0#)&J zl#*F`y`WV<%6EQLA(}DiB`%xJZdI8LQiGN@=1e6E350}BFl6eb5CE$YJOvs$D|(fP zwb`=SlC|Gb0j^+(pb?aB8(%f4aNyZQk7&uE}Bt@ z!DEHAgUo{SFg>;!<*R1ps#(Dz2m}|c!e&uAR(U%ErPBo0R{dVH+o5-?0_lPKJH5-M zcUZOf(^ZaDE&e7%?ea@H$as`Yi#0g5k!xY8 zT<@hi?RcvZY1YGV8gbgilcKfR%&={Ky`G|h8F0SNuk~pg#TH}&FqUA6-%G&;tsU#N z!|ldetL|;p+|8=9T(lH&)>_%ts(a!2Mx@_OZ1gibxCrEp8+gg!Hu`|uw2*>7MJtJa z2Q+ZCGQ_yj$Mt@JG+MwO^otwo9Kj&bBgB&s#%^s^Hu|{XS}$y~zWhF}Kx8cc_i2S{ z6<@95tCrkc!CuH33mJW>=&V-*8~qHxHEz?kHcNk3;c)J3qt`2GExIROdF+~>-+1*c zS6_PL4=%grq95J(-8&w<<+p!$mIZ%NAPkvjM~>|qJ-(kxyCBFKy|nC1j~pTGE71jn zvCMHmlq}nVu`!AITolq7nRtriva@3+2)AL}g?PhhQV>zpp+YoEgWGO!+B7y4!Gb1MFUM_^(rQ$g^)gb)f>l8QQYD>g#i*6&R4W?gG98LR z&{Lr;2Foz^fc1j|x(b!JtbE*Tkm2Yn8%$^+Hx`S@65?5aL?z1CYE;`TdXy+=4QeTM zG?4yQNY;{QwWKbSpcqVUhU*1+yI`z^@z_K#OU7HBn`E~xqjObHU~adl@n}~~kbwi} zZIm%yS7@JsAzw%TMNn5j{j*}(+_HkSTeGCyT3VW1nmt3SD8>WO8AMuEMk6o=0s6>8 z?qKC2kFjCKwT|%$ z#l_J~%H3|~K{$li)oSHhtt`=u2pu3RZ8nlXLRLY6-)g4l#l*X^TitVK>kLM=HsNNr 
z-^-)FDM2zMgpLf&X01k~UWI%1M%7!d2I|#dy%q*l0HPV8daISC2gLN!?KzqVl8r`+ z5O{sP%m9|T7(kkZ2-fc=dhKwh73wtot-7~ba@WeXM%B@%xCwY`6<@RN?{^bho5dY4 z%NruL`0E8hC~~}+Rx<@Nd?{M1#X9XQ?4&lh^A1e5W%^A9A#`&+M=bG1Kacns-CWO+ z#!QeU@UPt3DE7OFPAl4|dbxt5khA8~`b<)njHxn7b-7@umfW3Ih&E!3WT5vMyi#m! z)sqR&@bu6Ko}`NJQc=W`MeV_DO|=*iC+z|x^W7(&#^;e)gV zW6uyJi@^l|hJ>+{XDCGO&#Gac0)isR(gcwMgvJQ4Q*yB4PElSrfivaG@&m0DatTOd zVAP5bH$ccrt9C#VHsEUn zm##@K;iX)vCAEBB1+74H#K+2{^J2-|l5|nFBIsjx9G1~NaAqw2!OhopXbgR77DNdA zS8=fgA=m-7^Wp&(ld&i18DWm4@s)O7-&}9)7+(j6*d2(vLnzDU*i*W|0g?6NF%hl3 zSXByzHj{zt(i?wp$@Q1-x?7PGg+Z&;OcV?LWWtrr_>p(bu?bTU`BzZL3Sv=G z4zN)TcR;`^5|IcRq&Cn3G@I4R-dfpHD|@SzV7(S4xF$&z?fOx-pJ1$Sxg4)nlX!=o zW6o9vu%y%iq~iDT8JuQgbl^t2zE&-{>lIJ8>1kBG&6@+&eyb;`i^pbkjXIU-^@(FTe5f?_YEA#aCbSgPXp8$Ah=L^UiK66#H_|dq?+w zG<J?fFm~e0kz@O4tmPYNiJ=_^>VUy4PzTe)yxjm*SOlIl!Gb~nagvM% zR4&7ZbS(=R-78Q6j$A8XEFa0WFe(#IQy(H$%&br-0nBwq6#|+T=u;!!;{zPYEJi>f z|N_P(=f+fU*2ci@66H)W>R9O=_b~Y}CM< ziB^Se$14IX&!$opg3G4&I*f#_Zkv%mcB8{;z#qZVVu|l1wt-x|!Zr=*W#*;D7xSYs zFbN)jF&?c4^o?CZ!oKKsK|=ZpE1&-vR$xx}O7hojF%@oaj(?j?DjJ72FwGHHJ@?kfVBkR)i`*r26Vg;Yr!8&x)s5!9UBEWt?Jgd5(i&En=dykWna z;3wEJKUG#ZTdC+Q6}^>msM*XAvr4$#?G|Z|U$3Vcjm#i{KoczO;%RGt?(DWa47N*~ zCA?uT2%}@B)l9US(M~JWs0Cqct_EoHRV#a&bw9#g{+pw$u-^v~pxa4zTG3iHS}BJL zc^_bG(a#nAomOINqd3S05ZcpY_ys!YMm=0D`)ic|`_Z+Euhj?%v?{R#IoxNJ5tEax zy1!ELl#5p22eQU&N|#BhQgLY_s>r5v<)WotC8up-y`QJ+*V!$=^G+*SDu#o8lXPif z-@ZRT{_K6X+;=mziT&`#i)oI%{?^MMd<-y_wagsd{}F5SrajmAsRM+rXO4bB7|RzX z4)b73a@pCj6QtCldwE!Fp_q#4T|rf~JSm6O6--3Du7F;Q(gN6aiJ^l=CYpvV%=|bR z!BF@L&RO1{1%xWBhfvFEuv>Iy1Vc)WB~FzHRiP*xA`NLqrA`EcMnZWA_@qRymJmyT z8M6{IPs#GcECVY+XQd{p6L0o$kz~=3l1f;v$(TNtDH%`$pa->p5K4pkSRFiArBDSm z3vw9M<$*;Q?~^5HC0c-J z*a;J)2aFm0+gt(vaa+MPZsJQ`MlqE8|B@?UYFIY4ve|WTpdB%Whq3$Mn8HDJE!G40 z`va$*egETIo_PGK+iw2uRTq8dvJ2^a_o|D4G`I$kmcs5R806mHyztJ)?}|jzg?x~d z|8SEI_ya^M_>5X{#MmseL?ORgB2g=0AXv8sR8<5f|NeE89`lpiI%t{bzL>^W+CU-u z>aSIN<)RIbg)6Lb1y6|!k~^&5uh1KIIt2_F$Pgnz*v=+rVqZZXh6*v_Jh$i1t(vg3 zLRA$iyLw$uY6arxk(=RxR9YBuL^#{6jY54hQvW`TT*; 
z-+%Tu58v_NE&ukwk8i&7`s;4{(bc!`qwEd0U9|&akAL|wVJTnmjYvCw>Hwn)*v^Z2 zyGTVaKXHnAV;MO>q$}ma@GvWyWFs!58lp67d2R$I-uY3vc!o?{P+}{dm5cZiTccLO zbE;YqFG=NZ+W|?KjEZkSz&m}uVg`#6Fk41KeKc541E9+IRkrJ=uL^@7+*u>_su~0< z_xt}N#!d+Qz?6A#lm%QJGX}r~Ttvgd=vXe=xIC<%xPqN4xVXYvsT3yX0%9!W4-i+tK$X5U7=1u`FtFclmIf?c zUAl3m-q3+PyWIw<0O+#dDnrsN%y@~`Dln9m zxFA21D`46Y=x}3kkMz7noLriO{t4nJrK*aApJ=@tthMxo4|*( z8j(tAEuFGE?efXfhd%$?JMVq)yFdQ%rB_~i>fsk2f}oh6UU9{bc3u0^OCNmfmOs4p z!e<}9$!fgL9Q|VW=-(KkvT1hw)Bze|NvuVUv~2CQFf~NlR2pJQ)T)rqkPLu00l;1f zWOpM~dy>k&Me}21SP(6Yp-z5*#HCbHfR(JkzVh+iS_vtvh#jRndcCmmNAMJ*LV)UK z0BrSYDUtvpgGNHwOT9BlfhsIUIkKJb&PKF=A$qHjdDz}?5m?y(opk6~&=Nx8EUH7) zRa=Y-+JE6rNkl(ez3gHlWH zaTtAWv(IBj=P#`o1yDzeULsS1HvgS^vKXTBy;VZ z*OAGKjIlRAbpNF{{@}Y;UG(isF1X;LfBg0(7yRR{e;}A8q}_GJt}AZ7>ZkYq^p)41 z5{YMmK3g{9PA8ou4mM5YQlL@}3DYb{F+1&eubbZ5C<50<^3E*?cSZdGFXLopC^pTq zl?|z0*}4aBW3x2G0ux*ImWu9D(OD^Z8ns}n5v5fckpKjty|LE=a2CbRcZD zPcp46Zs&7!MPL~PVAtB>VHWoOeU4s6H~vH*pt&qY(=44zInrn(L36bQ3f^9?*zM*C z%-8z`6cuDpQ4p_JgJ@*%S1UmvldHi-BYk$OHYidB$8IlGuK~tZE5QoqA&i9!N)i4D z<)W>SvsOx=bD&c$S}UBb!nx|zV5b!&8X5m$&O*+bOPjJOT_L0A@}_FZTrIhgWcCBJ z*8RYE)L=HQReW5*kx80jVO2D&k4LT1u+i;QDy1{~4uA5*Gk4!~>!rjMyzz(UnU|LO z4gh9x^EH0*z@TJ{M6i@?I5~WfB(fymA~IH}M;s;6fQ_uU2;;M$#S7ykKcKmn=Vq57 zb5EyQ91JbNNT>yc6zXY&HIRhY3FzfQ)hvW{3)PDJ{XVOn*^14+SgJ^qGl{Ht9T)n)P!?q&N|v4h$hDcZH2qplH~=HYT9GBj*QkY*MIDWr zZaZ+ZE}MZIwJw_x5t2W+R#TI()@+bljR4dxn-QXA-BzF5;#`Fvhs?8@@vihc@XQH% zE9gnVu4@=%X!#U$piRVjwMYr|5E67oz~x3QU#41cwG1T(%TtiP&hxPhW`lfg;b7Ai z|KPlsED9v#As28p$OEQu2wK@khO~oU8enA!R|#Xo6m|GbT3|QEJcnei!wTwML)Q=gv6=_Y3V6d zU~{wD>w(aVaH-eL0u75Ojf7;PS&xzc3DA`zbd7X6Ih2Q$*}yqy?8k&uGwGyn=AjM^KqfGxO6iC#` znTtCwaB`u*3_=TQ_;;&SqxD*})l8h-EU{7c+}SEdyCmziaJ2$|whNLvoUDFMua zU?6Mc@|J4JTq|2DC2P50C=~!a>(v08{DD{DNL{d+&zONa$XUxp3#`2=?poPZ;asJn zlgnGmMJt!L)yht;u$s>pvngXCYp#@B70y%0dtwp$s#&)G%MTxT{C46B-auW!-Y@=i z;;T;xS_x)x&$XA@y9T|qB#31gyEMlvU4XPpbElT(hN<0P+p8r|vXUiWS^3@sRC7@p ziBdL8dDkpZi_%^A?6b z>alTKDgqKiC5O!#??f;FLK3tx5f@Fc1D0Ej@PJle8lGX`hgc=PgJO_pKn_{wLd0|1 
z0a`(3-~gooLqg;efT6|r!QfmY8feoM(RVx)^;|tmUs#0 zs(#g^WmgJbc&1mXwNj%_s?kV{8mR_c7;y12XrbRnC4=ZKrE*0jTL3}bGSKEknT``S z@}Abv!21bqB^ep^1qko*AVELbj z`dEV5Z70HxM1mczt{pnq)A-=DA#_sfKPYp4KCAg;LiX9D^s{N@7jwE#rxbgp6(5g@ z|2(|#`rf0@zW?!^zkcDmdw+iAE!V@U>+DebEoU zcg?NWzV*TH%w|O{6U0`RWV4aR-HpG@tyRmudM(mwMzIVfPLye*9&Xe^C|eD|aNA6v zl8R3zSzqTR`rU4!)5*8n*=8ewUZg~`5vi0zct`B6lpOVnr&0CQD{BY>)@<6C&sr-b zSFe-USkIq3TSf7FSJ*MNzy2D|I>_?tG?XG)(f*JB=(1SsufJ|%7FMCzh;s#RDq-gG ztM#h8+w!h=`7IKu{Yj~dgKKuZ2V^X&PqY1Qy46fJ>akiSQ0CSuC10~1>UEN+ydp_J z73X0P^Z)06bkCiwufrtF5n}-VUSzPFjd+7J!?kFm9;4k8N&?dD7D#{VwFsSRC0wgU zTg`Z=n*YE!k^jXN7ZCI4hSkaRo;)Z$r3Vtyk>zinEjlC2b+2uW+_{#Z#{as+Djm z;hi2m^7u1%-uQ1ndg!s+-g#&DzE9shxc|fBhdw=V__N`|pPxDU#pv<@Y}$x}ejPM!ii!2Bp~yWoiCN0-QBJ2gx*Eu^@JAe39H z04hMEgr-!A(qbjot7bV+g$gL9QOV~Fn9pz4%gqM4QN#CknRGChqWEe_Er*H(>;V@R zSbV(~8DPbvfTadTrDUEe0AQ|O0`6jAYFH_q)vHBjgB;dFW;NWZ^V6xC*=%hVt<9ok z&(M>2Gp=V-A`3kr5GHdjgV|M+8U+A4ggyqdW}0TX@W)G*gz{V8tG|QNHELX7UVe7!##)i)~y>4@->?2HVuOb$~h_cv9qtny&+ zBdbdMd(qrLx)MupLgU-FXuy>gBodGl_MM~SEW?nZ2`F&;vYW1A+pce4asf@VG|+zM zvJ0>L$<b&mMey7aZgHsUN?h7pfJVN`9qXxwdAHlp3`nSi8Cq;oRnBa;x1g1 zh3hr&d^PGc5(i5~cQ$QJ#kHBFE}t=$i?&k1R>&GNNgX{|Dmq(@$l1-(H``lwto4P{ z*^wW6?i>cjwiBhoYCIy%res`BTPquTEmyDYBNr--wWOOOp=_^{AZ-Ka$1vQrS_zgp zn67J;V7HyxVffk+JF~e#oI{>OT-_@xh&yeVZ*lo$2d*`1EEf!gjJ{k{aydhz!fRjGdnw+J(1YZ|S~biS)=-b(ZB*Qi zid)!=Im$T7mWwu8rM2onFJx2j2QTKW@ZS*!^>DzlZ_j(T-**GevG4uyr7!oq_tpLn z4;}dU=pm>_Jc&xJ^CwvH#vaG$LOzb0$JiN^#m>P4qGw6L1rY@E!xYm!b>=W>Yggt+ zss5YN2%)Y8f?^ghv>yUVsGpi(7WUoB1)>Y6s~VJ7Y7!VonH`k`uB34V@c_~SAQ(`I zsZ0TOUV?G}M_I8-wg4GZi#!`VKLOVT!D*#{l@o}mcpCyili>VPGjt**!MSSixh!6% znXC!~i4fk!m)hu1>$N@f4(!e}3XN>sanX%5(0s8mzQ7AU%h#!KN4 zYzCRIihVq9$%Xg(^hz8f1xdgF%%0Z68=f(Q=p#KC179uRD#7ny&n$b)l(By|gKr%!#?YxLQ&*Yo<8WK0}#TkB)b}R zntn2I(50|a0;acAg!Tm5d^Ku8E^o`G429fkg>yCQ;c6wg=Fxrp;qM1A_VZ8P`fC4& zU+w>pDA{2!&mLeB0zRE!`UFcP0I;1rL6WWc$)iLR3>v#|G8~#6J3e*#$mH;$kz;#@ z5AQiW^wsh#*b0==S*>zWNQz#jwiogyD5Y~$cLK{V^|A()$(PSU;$bsI;i;;YX#LS3bX`FMsb%$z9f@OElv&3O`O2-`}o52Figv6 
zw!&?fg7{cwuw{5lb1Ct(POIMMvIM*~uhYzY3W%`=oHJp?&nQr|MNpfAj_C@Bo8{5f zVI+MQLtRn`*ewvZxaQ5&tg6QkGY*S7rc92T6b;q`S0SrGg|Dnv6%L(ut{TdX~$J zi#biDWW}q4A5vesiGGI{j5Vr0eo)8RTi~LDcpn|YW#9Iu@9Xm|34Evv${5%*+w10u zf~FwuUN_zEWxGfbRLcHVGuG>-g_Ocfr=0~;7&_q4iI!^ST#!|>R$V``r zxlSDZ^nu53zy8+C?|t}|ci!Fo<(~KUe+B(AhYo;ti{iG}o{JsATR#3}@&o}a$+t+m z#li#@riP|Rj*g!?aPsIEM-P2UXZY}*g~_3nIT&S?(pgH<-VWSWEfUNQlC+RdvOKXc zJxo7Zyf7}8%s>GR2nnP#uTpm{GZE12O3y%$3yNSVhmc{cR{HM z@S%yJgDZ0*O39o-E5@~&f{0CeJ-;ME$YowyyG6g|1c3FqtS+08O}xUGsv`>2BuI9N zk7ZLX(6A06rPg7Fos`4k9HeR;CZ=>n{gsd$?liM+8H}_=ibEst1`50!C>eucMu{2; z)tFP5(0D*vEJL{FH(z?SNUK~SjFn(s!63$tn+7-g+s}*`!%PFfp69XA7#uMW0Sh|V zQA6N7&=$A?dcbg-hhKa#`xbZy15XY!TfyL@O5Zj3{2=eHJ&e8D~KmOv; z-@kpwbI)OO!6R4QcJr0DTn}ch>n^9kmG)VeTz45|FB9b6aQpT5JaYTfFWmS1?nfSa z@{aN0L-D8!aJ5rJpKPw(%3$O^2#|330xJ?wQf2hnlp&unk{}cA=uufTc;8Jou#^MP zX@#m~7vh5%4O4*FrJ^&R)~DjqR9u=)h;wOKF0IU_G>EY}e5}i+boq>xD|j09@ZUAs zQ(!D@*+^8tFqT*Kw_NSIzK~r=#6}{)6Yf z#mYr{qvCE7r*S^a)}Yu0C=u#nVEKVeg~b=*pp9#DT}Ylmn*$|`v5;*Xo*|TmkdS6+ zVat}~GYru!+v_Cps^sMIHeLst(dRQ-E^n!oy~rLHNuD7r?<(C6X40Y`x!FjPQkUEi zWzJXPf+a3k!xbC;GXn^-wnENU;XL(PxLEKl&kjHH;zKmYzV(M+(*%3y0CgYiVLDja zbCHvaeM;jiM82RhmV#s#rcV&YuFMb9-S_m!(UIXpv^zVAbL{ZR19RgiNXMm+&S;g3 zG{FL0pj^vwTmjtJ$`_#}9G^jR0WUcK zq!r*ruM#U|vqTh3O`fD*a(?2ZYk4sjY#aJ;<60?8ag>kY2XFJC`HyNmzCi8T9C>Y#hBB3KEMVB zAaomJ*{+Ih|0!5NU#j2i)U00%+%z9&uaRVF;3FO3>@zIasUr8gP;(ty?r z-_XRr8+!(RH1s6G2z^9sqU;w6)4uKLr>Ph3vYUTMpi26IZ(ni&jjX$_*!BJEuYeHd zdvAH-*AKn({;Q`(j#%`Hkl*TGGfS5y-A*l)YEg^{s`}GNMTSs81fn1sFOxBCCTYl~ zEV;A^k=9%;+FEr-r{PA0e5ltBberBr6`pL>S1a5qmop${8fs;TSOM8U!HzVbG#-)W z(z1LS)-Hvto*&itQP~Ff$a!~}^R=1@3dd%Q>)EqSa*h2x;B9QKr)y!lhOeLZVWHQZX&3M&t<+Jv5%AQSI@ebMr+NX@YQu5TRa4X$v#(2IIBwL|c zHB_rWc~-xd=Ot%?d;*faH$l3Feg{6dV4aK!m`=co3`luPBr1~xvjeh?b{a{8GyyLd zK*;umt~Z#yD-c{mk1>ytrwe#l@;W4eZ>|@})rCT`c%>XE76RluD;7hgQW*X@l`vQI zlK_kcTAbB`rJ~Pfkq;gD{H?cM`2CyD|MkN+2wjhS^$AG}C;^7HT?C=bbijNE6#TL< zJv@NApvYR79;WPW(9KSsVv)1-y$e&rWIZ4qmykm;O_5yCS)f|NnRbb`UDV;t1i%RO6C}@@<+C$Rf;Gk%dA(DUJG#s5Dzp4nDR~8vy3i=vH;O6 
zsAoyTFo3ZF84GadvKiT(JGIXY#&-`+iR>2`Y)`@=`1m~S`y6?#@fs} zmXqLI)luSsu*qToOR6wlphZ6&A)Vid88tHcYYc{oF`k(KR>O^peJ7sW=>bzq{|CGUbTI(4ztPP?l zQ*DjvyyFHp_;U?TI_&Y7W_8l&rgxy%^UvdhF~i#V9tZ!h+DYJiLS{NC)$g5J**88( z82huQ@4xovH~!!!KfLV5D=xnF(m{b1J^0RL7k=;ROK$ze&t7@$sbi=1E0oJYAE>iR zrD(AjO2%EqVvx#MDZrm(sDQV%aHX_X$N^=Sh^pcdc_OOHrYzaCJ(sZ;ayEiTE^jIq z42Y}VUdz>}n(Gx^y`n2*l-ZOln-a&uqI5!8;mn&}?jwfTH zLPo~rOfa+MJ%zlNWT;%hQ^>oKsPs}c0u6ZBMVMe1%ZJib1NB-MSYjbdoHlNhz`%@S zL=*rkyAC1bRKLiV9rW`+%r+n?tBD2ymVQG&7ikJUub|sb0>%n^H*$VeOYUZkf6!_~ zh*IXcUm+@|-^4(QM-a0%;mcnuhZ(B^YNuS+i%O)bx! zo*6whF?49^^pT07gEY~xpsmT_gKUDG7&=JNTa;=@BG{$5Q`=Pt$Qxku*D>-GP(319 z42Z}j4efcFE|PVLCY5p^X{j|8=i5bvbcT8jAnH&G0o`P_l}n~o(gljEQppx&vPG$M z0iF>67PVW_Z5mdmFlZz+#~RdPk{OVI0d-3AqQ$Yf@sndCM<&NlF3*mrWDB6MMNb*S zSYcMw(S+%9LCF`T4It;KW#Gnej3JmAH2-`6f=MT1e8ON7f}s9V3o=?g!&pHfpd*^s zEiA~+x1MQsh+xs|z{?4YS~-T_$_I&MlU}aX$u+nI)1h3=Y)}}?T86O@T%g8YMJ2=+ zNSCJmo7?=sIhO9CEFs zi0xXm3CHSMw86Jb8eB70_|mM|NzZJ*#uiw1Y`>nO7XfTFt4F1)$Cu=XCP&XKPaT_^ z{%YdP`};nA=C@DZ^Th3UK7Q*1kN^1g`>(y`$CrHjlK=19-~ZqL_+8%2_3cY8xay`W z-+BMF`IV_;+*{(JWIk;)GH4)05ot38Von5D*ok+Fw4|<<{eZD)QzEK}g;mM87BR}5 z&%q)ln*t>O5vt{)s#Z4EE2>gKT+A&ObIYm3RLDQ-_l!gWGuhNKmoqf0AlEEp&GCp7 zfmg=m47IYQ1X?2-4U=2}5@47lwN{Nnl{RdSfXZmpQ{{3bpIa-J)>@4yEuMI}DQ>tb zoZ06&?XZ67c77W4?+^Nqr4m!=_;NmDE*0!$&QmFes%XL@j4c)d#bT&jLIkT2(7{|s z%PEi+@b}cgY6ZL)KxQ`Le87tk)$*VJ*(Lwlc9$*&fI}3_dVy$CXh~>;)T&+$r4lBe zM--20fer3NJIysx1*0(7MchTFohG#aKZq7700AyNO;iq}xd1Skez6i4EaU??ra~Vj zW#B-20uU4&rSw3*d7Z`kq$q+CN*lXLnyCA|Y^N2+OKrNug>#ueJm&IwjZVALpb^Pd zrubCD6Z@I_VDj`K_JHsfg#-e4PJ(KSB^;6nL9{SNBPj6(vZ;K&48~#Bj(LwLS`)8t<#W6 zSO;n=Ues#9K~yW)6Sg&|CjL%brq)W;TFDNe1D2RqBkLR8nPk>6_J?u>vPoC^*U z?a0{tEchUJhJl_G2w@Eqcd>DrvdBnROazNjyY1JYk7}*NpcNq>yG&U+O4+=0X$n+T z20l`NaiqX-BI*h?F@DO~26Q!U2MbWY%%VNCXb%D&3YfX#BWk*5Y{&YrgY{LOH2F4 zCy&idPpc%;D#_`Y;k`#b{opSze)Q3=Kl|c0fBe(qcRYB_t{?udf4cbp`PTRT-wVI@ zj~87058t`$!kcct{`BdC(Xg{xh5vmBufQkOFLXPQg#tH;To9|W@-&9w|Bq23>ySRx zB2i$%!OvUBd-7Q)<&ePYDz%(ViOWTKxhO8==JMI8baFfqJ?(WJw;I3nI!~wK3x$ln 
zR<_p4E-r70N2H{uDi;i`nwy!A2<3n+1R^v_OSftzLNcmaHC`+P(kXi~VNWH@`K-MT zORHF?1)F;CF6H!z=z`UF(5&C%_a06qMsw-qazR_GI4C`!Qi;{7h#{3Qs{JFBEdWoV z-~q(Il^~idl9YT1DOiL$g8W5qP&s3nY5LZ#FU z>qw=VR61(;JZcY?nLM73MzKt!fl@MupDyXuVzU84s31}RcSb_UwuA&-K+=L;bb*Dt z+O0~1c11J~)9m8(uu?it8M736z!(4lMfrPqH}bKRqlGv7+5u&(jvLPzoEDz=<@H+TLC%&Cm=3*+ewjgq7#pzZ#~HOUjSgZQ zRPsfMY+fo~)T_lrBbyCEx{i*=Shbo@*_FzdWa6nah5#;5 z+k)BE!0($*8+ehxxCx36mKJe|q=7UU}0$U3lsL`sYjj_qQ+phYK(M)^{$u z@YZ{Nx+tD27K12uD)svnbO#iBT`0+_mi@V`yTtjRBCrE;qjoz-!mTRg=9zM7eJ+jG zAQD;*tKF=Aq<5Vm>UoIN!Wm~=K$9D!Aq*_g2-%YmJ zNVS^jdOcCA#VVCp1Dyi3N{HitdTlo1>;3H3Mqy(;-|K+wqTP%(>!9yN-xkd)-X89j}(X#hg8xHkXR_W^JwC@poHm zorbef1+Epc!JAPOX5`oV#eR>&@ZJ>u{&34*tDw%I?BfdVLf(~5(nZIbNjtI`Pl=1P z+t8fIawI5<2aU}2ug^7LFV^LtNTQt~qXfB_QDTwYT+eQ96ak{oLUk{>NYbe_oqBP0 z?D(0JUyh&p3Z_;})*3897#wS-&mf%64xw!pB43tfpwwUq(_J8O3j|jqJVh{ln&B#Z zX%USHh`P&jBYgL_7=#2$=^V|j0I(Q`2t8BM85O|qoK^|Vvw*9z1vDKj>7dmG!(br^ zc2TEV!sJ8n5deFM@!gJ9y~UtJnY~!21mi8bamB3u+inH(tXeTA7ePd=7Jyo0hCnLX z1Sw$ot_R~Ph6nOPEbN|87(kmT+kgdZkn=Q<;a4MNSPG@#JiS1vgX&cieWPGuz>ool zRqvtlahGNAxK@osS@_&mzh{-CTmbkki{G;fJ2;qTwT!r+Vcuc@y@8H)TPP%efJfQ9 zT)qUx4UJ5vgRhdp)}#meTCWBLmr4b-UkdrML^3mI85l91Xa8p&)iVOXl2i4x9u6_9 zbGC@W7gzZc+F(rWk7$E2dZ6}4zKM$3V2sVBbZCe*gcF{0#*@w%!U;!w=XbI1-SLdb z?piTfrZs9BT!&{)@A>MZH{W^jkMF*u>a{ruSvK6zr#zUL1ed1>#LyWf2G z{#)+7{0Bd}aMvZ@y6}6v%iyNlub-M5&F2Cbep2R5spvB;m-xxB#DyD;H1AYxWtxpd zwY*j;*z;*UNLcf>YT1voWs($AklG4Elp~!`rsB()N zG8zt?iG?O}DP-CBQbiqK$crQ=k1A>B{8jiTxwT_7RrC$Kivt{9;=$Vr55; z?VB7vNQ*6qG+aC_6OD?O&k%k}miTts`H52#W5-#n0m-_+| zGZ-feOD@qgY_+5?%4(p|Z&oFn!^w6L+i92Sd6XzDkq?)0vmI8o+rfuone<|cnlb6c zEaZw+eE~PC0Tmg1R4+5@?y5;ekuNqgA6LsLS+?9qgRPFut~)RmlfI<1=qI+pCG4$i zI2Gf!YlyIA>M!DcX~V^aSU4^V@v&~iSWu1$&-*>AfU&rVBQ*jZTDK`#gpS}ZmP3^o zaRnq%LzA*vE9GM$wQ`kOs=-7ZjbcRx7`wp4tN1Y>C|yH?XGj;IClO#NafMg~Na|j4 zgr%McL95gqV7L5=wa_5a%Gb65cE!n0vm#gMo8b9PbY-}$4e&^=SPRV=&EpEih)jBB zdFtfs$X90$eel^k&%OM}(=R>r63r^{Fyi3z3+i1 zZ@m6rzxOZS`#%h0ufFMuFAx58&0}GKDkRe?mwf4@JrOgdQ_eD^o$wgjX=ed`Id?u| 
zCdRc?umevh1hWu20S#Dm<}ntWOVI@pY{Qu4GKVN!gN1%TfKk#wpaZ}O>Mu~N~C7;8!;0A@2Oa{;WQ4rDUn z&AGgV%wk-@-DQkm6pM+S1v6Q>mX`}h89;FLGy#}M^ z<_0$q=hb6zh4gM`xA>MWlphSN#!!9$gbW7Ya5-%uqbwJ+rGhG#QgV4ytM1@q6F`P5YJ8P5_Sq*2FLZ=3XD8dDj@GNUfdt^}?0UF=&-I{sxC3!}ueEA(F34K5qB3@~RF z`KNXyAMU_d#)S%6f&pC?PMef7W5Z38X+Xn(d0G6xOtM(=c~N@;eTm0jq-|-U2D2?v zxWte!ColU=7~`r_i5Wr7;&z2F8Ksuib4ZmaP;X2aOMpr2>j)3F+vl_%(Dj7IcUm79 z(gg@(337*Yev)j>So}+luzZcj*uf)q1$R6%@TG0qc93`d<^W6>%V3uMltq(!T%kC# zJauT~@R!2}KK=49Z@%;5FQ2>j!N+d7=aCy9eB$Z{9>3wK=WlrKH#fcU_C4=?@bJ5T ze(1$l@4WBPAOG;WUElfs1+??J{QB>|`rD`GmZz#!xZg}Cy`g|995hElrfArdOxTNs zK&6tPoIu*n)N7Gq-kwgV6H!Gzt*Mo5&3d@kO;hp}1-BAQSt_g+GO|o^AsL%WM8`vc z<9_ckzxQM;Jf4hA~!+=aXwy#GQSgHTMvXUt+WO_?RkP%h$N zoj32KKtloZ)z&aRi!}Vm7)NnQl&4NE7UcN#xf+GT2+g@=OtR$*b6J-G{)lS7n`qYT zvG7vJHxUg^MT1iz-$W!ZpH9d~rrT+!X=_Gm3i^eC=DhzY@S$H8dA2&@pIM2p9=7Da)FnC=IA49XxQZYTOPjt1>h}t$>8muxWOsW?L*W zP|#RQ&0JV6phQjpunWbjTCCe<$t~N71k5VSn9o6h!C?Ls7`xD4KzH;kD~4GK>=q62 zy@Q8GT3#8er$|_az&l9>3NVCec3?ThfON;wimkwCngw*V zxfg8#xbb)L7`uagWf-ds#)#~tZ=W-or&W?;bJKfIeDT?lzrFLv-@N+!XC8g}u7{tz z;hu-DyzjAVAAI8KN1nR+spoIl{p!!&eD}WhK6v=GHy`=ci}(KIu4}HiX4kGuzIDkD zF1+QQAAkJ8?-rNFeIAopFPWb_IehAXLOdM~T476q0J}{Tz{ylA;e6IkvRENNAk^<= z5I|^8odv}g6c*SjoQ}(jKn$Yy!*xCfGDeT!{?-nd`YxMr5qL+4aAfRohG5Z>R zRkB>McJD@mP8v6f92tHG`W6wuJE3yH91AT3*GBxFvEbTizxPZmw2+7{6|%+(=K{lT zzd#-?mgv~+#%j>mp8xs{s58Uk} zsrQG6*>jC^TR>K1Gd`nEBwijj=|paaHsCW_jk3A%6N9yhgtsW)f^b4i**e7rTC&hi zojDA>2}1`*5sIlxkpOu+)$sgOE~xp&_-^q69M<6lq`?a46ToIGnFHGb9GF^#s6#5c zl2JPqPZ8jlN>5V%3r1`$Lydxv>%~wS3S3s9gv(--(lAVjjjb^n#8s`^uEj1_HRfO& zpqJ39XTZoTv;$fK#bCQ4OB6>M78n59WYHwdk_lUKk9k1yO;V;v7vnxls!SH_bpluMs5~uxFGm%9f{23sEYk z^}%6%aA1NB5;02?ESqCth}HWUD+?HF068mY2JAtpXXhxp<1l~~yMlwFtVP?lkl>pb z3%Kfm30Ce6(T`Yh2F4YN)8dswBZvR?kuFV|4YH}x zBk#Vw`?(h%`tZZwDI`JY@(U3e6 z(j;PrRKl7~+p3iysO#}Qz0-=;tA3z`GiK0V@zPnmLC$&68W5+QJII!BV?|ab0-RPW z*MY55eh_1k6s275@{U+<7Vgd8(L+@k6l~RM$yycOnUB&Hq7L~YFuc*mQtvDyt9i=0 z9IsR$>}6x4$`l9)eU;6AvRPftro_RualiMB-+RpGKI!)kM*@?;Cuj79oV5ym+XQPR 
zqu*2DmNPt(L(dNUP;8$B71ny6FGphqi)eE$6L|9W2dqWFD+mOz3!aN8`OY@_5W3rL z!dGjxWHRB@Di@`qNt;C(ivTH+&v`=uvqCyEJ#u9D@E)d{WkW3Ss}vElFf~MuTCjJG z92v+$9AW^=bX3Q?!IX=L?7@s|d79W-r3Cl^jbcHE94xpMlnX==&|GS> zD2Q2Q{D94(pn;Y~T;8{2Z}=L}8$c2)-SxYL+!!GuhBwT9BkE9#vEGog?Of%FUn)MJ?-EcK zKvl?6P*Hr14Fbe87=f#zL zFCtyp5X+=rq>qKiR@b;facpk-%i#lW{q5s7-+6KO>-RtT@?F1t_Q$_`_Q#L>@|K5w zdF_3VU3<^NS3dZ}6~Fl9kAC&S%`d(F^Os(~=aWx=y?5UqfAh){H{EvS^|xOB;fHT{ zoI1bPeDd&T&%E%9ho8Fh)R8>_pS503wcA+=D4_DeMuUMZ_FFY#O(>a7QrRnkFy67k z|Kp8Xu);a=8D%2690^PUID}?nVNoKgNGH_bP{^9O0%!^N;M2U0FqXIih=Rd|8Uz$1 z%-J-MiiMo3)re61;O2U+Sq~JlmUKcLk4O_ys8E6J8|F$uSwJ;R6CD8>=(}o32641_ zq9|`hz|udKo*5iy30nL8GLj)+&Vm4KOj)5j>uM!J(tV~b!#iu(8Bv-p^p$sc^b~})VnmwJ6MFNuuuqO#%eeRPX-$Ww1TrD}#dljnJ~dNT;0fsAJVETUi(-ZvuG`_}Ew^2Sf`zc%sAW1UbB>M~+Nj5aJ{T0*s?) zfnn@!|rxlVJA_~ZUi!i&aQ}RnLqfSIO_(UDB@s$BA z>*h9U#OQHYHfTibm89J=8zfrgqEa-cQ!g>SfpZmmuV9>$LEE53g~6>_VLsfZTLmWq z|EW0U*`0tZ!!TVqe0N1wFY8sODhcr)vh%!ujxU7Dz6ngz=-S zko~o-l_eg4UPjMov^2*m8GAtQMnr*1GQT348JJXwXdUEZd7=QY)DR?!fDOHD$~~i7 zJEP}qS+myQq9Z19g+#8~3{V99Kr z(wSGZ>RBs1UNQqwaCmy+LCH;N*!vsbr@PfBF6cPuzOnW4D|-{&_m-Nhdu^v%|YzdVKfGk1E77<Hpv~g89d%0)Li&VStpwUPYSJ3UijvgmNFmw!- zi-8IbL$}}somjUWY1RXsrn|zKb17-qKOOR&js%C0B{=5y4*R`lf@|aH_;R=9r(B(8 zGt0OR6s)G}^%NFEqz3`EUQbmjiDrYEWkM7qh6R+NdNtH;Mu;?~q90-p@FJ2&@m@es z1;?>uCyv+)!jgOr)C(?$LMfdw>BS{3T&X0A#Za*j$fmtco0=ID$Z$&c{-oMM4t8j9 zYM5NK5Ct$CDe5ERF8*~eTFhG!9 zX%Vc{)Kef>{*{D~HjCPT`y%S}a6U!@_G( zx-`Y40>l;2_tR06b1PDI~ka@NNSzNZ> zG6(%?w?}N$kU(o_c^)1JdM%n`|NPM#e|-OiH{N;t&3AwC`kQyZ{^s4gU%lri|x_S4jx4r$|gGY|OZqS{MhBX?+*n!XfczEB3UY8OKO@)9)zVOyt zFTDHqZ#*ttyA8hkt*zSDMiKQqsd_!Nu`y^+?F!QR4vV^i|D9{$a?zMg$^72aHuE8e z^^nhVJh(OjX4Sj~fHZF^7omy^1H0@Mj>4kt70ya|{&cD(v(TohOU3o6gq2`cC|`hx z+HBes3rm7)Q)}*Nzh^Aun~nw-Gf8cuYHQYTM+Ud*ykK5{HG-oYL!nNmSgR#DE(Vsa za=h70Z`%a$Hhgovu-*rEKjdap*kBvN2D{xnCSsA+0&=k^Fo*ie+bwtvykI>~B7|-y z!$@Fw=YFBnPUBb@;_|jwSRVAvdtDO_>k*gjq|Y-P@|{MEJ%b#}Nf>B7V}8$AI=;-p zUikX6O@8fFucyi-A}gXe!>5Rjs8*9W$|fr%PpM$%@(>w8S_=|3cUrMtC%MtjVzX_9 
zJQN^-=p?b&(ThBGkaQiW>54KbSE~`trr{c0sU+~k(03MyhU`Y2h#6@q^cx!PL++0xV$c(7~hoKl*addq?(vIC^3~YXKuHC5Q&AmrzWzz3^HA;TE_O zq_ad3AUVKG2nbXKXho%#PGZ!GX;`%w_~{hXStbP?7-F^jluKhO>w9&qDm_k}&u#X5 zENgD#n%n4c8r%*IuD3L+W(Cf%V!;3lEnODAw@YEeMlG8Kw768BXx7WASX?fim5XPU z(s`p!Y%{|EOck`mFbMerM4}oPx+(t%$M281VeSX~b9}6HaM;!~AM& zKzKkPtoIKiZ!n~G(+X_S22iP73u^;0AW;!o$)rHe0z){#J}TG31g&CMkVao^AZCq3 zt&ylZ?6J9RR+B=a7A-7~O)QO$ERIY{=4Ta)<13Rx^JAbW`0As-e)7g&KYR9XpFi{2 z-sk`N*{|OJ=+WQ);qI4zd&jR|y5+guKi&Q6&)@&((L)Dc*C>yL1M*bD=<}%L;$fw1 zI2N%K@@v_QTQooNn^%7M`s+{W)r;kF^epgom3A{);sV)>HqJ*~Q@QFYhK4l4W0ri`hC5p)7^OFHSy=hk48Raz?-9fhnZ8B;`pi+<0T$1$|# zI^%PX`aQ5sOGHKaw6;}uciM3@O;FDlh_6sKP{Mh;u)bc|+^nsySL^i@$Hh1<25?k~ zp#dS;YGr%90$2tbQLHTVHtUggD~1dGc&DA}c2W>g*nx<`eh;YzknZR3WT?lMdS|w_ zYP7O%G*XR5s@394#QNQ2uN|szwoF33<{H&&4(Qc;W#YdoWgj_LkA{2`;lOk(G#d@h z27QykwQ-ky*ls-&SXyo7 z9-e{Duj6wQP)T!m-v^(6^48xzeB;2LKONfpm*K;EC>e$?QiP&}2{EW@`BFuyTmx(1Wg?jtRKddXi}qgxu*kQW~ zHkH?94EiiVza_Y~>i3#`ZoSKnY5_B!kwP1Js2bEs&3d^NN@JDOT4*yX9pFs>_CTjx z!8YEp1qi1FgPTe|56y|lol@E%!&qv80nl6J%W9F5rKVb1t7!#Qz;-q)U%9K5kxO^L z!=(@)5n>#4N-=|Cyg+Q*(CoM}rbPPf|aF?$?N zo7!l`n-R;3(>84}j>;7$XO@qT&mI|{Ju)_P{OH7q!>9LsapZ&jU%j>eD=-@D{c`u7 zeb4{x^Jm}t;NdskzUS39?tJO>pTF|jU4Q=gu~S3ux*hY`G=!g|5~fH{844(K84Fif zi-t}6_PqDtW4FBi`jZOrbT;b+{XjLIN|-`^&6-;kSkp$s4vqt%EF|omJue>>YFPWp zdfA-Mh(rF9;m{$!Z;!|InajS{>jI3;rDd5U=>KD3Q8FfqN2Hmg6nKNQzEptBz-}wp zYp?a%fp%l9QH7v>h~iI!M=718Ar`{-sLHBRvKBI$R9p)916!eCqAUI0lU~()dC_q zP2(=1E5#1*u2^1b3sdg;dI_yqY2GG)z6L>9>(6KOk-&T;FzNf&?gnkPs~-p&S||6k2N0IpT-b#+O4_WM2W6I`}dkE7G$Xy^RH$(Rb( ze|a(Z&t6Ra%n43 z+*!!qh#5}`=>$kPGAURr5>c@qOU4&#;nnN;TQ^IPthkb_%}0q6jK=lid<8lb@O**8 z^9lqVz>8TfC-Qlgp)Atb5o}9DuHdc{mdvp@i((LgWM&w@OgI76Gb7!X``f;`c=q7e zCz`%K(bReQ>xqFjq-W*AF|`zA1IWiJCF8K?0!{1`CK&Kl;3i}V2*%>@$-|?=s=_%I z*Z^}wS3pksNVnqttFDGME1|H)sw7-1coWcxfL}(tjeg3^v33yivw*RXvIUr|=c@_r zr0j;Yazp+h%&~URwE~}DRGRe?ND0IF92$0@b4RD11^U+vjf8xYBTlC%PN>IaoB4u8 z3opLK#IPR)4`S;sHbK zP*4})jA722PI+@#X08+>cA>)U+w<7m3d*0=*h1BsiVLEl0hVbEb5~gQ8=5-j^_)YD 
z&1V$Zlp+z8BFrYDa*`#8W-dF_qbvcyBE~K+a#*H7FxHXFxUeo^ZVq~^=F9&1vKuir z5t)T-8L}AS*uDtY!}{9nmeUr)r9?z{eVGDz!)g{{78=_tvcOv4!4@NKa5Yw~CM%Wr zd^Lu6yHEowaVlXC1!SqXcCP4NSqvaN&he@Y*mDsJ z70SxH9(ofoG^y7&@X zqy=#KrBO_yzW_g?mVt%Vpb}yd7qSG1kh~bc?a-iLz-9tvtkod57^L-O7L>)PiEyjF z(jt}O5~~qV8tGQTC)6K@BBQU*Yh-<9mNwHKuq}|S5cD3p?S_U>UyH@K4r?9f9oD-s zi5lfJ6q$*~A;(4~z`O;$T3F9oB$YRO?)RF67C4e)-I&d!K|TTFVo2V&#H^Q*lmi{s zBjUV@kJA8H46Y$#r&_F62}z8(oJ4H{z>*9cx!t01*|elh49H=Ow}KhvZLdA?;zOT&w!XjPyiq4gr+m4rFPot4nRolH=4^Jhcl5|3zO9RZ3d@bARGDCJTArdp|Us+&C12z1YFXY^iH&~8U z0RSTaI55Xr;4ve!vCw2ZGQzRLvG71F+#6)Sb=uCE4QE(-s9Z3?qg_joy4x}qit#Ee z-qD5=CUS|D3dp_Sv`aYZQjsp^UD=c&91x`wiutm8Wzl`(S^x@JmJ=vWz)o`%dV!{k z+;_;2n+YdJpti6w@dRCKXK9-3t4qKq0eWu*_4)~LzvL+kxheygZ!)D$bzgMLW>cL#XLj) z8Mu(hEfw4((U+>s)#updrMmlgaWRw6v#ies)+(*YW|3QsQlnO+kW5HtMu>K6fRA?J zFX3cAatpKLebU(xnQ&A%**`JRKHmQ=X0-C2FIaEMCRI|nvvMx)FKDLN39W7bJXydkvQ5+>HbTYW7C z2qjahg*uhcpp_v0+ZxsCR)Y%VTRQpd+99^zj7Eb{uQ9k5ZvjH@!ffdO50Hl}u<#l* z(0Gko=vg-?X7${UeOs0Q$Gc+j;w1!Hijf&L?IRuEd~;~`zRf$EUT@m}R`b~nZ5>U5 zsnaU?1<~v=0N9SE3zxSwpWU+W@P-3N-`#h3!@fgrA2|BffunC7ZC?M)#f=v)Z$5qg zqr)fG5xU^TGiSG$3XDhtWAvPj&oc^jk+6kMeuLox#nKe}8A; z=1ra=Ks^Q^JalrA6z=QGRHdYJ+0Sd#2ke$JxwL4p>R71SRuJU$ zUFNQ@MCL30e8w6Ms)1IGX<}hbEG$gMCUe=IQ230)*(8&15zg%Lx>`y(^?cbeU-8$V zTRgS6kXV2y0~$jih>gyGLo7gR3K)ZY){)Ob!Nl688%V#onS)f@DqAX0*^CRnr4gSF z<>TcN#rG=0)^DY-u#nE@m}tlw4mzS?M>0VrqGwjMHI z0+|7_wYd30>wyclT`IdtsaFbB;z_k++C&}`Oy`Mp$FG)A(sNa2F@u2X>2phJX< zQ7a)g9hpfB-94y((`#mRDj{f$wPKq|0a}Q*J>D9U4RLuL@>T?x4PLOvETcErq%+e5P+un+uCj9m+`^(=yL z!r)`&R@1b6=9^2$4juXIv!=H{{ruHWn;>)G+{G>Jox7$c4vq}(>+Sozy>rLu^B*B0 zu=n8W`wzeI)sc1k55IvJi;Th5wryw5zklH9TYJ8G`NHMx4%-w7pbjMBcAwV};UJK( zn0MyVR;O)j&%Q0&KUp`}cR3dEA}5LDVO@|zE;7N!a6<$;j;gPivvK}W*4N{9o-77NQsF`^=$QIzuX>YQe- zAj)Ti$=EEJa?SaSjri{UB%dGVSYnv<<;j3LOc5(?S63qGgot*3t5NThhCR^WJFK7>^p`5e?^`@i^Mm%7c2tPJ`(~t$w{g z^yb9m2R7^RLKcD`s&g(dpv`fO@Iwaf8URMOUw9QN8f} zo$9R{nWY+s9=bK(Xjp@^j`^~0u?AgW%S*}Swy7tv+Q^9a+kfDyNfkK3sg)JDE5u8@i`mL4dl|r3-R<9J=OrXwkSYS-8 
z6Sic0B@d`3=&DA-6OY(BH57_rc%xi6iti&svbN1!GBZ3q(ls{HIX2Q+uQn3_D=VDr z$D}fJZ6KWjy0u1DLtVWloq*9@I<1w>s3p^4!O-MT`|Nn1Rw>~7&|yBes)^TCjl>0S zBCGSXW^!L3_Zl-4Nr>?DToWNF&o#T_v6lM$mGX*0?6)mge6pz=U~^SyC6ScBL;E)d@z-Cg#xBb%7yVM z#jGV6)7ve>7Lza@a}q5R%&thi!T?Eh`#kEBsSjjA#hfD+7J3}*9%l>XZt?j}n#`Zi zir!PGHtP+$)tX&u%?`WuY=j%eITrZXhzMXdEU90x@tKT=r$X$Y#n7UZ9al(C7&PY@ zY7l#AO9jtdk)E#vAj*{_yB70wBC3gS;#gRoj7bn+@rrQ69!I;yaEWqtreZU(kbtEH z(U3Bqah3{nwGylm?I=Ph>i>Bc8zL<+P~_3h1~-?IKf);)YEZ2>p^$xLx14r6PqV&` zSV)kHiwaru$`XrFRJ=hIkrrK4-`%d_Ly<(RaJ9^2QzqIYm=zow8*Q4I{#+{FtJgG# z17nqveyQfcO&>XHcz`EKJFp_Hm~&vg4k2d!K%A0V$p|M7GJ|^EzKN-KhQ^*AnRt9) zZeU>r&+=b z$k6RoS91|yL5cv%!6Cp<;jjQs>J+nT>9k%c1m}Vdm|GxkjdGjupL|{@bP-d zPCd&J8hD987}#3eTiHz-gIWrC5h?-FBYZL#mN`0YI^sJpVmjho+bno4bTFEdSIMjw z1HVnA#RiQKvmmgo5@NTtz^-6NZDR*l&kaMq7mL*E7^{z*z#E-p5*L+G4lNYNZ2{eV5zM z9y;{Nmj~W&+W+Rxrk4?8F+r>O?1sx%VL-ijdGpt2-^HXCoLv!Pag;r9^sRFjH@CFz zXzOUY+P35D`EC9E2N+sZt8&#!2!fbkKBV(mdnRcjae~kWo6ovy)!^bn9J*#m%@g?# zQeU;BJ&F(4s#XHUoH-E@`RO*7t;Op(&G=5y^f9?&vq=1*T(Mc9+M-nLFdL6YxZX@s znu<#kQ2@q7M1&J;17;^Vc98b8D`W@6!mpI_uPug4K2LulD#ySXI5k%Sm|=*CQiw3A zxFi`9C1W$`1dOtg-~`7GvOb7JNJPd;IZZktr#;gFM$ED5c+>* zcQ)g#%z?rxmxgRHtVfT9Chhivv!Zo_qfhjYJk~$_WOx6IV#(HIe6Ui|lgDHiNEUb4WE^Ac|w~cTMAX~Fs+K^7D3C3#463v47 z;gE8$m|R*+;qnWQJb3(p_M((8mGrVctIMudNhfjDC7K$*VlVOZ5Pk%*WyqL{i~)cx z2`Iqs7@ZPm0m2E0j2aOMj|EDWkHB)-)no}}!VMH|mk>HgEdpE(-xe&qY$lL(L7BJ? 
z0K{sP8&pD=45d>Bl@KEkO%xKXV23 z0T`#*euMw6j>`sxU-hmo$6fQRDGZ`nn~&1_SUW@8oED2gJ|UZ)8f|N9J9+8+mnTkc zIec^@DzA3zeqsAAxOVM%@v9?mVk9h?Q8%7FzwTJ`Tg_+QIQI2h2ao*?d4s(NU;pah z+vhHB86G~WQ@820?SkoZvx0BDE6kg2+%bo#8H+|sUnP%if}z{=VgQTjNSSTPxvX)(YN z7{g((_N#uOyH+o;=w$?8WoDh&pcd*BvpV@K0<1+RgN#|NnEX+UfQ(UHXv;-UnZ+Qh z8*j;p8`oQUmB?g(@?4`%WY7wAs#y?`>cxCi0qPiwKDT+wk_P>bYJG z<(S*9r(EWGI3Z-kn4LDwT7bo694z}>I-kdi{Rmij(1i#!4yziAW$~54 z*Vd>evI<~MKqR17!VLwsV|v)WS&b^*cxyLnVS+U(c>{u8DOAfqbsSy!a3Do`rsp)B?s1lB?eYUohDCzc!!U@NN5!Z`(iV=-hg>?W2~~jTc(hpZ@0E<}+^}ZC-!i&|8O( zu5WGKH8XwAZXJfG>Xa=UR8ekW(67j6-N~4R3+M|u*FqJx%&~|v6p(}h5)9EQ72NZc zU~L}yNy~Ggav6l>q+X=dC`tfV>GE=RzTz)rRS|B~ZNF$SoiG`Wn@vX@wo`$?d5`DF z_~hFiJx`5JysXlE;&dDj2M4pM*=$Ofj7j5Rag>_@fQ<-9YTsBSI22&o7;jrNG#n0$ zMY*wPXeJhhr&O7v7w0(=K9Hc6(gUSIOhmyO}Tn9tMaws*N~-L$)(V<$67 zaO@T`hCGuRPABrVgl!{#@3*TH#-dasxgADL^ zwHz$uJh`+bolvI}>U>6(OM{Fp6_;mIDlix4OvRijm)2)fW>Sbole4_HnD-QN?tB(v z#>xBMLLLx!uINq0O{usp5k)$Bl%kH#2;b=+d3<2xvA&_F`UYPS%xrW!&PPL&<)V#L zJ(9Gsg$OC22`tWYB&#q2wdSCM1^8X4MV1y5E6eHC)zZxy(93ZBdSQ7vQ=3oJYRToL zRHM@;O3Kg!b45=fW637<*`%I4d1EPWClk0M8nzYk0dV!g!6$<+H7bzHbFoq>oX;`g zpxbJcqjn2%6_A#AzR;B*oCNt6N@}q#0oYg2zCsFtmYBItqFv%1C7pxt9UOx3tF)V63dqip@1(iA-S>;V$c9p#8{70OMCRR2bvv%j58dhLaYa2*57DA zAZ-X%fLT~+0oRJaPF^Jt7?^c3vkrm^wTfBD+kvJDa+(13f*1v{BvBmHTowa#egVcp zkI#%o0dwqCCEcO+eyjF(YMBl#+o5H<^ntZwZI9m19?dc@!l8SjR<(^i^ zE>o@%mYz&RM5IW|3WMT&lrPR(Tuej3-*+`$QwodY`Ea>tOGE{ftKDikiL8ss8w4G<^{&aFO6&E$c03;KY1FvW_G#m?0W|E>pR+LUi z(+Mc6&1GN<1C@nU4nz(_yjrV5Cm<}@mzZQss*s&gNWXSiJ2`eN8H2zXQfq5!+%G(s zA@kksGUjNJl+?S)1IWjc@ip71HBZ;AwM4sBEci(pxs7&r>$C@CvTc)sb?x0RoWK0U zm9{?*jlDiCcz#t-yr!+Mp^${$%p$XoNUV_AE|NlgBf;?PR2~Jurk7lBit-Qb;{&hhQ?m% zAAV|T<|U>2L%ZWEAAN@Qv~m8?bV3hhwWLdTX(0mb6?4qOdwR)VU=|X1^0JQo*P@LadQPH21=mdmSncuDVJC+n`9XizP9M*SXn95YIW&s zt(MNF{j}Slkqa<@3s*U~%;FPZCWqP~u&eJXikj9&8j{>X2-gCyfocX1)CgY|qZH+) zYQ>aMBjP{B+YoScv>2oYl@NFI0NwJ*dKRK&N+T2K6cE(~{Hj6GNQgCfT?VhyK)Lj5 z5@S@n+{iJNiP_wi1nB~PQBBnMGGzMHu*eeNbPnR92S*XFZDQdENu<= 
zthC$Yu&OKuIXUmw8yP%KE#a2!T)-M&Yz$>21q}xDK~OHDS*K)HHSe*#KDW z1=FrIKs!UM2O1E3P7LMd{O)kT%lVxwZN-pV%4HxXVgu3f!Y+@92jn=ELIoRPJn)i> z#{TV*ru1A@(pQyqy9QvkQ_FPe13h{_UVR22 z6MD#74D0?8XWM70>t+#P*M61^w;Viz#@(r~e=E{_JuErVGyF}@w;fZ%1CxC{V{KR4 z&YnNL|M>Ck2adk`<-xZPpICSN)O%;ntZzQP?(mT}ckF&?(m(QMmr@eFY;Na&I<6n%8Hg$FZ`zo9{W4Df`6UMoszdRSftn5-T zn9tFPxE*)Lq>F^6J%Zuk69WSWJ6 zJm{ypb7>Kh21pcy1LG_`#QDe4@!4!jf#e=h1mQ#%tUxU_ET+rl7|G~MVQS)X&22Sx zjg9WtsxJhY@q9*+OPlfZnoj6baV?zjp*b;pV>NsGR^5Ye=guMl+Y0tV@&qiRE5tU| z(&)QK77>omRTkt@fnkTOd2IasuHF}}c0ASA`QpIv>q5~cjqX#m=2MYq+syRma_N4D z$a1c|eAB^F%(#Bkxm zZ4@!M1CVkzyk;hB}h`S-sipgWQZ$j8CIkuEh+>5W+b<$S6vJ{&yGnl4cR~Hmq ziKyPF@1GfOx0)pw+lXPpJpVxcn2m;AHj_dws%H+=*#OA|PsT!6_dpvdrfDa0EVRbl zwaOA`ZIsA}aZ*J+KNf#OKsQM>#Nigr2)qr!YLe;Ivnu%{FIuSAh)XAd1eQ$c+aK9~_?7dQ-tX_y3ZVNuIQdV0PR&YTMeCRuti$c%ITnP^Cp&)Ol+zm}|4JKu5L1r)<7)eCOld)08cR{P!J0W;$diFJu^lxI>I||hnv2c%Ex}S1) zWs;(NMnOPE66K~@dWiP)2APp$OhSsnEyX3$?Z3M-HN(DC&01wgLi(<*9v5ECVW3SK5Zq#b`y4;7n?sH!EIfuR3s5@ykHdC&i zVos6I7!pxcJZc1wD&#F#ao%gJ>a-C~MzX?`ct=3RDG7U|5KHmGRX%}PKfGV5_kgG3 z`dCCC;bv)1k4|%BVDO!xv6n>B^)~ySXyi&NK9q`&*GXy63_u|}N?1`pu(S}qn;(m$ zG4Lc%F$N~M+i)j^4Iqir!LkMn*F{nBjn&fao5dTe*~J=1^a(apCZKTFZXVaEM!YWF z{Cwg^3S^vztJS#IrPnB?G4XY6B)_<+Q^P(eARQ_N~*Gh{Fm+0Al< zO~zwoDH~KMQYMzNg;-}S><9+|=zLxc5~n_|fxw*sFUo~;te%3&(CT%P(iTV_Fosx9 zG)#p9UTD|y)}sXx=t8Uqu#R>H83(!C0mg>fFrU{DU~Hj)Gs4+}j3dBUSf3SBXvo*_ zU0`~k4m_X}FlxmL3Cx@gvRXzk(+GPptHG?3>s4a2PVRA#pAN-d@UQ_NG$zn)iwhJ8 zP?rP0I00lUu^W)WbdgpuD;JKV8HE4q(<#7}W;H0$>0s6aAFGy6FG)!ao3y(hV z^h3|S{p^;{)_-+$>zUKLPoLRww0UFG{#Q{yu;;7SP#bXg1V~=@AAV!|t`|Pq_5?)2 zzW31j5AT0_!vpVq{QIq+J-+wgD;HYc?eE_;F}_bGx!|&ofq@Hxk|Jw&I!jBLN+n(@ zV&j!Hm9Qq_b}pcE*+&D+3^7cJ7;2cI#sZ8e8Ml;*ERt#HW8#mBiwp7jia(b&g#xoI z4M8Q5;6yku6%I_1rh_St?e=<3xm-t->dj)=J7U?JV(ABJ%?_`tB^($?CuRy+SVohb zxGENwkU4fT9GC$@4@^UT5<(Kjg6}F>JQWLpxj6{jzIH7~hS=yr)y=Updd;`AXRy97 z${F!72}`L-L{*uj0USxiAe0!A9PBlD1@+l%X(@;6yv4;dF1`x|h{qvgGvr6*b96MM z2{6+P)#>+Lrm3?w>rttAug!8I=TOJeKH2_)NI1pKr~s}!I&pO?9+Wz(b#akLTn0sMFs1Al|U_>!cmHR 
zxaZdLcOB1VH7bs#n%(-?Wc_#Yhisxe)7Z!*+wOSg&frZ&B6h1I0zU^hEj_E zBeoVsICnTm#iLXvU4gM~*=fbr1P}kVUM^3)>>5fNV z`^y8*K6>Ay_uuotJ->hI_kUgY#HLUGcI?n6%_p}UZQjVM0-E-}y7%Dg2addoO4p;! z>znq!ilo6?AN&ac_PvjPzxmV0ckOxZSo6DA+jfkP9(S}ilRY7k6ASw}vjO~s{=-~>xgdYprldkQg@I6zB$f(oQpt)>uQlQC7m zH|24TLTgMEg|DEz_B%4RzxS8Nta-VsXPkw`b0O$QUPk$iThlv9x(r%xx~ zO2zc+L`QjJPPl^*AS)7qVkiOL1j zT;77CVfld_hIbhQ#%0T)1dEuW0Jvb>O>T1}GBX&)@Ph+>U&C_uYz zE^SK1MGV#B^R{q-_5gFm<81MHTRFB5v&SKn#y^7Lida&R&zOlGz?Dne3wh89Ap2b| zfiA035Tu_pEBHK@X_$(LVlIlL?Aotey7gf#7v0f__stXyaHLatN4UK)Wr30dSGg)P<8OAS&59VKZzr zOhrRfeSGyHic;~YFP-$KQvsHCVBRe4w%})hO$|6X7G~OT&_yuWh2M_lj3gUeV>QVX z;_=bmE1ln-ofvGB3&(6mrO#ug-6p$PrIt@a81H1iQ6u&^b>uE!^m*zL-`KEF?+zn! z1t<;o*!X6gdgvB0*edBR z6)>n>DtDL4-J|z+X?<(gVLw7w{*PVyKwS(*zVZB|YEMh|h66{RdGDP+{q_0#pZvq` zp8n%+AOHQm58wBjKm76!kN^6)mmmCi`A4RM?GMAN^?C6PrJMe8(5hG=25*iPP`5bu@XLlM7WAn?cA@qjcj&mH3sw6-xp= zoMffNTwjS?Ux}bgD-l%$m@)8*(xX1lM1YZIQl5o{^v#EwP;FMzDVycE*}Q*N^ug5Zx{2wxWwLF4`f?#FB7QE|><~$k z6-mfq)|gA1N_ppeHL$dpBqCMx0u*sYQmH_{09ZRon!)vDs+2QExoM}hoAwNra^SPBP$iaBR8rjLfy4eb+b>rx3TF%HwHr$X%kF}~M~|M!4J;oxqr#jDj=WiFUa)7cD7Y}dhZIa-BJCDW-uI_VE{uAtw}P!@|( zs#OShS1h2Y1gxM4vvMKiZ);=%%y`ktW}wsn1XrRu8$c)mg=8F62hy1ljcgjER_Ym* zY!V%*v*Ud@&Eg&n98yH&ua-|6G(vD3SnJ9t-e8N+%6Nh2^iyV*hBGp@W(R2KcEP4X zL`wk1uqzw{M8%y{fSlZekRMy;X(^YErEPUoTsyMz7~4S9aFD9wu+NEKlS%-Qy2<#e%5k+)^bT?yCkx;NSsI=0ArwVbt>$g3VWZ* z0x+v%c#Qp#@Bp=5h_1LS>()|)A~1Y;xo6{nqff1SmNnO7fq_rvG5?s#oyQ{B?_@(T8V^sgzCM?&1u)inRIS-HKDB;y@{LKs+gk13WTLk^r+`N9oE4XD z#k?2ZAv_+s@~=5 zG|8BXIE$4HY-GxFetwiC%t@S-Dd3YVlA^A7t>R6_X7!rpk+BT~W4~m5ZBb5;OG6xD zT?bosn+OE29|4>U)le$2dOdsl)*SlOzK4+8DzXF^B#s<4_Bnq4{qjHlaUIA6 z=#H%6YU~E|q!WYi%?gP+hu&~p8ZIwq@g)Vl7FnBC%2AMPaW1z*!^7;_3A6qY;V8PR zz!gipv0xNc2%ynGBjTA@m3WdE|KZ>dNUtXLaYPN^>~&C%!EI}#}VHcFDL{UuL)RCdtGmUKZ2$0{NVae z7hwhn1?oZsUVHIh1{mvi!P5Zzbl`J9ASLeuW59T9K5xBY96P?r69yvJWmjvI(^JD8 zZ5NNXUOqX}a~Z-C74S54W;Y%<`uv6ucz}KQug^XF z*XJI3@#*`Y{KGxJ|K%@!^V47b=3oBwo@^WUr;tvNU3^l?~BN0xJic3;)DPGCg6vslYpwHVv 
zQ>RRpCb4vbRIyR5-R$umDdtBO=gqZ>qnLNEwHFa>Qea^|w7ispHWm`WO7_=9zMpYr zrEueV7BF_jH($1M?1anS>9n@F?L90#T*&IGCGbNfqViBc91e&$|4cYA!}&!aRu~J* z(+N{P>!_C5mF3ipRcvM{5v^1zz+&>j8CmMnZG14 zl9Q#Ju{sB-T;L_CC6RKeRHCFj*H?#D@Y(U1knKOb4|IXw4R{Cz z3Kl!QODn4j1WJ;QMfi!>eAfYcoNKgH9pTNT=a%O**ZS3Uu;W ztsJ~^a^cv7PBWlTv`GwYa(jo;-KDVh%2Wd?{fyP2qJmb+ZuZ!fcH6k#+^;rwsazcj zXRq3hz&mcVjvK8*dS{o~0}=uq)2XI96wV9N+T9o0*YDZ)^gC}p_}pU;KKIxkUwW3m ze*fn`{raJMfBxJ5_>cR3_VauH?VjKMQl{I=-%H z|BKsqKD%lAQ`>hwzvrtrjvxQv)aflJzW#8}SFeF$b_>+buK(!4H{Sj2`i~y@c>5EF zj<37g_W9(*DTj3=9Mo{Eie(g~ynCU__$cA!mM=~=e>^sN+-@CCL_rUqh|Kt@0hjGt zud9QlhYDHUwWZ*#o4FtUzH|qMSeR{Y-Uj${HTpz zT1rBk{;jz?-_`DXH%~gXQCn3a(E%X%fxOllUCe32feE+$s@>dT)LpchE_)mu(U4%S zU@YZK@rXRcP7_^(kn>N4SYectfMYkNPDItAfFc^!7xT`W*8$9L-hkdNazW7R_unsI zl>nh7uYLdh@(({;yK`sh&Yi`-Ln2CQdBM%GV=miOkMp9>+rm&+eBKtb@%X^t`nHb0 z^z{FAWb~cBzV`!68p7(`~$9Ax1eF+HmO%w8&S*Wfuqyy_U&4=8k;MH zc>}^+DOAV@Qwci4d2D8-bY=vT4AE_hGc63XAm0LazY55N2~>08n2J1rk}SUvyk?>)HIgC&NPmS`Ei;--`33}IUC-aBZei*AActUBA81(* zO7}T}pP_))PuX2IEx4bwAS46XzuQc{k!Q&z9bs8`Ku6#}zP=C(t1 zqh2#Bo*tYY?vx0IY$iET-df00MZ=dBLujEjjFj6lumUsN8%eWJs%X}t8iA!8{MSS8 z3>j`AZc8l&k_Taypf21uwPO;^_^f-Stnc zd*gv;9=Y!kSa|*Ncfa`eU;X4~_x|)}_x|*r-~QxRzxkI3AO86>uRikrN3R?|zWvhW z&zjF}ICSjwU0*)6D6y=_7Y2C4V?<>bnd@wk8nD)T3hVzfQ z?41rvi^tK=`VxcdoBFkY<*jiD(7Rm*bR}dU z?{vTuJ%}oolGm2lWK7P|Lk>%;QFqZ{xnMJ0vYWqk**iJ^NH(P_7j@Z`A_nL>gI6Rd z3HnQEX_%Net(-|NigQfBXY-U7!?_v{@n?3OFpq!d|M z;y!@pM||~41vd$&;xON)Tn=L>L@^IugK8OcS;d?ScU6@+@D4!H`K^4}?nwDv zrB=~7?dPYaHg|MCf2IAIX~73>=Q+UGybWLKmE{yB4I<@&(KL%S*qxBpX(Lz32E+Vo*a?`;(FA*5rx>ZAQ!vBQXAO5~flE;86Ks)xD z)Pfl1WOA&{pb?_ufY)&$V}Ofq(bND2OesX;m}!8KT&O7E>uA-oDfAcA%MBamQ{)5~ z3NY4B!Nqv3KCgjx8;FnK?$Mrd=|CMyTUolk<*C!K1Zlw!#p4@4a5iStc$A8Uyg|kV zmVY}e&Om3?7)pt@--`Mu99H=wKR(D>k~Tm)eRY?sg^-_6q=i2n9}V9kWx+pSka77u zX6#|4-KK!gfzVARVGjkJiKI2mWAnQ777{h1luelQViHYe#*H5BF;Q;Q+Ws$o?FGUu zy%INB7Cpoic%ATQcr(HU8D=2hOl#3A8Zax+NCheh%&|_#af|7c#dyi<>}Ht}*4IP1 
zTWRW2gzHZyMpN;jbYe7{o(V8QlVwV)pHLd7-42jBaqJ*NjgSf$1vVHCDP+Ao0%xzkl_ce|g}Md!Bsxk*zylJ=qL(1}9FvzW3nEpMCM&HUNM{9Sk2SxKd$fu1 z<92h0!_w-oT(Dctx$Ld9r#H&YW>bb-+EmQB%S903B;$q%rwj!YLBBK+Rgt(?DH^q6 z;Ta+%5R)WWP}H;ZsNHhTVmfKF?-5Hs=<0j)O2_Z7bo^;_e4W#NZmH&6UI<=;#y#xs z%d9Lz7r^bCMWWT5|Nc%5J8)69y0ipacAU}yg;QW)Ee;T8k#KVeQ z+O&EtNkaSpuMp0i4YhDju(WkkPj}_ zLd86tPC`UaG3Th2T?SCc>`3Q zjOJDn01Iv@pU3F)n8~Wk%2Fuxrw{-UW-&(vAqClkwe)E`>PyCb;eeNNnOrt4<27-9 zHwn&#!#)wg!b})!jHCF-^~d))xwT>gA8fw(ZdsDG0<|(oJJhec>n#_N0a#HOKs;zh zzt_&M(xPF&e|%&=<$#NGeSKdsD;8UgkPLs zy-qXMZo!7i5R2r_+c$EZp(C6$j&3CG$;I!;{r`kl`= zJ@n$!4?Oe8pI&+4uUogix$E;+Ki=@z8!z7X=>5O^-7gSke|GOre|pcq{q&xH{W}%dwTE$j#L>`Cxi^F;uGts&jO? z=qzT9$(TIg8**4$9F`WV>AcIWLmpTF~6`S#89 ztsBXunj;<&Ic@D))k(GDgxzx5ZaL+29CtaJUCw5Q{iIs`g;2OvtJ~#rA9T5ndA-Le zs@Y~~cG}J{-rj6d4g7P?mWt~#NmDYWiAOZ~jCsE7yS5Y{NwCpc4fa8h9$5vG2-ubv z;!toiA1>rvgqBgoLZS#KNyg;m67(972FAr3H|AEBV&$SU5fKIb-A>yDqyDr~cHE%7 zM0>^(QGK~cW04su_tvWTATX^O%Vy|U#GQ!Svl(x(z#_o%`M`xdcB{GrrG+q@cWxD~U(2p6qb&}eKl0ny4vD`PrVW#~^xCsGMNVk~BE z1MfrLP!XQU#VyrBEci`l=Ja~exADlh5&>DHbTAtQV+3aSO5z-7~lv_u_0UX30izsAn zNc~-FVV1{3bS2bB*2WY~=0ht+zSO^wd=1dW^xEfU#({IKJZ$Zg1T?0=L1s6rP?8~< z98gHcO?ojVClGCb(q>ZeMK;)wh!++p*c+{3w9CRTzxaz3HNsPcFDAhSMvWA7vn1+P zEEsawk9fVuY2RTsaDeuIM*FujY?Gfo5DuSDCff4(woK+~CfkxswsOJNVBm5ncr_O3 zj77iYf~~BtE$Hu|+&xxPw?;RpQFaVXv|MccYWo)-{^`FS``y!jeDbZ=-`w^2^BX>R z^tHb{`26GdJaEsw58sC{i$nol|NhJW_ZPpZ^9Bz-{PX{M_5M#je|GoYzwX}qmtA{a z#K?jjyIxEBty@<2u{=;uH?SFM=)5|!`zQ5@Y>oz{LaqFX7cRYLI^anD@H$KlG z?dc6L!{NYWA}S0A#-x%n7cOuAwrz__e#YbIpj>Ts%Q?H{oX^`DWcuRasaQzBP=g-l zsK+_%aSnK0!)Qzh2gXBzS)>x@X|Yp8h`zWH0t5ULOlG;o{ znuv<*md=^!FWWob?dp1ea^h37@wml&%;7xfupcp-4(fDYO2j*5(%mM*5vTpM$K6a* z=RD2}R?{i9;;2r2F%q1Z%Ufz?TOn&IWOZcJSEUn%auFA0K)sE zD;~APBi3Zxl1Vw|ioT@<$c|_PPJ)bKp$5T*SR}#coa6g^UnKs@)k-{_WD#RAgZf7% zO{_lsCDymSp-_jMBS}hpxJFAM9G7cLf0e z53pF*McQXDlgfyu1Hd;QM5d4py9JY}7;^dQ?5Ky#sGd5|xowmac3wKwENXp;hC$(R z7yw;~@}F`wR{srQz+HT9Ot6TncTpE+X}6!?{95G4<2iyV@kcmV+JQ?nUbH}XT6d6f zBmY1KV`!>1X{CbE?xC(ryKB74HouQ(1y*+}GaI(<2x 
zZ_DLcv$-qr#959z6$+irX4_NA&P2Qmz&6quVA_4&PLH$AY&atoADI|Ga{B!CSKs;b z6R$k=U+?|x`3)N$eeEy5d-{*}J#zoAAG(*n5N2_LMaJMKzxWq`+57(2{SW`-`M>@C z?TwFY{`B#!pFOp8$Fm=Ad+y_H&u!fD=S|z6+w{rPyZ63AR9_{v$>9k#)6&%v4zTy0%;lL2q3)-z;`{~wj037b& zz?jF;>9Vyst!++g2j%XiJ%f~cm~s!Z^k|S74>E%c)$4Kg*v;J@$51!`L;)n6L`31h zNI2LPV6OQ6r?k4y`-cA7+4E$3_rsmNkM#}x)neTn33Zk7iiL`CzHFJFGv+f=*4M9+ zf8F}+N0+a>qf(rXg(it>47_6`a2x6sJdRek9VS@98^gVTe9Z{)RSmNAP>>nHi}R1s zo)Npb+imX)G9wXgHWHi;1!fZwQM#TsGp<%1?CaYwJhV}z-0yW=3Be$eS#tlWr$? zX=%?DpSRuTZFN{$rl`ESW7u0@y{yb5oJDOUZ}FySK<(>0!daG zlt^I&I-2@duV=5XL<$)l$4=Nxm#n6<8s#aS`W!gj(4nYZ@bQKE#xyy^8YfF*k z1#V%UU955C5>w2%^EswcjxH_L!+vkyD&M(NH_xJj;Je$E?{1Z@U&}4bLxOju9LBB$ z7+=fLVj&m}TNp|M4mFZ=5e-`sF-J0PiH42gpgO`CGbwYWu+GGiIBQJbnw{vC2!_@2DGYtZa4nLsEwLIE zHj~n#hwMR&l|}^Sr&$!iI<0C9_p76>U5k~7Sav`jN=AyjjSQ~WDF>dB$+_5sn8sJW z`rM6>E3{U*0Bvk$g-%H_5J>a^)DRdHZaV-h4xGHas!rr0#;$?c22bl*tE5GGfuLx8 z`eiAHkF25^%ecFO2jpgn8MZ!kqZojUyAT*_P>Ux9+Im_~Up#vN1zk$%1SHQum<(hT zAUi>V3JoqhNtA8$gQMvHLDh-P;5KO3aoKlceBi8 zoAsnhv0pgzWk=_ZcRqgRsaKwO>a7y3A;a|S)u))0(scGx5+KH#K^l;6WfJFUZ;U zdm25?8keKO>pg8U?H?Ro-`uvkuHmIiO)uBhzh2+?UVHm`ndGpUxnN>0GIX^|@^h^S(0$r(h=c=}yJJ7=utP4&E~(dX)h**hdfx;Qf~%Z$rB&N`K1zf}CA*>u9^IUfx* zM}nQP&_E(O9Po5e>e8mBZ#p|RjSTISi1vs@KPcrzTFrhVTV&ymm`sPX+Ffe(_ZszX zO1sxU9|L06SwoZtji@oU^ED$whkCkqi$)IG`KI)!aynz09A{H;8s)oVN&U=}lhhH$ zu3e4e@fl~l>(`P%Q(vC|joH<3DyDQ;TTIL;nmS5T#jO6Uz=MwuVp!7l^tdxU?w*-R z{QNT{)?&KLfBk*>AHPi^{W3QbBD${xV)!pVXa4)Y<}gJZ*Js#?^Iw0T{^`ajsAsb| zAK5h7#HGjGK_BC?D@K$2*r**#Gm~-9S|{UB2k-aD0$#{l06j4AwM|Y4S1w0xTu)uQ znz(*FT~OcU23Jz30T@0N#t-9Bfz`{91-k`fZ{3uUx4DTI6$Q6ycMC=a*loeC--r`9iW7*lz{t`{QD19!b9GZqX=~lt z-qs5mML)+Xfy)KMFN6MHJ6~u?ps~;vy$4dln>yU5`dq!$=M z7a$*N7XV|!;qw#ert$H*M4~1ht4$B9MN)`K2v8-o%y! 
zjPm|w+sgHuR($f+vQNHR_T8>Gnwoy3)Msfelw0v|%5L_%TLRw3fVbUczvy;cBBNK8 z&wU{tZGpTj4-Acjq%F0u6&ALFH&+wr=yE#-NHA9q0k>|K(C&4@U>uJ~$qH;Z z`&?~v(>!T-giNrYPZIRWM-$ZbD-I%Ti2igVe*GF8rLSF0-MpU2m5$wrPfggo&JHtE zZJe#C(dubW~Q?_=mWDG@h@j4{G&;0JjP8;!l$M|VH=yyMj$CD3p7PIsPnqSphNb_ z@fuCC*-6LstKl2hQ#Wtq7HtK_06o#^acH*1hA)?$)yM{r7LW{e40Sj5cU(m373%{) zyCvzTRU)E^g(z$+F~Qb9i%ClY19fIbt=C8?#W0CI1T$Bm2*7Ur-CTg|tDA(o7WAs#~MacEvaNus$0FZo4$JmVHO#K z&D&Rfz5y`y!_S}k@|)$S%GQa7N@#6`QD1H`Rl4jKy{M`&NBlyM= znrLvy&NsO14J0e9m*8`MB-m{+pY812(9pP6F5j%z??ZHTIV$k7+sbVGX&rS4FEd+W zxoOMnv=tL#!vT4~GaL;mW+qKPUw8d- z6Pg@vUQZDFbMof(7{qKLw{_=DB^5rB( z5$41IQ(-X4f`Zpi6t+2i0>)?Eyq=uNx)V{x=azb%L;3$0V)Ut9y&M3|*bNXNqxufr z2Z*s_qt0a9NunX_4l5-WcOy9<>TkuU0TjVP5iRMR(aHv~Wxznm7)lPYPUJ6|98_T( zwQ@RE59f5K{gMsPfWuI&#TiLgwUtKe76L4KW*ww)fH!LpY0XBZ*$9yZATuDHDFV;% zCc1zmxBxtqk(KBn+)VnI}yE^@scE_G%s$y!Yb$Y66BHcTgX&)P}j>k*A zzN0qbfI_i#*1#tJ+kzXg-_<*f%l(y2)131%)fupyakWUFU+wE z9+{8Q*@vEZ@R23+m#uhYvMHRg2Rc(NFp*63->#P zCf;1lv1KOXNtP*QjmORGaoz$;c8z*pN5_W2fn6R)dom`=jPscZ_hiO1nQ>2y+cIhM zD;PDd<}iWK(*l3aDV!tESglj2wsDnaW2uQb#hc3n z{+z>F<#yD2ob`TBqu15ub#@?eAz11)%Ho0k?}vxBGxVwKgkd)8n9VxIQ*1P(4R}Dr z8w;y4Xp@lHi2CVXR~9-ti(b`0Q>kj_NOV7q(-T(p#8h*A!ta2@hqNGB{Q2b8n4f-`D! 
zyKgf^!dKNMgOXN>uqZ*NfX*q-sNqc%DFM`Asz1l5fo!zWSk(=+vp^u^qF=2XMNYUl ziP7X8rXWwk@zqXz{a%OVpB7bleV5nCc^s_3>v@wFnrO(wdF-$t^EfQH9zyIQD*fZ!*z3KGrpp?M@|IW6^r>*Jrz@r~1don*IJVhoi*q zD6-jq;4E7ic9X%l$!Pk{?${R$7RO>|BawvK}&G|!}mS31{zdapZ!mx1y^lP0?^Dk$eB=G6 zKm7{G0l-*78enR{#;va+jeyE7oN_mAef3z$x`Do9P;q6ugc>c5EraS9nW?UKXV>~(epJTNYzs^8}xbP9t4-|cpE#Uqlj)G#@ocOb*S zLK+Oyf)RI=oKo%YDc&`w%gGM<8fEaLNm78_`Nn)KxywSctjcv2tmNLWUe}jL|!wPxeC!MqvrIe9uu$=QFS~b1#xUZ5|79*aRnl1 zlGd?N5cX%r%;PCzI4~0M0Qu~&T@ox8%}ljQQ6e7MJuCF+^U) zNxZb=ImD;&lGoMb_cZz4EgnZ(z)P56Z>LLWVhyDd(H@y(pMgG=h)OQc@Kq(G;EZ`A`1H-Afauf~%{fTH#B-k5|bcX!RUT3S{-Q{x)V6v{j=S~*5^)oY-tiDpP zTyzO7R&%{pRi>vZ1Dxene3aM5O`vw?Vw z3;G}rlO$chv*dL(`P`kcup~2XhW;91ET56F_H3E4B%0dt~*>=HSF)0d!VN6AewiT=n3Q+P9rXvgm$%G`pA9_*A9pFA@ zgW6)!3KqIh|DEqI(2!_rePIBzu$0ZBM+Oyn0~BB74Ys!5M@^@t;6JtVG)pVAD)9CK zCrrt0oF0`4C>h0IKr#eE!YbLOvB)75oMkwyLMFD6GU}9Og9e2Sg&h`oAeIc(P*aFt zOLD*YLi$$jM#2&xO@T633|e%NL7JuI#NrA}xK2H6VKux-XEv&!`hiyQCM{OjV5$Qw zzsO}HFGirGg-KY!8P!IeoOB;3pzd5JHBeF(vJI7R(F&susuR>Q&_7?htQ$)X#KN7K z^l)~vKOSp{Me9e$E~L{9y4V#fUemLml@Z^!eX`ZyjoceyPh;UQaO@|%4fy_ zHeu^M%EPcqDrS-~3cX!K)TFzm*VURHI4Hz5n_VeaTCgvQiEzu~8Ws-fO;TGgg@^<}tQ(6MVqtAGq;NY1Xzc~FvBhN{j)t{U8CP~HFgp{uawU2F zTIw>O<>>Y65b5xbe_Z+Lr!0wqb%ujFr!XK`F4_4@gk`ED^nQci-5(39vKhx5q@^X1 z6E5^hq{jo38DI>6G)P#zF4mwGL%NF$ioA_FiAk@3MpVL)0u8GbOHv)=myYN)5{6I;Mx7Ev_6%AnPzT_#RdPlZ;cqoKk3l)0p4@0cHo*9=doJ)U zG1%b(grrH2x4Ud+m(A?7=0{B%K%dP*>9u0;=_`k5wb-bW;T9ezVUu1>tHpYa1Pfd7 zAy5S6CW|mU{v*;_SRMTE8i|os;V6sR24^0z$()%Nuac#|gEu;J)-YsKjjp`Dt1yZq zn3Mc%J*6DM_#L2PG*Zr}wwN>~13aIFhrZ*)QU=hZ?bVsWNCGYQkSX}$1SF_ z$j2hgdR*l&k~zu=z?Sk{snuNN5-t%{zc?O|65EuRpz9zh(H7t+>9C_66@#1i}bhJ2*4WgEeWtv#UnKK3*gX8 zFxHYDvmux#W0c3ysne9H6o+M!eZxcFk!y=sv{OeNvY1c9zXcGZ0cW#-w>{u(hjpO` zWEBXxkzhB9a>D^o!G--DFvoh^9oA-p?wnS2N~1a`5$%yni+FQwA}Y;h_{%fS=}G%& zf(iQMFhx7N9kyDrXlGN?$F=pZ6Y0epjV-JDdNiMco_;>Sm5`X_-X5#i%tNruVXZV0va7L(SXkpM7KfamaVgt5fs zk;$NecvY3iKzgyw1`Xa09}4BYWvB?9|Hu8_=WhkkbhO+LQ!@_ z2vsAFzWMtb3+f1~Sr6l-q#vSmp}9a!99~)zqe6_$k+h^Q4ByA$ztF>oHI9(>Kz<$g 
zq1mX!ieBu&RLh`G4R>DB;m(Qi-c-Ch6(2~C4Np%E$72m{*BOW7h}V1A>pkppoycAG zb2$6CU1xm0l0>pJHChpl9^!2vHsFQzh_Q_M9gFQ#!Lday-?sO_UzV=8bMd2hKlJcD zIK)2q)Vv3un2)3Ezie zU;KIPC(GBZd-}7_mVLfq#aA03gX_T2k6K%QWaw%HSRygKNU2L@l47&*jMvpfM4^pj z|5f90RXT*zHXhniIJQ)<)P#JUqzeXoxUsN;6!Sq;Adwaf_}qZEKKG!<(PiaeDTDr2 zhqb}$YK;c_$5Z0;sCYEd>vdhSlF>`AE!Sx(170!7+_OOj^;95-u^4X%mw7g8nax_V z87@0PCu6dZuh;G9G%<~Os@i4mo}RR0kkoWGgu$vLr_`GscaUyw^JLl_3#)u?alk9Z z@~j;8CQYfNow)MtBv29VCRiFN&1sor-^j?;p}}tl2frQ~{8p~mYBU^o2<2od%N>3p zjtsyPd4P5(FXL)lLS4Yy4z{>pe>l)h3LiVefmRe|$Rx!I*-@3^fN13V;h`;R#c{8* zV=Sr9W_aRUV-rzz$T#43_gc*5!^4{!TUOUMzHzDP<@%LRZ~%g-)8lUJk(`4WYI#mgb|89jStp1VNK$k5RNK@JYdT?T9|QlNPa02 zon}}}M9$LTPH3OZV{9l;KbGp9ot8~bfKojcX$l3a-R@H!?=gp?#OWyYde4IF(^ZET zC=gO*snLqmXoWYh-Nb#tnm>1XHuCmQ8S^`Ob~Vebae6i?l;0jW`qHu&Zd>%o-3uSh z@vFcRSk?Z#P58u9U@m)_YyKni+i&y^jk(H~Tc=PRt zR=@ugUhjYO*e72tJ9*|S@yIcg0ZR5F=*(x5v>Yb%f*B&}Y((U1ZRCfIXM}giOKO+b3F11pqX(rRL~%D9^hrp+mz4-qY5h$)P{q)Si}$x7{WnAIA}@49i-hLfIX6P#}xdMbrIESi6;7gRluy7$nB9kjX0pQhZBoIQY$QAi9Iw`Fg!7ecPEM#QK5QJG; zi2{3$477z9>Ov5T0l-s7SWg>urV))H4=FM3jzRxCTD%kD|>Gp zdT~C-QUiE65cld>yE2qWJ7OH-6zGV$Eoni!2CLjz= z4mpR2v9W0VRHlD6E1H^+O{S%ZSbH>lDI7lU_m{cdr~KXv9%qf;d%@?v81OdwJm9Sel_*c|IQ>&FiF*B<|7mv5usT&HJOoA?i$o{c(sb6LgTR;;{p{-QtpOEduj zDbBI~d*Su}`+ojDciz9~u0@aB{q(X2o_pcpm*05owZA=vZ{K`(@v3(he*EP#RW;ky zigJso38hx(oO0W{9JW@0@5t?C>77 zH&QMT@QBDj)y12d4Z4efu7+yfRBz=j5io}qR1zA)U_W3oCJy`iNSgOR&?k>Y3|PMs z2^oTZo!`SmLM-5IoC*0gKDWf}7;)GJY!<+9-qgYw>y3K21istnmZsux_QawDl4=qo zttPF-k>|t(Y~Rw z`pw3ccY69h9UNLG5^XZj$E;ix0<7QN0$q?E*lKxQKslO?r{vNinPi_zahTPgwVLa^ zF8Gw+Q*Y28ZE5+U_QD699h<}>y9fKXwRe8f(*9n3;~OolABsfVIMW%Y&=m4Pzxh}~ znayY>#>5`?xxT?qE;YSe*Z6dO^ON<>Pgd1Ee)`bg|d4UJKwQ(SCb6A^+&aGA8A)X@WgRBNpK-NRf9V0~Wmcv-`+aa%OQEM#XNW>kZ zNpOuNV=Q*vBDzL`T5QxEOkJONE3zLe)~LA=rzNC7OZC5xIJe6t_#L(ZbPGuoywR zg%}HWP?*W&iJpRjn*ingWd1Z-ISm8fP=aJES^B6c~;MY(YJR1FD@jB8`A=-FY&hVBNCw41w0%?23{wr_F@IGmelR2*`qkM75BK{Z^ms z@0VG}(sG5Q2NiT0`2cbWa3gNQ0?=a^LuLj-V1);`B3OwI9%rU7h9}5^~{$h%jX>b 
zg~{@<)3ebZ+7*iI_Xl^nd@#*AJYTtd8=dZ5Bce^8e!1j{Wk44!dg6hCs|%$Cn7frj z(Z%=9U-HNKi|?4f_>KjS-mzfGoew;DCw%zW?YDw(PTaNVj>V7O{lwGrSFC(&#Y(`~ zzrDNo_0>>Q3HwDjVLEJ8yt&N89yPLi z7{g&5b%@d&)>Ef>^LdhQASLat6v0?k&?g}P+lO;3$?R%$SX+HCuJnWh!{q*IAU_jK zBBoEpAvBxh8}x)pT^F9&$ugP=cyHZJe=@Gd3Fa z4Mu%~Qy2>R)X9W(Y!o8+NZ(52@~nR*>zSUk&rF)e63SFe6!Q1*rbgb>9uCM!_o9m^ z#9*^Vj-_5w;Nl!jP|#OyYofKs>+8Qbbo}`v#n04TdcAMpb4m-|Sb;xBfa@ZeX6tO0 z3Z6S-GM4DH2W8UT!$Vt!hrSb!?3POps1(N;`i$9lmZpxkw{IveU3sGPjfRG`a%oY2 z@AkU-H>)nZaPsWZ(u(J*>sEGkeI**+EfpV>ijSz3#e${A?W|%92RgdfR9}4ITcYy#rccz0lXkv2;FX4aVvkeAn;P|0Ev2dEjE$Jd;C1#9-ev?Z z2q%X)M`a?KJ9oGp!`Qsz_sC*lZ9J+UPjQnI5M~JcNyWnF`%=7;#qUQpa=%yH#g4Drn6JM*3*v(7ldxtYoMoo~X)BbzGTM z&P^+`8Rhh(ER!BiC7WW=`f#W=7HLW)n^MUO@x+;E?3m#APN9BxVB{5ve3eT34r^ZP z^lS`_4Oo#0&O@@))+iX4tZXUgAy=fg*y`|~`2*+=fiP=Xw;<}r4` zBXSzD9X6ZX0Fa2iw8(LMJ!1s8ZB2t8{9!;ppAKeh-R|h0S%-@G|tc5FKm;;n< zmtMbDM-?gM`;_tn8s#a$0&6eOK}9u47vSHlQ-IBv!`4aM0i8bg2njZnjwaN}7&V^K zPp0AEiIM*Ch=Q1aMzA9_J!T&pwI||yBF5q$GD&c9qE9msV`E{x$0>sIzPX!Y+jtX9 zv%I+tRxn{B8AqWWWjY(mPI|^uCWwwn>QZrOz|-w^TmqLF*Wz~cB%&0l9`H>~1_|p6 ziBj1~ANFXI7Vlm=->8rsZEjwF^2}fNA6a^~@->loquF$XW6Lb;xxAIQf~JnB6bGb| zonq0>k&&&#!`}=Jemy+2rKfw#K>x1Zo*gpD0j2zaM6#`>_Wjbcuhi6j*wFZaQn7Dj zcw2SdtH(+o-FtN2ok-TwP;9%9^Pn}j)rxKDC8w#CnL#;4W-8e$h8vI8nE})Yi!gx zF>Z(Mun8}o?s0FGneZkP)@Vo<4yfFYVMw&F0xyKlV!N%3gqhdC5y08ua}OqCR5l}! zeg^>Ru~DDj!`m#hQ_uxGe86kK0tIO6akE#daVrlq!c51jg-uj3X=iV1;8}Ish`)`I8yxgdtM*wad2n{D9{Dw ze4~nsz@3Z0nw@70C-+;b0klS10oCD(VMZ(Ej4J$|1xFadtYBd<9FL`wdi6! 
z7Q!P5>;5Nx05@N*ywD44Uy%4kjD_1VH--ed;^2L2$}5t(_=ts>*}{U@*houJN2d_= zV(}RQDR0iPuV#j9xIlHByM@T)R8~$~;0iSILCFx9M<~@0Xz=nrFTek6dc1LFx@T@i zKATm{Oo^r@do!7q(XpCjsxqCvFrJ3ot$3ou_j;QHFg6(eF&rtj+m8(l?%98M&0Fs+S@G(9Pd|I#<4fl)Tyoz7 zi|)Dap}XfTyleiVyXP^q%UG!m9?Vh%_6Q|C8yZVD=4=n!E zt$=**19#1P=&n2N`@eVH|A&Q(?|uBK`Om(%cG=-tKCKEc4@V{RLZ?72xyH*R0Lh=bjmO_!A*>@k)VPg{BXbv zLeP+3mWVOqqZX)V9kZvBLVDaWk#CsTl=kC zkHGi39g<|+GCt;>nha%$p+1uq5;48kC2~8uT=q6Q-)Q6NY+RMiQYG+pPN5|VGI;ZB z);U82_H#47*=cCt&1QtLBpvn-_&x0g-RZWr4b`_sJxChX%J@ zs9#-H`EvDzHyfMZp{SiYdPif+J4a3~IavJg!QzLHmOOd3^7YE9_l}=jxqJUJhmXH- zzUKXghL0=Gul(`gWBU$0ch z{Es+=KCf#a(q4QA)3->pET+riwlSEUilVP}YBG>X z+Y>P+=vR1MLoR!(OQ{P^vRMj{lqwrzxr6j;j%MEz0^R7 zc(cmwVEi85>tYz4L_X3*`VC2Qx4d61>sQMLFldF8gXfA8Ak4qJp{J#`zoT9y=_A`K zxX7YhWhB}sS|v8>6hsmL)&K;JLgj(O%HWcSh-2YQUC4CJC%77k86mgWa^>Zz$dne` z0WRVN@VR-xLhDo`P^zmKG*D6tt3`bR`I6j%>~|e3DMtu^)Yggt*NCE4bAA_QGvBIi z&1)FgLJ9%uvH&EB=7G%2l$|$t>}G6-ag$6ibS`)tq}`j)4rsQ;<<~8$2@R{45gukx zPa=5>ngJYQ|K%boWtE9hfN0s6lNJ~#kl~R^wQjw3ghB0=xK}dR?hh2FQs<{8TV|*G z@XAhgjE+@BW5?Xy9bW&AKzNVGx65kZV6lCsrruI&SLxYzfrcfVfXlbh?ce0_Z^p~% z*@(A${F}Xj?UC4#X!JD8oVe8R^@-9|KkR#JshavdI3`Bv^a9pe1 zrO|#*QQvE{JJjl(D&=0avWQYc{75`3PDE9Nm8Ec+%@ea?BB57CLyBaKCQ;I6XrW45 zk};Xv(P?2ZF{U4nsK^pvPv1n^HJR~3(dy+03V|^Dj$E-Tb74}RD};kOhfU;hsz#Fn zhDFcL#%E`uGg;4gN+0rx3AfyY11Aih9JmIoTm^PIL_?y?xNdsVn9XqMQJ8o~z6|y1{1yF@MdcCfk(wx?+iYd);nmVeK7j<`kQBnEc zg}RS=d%u=R_NkRcsC`w+iZ!Z(YGsjJv9+mfb@}<1&egnduI82QzK_L{jSVetA3m}4 z@QJ16m2cEs_^`8c!$AM{9Ub3XtpB8?<;&WOZ=bGsp|t$D;xmtyo_oBm;ibmbH!e1= zsIGq&G9jDaRLF`Z#>Cl-CNpLrRtr-yMq{}-$w;Jemz3Z4vEbu?%aVv{N${_TEE%^)@VpQJ>|xTt;vi(J?>7AdvhtWxnUKAtf(S@;>9U%cFL1UyHRkJ zneficB(Pd>W;!-C=_e_RwAa}uST3U8*gzj4nVA=HRVVO0;ed)L=j;tgq0OARcJzZmzyieX_m2LL(bsC>e4BM63lo z081-zgOA5y6dK{d7FXnUEA6slxGRIZr(E4YKHdtE)}4#nGTt(g=H}R3+M$!jKx~)Y zY@`(cDMKA<=>Vfspmyt)Db!Rj8Wd`?ZcQ-A5xnF^1jFBZV2J>rup@Kl(+`N!fX&%g zjgZq~=M4qgwxGNv&My;m(B%JJ=w(1_TOMiEMDHpq2;|TTXN9*~OwqA%C@_Ya9lwO((0pEa*Fai5wjA0r#U+dsv3@Ip{^`2b__vb)wh+}}7{-#eW9Ozc6mYM)fR 
zzqe=C>GBP)zw_WjPu@0v@gL_ua@*49A6oIo!q?w@@>t0_mEt_<$d<=K18zsxg}NVB zys>cp!?zfI{!R*iN$fUSDBO1W1qEmA8=Sp9jQ zTat)UB%T!hNb#>b9DuNHvJ=q~vlU0At-|DlH9e|}1O{zflhs`3ur`MSka-0lOfVMC zn`!4{#)nPTkb*+Y@7M`T+(2U(*))@db7;V8h{w3hgrCGlja{Bgz>;gq9*=0rlD&mD z)q!})SZ36p#GM_9(Jr&F6;^W{q~`kjB7u>RuixkHCmAvwkN{vPBh%?Nk!X`>c!xq( zL~BnXR-;TprzxkkWm?rKgjtp1KzH{Sr^?b>XGj`qvwq-|gsHTY3Jy;#04kE`OuBd0j{QcA4aWfi5xVPH0pIm5P0r zn%*uhedbu{qoox~Dr=vrx%fQLuyxC->Xy|tyxQ3Ou8umIouFqXP4L`i;P;pwv%i$La@|;{sF`xogu#_Gj5s8c!9df#wRnuSlI4%7zG}-)vR8o*NRlqKFE(H!G@R> zgLR4pm{o{#Wi;Y}w!XHy_WFw6)(d2jq(mVBL&^VTb)Ivunke3+C4sRR45brzLqUQ? zY*RUu`#nOCpdZPjX7iB+$j6!r2=;s1msfDu476GdF9G5nt#TLzT1eg&>|h0q&Bt6D z3sZ678y1>nJPxj4I><+4>(Sm-$a}$1FZ@;*Q7Bk2j}iK6y-u@BU`b9mq~pRZj)H%U zA(Olw^t2Xm5EGv)nfCvg0l>E~&6;upEpP}7h>cAQBurv0J8OX7piVukkq;301nety z3b5cwhdaH2?GE=h9KX(FUeDXUaeH@Ig>Uu7_lLwUwstS<>R%#~Kc}KzFy zk2xz~ENA@~)v%oPW4n7j!fYV47rvY4E6(~cUM9=?EcYR6-eBQ>;4C{i^KqUlZSUCq z{@NuEJ@tR*FZtu6%kJH^?}MtE?I+KCRo}3k)kEN3IN0R(Ko?9|`No(2a^HOm|E<8{ z;v9=Gi_`4JpAp}PrF*H$3`7F1?hw^nqd4Mxy=IW=Fx;~VhqSwud7br zK~ccyk7(4p)tc{B>hF;#K#T>0lJXR9Y7clNsNEuerz(u&29Z_Bkc=^e_H;nUG%*px zBU+Lk(;5l%V-YrHUg72%$-T*p`^sGC=Jmvl>*JWu|MSl?Sh|f}u{Up|uUw853Q8_t zPF=eODS20}j9#AgkEfV`r`us|vzqH5lS+4vQkRhEzoQ2F7-KlhGAD>Y;+Ro?(!!P# zfo=^+cdT>RYFPc*p~3CFy&J{i4Kl?yO2tl+FjhsDWsNAp*J;jaRb^ULnNkKaEwN~4 zOUsufXI?yb^tqPSPig8Xr8y4pt3FF?AaXXD1zrD7uNcY(|)#a!yT{$CCPZ zSnhRp3w(>i)=DH>;+zVL0IZE`Vd#2VTcK5*QY%gy>5Fd1$ao5Ub5MwY0i?{BoYvQ7RFo8a6W;gjS4pxiz7su(3iC0F|xFI51hBk z0LZ_>P$-}&G#!w~B5Vf98FSeUUKbQVps*@mysLwMCe%kVBw>sNq_rW^f~L2idPPvp z&8`-z5D^IzEK95hyUwwRk(UwSvC*h*bu~FD134EVX-w^N)q(_xwso{{X%j2x!*ix1`q0{Xp7`sEG z-c2ruo|Z~>%B4j#brSluE#0JL2An~K2)mG96OSlH6HuFSdB%Bp#+#i4BS1JH%WuIX zASxgeR4igBEUAfI$T>F?xJFV^Z`{cI{PRrV`st@!9}1-5&BY3x(fC&k*02m84T`Mh zI#z$)pgTvdQp8w1*X!xSditP_D$;5W=%_U4M;h@g%rJ^CQYoV@K;CoC=qlInc&20`FEIorhFdg_kGPh%xH+Qmz2I31V zQ!7fOq7tpD*6kRHN9gpJWh`ZjN3G-+5s7A0X)db!sMP4q&<|143JEH=R zT9^zGVDklqxs^~MI|Y%gpmZwW8{YGG%_@dq6Y|z<(4ei%!LwNVf;9wW&SVR;LxD~8 z?e1dCip? 
zsS-qBrwzJjT>@*jf;bifx_Oi4R*XehzO^Jhx94J2c`q$cvSjn6F1SKW24EXd*2U5a zgI0`l?2zOck^DKCdWB5ALL`4qp?L}T0saF@ze*&3en9m6i0p-7$!mk6*JP@{pvAyq zTWjJ!u-Mijx_SfK1EIaq*s(;i6j@xW9R^)JyIRk_rDIm;GIuDxA4IJZ2_MN7WYvLXvl*)-rQx>H-!EDnQ>W^ME@ZZ z3s|Jxg?y|^xtG!$HX4rW>BBnhK@$XLl~_zQCZ@sd7!G))L{}g|q5$7d#&nlwoYybA zFVDC}6I9SA^Lro}6{8D*zYWu1BZ?S^AJdWBQ>X0NN#E5g5d>JQpv_-1fU(!dZ(L7Z zzcz}wqe%K)zZ#oLn}XgRj;(+u7s&cMg2~lz^p+pMOQZf#tt?V0_bTK&<+8maLpwyn zd-c?56H|^CO#xLnJiKK@^p!}oK_dB%HI!oBI9%W-Z8qp&*rU{E)r!+f`GM~4Z%>_l zVb6g_zTJBF{=<(c6}ydw5?WiVQ9)fofA99rj!jiHZ*1E&??~}8rR6W4DS!LG(dU0W z^upodzn(bt_Jxbz$|VQYiW04ggbWxiI;_nOYl~?3U`6G-rDcDuz4&%d?*^%4mqxXp zrVdlo9+m2c=9W*}TGt!(wW+vbZrXiiE<86IB8`fqZT4z(D&qN>kKFV3S9()3|sxnGli2}StR87p(UP#J?^UKt=Yvb8$JTu{q1nnND z(ab0)^^it3fPJP2vua7dTG9{Y(eeScbU-Zy)fS}~rj#R;V%R_&1a`uC;*uTR1ciQT zpQ~^%&kdjMT&#)_MX<;K6r4~9v$rOuqQ)s75s(YzzJ<)h&{X0JW3)z{!eXR6j^B$5 zvDpxz758Yk+{eHcyaiF0EW%7g*kE?s8JED=IIPMt6dn$Lh62JPvEX%*d|Dd-tecd~ zy6pObFQ8C{UKl%3YUQ>w5cz1ud(u`;d+U;`P#a+lG3!oh?`%8A^hgFvIVm< zUaSuEz{tn;4L><7T_#a3S7=@Wn3cUEmcPbwpV(dN1?M`JTWhj>X0ogk%T|v_-_X!2 zRn*HW>SglID~$Oa-u|iEw=EDn7>%7xB+3)X(s1;U+qaG7)@bOJO6`jpdL_B@Yh8UW zez)h|*WdZK=U%yO>B|cqfBL=$p1A$#6}MN{ZbW=zm?DPVtyd26hh(Zjwm0YP36sM>h8LZjviEBe{VgS90lYnPj(8v0F6qy=dh7 zuC8ypyS`J%j#8QuT3beIAr*&E&_9Yqn+68I?C;x1Q%3~LMUeEH+jw&)$98bWMsmD8 zr<9#i$`5pQep6h!;)i_;H*UIX+m8Ety1yks1PA*2c64=aZD{Y-i79KkG z%*nEsich_Ew&I=YnzbiNKiPNa#S^FA9_ZhtQj{2U=fIG~HerXwY*t` zm(4Q7v4bY2jixT?sTSVc74$LiKOxSck2pQX#lkAiSZ~msQ!7r(B`5V%T{dI=dKYT%!EJS)%SECs=xG2SLfG#y_+SXJ!WHhI504qwcNNG`uQiA zVDUG22Y^F>U7x+jELqpWnOQ!)Up-?UPM0R_aOl{=Wbi1r@iOwsxZLMhy& z=kx$@du8lAO;TvIIG-cD7d%;r(gn`~UWmY$Tk8~z z7?4}Qx&pwONP#cTsMrUA>E#79CQ2>v#uiE^?PL;0jLk!6o=M;+@+qa6QQ+-F*`*~L zF}0adGL!^$1vtkJOP9&iD=fCPob4k@|E|vP5ifk@^lb6^cX)ie{K1`0_jeTisaXCx z-V-NYiE=q*_zQ1a=MNP5{l|mBGr{1QP^csnKIHUlVYxL5&5I+lXBCXAAOTgTko$_6~5<|KG0hUm* zQ@7;cMW-hHlNoRgjD!1ipucbH#rlt1+CCT2`A%PL=LXnsp9*(j53k%)F6 zu3|*M$Pma`ySly|9Qa-#J4zNtyX>VJvlowge6=;>>&E0^s 
zre>4j0z<>Rr&1hlYWn){iKTmr9^3r=y_+`Qd+E}r#KyI=vtv_R+qX5fAD*xIpsZrm z_T3Bj9$IqjlW*13uI=gG$?7XPW4*w)VQY_CQQpz< zqgqjBHj>RdAUq_{I;_n)&8fluy-HcJ*?7V0?3x$@PUhN`z|HHi!bO@2p{F1gF-1dk z(61vt4$5U$IBXJ!P408k@Vb`H70ym)L$g`e#29Piniv|43~I$`6Vo;~E&TEmG_e2r z>l_ZE1p+4zm;pxLJlMa#t8;62*SCGWo0akd99td<_D@YPzub)8yfKEqJ{)Cl-kh45 ziN+!Z-rT8>of8e0%B2@fOq<6kibp_#l8l>DN!#3P1}tXnY_Ben{5*MhYQ zK^4PM5jl}7aM^OI10N}K!9_pGD$KcM!Cl4DESX~qV%h>l;Lg<$fSoE|g9VXR(3k*G zff|gF;fz3g!gn^3c))*&!X|k&2)Tv6!a~dfX6gzS+6lN#7a|34$TZSYof?9$3LyiC zllEIM76BQP3NXpMKok_dRUx1N+a+#A@*EvurZCpbWKd!z3}P$}u^M_MZ~qj|{@%^u z=pm?04DAg?_J<<-@k5*ATd{mqd(X12{v`vVr-r4=#PUBY)T>zY7as3X%;OS-BZB>) z)4j*(fv7_j^-A}^V?&Z>Wa)bNo&UkTX!zm^2747bKiWiam5qQ-uBF&Z~Jb~ z6TN+33ikbhK#9Y-k72*H*}u05duaU+wRIn?S^LOi&;0wmMSqyL5L5(r-~XTYJb2q( z3vRpT!Q1YO~n7cD|5jcSACOdD{0mq z3V6D$+$F)%5($i4z3lq=M&!y|h_s!?@&O>hTk>1n;BQS?NZOu|o$_5J$Ht$2$|4_| z|KWuDWRoC!Rb^Wtv&p&?jI1p)|*h`f?InLzG=Sq=EJHY&u)u@_I2zHzqV-P#MEw6%RVGIXe`Yxk+LPpWIz%Ot0a z^hJShBPo&n9NWR0yIgj$%Pw&{WOiQau!6!sPKmfmM_##hr&X6@b`+Hg8wJVXCDTly! z8uj&hs>-0d$eY_H#@JtfPW=7XiGTb9`VnrWZhrd>dKG{ADKnnp+>Qa(a89Q^s8DPl z9Q?Ykf1^aS$EZJ*PAP9*b3@DU&zWC-$s#ykyP8ao34V{lX6e=InpBE96Vv7th7e;D zQ6?OqVi8j^&P`=JnF)U?>54@hk)Yk}V2m_oxqwkiEgevb`w(L(`LJ3#ARcTV>}rq< zwCmLp99CIKhgRU&nU8Q$7YtVU4N)%c@An87-auwf6~-C>P=ilFO?U1p~gp^*&OR=Qsp=(__8krN znM%F6x%H!}>h~(DKH0c!*=JuZ+49};@^f!1l{*~H{Vw-@oBaouXSY8Hk1N;@Xf(S| zoO=JYzuo`9;@j?C@E>>Hf7`te-f`Fcf4Fci;b?g-Dsxr-~vWesV&HM|BQc zKWA()(hbDt*y?e%y6nwXu8O8Wk3?w?>2-&7)Ip7UpH_X4rb=k)v{rReuRFugr7HPJ znYcu)tO@uuGt-gj>B!t%659Q)B(GeC2`V#gO2$;c?%EnyeI;+M^LzTT85RZqGc)no z+2r(eqL5`rRvVu5xGO#GOvTwqP#f}V5>X~Q>7Jd5&d$a!Uml&EO~IE;MaRe7siY$o zvBaZnB&6{;`%KKak&$l(hSv2Ayw}wBMt$@03k}bmt9|Np<)dfMFF8~B_^FB~PL@A) zu4ZL(%ZJ0mn@AY!PNlp^uPbKg(`Ms23tMgDYVB62GvLkT1O3};FML>g;q$J}?K;gV zhppLVZzY!hc7bogV1BZ)WE;;@B>9V(~Yd#tt+{e&W|BtNi{%)&E+x_WV z_s%y!C<6lw0}KpI7(!?XgoKbn2!tf0C!`lAwqwV3+pd4)9#|b|Y0m8!9bKlh55h7MDzpVN>J! 
zi6jA2e6`~V0tTZQrzV)G3DeRw&%L`74{`lh}T1EZ+@3(&X>H4Ebiw}RC zxv}K+It4a!ug%9jBO$b zqW-U511ENP6LS@t|X&u;YF?yBR zps^WsR#0*ZH`KV?j?%PH=?O#_l<1Oahsn&PD|@C^+^o}9S({PEngE)CWN9k}zmy4r z6=+O=Y!{sw*>`ufq*a2-9RC* zvY9ZgT%Z~q#1ukGCk1@6nXp?x$SulPuuxhsz>{i_x?seCQo;ltHx>##3uwe(fan7- z?c-)(-PNI=#xk;zZWd-gvJhP<6-~$~9H`xJ)d=WM*8|ltptUKb(?%d;tiy)eOxg@{ zo5?WG;OgTgm&>>khtV*cDN+ZY3G%yuvCd%y*#)qoLJoBWpW@VqO6(n}YAr!+iNwGh z8;PIw16O?_-_29Ij)5(COrO-gv5Le4-?gERIC4f|i6}koH4{n0{HzBh=H0n73w5y9 zQPCb;I>Q2~AEzGP4=>K?K~x}iLa90`R~{ObecyW~SKj1QQ9WeQrs>D}zY_RBFrw zV_1N;%Igx2jFgRxlo_d(NRaQg_hfc?G+Ab%s{uc};w*2JT4=Hr>@>&8hxcQ@{&M5j zUv7YUuV3zDL~%d=^X5PPws7ZWFdoq&eXQBoVYhbK%)Jh4f50<3HDUVke(GO8Fa7&p zw}1WR*02A%^Y34OfL)4rfBNah^(A1-nx3@IPU*)JLc6U>qd6{>9TrOtsZ}S{sw|qy zciG!xApz{3u!2aL6dRA|5se=UYrys`md;FAn6*x&OmImj_j^~JIxE#c}P+F9^YJZ$tsE7IiV}=$$yaDASu!3Vo-Oj%LcM%-7NTaKDWj1F(V(D-As6$MnAm%1M#ho zjfRkb7Qifu0e9Qe?4`?YU?`7NJi`Hve77HTQxeU8XY7wqgJC1!Yu6sjXVTQgVZy$5~40=#=^{beIb;TqlnYc&K;3NG z-{Aq(w4gZHMuKcKN{d_xgz;l^@pt=nBTqp%{yk~jrqHaz^qU#$PMdQ(W8H*P>y+A0 zXl8dPatX4p)nj8-V`EhyV>VV8juiNV1;J3R({3ra%YDkxe#Chs>hP&{J~RBcb0)VQ&F>d`>t2s zT=T!)c>lQ#+yC;_?sXr2`Z8FAz5ZXXz4^pTum9KYUj^^~<*zUH^;D(CnADg)5>&wr z&T!BNqC{a|D(D1p+B!^hH7Js{*E{VsC`X#1t659C%g&!j61T2RPMbJP0c^mYeJQ}~bNSlmh zI^B7V=D1vWNTS$1D&H=WZV`yq_YAz%-urq}2N3i34{cFtjv0t6G+9QIRW5r+IKV+c zg#ph1tmPd|C6$w7^0Al%Hn|Kr*a*V6e}d*>dx z^hzWsyLmlu^E%Mcfy(R&HXJZOz7$W5ndWDJm0)p>m>iRPoP7epxjxP*t-3Po=X)GI z3|&E!Wei<{-XVz!il~OC-bmK21Y4WAGw2m8&Ktqa{U0-^+4f&Q-~87v05kvf^U@D@ zqLbsK+rdNYek0ikRaJm7U@e0__vmxX@~IJS^aKvCYH)>#Z3kk z4U=-+c3_@D8rOK(jf}m>Fq_uULK6h?^JaWr89##6s54*^g{WU9?8W2)n-PbxHa4%* z1RfBuo`5xD7Dj^r(_#dry|_w*`Y6#3(aLC7V5~H)J;o%YPKq2KMuUQ(l`vFRXEkYI zIR%JMcUm*0KPagimiwf)X=a46PMd*U_R>O({lseD;SV1~kmwEUBbiTxl0WruU+(3- z$l<@l7ypqr@`t{m-*bg;;D((Z--UR*Iv%f!Cn{sH%5bE}AH3iX%X0tVaIS&G>=G`wMG%>r#2GQR|fi9%6Ldm9=6#}1S4R@ z2DWTtXJcb$B5^RvYP2VF3jVt7n`ge=zxvXZT|8dCT3wS@xNYm#uWaA-!p=Q!e7@tA zwd+^CwdS`UZ+KJ4Z$vqP@dyUB(h zJ)R9QwgH8xfDMj!XgWnKE`aQjTQ>rCZ-sui6TN?L9FT8If!QfjIG}Xd1txmHVHICr 
z3O#r*|MSmx;6*hplts)Qpag%p^YcIEAKv#)O-Km*vRZXosX8iCeFHJ}E3xeJq2YBM zeQ#Gb{I0a-rIPBG`nX%Qn$sqx*lGqI0k5+g1(m=I+cdyVr}UHKcq*xd6%U$3MC5l5 zS(r9B7IqPMlL7B^*@pvOS$JZ6I=R zEMF@9zOC!?#=n|T&B8dtZ01n^28s&24zq>E3>t|+BQcYjfZMhlwCD)=tt&OI8J(5e zZm?T%1i2t`$Eri*00_%6X-yRPnIjt=sgoLXl8n{PNXU((!bmEBe9oXSQA(Rh3#43j zphIvkM;1VP0+ng|LR^7`N@v12J-d*P0JvXc$863o720=udB1DvdAhm#nYO-F=rngf z-NStu({B%iE+vx{u~=n14lei}-$lFggvWb66uIONT(q)BWvb0XBddo-{v_A@)#}*h z3mxzUkNJZqJiZf7&mOyLm(lzMY20RYeChP;VI6z))DDmDFp4`Eo5)3GF+c(*@^Io% zVd;BkbKc_fPnhVsu{d`u&J~MKwRUXp?)$2{XID%6?!2PUkDmG{ujp%r90k(!IEC(L ziHKrqTs4zY!nST9^y4?fUTlDeE_)Ni*sBa(hXS48T%nFfu&D_cIWlkg;f~{PKYH)q zwcfpDT3XP}&*&n-A&0G5E1t_AAbGy{x84WN4N^06}I*L<8KQ;{g|AY)U(!V zsqFBuXfHV8FZm-|hJ+hAypP*@-l=W*b3y5!3d>%-Qv7OH?^dn)l7p>{hKQ9&r z6)hnjH|$5<+`8E*<8+FiN`aJAlnj=LC|q_PL$%RltC{JxTlsLtR%K4mCkc2)V5BY2 zW^OSv4X}^9l%{}#1Jho@v23j-Yk1_l-hoYZEo&|n{NeP)mvf6&w{`B9N=n8P`lSUI zuxs5Ozk4TzmVk@bLNilrG$ivl`y93=9Lpcz9;>e3RNJ^mrMwDva6%{B2h(Yqk?;)p z%g{kcOW@@QX5stAa#@aCcFAgLV9i}7y2EAf@w$e*F0qRp#57fy255B63>exWmr_m< z^$gXi6F7JLIt!6 z5amE-3qaHWGPI^^3YW{ck^Ktw7=qT?bPcyR9k;bCokf~16Q z@MHoNT{feI1`isq(}G^fax$0=E&9WLODqhuvXKA_#0HQ-4f(Ar`(n@m(=4E5F+dlP zY5=*k3i<5dlLY`TGJPQOVYwAZ5657ACQ2O@RiN3}4r?qd#x%yF@fF!)-M&4vdBdpU z&2H{<_3h8rwm!z#w!T$8+?Un5F9M-+iDYp!Rsi^cz(uF~w3R*S_MQ!fazl|zfzUa| z{GAH>Oep!&i0pOB^eIa1_6821>qPc^m%WvtEA+Y&6Wt8vSctJcwMUlu!~&YPR`&L3iUpd>5|J`=&+Z&owX2ofDe~4l+%VPlBl7G z77N4iy2L@BEE2?nK6%J5iw5N|Csq;*YajhM@t&o=?7 z_|weo8^G9QH8m>~r+ECY2Y6rf4s7V^`>>^Jb#3#Tm5r|y*F0ZT^~bW>H!JJjEva17 z(za8nxCCtlpj0;=21SQH7qBFU{esCc6BLd*|-j#?1}QUvdXe>UDrCh=qV6 z&SmF$9DS%?2C=UYY-=HJP!Et+51x{wzC@?Z8y-GA#6KaI

0)dU_A?hO$-43IYdw zf?QUpl$R|3_C|`RCh%(LjL1;Cb&y1=M|f^EHB&?bvfL(PdXU#WKdA8dd>Q3ODRdNGVuIEm)aBIv1?8zKx-$^E0%M&hEdgQd%v2QpSLBdI?ph?c&785AKjL>H_8MN|U_cp-s*8BDO0 z5-|xNqK8u1OzAiRD+5>vRJ-P6iA+TWs1i2g4hwE&(z=3Z@+30G+MGL#mW^usU4*gC z-K)?+0NXk6OncvR!;-Z|^R7tzTr6?P<3DMj_Yl-BoBg=kd)Di_=nq`<`Yw8WC#>vV zCANk)^0G+&C&Ku#-Mtf9Yxkh6gGl^bB!1H2-b*s;)%d$g>>VTqBa9zg?AwBo!}0O+ zXrPTHa^0SMpT8iHsD}F6>To2NbsX{fkGZ`ktn38`TNMir&P`!Amz;|W!O02GbA5f$ zdvnQsZ4MM+$HHQdgU3)nc5b3Uzh1z@0Y-958zT3dY!3if7x1}9f+O&L-5>O|p=3Y3 z{)|osFNT^EYSn2BJEPVd6G?aVazF0nek4`yA`Hi&Su7jJ&ns0&C6Xh+im z`Uk!ji;8KIUMdm<&V|Ww942yWCX&>>JF&n0wDhlE?)>`e{r~*u!LPsE`sJV3 z9^8xFSaf?`0)@Pg$2->E`DK0U2i1*#si=Fivhn5e`sYh(o-3+&wy5ISvYJ<0IzJv9 z`c@)2A`l#H>)16kbOe?ZbokwzfM)>Kf%OJ`L#ZT)3I}bE%WYPZ6OO5rGd0FQtEmzc z=-37Wp3y{9myA)-5RKl#d#)iFqmnT^8d5`T)sWNH!q6qa^*{hanI6v~^tlEi2g6Pb z41Sw)<&Bbx_xb!|5D9DaSS^9K#UnUsA)KBJOizN;>$w@Hc(gLN@Qd>~>xwHjw0G`N z$g>H&z(h9(Jc4LQ840Su!w8ss2Vj0eJp{=%hpo{_)*?z4F$Owqo?Ld8FF4TIy_3h= zClsC-6=&(RU|5ChYpqgoSt&0tlJ&@g4E@4w$X5$twpypnqsRipNRWE)M^h_vfrEf- zjRfV>DHbGu$wf*vlo{Bve6uSebEDwy=UE1DHdL-3=>Ie zeg@>&faY-!Ug1UA?6WhTsT4b&uqI;^9t0+nX14hV}DKr1xCzD zYvJ#bYh?nZh^tc!n@J5SIz&-dC=UT4Br;neDQpHO(=h@Ndl4ak=1)}84Pbd0Hjxo5 zGps`ALi!WZ88uNK`hrub$Iy+K1T@zEKe!$CJCFny#ll2GPIw$q=z3T-+oFRC!JT2h zE#R>N&43GZe4|hTw*&YKV5g+fZ3i6(7NZul?lRhR*N;(S)GN@kY`NVMw-_~6*hmX| z(+tQu^075c<=!DyM z%;7p=w--c0o!74s4}J(g_yMG_!t^9is&aG5J3nJiC6z(nkj>mn>gyn!_CmeA(C^_Q zzmM720$WQ)!U52-w@hI6xa{>>&1t#vkW6_%BL7Yz|87{cS2Vg$t~?}H9ui5v>>2o= zwflXZaGPB9ol0{|B0DfVvL8&F+;0cC-wh7#69`Vrr1?g&6%57!Ln3OQ7_&^LKs73w zY9^BOwRz9Y8;J+^ryo7K_VD4{{k!oSi|)xWmD}DYlV&%xY=gw+>caBZOR8Tjt9iAq z_0^`1H!B-ny;|{VWy6~-ogeY|UrQyY4EiFiIE*eIAZw>#$3DJWHcKClDNMY}v}=9n@+rLjzhFp{t`wPAsg2{e$d8 z(lefLPK-In5@z9WNp}9`E5)C-b$r9;AJ?jLF-<8&RD-{!fD(<+E<4Y{bR*9cMF2B} z)m)EqT@XCtSiWTRG>><%wPSl-^C!K1`*hkohAu(O01ANt7^}F<&<#!-P!!m$y)Z_g z-(dsBVUnnjOE1YJSz66$jryEQd0wl|RV#~)WFu?oj|9~yuwr(~7z@i8s#>eg)#JHF zW3G|O(%~mms^g>L;|f_;(Az&hO|D5`yPg%#RGPnQ)l&pu0SjrjnryFrxW0gR}^x5zHndzRV{hfL#f*Xo~(H 
z3_~&$4|_7HT+68lE|g;vTT!VH`=2r6@YMpRSkPzjx=dat0}H{;evc*Svjx2@Tw?)> zl{JGfLzJ>*G$?37j#|=CMLSpp!d4)+olNHyK=G^Y`?Lx=q+(DZ^BbiSu_HEw4 zcZi+^x!=hPiLvZp_*^)8E}qN^gwA^dCmHh*o&HG3-+kj6_2a$hqaUYGy)ukY9=~-X zxG>8mW4drahA@_)8g1rozeki-+Is{xQ>%$?bFqU#-zfSQ9R)XHaJhEdTP@~NJ#j{h zeJ@i27E~nNC6Vou%8p>z35D{QShBC5x22o2ktf(OEdEv^J0O< zX=_KCji3V5uMfDI%Dz{VEOat{J><7Vo&Kybl|FjzECCZp+lJwQ3X6L}&$4s&>U zU4=d$O{yz-*D$#CgXjZP;Drzs-2EX)qDd7Ey%}U4iv`AF=_UpA1vCG30;R-6{fM50 z{$d98k!2DaGEs^M@BzrfKyS2DTiF4DN_JpE4)hL#4xO||%;Tg`yQRgb1>I^!jfK>r znHI6N>HJuDiAaoRMG`07{=Kw$BTjv!CDzK+Z;wd-%on}d%lmyV@ArelFLiUDZ|Qlu zuI=giwioK#e^=l3VpY@cu2wx$+3-qR&uXLTsKa&EWdvj? z(zQS`X7IX1UMH|2041c28}tpw!!oZEWZW64&QWoRLEjF<;R#|qVH}H-P>mqF6(tQZhQX}h~YBL&p3g->l%y@pR&VVX~)d8Yi7zlH{+h2aZRU;W~Q4vcuFd{ zC>cG$8Th85^^^LR_Xh>r<;wkH>E8aqZ9V;)8d^WBZ&}~nw}m^nhcEbUc;ujD^bDrS zha_-`OnPo`a9?Zt*FBt_eEvbT>M}_bkp$4nDizrR!EsLiL6zdF&n=#r1P!h8GxoV@ z%lwQb5|H3nQ*HhCoPk`wN4hY}UYqyBS{(1foO@=q8h5A0 zT*;_25w*D;0vxNBNsCm9BFX5*{(-~X!DDJ=HbXViWGB#%SqE9mfDP=h+KptrL0>}= zwXhVdlF-%Zw6$7wu}pfYw{LfQ=T}`lI|jH1FwJGBt%J36T9|gTvD-+t8Oavf&_Lj& zBvHv)+E9YJ*C~L92fJ?j2D$lcey>&msfh#VrAMP7bs}m?M9m>T>Gwz-wqCon z)nY0qi9(7jFq;Y~G9TAvgUn&7BIFyIN#RS^JU6cg7Z(C^GydsGC#Iz^5 zErDuh(E+!660iz3qYm`+X+~9Iu406vmhdpS5Du|Jm|Q?&5)-5m%%sL)HUxdP2vh~c zLvD~B;J2gqlki5L2z$fJjV;SvE$M*nG*T9KP?vbRWdL|}k#KjpY%$$*5QC)& zpf>@2i-}|?8TC0Vq?rPAvBN@!V4dPPgiSOXBlJ#}C&Im0EaY}Ww{H-de9>2XylzW| z#uLFzKCO+>n@O$JsPj54=mC)_CX20D(jht*{XtL!Stbbw=FhlbGouySF&vSg4WtT<~wSIVEfTJ>Ch>62$K|K@z&e_tv89apeH zN9<-DKwaQ+pLKiBTG{V4_~$b9#|GvTn{x+a-Hwx+adNZKvX!xXg8~vA?p=td&4eYQ zL`0i&ht;ti1$<>b=<@9bq_KOSp8AF`9CtdZQe)yD?gZ}LNyb7pA-|)prT|lqOip-b zCOxS!I^f084$I@@dz{0OfG`me#lj;NrkBvQDCN}>aif_T7*Eo()3)i91+r&AW8-8w z1YV3|04pf!vPOMQhn=_Es(kKdpQqX51j#B4U5#UfI&B^pcnm-y>tL%0yg;S6pjPH; z)J1TM-4zKcW+rJ+M++Ni7q9uR&ACyN*TOtl4Io}_Va}e6$*ksD9`E4L(Eg5|&+A)0 z>gH^eDfSyp=iHurioPfm9`5Sd-N)HCGICrYKQ9?QH!^%oDmkaq=3$ybwK9KX`akxZDFH!^cL&$47>biA6^x5>OK(6doQL+TYgkO?Ca3!^4Him||(cwm5H_n+AOo 
ziHJzAtE{TqRo}c{uWOhb*TVELE378>qnd1ZY@p+8ii`$F%*K`s7phg~%4O%!EC-`$ zKt_#5mD4|PxU%+ZgT8)lnuOUbk-3>DB4ei~15+tB86$iyp^sn0#^vh(d$aJ5Gc0*a`|G_@L44W_9z;B|VeQYOt72#yK_r?l!q zhqc4w=yTWxkjmGf?*n=*LkCGzs1$jawuGUp>{d`H9P|yrk^+I#)*OY51ZW=yf6<}qi*69Q z2=6RVv(2Q&=&1>2I%S!gwoFf2VZ;!KAb$8m42H=E=VqMIu+?tGSu+MqwI(ye7l&=u=t40U)Dv`+mj0`p-&Vq@T z@H*12|FF-o^1hrQNKrcXiXazz#xI5rO5a*hq3D9;SwP?#fw2kyPw^`p1zpj=m_{P#dg~P`Ye%BH~5UDN0B{q`~7r?|{qev{0CgP>&+yM6EQBlfL4Cl@X4t ziGcD0!V4#A^q5$-yrtt5(y`&No+h@OOrGfxOEU~17j#&#kpOVfMgnX+40K|l<eby9Zt$ z;J;PZ_PhMjC(h^n=KHMwon7?XoU2b{UHR{`mw%IU^@-x@7aQB(R%1J??Dv2sb$(A8 zzZQjYF^M~D&achZ{jB3+BGz+bQFn96841#y-ugXSRI75?BFG+mNa@=o5RKd<)a8)8>S9b&h+|hNLji#o=(KrC#bvEJ7so0oqTXfak0*3X z3yy2^Ui9w{(gGlZ3&rU#UW>pDh-)TA`Q027T_%wn>FC~C*ZfI)&!?i%Zwd00*^yR&=0PzWyXD#ZnX;K0h$ z$Ju+eeBGf_FPBt)#n7#@lZG3Mmc@DM`ZX#R5-Jr}iYq=lm%XOC{u?9N7z-<5;*I0x zlK18k=pcYA8-TG><9OIVgqBdKbOvT{T~NsLFiojWQ-NbwF-@ULkt-0K?&>}1aS9ga z?VxJs#@O|x@x^P&nW->zP6blq)}T*rHg;-MH3Gru>iSI$E!!oM(<)`rAg>TbsX(q+ zf^sKZb{-6?X(Net0&l~yR%Am^DT~lg0}AK*5EWo$K-3{^;8BJi+R#DZ z;!H%X;9sj%&6+W|%T}Pq85mrkL{Wz%CPOHS8dR6ZR6gjN#-tfffk6Y>YOJ)*=Qf9Y zwkX)_urTDxkxAHIzBoJ46@F$aeGNySfWQ_-Y^4ipU_`)j212@aFoUQlLuNT#3su~v z^AZt`rKMb9Z#00I2N2a|V{o;ECmkJBDS$T2VW9xVu2^g#*oU*FBa#lPH5#-MOfEDT zR35u2L-{_w_@|o@KuUuLR(>OkF&fC18@@7Kw~vE!mtT4Mm+0pc|Nk%+WWs>l7&acU zqj;{M&kQ4ELBTq5mYIMfSOa=3p?;%}_k8AXhhI`?)-u-ZZXby0))~Iw4Zkxee7kq> zji%0*N^4hLE_vci4#3!gil+-Jp1e~2bW!#1^U9vdFMYCm;61TylS;cqq4``Sd%tVo zmCpX>c_S~&HGd(Q^@QwZ)DC8G# zEFW#^QINIA$#>a#Cc4+4Z`7!2G^%n;Q;K6(LDUsoln{71p{p_Abq*Up8Pmb|mk>Bh zvFq0pbJvn{iz!$jg);U-Fey>YTAJ0W^8HJI}`bwoaro&Gd$P0|IP_N4y z6`zMauobMO9XbL!wCYNgvPh?`Rw;`_BUuC7Q|(>Hx_VFba!v__SpvanK7YSJaA0UC z{kFOF%d3WQzjO4P~%xeRXY&0S_=Cq*>qv0AOV zQZ6f3D{H1x6sRh=HF5j)BV~H2oL%+s1+Uk@L5a0ndvs@`$&BkLv>L^vqn`emE_4~g(kWgWpsnEag)$y5?UAz z_)MZ5&c@)!C={L;89Ap>9X^EZmHPz$AIXl1(W0Q!+6qvXEQ)_Y(S@iZEEDCrj7ZK* zvTJljHh5+lj0W84UD)G7Mlff(?IsJQ)vH7X4InYBM%-niBLNq9F^GDF9S7)@pjQTc1sGrb zEKu0`tSGM_5@2It2O4XGJ~IkQV9lUS7|;cf!^vQ5H}|>D{%2N-U`OR2_(MmL7r^O0 
zLK=4~G#iBCj|K%F^m6~&)cHzP(=!DXPZv}?U0m~gVdZmqrB4=BJzZM6s;cRks-_oe zTc4|M|6NtnGtJ%4baS8O3SX3K{vy|`=8IneFb=yF#W?%Jey7*%ytL&d&*#7JyD>cG}}{8V37nkDcAMeb@VyWm%mq1?Mgu zE-ODz>HC3GDWHsnm6KywDk*c@`&7yb$!G!Wks)GXEllkgu`nGJQ7xB&?Xz44_yLXj zBDw_=cmY~}AdH3Se;`+cB3jV08^_8pO{qpzZot7_2Gi7{kDKU$a6kzRU!cYdHba3V zF3=-$eKC3SdhG7)*o{To^n}`BYf>sM^>TK$bbi*_1uXx=Bm3p@V+zFyne3!Yc5-y| zv`(7`eM#_TxQF4q-9ZucI&H07Rw$L^35D7H1KI6er;01~7L@F&uG`he**i3}zmKzb zaB$xM_h5bVmQxqsIGg=;Q|njFZM(RGCvmLMY62Zwm?nE<_*i@A&fE?j=={5cZ3FE-t)W4|p5{;K~eP76ML#$H_v2z?-$lY)Pj6c z5Tk~DE-s)2U><}4f-ODDn}qV z3JZDnf@N9bwx-sv>YH{JS8Oh;{-(X_7)>^z{sQDo8Bc%|0+=#EdtG9h$mDrP$nz6A6q#?;FhjK{OPaW`jOCQXt2|Y3)QJqK<_%k)SRT#F4Mx zfJt=9VXX{!tQ2rqWtR1;$ee{-11OaXfGWgQrZli3qZB!bADx>9)+ zU2ZeXT$wX>dGt9b${Quq0-J+{bh8ZV)XoU5V$1Ec8BuFSLXZf1QQp_e6+bGkK{ek{ zjYgw%0;KXqJq{U^Mh317VxupPz!PK?*crt4W`esD5oGvsqgRe5?d6E?bRPJMzOcu2 zkmyO_air52-04RN=#+#y3R^gkJ^`;8Wq_|tphd<9m)!t#vovO*HH<-J&`FTRqmTD| z24EXIpQ&$uwuAGM$+~NDx^!l?c05(+51uxfzfx%xzY z>5~v_R~1z~eYNUIbc(B=f_S^Cy5;GXo>kr4=eWWb5x}H_$sH(c*=9(ak05hr9kex8m34J!V=|ReK^UXKzne z&Bfd!8@8?4|NT~}xCt3TV`0toggP1&+RQC_tY}o6XTX~&Lx(}vW1_+S%YfHmno`MV zj#zX?COZjqu-Z!mUO?c51YT~Uo1JVg%>5HDV{8v?Xg1(=1YS+xl zbxm8Zlzf}0#`)-J@3TA9v;S<{0b z#$dSX?86_Y?%zw^z8RUDagN13ckfI-dbD)^{=$O?i;o^%U(O-}m9Nv2o_GXfEgYr1 zL?+EvDK9A$CkXs9O_u3(RWJ__OoQVI7G1ZY=}hW#OI&uI%RX$k2w`!*OJ z^!q(hk5djm2ONiuZ!vKxLk~qX(}o5!0}9O*vaFuoJ#~%S>YIS^p{C)BOZn^0=l->z zWJi7TK|KZ&#i8+mwe+K);DuT1bczo9<*a28#m^eaMw+Zc76zAn7-1|rvooGp7|4t# zCoGc_7KE|M7!?aY9%y4>O*o*0|3`^4$p#@msC8f{C9W1}WgwzT1_=Q|I|@u&(05=V zwTLuGC&rivx1B+wCh9&&M7-!49`La7@c+1XqsoAE(^kZb?!plmP8eD4E_~cd01s?f zNT-qBC--X1n8I;ca66q5VjxARwq(Ki&jA(HWAraskNH2tRF{sN~Aq;!0 z5L+H)(_y(V{tJo3GQD7E_(W_5`tB$RAyf2#KseI?jxZLzLJ0f7GzWHS%hU(JSg_=> zAz^J8CLf}XT9kIUBAW{^1}F{2M5ql|+UUU*zL>$-mL4#Rme;Qm$=;8|E>29Aq^7Qp zP2>hbr;O&k3eA^B^Ij`^1k>;A=YL#L|3ZE#Sb=31{WiPkxA~<{7FGa^Ev;QuR`+yO z)2j0NCtG^Jm+j_0mzkEkxzF|S0EaLleO;pZvrO~0N|%<%F4r*tVj=Spbe3hb>-6N# zP`K#YyyWH*aqBv}wBQ_^$G+Aq5 
zx_zz@XfT%qeIv*fMHxB}8AuWpNV-YjO#}|U6s8927)(=3lRc53YH7g*4g0Y>x8rb+ z9JzPfe{;z)JE`_KThyvc{oFlm9p4P{_DE%im8zqFqfnd>i_QpzhtV~EWcVcFE8O;O z6z*!bf;+NST`3S`OGdL5vZ9fZ3+-K}YMYK0SM1F#*qMFhv+DZK8(MaC_v~wG-B;W2 zRdL0J{Hvc`y0SK>aBWff`nHZ;J-vGdxZjQp9}`LT2!?kHgg^t#=O66t+uzu-?Mm_c zXS3fqbLr1{MekSCY~gZ`=(L5%B?qT^j>iFlSWR@Z$H~2Y-3cRf7XI;%JOBFSF3Pm| z@yBboZ%y344_e6{JXn12AYJ5|DJ7hoaD@X(6WuvFnkN*V(CGj_BNm?+7M_%hX8YZP z(ww0J_2vDn7B}ltVA%2Y|Ciqr41brV;gC* zOeQ_o)w3Ig-!`>=RaN)JnM-effA-Hc4F`vYvIxA|%rt@n$3B0 z=HBH2KAleEu=Y=mlbI*!;zE2p=|F-#s38IC)I^klPYMEgD-gy)I%0S{p+X%-@rcf0 z(^GnxPC2X^9R#Z@sAwfIDcD#6y%vGgS$2~?6Rwr%*Un6OA)f(>CaEPWs zm;>N~-dHDmC#@p|6Db_?GGqe6Sg+IK0OBzaS%B=cC`lmGp|G-{09AM|_u4#0SJWVr z8IMD-{0p>!2h!~fsJUi&q3_Nx!w3@7J>6j9H6mZeaxJ(WUFuP`OolPeypP^l(O-u6 z4f}GHWEyYN5s@%QG5u%&-RS9(g>(W#&<6KhsL#u|GN~JQX;}h}vLHn#ZKl%-`QleH zTtQ0@7(NRtp6uYfM$lifjzfXaxkxN45ISwM9~6o|QE0Z=okx6u6O3iQP`s|8t4i!0L=K=Rune@Kqtt$&#MT+;uYLZ4=}GRwta2=l1-vq;q_VE*banlS{Gy{9 zx2}2ZPtW~%^^2RguWfC~vD*Xz&qyL7v0FQ}>O7egC@>|XIakZKe!KsT!qQ!8WsQXa zy(&0%RVq0r7M&3Yj`8`2#iBEE8PH$Z%p8w%7z${0FwP77%XL`$O>{d=f}IQWF;ydq zK%=U`u|`Z?gSOrT-ek9O5>fg5G`To$T%0%FTr%CcVZDA$7YXppOckI1eRuEIy_|1` zhmXkRCl!hl3dISP;(}J4tB_@hMotQahXL%5o`wdfdbqy^dpFk7PT=)J{4;fpds^C$ zclREvY1m&{wIlE9_WY|miYxXOmv1kt-dI+>skCZKY1NM0!Y?l7tt%>DpHsN@e9jxi z74NtAY#88u(LcDIFWAKs>=+#SlFQpE7~U-u9s;O4ax59H)~pd^`?%y<=gi z$H}*u2RzPUmwnjhmavvV1~BA(G|BOMBtf4h=+giqLEXNfjVByv0nAc{9)mvZq7{kG zbaZ_Sxq@8{&0iK*e0uugyV+O17#cdMl$T(dGUT*HC~BhHeJLPvXS8lh ze~!)EnTX17-3Wmoci0=gv={}%EbKhWxD3YPAOR*G(Jwoc)jqd4;1Nxy^o#SLWqvG< zJFId78`i1#IAmE784Jw*)D7=w15$FIV8*5zdb9AoQzS%P>h)iJ_DvZb!L}tdvi}zW=el`|_R$Qnh$doCdDsa%_05cDrFrdf4_!t9!b=_Z7K%qguCJPwuhU4*3FS9j+r>!CDUQ1A_X->pv9;ouin&Lh*)% zjz3;3_|5se-<-Yto2)Cp$uE5ZaRoIkPgOOos%?E5VQgb3cxNTdc5t3XRzjN5=nJucWzGp-lL~CpUe4f z+pdq^e(%+Fn^%9eZ_~w`!=j-Uk4ql(Nx}iK+1RL5T$X@}tJ6}+xw5J+4t&45rhXre zHKK(GrYV(4vn8V!N5$udhd~pTOj4}ZHM{J?@N!3>s2TWgmJi9m9=P;r#59#SR)uLQ zHL7yB1*^fa2Bo}OCasdo%2kRA0+;&%!gaZYpIyvbmtVZDpmhC};x(n! 
zZ+G?rjLn?x{!IhCFZldD0s)YI$z&(xvaD7cVhqgc_|&)3SkyyT>t&=w}1KxU@W8y(&z4-)XbE}Vv_51T?$!I zALkHIoeh22+4os><6p{Z*0i*5Q^<2!OKU2rn4KbLCTY+JoYalSbz?D^&D?=wz*GWs zBc4%@Q-l};khB9z2LN;H_b7dCWxy+QvZFS$fHL%u`c6_0HdYE*(ID@5ALmpb=TKAY zS6P=oym)y-XZKN=q*$Y>Q7J2QnhNySYQXDgL#xdUwpz)UHX2l*a9xJ#LR=u&ID+%o=NPmU5>9mb^xq2q^kxX@0MktlR*y!>W34J@1RDkkcG7pFh z-Rgxs=CNgrb;EufyV-y^6BDU6(;BCZ&P>rOOCB?Su9dU&GN`&%RPiD~mjhwAd)an@ zCfm#e?sr=P9t&K1SrIwlbECjmGfFvtK?fvCm;qb?4R&5Acn4RDn!BFu=zkUsv9+yW zbyZOQM0x$w{X=gF#p|0p|J>F8SFvo<@aV>xmOqu&z0x!Ao*MhgWIblK9@SId@<%=? ztXP#>{A7OVbLI7aXzyLk6@0`S{-~?}^@jErIyrDs9e#O8^a^j}4?@XnLdk0*($@u} zX?nJ$XH{b-+_eq-99y9w3Z0v)i1W-EQB0um6;d%^e;-o?Ecv z)2*+pe*br`z5V2Ce|c*4doO*yb|tCdO$U271KwuPHOYPlfdO63r+ylLw zZ@c@paJbue{2g7rUo^ITR@=0`wd>On@qU%^1YtO@)nuuZr&Y@H%N;Sfkf|s#=u6DT zT8pXHfS2oapOZka0a&X zc>AF8_LNF_9-^UwDlJzBbTD+MZGZcW~~GxNg_vv==I-?}+|Z6P{4%_d_ir>zIa z@&$r}ZC#&LHLNbJ{&Pdir=4KbJfKmRIa%I#LN`A{gGn+n?67sqr1>IIo<>z;X1E@Q zAmo>Y{EBcu7YS;EK6SvW^n29dfF=^uh68xOtMR&&Zimca8+F*kHj~I^;hK!S27QZ8 zTPYCamQ{aSR`r!om?e`I$Yn)xSrMeTOE66Zp{v0G5!3?n-uWKK5cEX!nvHExx>g3H z1;%;{)9SMGQsdP1#o!P3CSe7}%$++kAYp7FvbYdIb(u5MuE_~@JZT+^8Xv8g+n=py0IRDj<$zOd?fxT||6xhZ zb9tpt7uURSwd$qIrO#fec%ioC%}&l*u3#Nsv|&`fMIid5w&nG@)>nJEYX^8=$W`Cc z%pt~nM5Wz6ELkg-uTkR}@xTVe1EAQMzM;pvwr=ioXc0CddmXL1&}YgtZ}Uc8<_cdC zj=m$;tS60Mdi|&T!OLn@c2Vi(H6K6uhd2J~*_Z$O#aEwMv+mCo<=KQr6puLOX2WQH zKo|@23d(RSU!}O9Q~>9rOp?{rd#bMKU{}vsxvUOdGfnh>%`ya&z2$HpfqPwsSlEmv zkklB6@h9+lnKWM@%#+K2(E!I)OWl4?222=kdSMYEBcxMLls=qrRwCUPO8ZjT&?y!erEHWV$!e z4HSWLqM8v^XrikLyjUtZ)zi0YVDQ_)L6B5XUbCU7d}DFt`l9mnm9?8ns@}g+^1=C> zb>HW{ci{Y{edpHiIli{IVsBT^?uM2vm38m7_k7wr@HuDjv+n*+Tf08#?AzGazXK*M z9Dr2GS;^>WvFHpsGHJG%sr0*h0v-;E{)WMcJ$^R_8dUpSc78aZoSR|)_GtR&e}cfq zOeyxmhfCL&Vlz|W#fA9o+mrY1%{_d$_~_Bnk3Y`eyEAs{M&R14F&5&hltsOL`>X2L zmes6j>)JRpbYy7gh**@Z$7;RK!SVR$+Ip#W%Zv$S1V}+!?#LR%d7nsnlott6r-5q~$f9Br3 zw3GsrU%)CiB%Ot+I1a=fOeCq05AX()ag^Gq@B%N9=y>%=%dOMN!0Bz0U&{KvG zI3wT9KyKLoc(Mha0#ZxDT@v`=n2C%409GV`m+gdakxC&WJX>xzbFSbqT+3LRCN9v6 
zL`*XJI4VAKSSWZ%WLtKzB+TUoB@u2rg`gFZ!xa1hlF09DH|xy?4Z_&vJN}bpbx)Vo zttzg5s<`^8nwA#^_-luT*R}P$Q_=8hNzEVf%AU(Fd$zFhg)8MR{mY+0S1`@MzMq{jWI1Qr^pz^)@o~xMX_+LeqwBkZ zl3fKQyLf}eR`ammql|@#MAR^z0M|O?#+n>=PbBS$D3gd9B0;sw&R5Dy1cK87!8tuv z$yx?HP7zY$AOUC42dZ5es)aL*XhoK&?@4R9tZMTOnZqU^9;ll0xy7lvdwUD-@KgO59-LR=6crB03AkUIDSDe zyt}V|M|0b@x~8oKr5p2$0U-d%fe&*FKg`Pe@W}bKdycQ&bNY+_%hr3pHJ$Bi!@tZ} zX6$Vo+vw=nb=1*O5k&z(dhek_=)H!}lMqNqC5`j|2_%G&-sxQ|W0~TVefEC#KIc8< zIq!b{g!fu&C0{x7yw|lZF2I0*a_6(|&t1Stdr)5UjM$ypKM>p0cCVrJUR&39UF6#x zy|-G3-!-&et#AIemG}V3y%4YB16XMxsPLX_nQ;M7U6=&`yS%_#nrAP~vu9jXuagcN z6Bux_FI`(6|M|W9U;n=Opa1^u-~a8+-~R>+cm4)SB>(oe_y73EPpB#KKmYSDs95LU z{_*CI-xkM4NsWZ~>c+c+jM%Bkn)%tDl||~p4131Sb=yHfxywGt=jXO}fl+*rk;La0 zD&$R?u}-f`^zym!r}ysnZ{0tHtg3^GeH&#t3c;V%97tBnstS%YWmM2jq zrPnoVGYe2vr|~ z^%eDxZ|(o`$Lc@+{`z14_2+;2mp4cV{oAkWKmWA++lN)qLjKDNtWKU=2X=4etQ$zw z-oJJK{L|dKH?tr=RzC_t_;ny}1eRV+x-5FT3AS$8d<`-XRX}|VCh5fPxmaPVuLm3| z@X>&W)g_?9K>$avk#1YIQl6Gm%g-ly0_h8kpf*QQLZafO%D-2=L5iZoaJk;Z4ittBQrCJgxqTPF*|{ZVIGd{V8w;=?&2SOQJlh7`r$uKQq8S*w(wRwsjAg zeq5#gets!q{dx7`QjOJ8PwtCI&AJ)%^a{Q-T_~WfEO>r=zXG)VAO(6EVeHbp%x!0C zH9)2=9?lU7(}sANQc2MuBfYWpXqS@lIXKovMa_qO!Y1<+t-oZe|wWEGWH=E4>k&eJv_2 zAUrE1xi})FFtolowv&|6H;~jwcu-Pxt+?WHW&O?anhR;UCu36fg~#tn%RXM+aJ#cN zn8S$^2$G>87r?AkQZ%lvh3qQ}R-^%p1uzRL4xyS0qy$jR(|vMTGku_3CT(#!hF2EH zU}&oKfBkji-~P?V)?#}xtS=j^#{Tu4acZ!Oltt=Gp43*)db$^8$#WhmsQsMQ##_%P*!_)ctdMYM^6-sohuSGnhf;SB{d4zdG}_1W7Q05O$<_(UAwYqL|dm< zoAw`GxzQ2|#RU~7g8Ikm%)t>2{E%3|~ zS{-R(`NJ;C`C7t>%BFqAHG4{GchwROw)K3~Lp>`Wc{DqZn_tRudK1Pc1I3CfOx{T* z4@Alga*wk3r;vDnCq4Qb{O}9I9+@5N^zI<3*L>nwsNvSUT;~gpLQ@l)F8Bx7)hDZRxnsLAp+% z-5nB!NR_ctWt3E&0HQ}_=`d0tWmuFY9?lq173(I-L68fqP=O;;k8YxSN{1HWRhqF% zAR?6)%4Im2EL|vyWpbaici*pRx>H(p1Msc|cM=OPq~&~vEBv;gBrrWMATc*6uOhah z71!Qd)i*#Ci#h~?I@(}%YkNdx-EDm3&6>ub;);u*F}rU+{PbqPCwGIkheYp5%Q;ck ze4jcH&f&x{S?M%-BAJ@VVrSV*Bv?Z!TwVZ8l&ed!Buo}Lds&HnTjBniCx!SVL>^zy=}*99UhruDt!W6h(gDwU#2 zDk+glN;G4QD6!UVp@4!#6KP!CqL7#Q`GhhU&QObg%x>IW{xT>ry;l2c3E@ 
zn{L))c)sp=|JM84hvmQgzV`mz{PLncDxg)sL%){IvS_`_nKl1w@`=%p_uN(Y)s?=7vM8-Q;6g z0j>g^nf490HXlELzzR)+=#G!X1{izkSGrogPT%APObQMo^3`G$U4E9pzBTj!?V*T;NDPz1$c_-t&!B^}?)VT2JEf%WE3%Rn*-kc0KGHc-%MexTWJpBk^oc-)#ox zAzu_eEQyoJlcmxWsWb(-3{ai`kC!MO&KVmmf{6zeQ#yh{PcrC9P<%_A&{V+rDpw)~ zX{AUy%i~4U7-5~g!FA2|@RhfV%I;D!}S6|g+Ndt9jDA1t4pfYC7Iu;#DneOoH-A$|BHw50zsidUNWj`P{^C4 zlDg@szF&V?_>cei1EDLF8~}{{(=_|$wHFk&I=Qfbl4Q3M?N&&Sz=j~JiR`lVEzHms zW@rp%LRP_*kjMiM!}oH+%SFk#YYoC7l7Gi*Ufn~Ys)qWv+f_?IzIda zR!hiGu(qtRS%&nwfe8(1Leq)FAtS10nY3IaEQ0eYP)8dkJ5h*%$2ZV}2#2={3EZ}68$@7qyr-z`WP)3yJl6B~T9nf>sn-xVbjJ>Q8+2W!G%Xz1Wz=M88{iSn!VT zz?^7^ecig@@@I&-w%EIz*e1@8u?{qjBDCDfxA?-+tp91ZOrf9pDScR(Ly^HIm3 zd3;!QheAKoK-^PQ^%=t0%%Yw7<-7CCcc?JZ6uj?sjME% zy|t}->Ii#bhyhrWQ5h#$yfdf>tb?-852{DIsNkmGK-`NoT@A#&MDl(rb3f8<5qduc zfD=jkyQuq+bQWUlF&6I>M{u4aIIqYkJb(Yz`(k6tZD#_XtgKil%oYeT`Fx4jUa4-PLnGSn#LH)$r*OtHk{q2AM_g`VF&c{&}VJwu<{_^IvYhhLa^LzUo)=sA# zRJ2>o9cE*f-2򅺿p5}3on%f>mCw&?8Y~TIRoq=J8!(-335R*MF*@s`2fBmrj z?kxy3+gP)C9Fta)!eJX*n%BT+7t{MU<~OfExx<`SY_klhM|)A>__(@5uR}8FR++S1 zBK8fdBdTT?s7SC|Ku?Wsf&dkO;6DKLT-Qu!YPI9_zH%|303@mu1d*^sJlrB3ZXXu3 zONZOl3ii~b*z449t^?v?b=hPyE1fol)vPib0Ky{Q( zfg-Z~zunV_*hGUUz)!zo7OO~bnkQ}36HZ9sqKsK^pY_k8Xb|0kv8c*+%72$fa@mhk ze8?VPyEb2x7N&zjQz+^wfRTep7w*L>bZJ<>A(K(d1ZPHQ)i z+S1s^DD)F`ZF^uMA~+QdyJ}nabWjda87I1^2fC;ShD2v%BiF==D?_3)Lg_h)^1M`a zUZT7tlwN?{ix(xzn`T?2*%~u38KxY&uTtL|n+Q>B!=|RAEw=QT*^13g(hslrzkdLZ zDuk9W*~z=Qq=nf69V%socsNHW%oYl>l?psuSb)kckbO5LQc2iSDN8pt?6RxYR&3AL z950@`-n?G=>HQk|Z13JU=De~g9R)3&CE|RExIiH<(~LDBJME~dP9`m9vI{BwIDBPr zaYayFQ+PKij=@amr$y1|(FhWm><9#sLQy=1mP}4Ciyg`4M4k^1{IfvSJ75QKY`uE>dzYC5_OLT+z}27NcYGBaH^7RuN`9aWgW6|?~^Nf%~^5G#w0zs!YfHTBxe)TKGa%jbr_ z{$=&Q|Ldp!{@=g<^Pm6gpa1;FN3P)i`SFk6))r=!vmT+xIp}tD+bjf#u`O0J0j~J~ z#377z*?Q|+0+O;$MJ67462AX_#J)6Z{=FqtZjf2O$55 zT9|SbLoE!+y$;X-{xJb&We%;t;IR6UH$dAlBmiDrbiY^!7`wSXx3(pX5Wz31V?d4! 
z%%S!_^|~KbyFOypkFf|{F-{ws-nUW>NaTQ0$WWFqLSsx?jGh4;u_ z({42PmelSlhtxp__YkkGIG_SH- znA7?$m7)gf0n)f!oKjvmp=m^yaif7Q6trjNMMtMT%`1qd^fVasgR>shi|3Y~e*$Sy zkei*`Se?-82GH*pv?`P+AtML706@Z_LS7~i;QQ&o&5EyhnpyZLzvNz3eMDz((jX%S z`IrV7F#~jfw@g;7pC5?EFqX-F%HhOuxKTV_v`Cl+hwV~@yo}4m@%h{8v2h(eF)i&eoumv(Kdy_E+(k-eu?rRQ2AhdAqO5IgkH|0mE+X;kr;(?M z${x+n$X~y-zuW{*BG1c4CG)~8FaVCIC@sWfT*1A<(kIQtMDcL-l&%MMvT&R> zaArNgm<-eT0RQ}AV-nS^F3d`o7eJjN>Ni9O7$xijF8ctg&~n;XUZ>#o3;kdJScawQ zKmEV|`*{j#~g98Uz%@}b+)eNJ<8Z=`q z=ILISeZXe!9UEy=$g6q$5{bB0sc1&oYw(>-5^<|Y*fPXw;n3TL7)0?9Ny?|lcr=w{ zU|cD5*d{jCY~ZWfbXrYnqh4yB78!I3(8@Qd1VLNlQe^Bxc`wsim03ULFlk}`3t&4@ zAEMQuL9;8e*4m8-W6`GD;G1mya4t;}LW?8}WsGzMcA z{N;Ja`igUN4Q9?PdoXt}8huf6I7Sp;)>_OkivV-QPt0O|;jJVLHj(<5WzPTKZ`K$5 z2wk@rg^Pw*m(^f1PMCC~XpS9GN86pasp+IPC>toK+?ABGJu+omRLZvKG~Y?g-kx9n zd2{C}676a?^+J8ysjB9~Mb*2Cs&1yCWJ< zm!Q!Fj7{7f9lx)gdSoz#7)+sR?L)=rU8(YWsp_tLBmhpc0ctJC{WjYoT%NSK`NEBj zj*az!*DnQcUrRUFMGG@S4%>jm*k_#TlFKS+^n@OAGM}HV8LPKjNakts=m?QQE{%$N zaQgg_FHi5kcI#9@VVqpvzA!6$_k;QOU*~@Rb@AnyH|s zNA>kbQu`z6gV9`W9G4r%WJNJp&*+2EgNztDBZ>NZz8H|dt zksgn8c*ZTC^UCIDM&@S}3$r7ObL!P4{l=*6|Xbhv`2 zdmOYG7aJXqlkKuIU3QwoN^#lyXIz8xGrZMB$@4Y&tLI}cpN~IZ8(mwHEzJ+xEd%P& zCZnO<<>>TJu+270i`&`hcJz9jfGC*rP#0!dHdD7ikj>|1(&%xWJz-6Ru=1LR`6YL2 z8lsenj@K`YfBAh8q$&P7_vY<3l! 
zT_qM0WMZO1+AbCnM0~;!gUA^qFsLo8{#F6AM95>_}a4L$M=iR z*StnOa7&@?LKs@;BM0;vDSBmkQVA0ECr2#PsK0y4Vb+=Tpv~l?sTPc^NYLfqv%pc- zvqehe@(VjX|3kdw#B@;DREgPFVLO-u+xsA#L9?7MXWJjviWSSEQ4l>I11O(s%un+o zix&76rZg_AVcBQ$1wLO)Jb*mEJ{SjWwx9wAq3c%b*21iHc>x{!^1Ks4B`P(*nif&m z0^DuEPz7H~j01hMD+?ZEj>E#nu{c~TFyZ502c=v-8#1&c`{Ko5q{GSAZ} znZhtuuaVWXoT3hX&*tB2?mAOiw;PAwnV7vjI&E8QI)K@@%hc`e1Zl zeKND7zMudiZ|GwtG*ueHZlUji68Nh3rVqx?(jmp&HZkYSa5_ z(+jI<=bo?5yx0JD*crEWOwJfn^}8Ly#W~&DvSD=z6mP99Ps2zP6FMjp^~J`-=Gxfm zl4{m1vYGp!K$QEZ0~c6ezV%PDT}~SbF{>~`jDbd9BuIcg>;zOQVY2{?1*5F3%k3Z` zfL)xWc^m`d>IRU=t-vefWyt=_=VuQxaCAnFMA9_t5&rz%_S?^{4?j77`O&tzG=>sm zB;r=?P^C~%D-zW4`Hg&jBdxzCF75u+8z(Pb`yx2vdj_3go)(!5fS^XlXh3PPy&!GI z$e8sAXWZfmO_xa6LZ??U8P$Rz!Z5E>$ZZ#L+lLtKEGiK-Wpf5PL@WxHn~h3fZPS{y zB@p7hu{tw7p_~{U9*3fRR3mH9fnaA;Esd11h%2y8kK3R!)&UnNphD58#Rvg3(0cq? z0qEBpEwFq$FY6Sx%t8>^QdYmKqG4Bc^Pc9;eVvpq+R2AoyZ2W&@4?r6=GWiu zY3=%=r~f3KeO@g8UZ#39B)rqz_f0+VSXn)&uxaWzPNtn>@ve)c*SUggD)l{BPxDBs z`c5dlq8Poa);<^=4;meRqSii;D({HpH$>9w9KogDfn%M0M+aD6jgCLiPlp*y5mUxg z-Bg0Xm|?QyNoCoDj_B~%vv&dxWaZtV(h51;avr~=pPpCK7!?|E>FbO8kDT0j^31M~ zsO#rZB~(&0%%ya9bMp3O-a;$?EUNszNm>K}-u^n;6k z7b%&}C=`jRhK6c;DA@(&p>3VON-vd^@cG#sZU&PT+t(l2LyBtac$#1GC>3`j6?Y>s z`+6q+dO_898Tns7OWgY;Vs}8uw!4qFeHZ-cozQJ}qIL$w97)C9oYd0ZzBax6!Gp?f zUcUr=1g~E#{_t}C<)(Xm#pSS$I4q;9OS+9!(~Awqiw(dhK;6E1ZP~i9YC;iMb6%;} zC4&1tzQfAUjd#F&Y}%}w~ZwL2rmkJlM0<; zB!j-yYy?X#tGUBr?Xg=rA??uXaR3mW^H678RI{;dL|H1AfjkHGs87x+m6Ryt)uXC9 z%^1-*MfSRQi*pi}W5}Rq81#cq8_#LuF<9knPWh;+b3#KBiwUHj;{2k7I}guYyLsyJ zwd1!QU2AGA9+h%U2C3D=x0?7M-`m87*|YsqlQf$dBzn2*LY0KVptMrD>sVAGkKPS1 zmfa;_^l}C|I0GHL!EOPQBi3-%SGj21Y z7rcJlKshR?^UGow2SNqN(;`v56Oj)PuCb9A%SLcorjRIiY0eF*wfxYELJ_gTZvV{b z!WL(4+cc0a0533*KS6cGgv&M!FV_wv#QIb8A{MOur!Fub#W4rO!1FWKg&9ziyRzt9 zU39N5de9*4A80)oaf-e?#6KW7#ccbqcF!hcex9#K-D%s*uD_pC)}8mJCSOh}3NZU(2KVRr}lOPMObV2YNH1obnnG`1g4$=jY=wzIMQD4hc~Tzv5*q2&7bWZ;DU zpJs^@@AC_Mo5?vHX zz7xKfUA3xxMk3 zx5@oL)&-qDfD2WE*XtDWYKgd{iuz zJnd3=+PSAGUx%a|4^KE06ty!TWc$4*+X6ziKaAKO@a)sTh+R=BN3siVRn>>-CMd5q 
z4Ih4&-F@Mck`83SF4Fey3MZs(}K>6cY+{muGx}|Cs zmel-@>@Soa0GIC?C+vJgdD>{;#HwMi&hFOkP00i3nv?@xL2stzZ!f6aUD0r?qW(xz`*}9+iBR&4 zI{1BW|IPZgGgS>|I?2~NNjGr#T?ma!Ye4`)H}xome!6e)JehW`tM5!x$DxV_fU&Jz zM`fzVE>Gs9A&w`y-`jr?hX+#{PoEJ0*}@l_>5AC83Rk zn>X%#e&O2g)XZB{S|OWLRb25TH2O?Qc?6SDQ&tsw<<@6sFMpDedz&_hM}|td3@?-B z$z`SL(aKR(l|WF~LQKplx>H#8u#1!m@1TZjh@)q3#tkR=%5{@yF0ConAX~nR8=2QR2iO*56{9! zW|u~1mqsTSN2KOFDkux>q2O5TTs}WrD9mTF(urMBRrNvHg}0(J&Ob;z9h7)9AZmX= z#O?=SI}o}ChHZQF?DNp*U6F~05^yIf>u-DB*W7V^<>e^U!!Om{ei;bzJd6mV;faVC`lF(|1Bg%S}qTW2+JL~3y zh+U5qc1DO{-H#CVz|!F0N=1V!N@$#MQRckl`B_jyi2*E(W#}*&+6|Mf27SBD+yydl z^sUQTwKmh}Jno>qO`(Vv)gmOUyzP0q~ZJS*R8kr8I*KSfdt!k9l$oT%6I6 z1BqEwC4+hm&@t=B%#&d3M8~8XMY-Hai}ev>!PVY6h4Yz&*!PmEl1PsW4h8~Q`>YUb(bF5w?E2=u2 zQ?fg=2-ssY@OzSSwj;1bFq>1dv!dZJnRatTo#b@my)$KV3)L&Dgq2kSpgm{njHV*Z zc(!Idi_m#3w`_k}{&pN5bSV~9eOBALr>*B`KjVr(e22xm+0VGp&$vvcofk^(xjngd zCr+b{7fAzKyH2-uou)EQa|GvEyfYvHMtWMMxjLe`E>T|Mi_h}KXU8T2rc5D|hR_N9 zL!tCKq4z*V!>-&ipyoxrjg?Ki8{2_etY=^^l#m^uGS1LB7rBBb9le1!?(O>e(r3|$ z-%@FXLqoOcxsQS)zN)H^5D4m+%&Pd5%MU}2!Z-1wDo{!brX!H=2Z~=uRi(qC!d^;7 zU2{x%O?V?Ap3>Q7)~U)&uqI{EQf;H%MWUw$$t7Z-H4F zq+wZJC!%TxW?W>at=(*F1>>s;1>kg;jjh`81}+!J=A=N^6`s$>NyLRp1s?r#$mLmB zmI8FWfoDe8huf$b11~2utuS(sxiBk6En0T7&}A3R&&b^l;nZZGOwy^6_p0Q*BQmN= zN};hjGxK7C!oQ7432taC>h5i5X|JrV$))sFx*bB3fi0^LFnVP?nsS(_mh;te{E`fm3~_de|(VyDqX+v_x;1}>X%La&i)l!9@U z&@isB8FgqnMQG+DZ~bA{7^Y!jQ3SbL(*}?d0E1Sn3=v6EF(E4=S^@B`%x(o?EF}D+ zv=tg@XIvH(1qRZ^{ThbVEyXuzB1OX_vOoOk02K9O?5h7;z+_rmHC_I1%C+hXt#y8E zRD=h?GMI8D@+`k3$7nW_|Z=IMw*Jjeej!W?GTh(%`wC>CN@_m^_fDJ%I zL0l$yiNo(~Y(GtB2be7R>+3CV-csJZBfWl2{^b|ihYySo9~i&eawb#B@IqN>k}s(@k^L?1Nnu5CR`WnAMA-;t?;wE8H;$YYr*$ZAiYoh!81(nrT5 zDD=w|`e`cTt3JkY68#W)aBn~B3!eBZf%J?-d45=a0dWOr|0R}xN9UYtY~P2k`5f6L za!Ph0%D22>7ZxB!=snO*I@?RV)JK0<-EjNrcY98Ly(1v#N_%%Yje$?jyd4^Mskt?s z&u`@Mfk?8jG>FolJ*sMencDznVGUw2jFHLb@$&lVxMpHvPGMj^KCp$DC>Gb~CYlh& zs+9F2Q5BU|*i1}J$A>>nc4AF_Ff=^96tb77A(wf+ilXs=YfswOhQ01S+j1;(S2o z3d%X$@=;aW;+zaB+)$6|3WzEDVGdyF3+MWZ8D`*s4cGb#;A4SWc*F7L6^L7Tu`#*2 
zGzJ9xpzexlHIY$Xjb7W%=Qob3+MG5J><=qce4{MN&<5==c9zZDr`L6vr>RJ%>abEh zE~4Gqf`(Y&=YnLc*+`t8YE&tUMWO<|uEuI^U6>^=&QTX;$#zSd(NM1$!z<;5SSFZM zQlyZV`TseH6Buad0%`GfT>uIlIQHd-@58?+DLI&0n zi zu@Xj~N%r{?U<2lTqu>g<#lztcZ0aG+JL|S0(;0&C)h)HKSh3fZxNq>KqCMA|KhDt) zfG(_b2F;&anGHX=>H}&d@m+~l|UR$WqjX5z1B&&*xC10SKrwN;$eV( z_4~T}&hUos8cfks)6wI)Fce(_Y;5fot=n7M_Fvn!ZQHhO+s3VJ+qTVH+j#Smm6Mq- znar7F)>+%ThiLU?SktB=Y9Rss0ZV8u9T5kCXpJ_~FJKfnF%0>{Gxan{$yc?sw?b(T zMWQwVqSxy3y(q4pK>yJb5Y?J4!sGaoOKDJ0`}I^^s^uEo1z@pr=-o7Y%6j%VRVPY{ z!~1k_mxRJyzvo?5WihzAnoUfI*J+ zlDv|fr9TjcO*scv%3n;@SjAG+!Pl9_w_X9F17rphX;T!(v%8W`3BC4CQD*Wr-Uu|W zgjbR-PJ%jaR9*Z@p)ownN^}sNuP>2xc~g8uorSdYI9$XOd<>1}F?Vo$d^@5{bB=g% zD99Geg20)u}^Z15gyzq|s1+O5fgivsFFVRM;SX6Mpr^=dra@#(fih z0~@=Gmg~gO_vFKTPt-x5GazHy;5hG=NS}&2AjlOzI}UK^Tv2+WohLDHl=-_|y8SpG zKSrjv*ag(p2VV)a!77+MzK)fh#kfIz`Sqc`L{q!>PKwV74}4yeCGc2b``sLX>$Ots3FY_7P>m z;LCc@KHvrFGj=vln^Ab_4y(&hh8<`V)zK@cqgCpwjpVuX}$}5}v3wWsR?^CnO=(j&k7jt0% zD-qovZ`J~l_R>{M<>V*HM?GjmiXJMbbAsAKp7yVTLh@dyAWBoG&J!IzPe490V#0yY zhg7d*OWZ~0p}9=NMK&23OU%(MOhj#)pv)A>h+psxRsaJx;F=_!>>zal(T6mCW&Nf3 z3TFabK%DK`mm1_hBWL2#)V06sp@bG;y` zLm*{#Hs11os^Al9;J3#3*QjP-0HQ|F;YynK%9VrOlP{e=xCq;t5rmiVhVHEQCOZ>W z<7@+{cvW0EVENh|tmGww8@t?^6?!qiC_L+yLF;PPc{~I^7=anw)T)|LGHKmkV~?=k zQ{3w*{(gY@Y};$^`=)xk3w%BL9C&`F%Lc7mAeT%v9BMR(JSTCP;vAw)s{ODPs z4gWY#PcbH%Y!D~OQi#3TC?WD&M5IlESVnpo9Ny#=Ssb9F!UfD;DMWDRkA?(kC!KsW z8X*fjri%WR1$Cr%;k}}+{D3vOk`n>JP`eS?3~T(@2~G7bQf}Mx{gN~|Kih-2;&!(^ zJ5cU|$J=v$dow?qCFpfJI&N!|FWDbF2R*r1(6dmdupEs*cB~PC$40IpuV2nvUX7s? 
zj3&-b9y|$}K`kWdNKP3rNR1YWEq?&=LsiII!hDMgTQ6@8h#2k?{PRk1lt+dt{(qp0 zwKSV9Wy%Ws2=TWy_4W0(a#Hk=-dnnPc&JI3@b#~ZwVNNjFCcX+7;Y%;?-S>lg^Qnt znx}~&b}4IK>CmyGu*^m<4-Zv*HB6Tk@trqZ%K@9<&_zz^ zX03GHa>p1s`Hgk-GIn*??d&ks(pi{q3{&*Z8xgo{X`q_EDPQI5E)5x`xFR?jG8p(t z^fS2rQx{I;*-eS0}Q+G_vun90cC<;lpw!^bB;MD*e%B`0L2_*>Rf+1D00Ml9(G8sR=t zfX3Kh+(;B^1J&Y4lrchxF{IItFGGo_8xW>ltj1cSDl1P}W(Y%TWCSQi1X#oR5Bx<| zMDj2^`VIvn^DDqi(iDlr0LM_EJ^Vz@7@poH?z`{)pqLbhR1TD+#vczNP0bmfTBxs- zQ^hfqN5{@Trqz4aT9n|#F=JjAABCgl{kbZ^H+RTtKplgZZ;R$>nizK!yK`Y<5|hvo z<7-l}xfrFZ&$#16TiU6?5NDWzZqAsRVVR`qkYIcvp}W!4M(i8Ot=P`U#6zpfY2z=W zFyqT`NDm}j1E%zT2tRwaV)T83$!6|$qptvBb^~p(g}Uvfy6uj-S9y9!l0q_>yIJe> zHEjly*@Fm??ao?x@GJQfGZ|JVHM&sgAEU6U`TFZT$)A=^yf5L%pe7U|grH=-9z#?l! zpx#z5ZOI5O3NNp)xR|&g15Cl&O*FggfGeQ>7#xlJaf*MA4j9G8?GFby8j$f64qu(+ zhVF_FN87Q({?fI+{>SlghkTsh^TYtk*8d5-e|`h1f5YQ+_-I_#rr++vLPD?6Z=IWE zf(a*6T&Q1HTF+UEYu78IRimO(K6bHUoT?;Aw5<0RZXQMBDo4-^K3kgxd3Ca6=3;^E z+GDQ0(^zF?_K4LbQOfDw5h>C=pA>>m$_G^$F<&F+t(t+3n3@JOUqMx0VNFd%oQASy zMsARTZz|F!2knKF0-L+=-I2(%LH@Bw_`yiHiA2~DDUow-G1^#*I|)+;H$D3gj8(G7 zS4do*_&eEIP_a9Y2eg|uLY|#BZUZ&jbM)6DjBLn!?Jm@&0hKM{n9*7NdIv0d+yJFP zBMj&Em!Q~0zX4-rl5o-S(~GI{xF~1}GG=Sk1niwmQ*?ti)w5ud8MUC_&1>L&-N3=HagiQe^|Kc}7i$G~_{^of}r< zT-SJZ=3Rioiux8#n;LxeswGYH7PU-TQ#S3;CDoIAmYG@Pw9|4s@x3=p!wB`6Gq%Vu zEijuRi*jYQ?ZPDY0zc@Uw9`g}fg;=IizI9@YKv9%QC_)30R`0 zBd$%Va$Yzga>>~;k{G;12Nk2$h>&2siiAQ`#<3FN1Ss&Uf#C=XBax8u;0j01Bovv- z(t-y`@LfRg7s$APK)49R;qS7=dnn_dFvn0}nGUMq)elDWazn)*e5s(I8(NH0-H)a5 zD0uXfC?pU+teOe9CkU=CO>1ULJhR+2PTorh)ivtq*;Mee?lq<}*15U$0`UHx-mB0K z;5bi(d4nK?){oVjsXJ`G+p4Y|9sPiTdgci5P}Fi5O0cv6O>L(dIy&Ht?(&}3%YXJp%J=vsUJbU%*e zj}!1Q?EbDd^)6VrAJ5IIc6$hFXw@#`Bn>x~@hBDu;1Z@mI=!)Lb;hN#Us+%CEp7QX zBs;Zs-P&M0OIW}9WiW&0z(84MbHAc^AtaoKm&eoraGq0F5o3=X0SR!23GYF->)ySF z0bym5bUV`_bu93)8!$h&uS5E&MR2a8xSYiv>)~CBOgVpr=VaMgj=+)`wRnC~j z{A8*URkpII@qT9Yp(Cq9DhO|CGQ@wHT$A|Amlh^gl7P^F*6jDo%~N5aRbahQLMGq% zG!A|(8q|f=#Ulz#@gP$W0XaCY47AilELO>iw(K+(#)>5gk=p9z#c+v^oq5?@j6oGk 
z7Bt*wj-_$$go;_ys<+NCC&J26$v;=!A&`P!jK;2zD>}X=&<)@)@N40mzewPJSj>V7 zwGQSq=4jH!Y9`?Qju5m|K$7SGTr^0_19BX8(!Gltxq_G68&Ppe@5z zm2t%Te@1U*5lOk2cw#Eo<%?x30`zT|y~mi;)_J(X;Ct&_uz^&Vj^C(scO-;?z%Jf_ z9LfO=C~kd`_$SZrS*>41G&bMgWz{t`15FN*5G1D4T;ZZ;jTeSUHE?EaT4y|ISg%j8 z7FrE-GaG4V*N69-zO31S#uJuMk7qt??$5u#_?vZV+2VN<){1+XqHF1j{X z{aQ;-RW<+e0w;HPT#SP_OLfsBN26O)uFF6PY`!8nCHWF98__1`mO4GxUf;EzMlExe zv#RBLF=jj08b{wAN3&KDdkNJT2!ucO%mG7}*nmc(&JzdAotv!Ho|X{dXOVfwrR9dU zR#*1%l2FX(3rwgUNXn-rmsxh$n^53YPNr)e+sgrfL!IGPg@PYE* z#+d!~&izP*{43s0>)ANPXWR|X+sQ6K<+&WLn&{zi zr;PCXR)Awn{l{;hXlbvQ=s%M&GL!K1b&)ruft#8T%z@2%K3`(q+uh|6qa2WYF^lsLUIv6P06TDlRPMM%2Yk4u%?~QJ|?8ohFf; z?;=%O{wpek?=pQ_fl>IlJHXK3em48}z2&(K^LX@`8q3->RH+dE!ldaz&aKb8X~3F@ zG)gSBYFMfr13N_8ldhAPfrFap!A||_XStI6uo=!Nx_64|tF8Il+)YkSL_$h=azE5O z5BVPvLUIr?O!4^>RhcVdMaOoWU3&4%iW0}tqymEw2Km)7= z4@yS!%;YR3g@v0BoAYIi6v4y@ypivxiqjiiiTXRxT+Y;YS1SQ^y*)4*8vs`8cl+M^ z`8XITou@*!&36#sMYMxpcVKzId9aj5{D(1*CIs=K#5tW3<%3C`Tt8;QBQim(1I|nv zvVD()lx4i&iY(d0poyV`N0<@XUOC92ji?QM(dSBOI>p5cZ%!eAx_&?j6Q~{^p{ddw zWz$4Z*ZlM9uao@lh&dRsaG3)!^B)^(G6(BXo?c#Cu%zoK$dXa4-n^wLNT?bS(-YsSY5Gz)gkS*}!x%4qeku%$7btuPF8h;C-XIS73(NoL~cL2=} z2#-SxXCfi}t3I1Gw((Hj3*AwpiZK*|2pU<={WWRcz7tK*I&vb z0m>u^&oGc($jm?;q^I@(MHK}4-tB)+RE$5@zGJ+JCD$Xk1Z$)IHr=wd`s=Q^ND=y7 zxTM$AuvgctboGpMR%isi>2d$FQQw;YUOn*}P2F7mrd0Y{;-;?2yRVoiU)bQQ&;2d| z$Q45z?z$~j0I#~T-IAv|#>#Mu|JAjj$fd2=$<}KJm7H?$2>5!-nxn>cTSI(dtf)Q1DtQ$ zrB@RxAAUUXo4L{@YD?IF+VcWY9Pe^Zhl19^21Ge-0xEm( z#qqTLzwR$y_ws)4Kcn~jy#w`r@4rXu2n06Y5$I{9ouO44i}C8(vH%$^a4gWYt=UFx zjw6&OF(XG8ISY%)-0@|5Kr7SOC>|)qPjpC36XQSi!DP@NC4Bx#59|cgF9tdWq}K?U z8TX9=cx%HnSUl)lzZmfHOUD-gd+EfpCsh9C4_dlve*BkX*g0=g*_@SauEWxnTe>uF z!jqM;6s`#*LcRB&xa!SYc)> zUM)}sp}$Un_~(?^F>MTB?>`P=vHi2m=4^7AP9(%Npg${n5jlolT13>)$fS}LB z{wPsqE*W87I-POtc-qm=nn+?PhO@io&{%T-ahtyE-*G2i!|co?{-tK~#gSr`nBBW&cu8XRCF-N~1r`_5S>?Bqm;nFF6gkjyyRBB>X<$TGI;a*#G ztq%4Ho9J1Pj)bKCV0WFN*us|ciK|tuQNU4s&EDoVU zecS9Fq4UnI?Pdr<>bUq<-49dl#4)yn_%-&t*?F%ksoVx}ydJO&4dQZF{4O^19-fCy 
z#roHLURJ*oOW|}nd|6rFDPwjGS)%ovS*t=a@NpdXGb{47+uPY$-wqb#YLqqsL4nH; zS$BF;trF`&4>!xjuJaumgGZ_@h$z7QqrM10Y@CElN18s4J}QFC#W7_8hyl))3SvCi zYWXG6=PQ~Awr*YsQ_xo|cY4!R!rSEsw2Qphnpa(d3zU}LII}yJmMPSS9%KTMG;BhiPM z2iuHp5eFyfml5tie2D6$%NL`YQA!-}{D~?c*0#2C?ca$;g2s-?JkKKv%c_Vl*FrC>LPOq--lGz92^u3=>0D?@n%j-+F z{{c|pTE5OEG~>_C%Z=b2rvbvS1|B|h>UHRM8kVui(rw37#OsdwLR7ENg6olYbYP-@mnqH^#n4NnczmfW$ zd0w;CTb+CNwC0RP6p%AE*1Y@Hd{Y%sVSqa69j1izVPUHn1P*I!k?PrGVxOjCUP;X9 zRF>!vcMPE{Q;nq`i!-P*`ssH5wVeE0Ir&e?4W=M`j*uArZ@Br`XT5w4&P`&OxHlmX zqR>#pk9bT1WfU;*>Ge2rnYfo@ z$Sl~+A|6>kmVWkMaqqIR`8^XyJCzoe7M6Hu6O6Ob{(oW_5v*dgOm@-J)Ii`g>-Gqc z&WUOqjTXpMqz=);gpm!r-_yImqx%y3Zz^|B)djU1thW`CJ{O#p)9S}p&_L2C$hF#k zC8A>q97HUX!vvpohOeCs?o1EoO|D7(cUaYK`=_w4|oENIGf2jh%vOsm& zOMSVMx6X-Tj0`!|WI~f`;#Ecnb^!x&4QiNh!Ke#_IB0KeynH1QCF_^^ENvIq*(7Iqy z57m4mnxPC_*Tp#R?o<~_QNai$7ytB~hZ>EKRxmKC!V)Ika^bi}+9!l;ngwW-jjkaX(Yn=lAvfYVZI1y`}H}{kCPl z*AM#e_3(Hz=Drm1WfOOM-OJ3H*VXtY)S?l2Sx>!|c8PIzsilQKMlOBM#3d~AoZMb7 zYQ_^->%Z^$+y5vim>%jRJ**;a;exfpo-z?D!WVb?H^bDFE&^;BN7S0yljE(_LRXYy za8)gjhR!5DPVDJ<;sX*K6!c+Wpgi)vj4|Fk>)ZGB#FIo5VPxaxq^ zb=*DUe${feXK!?BYqe|P@x4qGA8$5x4;4V0*q9r8YCD^1JCn^fh1ckTb;i92 z4JiAI^ZKCAqlxIBQ+FvUzzQLk-(yp-=|7?bXAoe9^=lYKjCcimPNOBj>69NKjA<9H zYxoqyp^(7(QNtuFgq4dRh6qWMLvTZOVTXHbE8ue+;yScIH*OUDadU(>+<*Auh?B+C z1Je=OzMa1^`90Ra(AtG-jfHEKkAInsBcT+&hY@B7cQ5b|i|8Cth`I7E?>alAy7Na-6*#s@vYV6(y4dt4qb z0P_In1FogBpqSrJQf)vRcv#MIO06UBtb-iNfDWN6Ieb4Ye%c}%^F#ET2drkAJM@2Y zLq8MnEKGP+Co3BuCcEJJ7?#8nvVOWqcBo2+kKJL`x%c-qq$;kRZ*7m-UqV~how2kz zV{2)gtmuuLusgxnDmT`Nk>^22ektL5!wNrT9@FHui$HS=%njZ&*A7VHeK!Gkd~vSd z?etyUI^b!0bhH*bk62~yqq=t&)X>oyTop)}qNd_g)xuib!N`1pzO(yIV>66>+1RxS z&EyQ0<<^Ee1wJ19#>*6%^ni3R+_}2@V97w<QU!guzg=^RJS!U)BBAiE3f?@Nfe z5I>4~YA7oozgbLHQbDTJG0cGU*V3lj*QQpc<;BF4y*^29as_R11$};)2n!#O2uB8g zM}GHc>Rx;1%FY29Bg8kZly)$TK}3{;RH*x|Bwx=oAl#zw_GRv9-CkUb+m_Gxkam$bE1VfC0>+=Oj- z7+PTlzc5UfLqUv1ZoPO8s}!VX;NJ%S!DX>T$&e58bh4?o2f)V%XeWSCUBCI-TL8c=*-vGT2 zHw~qL$PF8kHQ>k7rl6;F;a!o4j9H!HH&arxR8}xj(ko6+Xp_wIs-Zw(7#1v)IL7pl 
zP?I1Ko%Ib$xnu;HKpYCU`UJ%zgnBRnJ9N?bxM3{hAVO(BDnW{f>E1`N!dEw09>CTk z^Ds%QvYV6hV^jrvz6EJ-2eL!90nq?m>M!)5$s8dm@%T0;Nj`3RJWO=AX9#yUm`?~& z;i1Ju$BFs!_R?JSFTf}PW^b@2O)o;)e(<a4eNlru1`4jIrKP>B=EZ z(heEh|22J23sqMc<@ZHTPo+&4xd$=;Jp_4U`~pZkzFC;3qQ$}~4pzV)!;sn- z2_zDw{#fN>)W5It@c@0b?qC1?s3Z7!JL>ws?!NY+7+(YI^4~3b9DDuPXSXtTw$av> zU2H5V<=GQsX&X2**h=qC0&|lQ2NF#etX%TK29X<^L=t_AX*3(EhjvOm6b@Z&p|Wu+bf&1n`@pShUi6@HC5A#*v6XY!;Z|D+S&P!uz(LS zJ@0S}0|7oJWL~zcfLNjkE4$h$>ze=GVU4Y)9wwuhmTp8LBgzGdp0dv3+1CCRlHQ-} zis+v`9u_U%N&Lv#JNrITf1rDzdZR@3L}sFl3|?pTl*REXze0RwS?{~b`$yr7jb|EO z>|0u1n|)&uXse}YYNxHKyrVLpvor)W6dYX)U`l5pg?(xfHUe2_ELy_cac@iW{MpsE zn9swyCb3Ikr2>JY-s&rnr1?UftSJQH!!uf81uy+Wc$tNDnU8xR)OJV0>(B*g||3K z`k^vxsju`*A<2&Kk}(sT0^N?1^NUi9^sRl$L6o+oISNxfvD%Pw`3i7KMYS*pt+)`V z)Mxb^*eITfbX-#~E5@J&85n(7&>DcW3Yd9bRZR2rV63>sL;nZ(urVrc;`chk?SEwV zKbn}nM9zd%iRH*$$$k}o7d#y$dmFJEsylm%I+@w?8MwW@Ey)x+QW)A0**Wnx7VtDQ zg=?w{)6%;5J%l9Z7NqAEyx)dNX_t1xeHX?rGFpEa@xG%0yxfF1tHah=U|(8g?<*4T zsJ8b52U_h{(FVwA?*TZlf*-7W{S^lhx7>J)U^m^w6Ua0ZF}UA?Am9u@%$Pr?!;-hWGMRZ;KX-C) z44?7-J_u)hKbPVDU*DH|{yXn?ZaSRmE6b>nMS~5Ft{x1=X2`7LPp{}KEG4Ds|jL`32JP6nYONyR0m(f3j2W`DlFK*_Bpx9Io1Kz{yKL{er>pf>MJ<@iO6M zaB63A;b?nitamEsV1==-uWi4F+2N9~$0hc(#m2^0v}Csc5FDS3v&VeeT2f8fbR+6# zEz5WgJ%6>S6AHiT_}r|PJX?l_*A1`7#T~<@G+-S)}25W1kZE9yL)-VJXq`=0n5Z!1K4DjfT2*GOK&V*z7Ye$tK z^n*XT&PEMe%9P}UBhWJoTs4|oOES|Arl;e8FX&`QdKhU?45h^7i+Z+7#N;40!j`1q z`UlyIB=NOh0KZeOS8ijyv!xwbP4O({UV{Db#nEwDy$DK38WB5OMceRxyoD1rpsZIRBLpRLx? zrOMWYj)t|*`B%v1p`sabbNu&NfScz)<0%)#{YGD6Vo`2pRnog4IjPi_{!B|TQ`EW3 z{1(XEM~bBlu~;WcosO)!TnVr;j5;0J?DTMy0<{Np@TImO3|$TIU+W&NDH-f7SpV-m zuQ+!~-HTjn6N178ZuTFhZnu@kP1c_vHQz_+kJB|+8T!j?T?U388w;n6{_`NdlcdNE zQ~R`?;O&HlgT~295w^YE@h8}kPUT)M)XUAq4!;< zUU|wnise0u6mbLu6A^GG63{Qy^`wIR`N0l8LmGYXWb}(0TE<7z3=e47e@e=7=dy0? 
z%=9_rafN%bXK3l7sw$%C>f|Zv~b>jDE@}S_y~FKI`PQ{0_C7l z2MooaHyVXZTM)@2?Nc8;7K3?)zIH)s>*H1bJg2(@g5BTzKleL(zdtho6Z-KH;j#`v z%)Bg$d(+NXAJi(xM(iKtJuU5jGc%U)k0|z~5<$k_2kfpGi#wq%S^dc*4kuC5?Ln75 z3#9@3Fh54Raf4B4b`&wr5;FySw5L3E1wG{tG!^xA<*qh1rcVeGsW7i>YcR_=BU{-c z*3|FC#*8g+F;P@|4^#vpqpp1Ai*2z>3;$hQ#^U0L?SR?3%4b*Re^HF0^LO~OlMRO7 z9!@xow-dB+uzwxwtgaET@%8TEURcINV`y`qUnrk zEiLWJq+OZMbDIe@-5)4zcb3WZmA$qwb7Nt)rgx@x*AD9w|HAXsR5TOz6x9BHTn)pp zjUe+_*o9>PJVuCt2t~hjd})JqV}o~OOYm&U%PhfUTgS^P6deXytBgUSp$&qAMXzl8 z@qsR8k0>!4yKkHJt4lSP-&+d)Ns7RHPY71Gipm}i_0aa%vgX#TkbKuTP(+U(mxm{!Xy)Rk(Is?27&Wz@970Q0bgxtdAF1anft%*Sf}Lw z*#H-bemxEHGPYB%+1??xvfb<67IEA3U76ZR<$Yev^LjUc#6ss&r95=ejIDKr%Q5O& zuZ6+WtAEnh%~GwSP28a0!kMy`ZfZgaF8;Pf5zdu%KH5*)Svej^;_WC2-mjMc`l<&i zdlO>jVP|FGjhw~p-4@8>l$k1!AJRqmPB#8lHvUG*>dzlY41A8&uChkL(RQp{DX+j1 zG_6tI6z283lEjoELPLkk{4=)C7*hciSS&wf96wTO0eArT0{UedQdc%m7+9!qvC?7T zr#ihrwKq3d;hrkJ>arlLl(W^%>3eE7(4N#qO;0(&!vGJ32zgF__RGy;A!xa<23oqd z|LEF0Uk2Ig#-9Mj|8J(N>GjNu-OqSAY=!&ZLGdx+GIL_`vLXP)#D=^e!E$+l9jtKL z|G3x4+Q#Xy&Uab`h=6Z?jM3@y|Fv~_sa(+1O?K`~fmw^8*JJK2H}_JTd#dI;$th$E zY>0m#As?N*tYY>6VK-JDCc?zlUu0imbDC#$Z*eqe@A^>g3E7pw(nUPL^G{FKM^(~A z*;vO^)kK4c)m2>~a&;}A`rA%J{-5l)YJuRg_Uzyn6$ZJalg5Y^2+R3L$isnhbp+7g zpfi+7@taMB6s>&<^F>pa%K4RKdl{6i`@zK5RrbMb{F{+j6VaX1)8ptVJ&{y(a=tp5 z8TyEXG4Lc|8g99B93(Jk#{H1*A`w4DVz-wL#yP?W&^DmZDQJ(b(@t zsksZSy||b9fxvYD058JG{bWIFx;Lx3H*1PUODks!J9j%;7g`!p7q&-lFJrGSC(dtt z=9TidG@jp>`Ccr4dH}+|t0Qhs-(-MQRaI)F-t%4@|Rt1wb`Vwaw0dY%0zeVb8K@EjU}FnU*&4OKq>qZLiH{QJAEoNnSN^ zVQ2L(AfOwg!Ev4fseG`ycCbg2ck!E0D@DAYs-E8&l;gmI=8+ z1%QZkU^b0e(ZcQZiuPaG>D3)Fwm!PpFY5MgeLdP9?|1N7ya=vnZf)s}Y%dOfUZ7EZ z`aC0*S={>{OTAiN|Leu^C4s*W15J>I7)gi&m4z=%T5h?qtx%Yqo~GO#gDX#2;?56h znW(2Jy2@7ZsbFz3ViHMj&%v{c<_H-LxDFUXK?yeYAYf->glfYl9Lmm3pQR0rk(gEYCts zBc`yI13nStBV(Q`s9&zEov3P}DCpv^w`ZuYf#Jo({*##%siP&ClNpPHiSGUk@d^jc z#a;5+#EZ8xi=9Q1vBLT$rA?3s(J3}ohn>I5#s#n-)W+_*Yu`EXl9ktD=P)z!#4xRE zXZG89CgXGS%B$;U_rW|eG_7zH>HdG!&|_>~0gVCZmLUHeqB6-*g(fEm%Q7p4P@$ox 
zD1(8GaB%mPsES^p4D#4(a`6lX?;t^|mcgmeFRUpFX7qeKs1=uKocw{nhOe`!RB}`01 zj07+cO<=>i^(pO{QYo$(<{0}RKZjtB3;mCxqL`xM=}>qiCql9GBynCk2|FWLWV*UQ zk%x$0A*=VQVBS60)V?~rRk}$;jyuwEgDZ_u+tE{32)8mGY4(ma z+G^3PO}ows;{v=-l~9#H84aO+l?F~jhVU*Aq=#t=tZ4WRwnUyIPZSRJ9F?YN0=qW2 zJU>m@$r+lCe<4ce{Whm_WJHMQ>QoNHq7KvkmW`>#=w)&IYO*m&%vq6CqwR3^^GBEU zc^m56!F2jExMdB}+Fq}=dslx1f$xAcrtx!&$|PHr+PxM1r8TwgWkH=-_-6L7*2~b` z+Rz+@Z05(Dhy#n$yS}HjI1+;}sq@|fnhczD^k%iER<%J5z@k23#4u$vH(@k4O0Fd@!}-f!!sB6+ zs%^5gd)?QO+S!!N#7$3tjrM}@>Fxr#%F1++{Y`b*ZMU}s7t4p2?7~QNeUbaw`H$19 z0-+YT1Nuym4`eq8xvJ93N|?E+BX?1{#+^xITdv<7CvH>!4SayauMdP!jN70RfU+v`~MFCh;Pu(6JU zpun8yu(ljVNmX(4;WC<~htQ*{uzPg<@?tEJcpbKchV3r8Sa)_eu9EM^(h0+-@e4$C~qDjh2nH={X zr1QJc)jIgN>QJ9>{&;kHX+oYY9(xM+Z!Ub!x$f~Jzud;`- zu#LR!0AgHo5Z9 zSy9^?ivJZ4GcQGDP*XA6s>jYSy}4Jg+3rlU^Nvb^K9Xp5I4IzJwUZLDh(XEO+HwG1 zKwWOa4Q$4071;aInYU`yzjd&S4#dR-Y*y0diipz0uvGM4TegG_Xtn!EV~i$5{>Q2*8px2EAt1z=$eW?+ziRH5ku|`ltN7 zNqh{{f87M(z_((O97}7WMpQC5cvQ!Kg*)26#t{dkj!s83y)C9k1vZQ6=XgBD^)F|0 zS3{w0&dz@JsE*X-r%PK3NPyTKW({{gz6wz`z__=dD3KCSBgvPJ^HE6Y^d!XFT)+&7t9D^ zKoVn61N2WZ!Rg&_`9wMndMIwfNRH3Ph?ld*l9T|0lA|hvxWcBDA>z$k64L;T_D%LGJ_Q@c+ z7Dsz-mz^7IMF%Ct^M6nV2PD%CyEI+R&E*XT7ez%X1ZgA&G>6dBb&7W9z@wco|1_xf z$6`%NO(m?YP%bWrwX=q_wTm38+KVc=N&x89Q__tc?Tfjh^wc4=XfW_03q)Pk3`s}7 z?d~^@h6O>=!8A2I5FLZ?e+5#{3JLqtRLn=rK)bjsGZ5W1+nq`&H47WtNd+Ife`E9! 
zQ0${b8r0zVTf`plqyRi<79Mf(Us$$pv$ZjG(E7noppu&f4>N`SR}l}a zrUms;K94Ht-})kC%?3#t3-kR@XU~Q4S)lH4d5Z$^h}#)~ysm(F2J|Wz4knaZfp`EH z(R>ndY=)s4_9eHmjE*_bC%27}Ejr$(i_C2-7^kTBw|j1}^N(?zxBly!uj>rc-y5#2 zI@@2#$c8YER92s}w1d9Bmb12+wyN}h!y<@2)qoq_Svx~t6mHg`54TD^!$@fWvpzXfs&$%5Vyp|y6GVK`{4Qzi%y`_X zpM8WjOH;0@L`&W9c76b~Txw#3iB@!mSpa~#@;r2;yjD+6@ILk|75_SSfYx>}3wz)Z zLy(nqF{*O>OGwe8q$>Ba~u5Uwv4e|bk~-2BmQuww{{|C zm3gJDPq5LA)zRpy3&vuFQe0ckRMcS^fLoV0t1F^x?4fKd-jz|z7n9vzkDPSi|fsK1eVH5s=m=37jff%w%{?5W3?j^L9B1S*(e&TRB7msPkNr?g4-Ui3ff^0T z3xm|(kVm{@-`FY|{jR+kP(bw?}llg>SS+d zWe?fgC=SWd2=2m^bb#34PwJ}pp?kx?(`3j2^GH_oALM95Ev*JzBWq?u!7`VFv^qvz z9W?5W?+^##0ii#2?jL}-0wdB?NjKf`+=PnATwyj=x+XLxTtGyJp^wGk&;1yl?n8B% z2Q2K$T3ZP6XeefKp0^_}))c-@oCNSY1Q^xRG)Q{eG%;F@C)H`T0g$$&IWiZ>E7fImjO*9RmO$Fm8<|-B!k-MV-mypLHBX<-9^^$rWUx>ze+TA zmhmcU1;xQ)pMl_Rfqe;JV_yMKd=$bqnhKJke9@<;@=BIh-eIhn{f zeTXEr`?$>eEC34;O~eoyPk{-x(`tSvsM}Y#+y^`r5Wj(PHIAq-XipIPG~wCrp-fj% zbF8vL)K$>CR#QDw(7cQz;80y|2Lfb5N7>kS?5jPWHgL5w(zATTPlf(R(KQA~(st1a zH`;9MY^;rK^NqE!lZ|cLwr$(CosDg4V}Jcs-I?i{KQmR`=jxn$p9a>ZIp(H$*5>5~ zdxo8`(w6H$rhu}eYPMw=r!mf@zpL|r%};&KBvz6fjI=Z1;hQD1@fDxQPi1*tJ{!CO zVQIjV#vZSiQuCv-sTh(A*D|}Ok0?S^Fp6{W@Ir&icC&-Zh!*!Iv)$1jy7IxC z+&m2R9ln#b0}O?g4W$bkrb~0O^P*I)ufG-AU<*pKm&`P5Sxd<2lRVZ1^hJh5A(u3h zDahH`>W{r)6K?!EyE3JktHXEE`%-pjHkFZC$__+*^>YB4PcF(eKa^)hdK#K)y97ln z1r5P@e0?#oWOlQJ3%(;1apO|gMh2LSwqs-If-#5rv(KE6LvU+0}Z3~n1OKy)FrS;Q!c|#3lQCn+avcm99&3#?9 zgFrcDt`*+Q^B+COJx5f{Vc(p+39+MS(Y!LP$2AF}jyPUdbnDB{KTrhwVY0WvvM*Ay z5(Tmph2|8SJ>C=_XH|a}%GBErSsy=yKRZ0AG|v*#R!5JVS@L=4>?ONvFw6HcCtsUYU3nb^JG1DGRefZAPcvp z!HaspC#U2Ti1ZcJYfsWN#Z~uey|wp=?ikKV?91K3+!jf4+M3`e(NKFKi_g|N*{?KX zCW(-5?~{@;6Y#{NC5aRkhy1tq{JfMc2dAqa4P{E4sr!uWtKO@Y?w@LChB9d74%<~m z#L^>=cHE!ja09*lLrpzLzPJXQS5N#6q8&v9Oh^Ij86W1C8uOq(-x58XTeqH+r~%E3 zHxR>a|I4S48wj5;fXf*!Gzl5s2cN5^&4dEM?7LmFj=m zer**|Lo%B8$YF4NeU%9{4X9InTNBg6BY(#y;Ph#xhzJvd7@7}=^k@eA9b1%}my`Nr zxb!S_J+xIFpl<4R_y|d00Co*$9DY2Rg_RP6CHA>F# zcaw#6o|&DIVSagSLF0Q3EANt&*ytXYsB&U04}V>r2FUG~^^szK4*9guyoZl=@ia@{&C5 
zeQ0I9#WXHBNZUtLN=|5JCvl>qX&zy${W(e(8`O;#$%04h@(os_8>ex)<2{pDu>U7C zGPfUEFoH}$Pb+t%5c~z}K1)@7Qp)kZS7p>SA{sqJua%v=IZDg)pNB0g+1ptH%3U7v z=Ru^SrQ*u$p0?udedn1m0K}0Qnu!V?{j}f92dWDD2?;g?c;SUtkmgXM(ZNGG zQ=_csCr5J9exXuZnrKLntRte{&fSi^+loG_<=B~@s%%N}OK0Np@KPW1?D1eU=PN2hSb0@$1g;mK! zie=_mDXv$i|0WsQHSD8kPOuFMZq#Dv8*(k6wT(Ab0kpO>$(UJqXm$~YCB42RKgcmP zLGmK4$vmkCX1h22ZNAkYZbCzWQPjOD#3yk9oUss7bs^`7QV0wjFILcb1yWp$8JxlN zJAyaAeq~?;NCjsHb@(wEN@2)=#Vq9IoA@m;s{FpN!heGST5d-Elnmf*t)BgAg~ynD z;FJAMeVWt$3K(VN@;j-RN(KWcN*sM9tAZx6Wh%K7@u|#4h(C$CuY_Kw2-{0ipZ|3I z$-X%*8h?K$!Hd7P%T5aa^$Vih+-vSaBZ5?iVPr2x$H~bvvaTf^r-3iSutM@DuUjIX zk2oThRNd@iLu;u`p2@C6oikgrZQ+UL$tkvOm$`2xCBu0727`FshFU8W69l2zW!WRp z6S3sA6twis&TEtCZ#%`VD;oNd{wsno7c^Bdg`T5NGYJTBc0>sEoh@_mPtDF1!f1Qx zS2>tBmYHT(K>>`7ArSNV%4u84aa$@#N_rpsqqul}(|IwQ*&cyvWU&;_Z>f-RJbqx@sxdOGZrjb+?N5o&ME&j03+X z_t(A)^YvD4Kgq)@ac7ryl-fD!*&Y+U^fAh{K0u9_>p~Sh5^UG>tm$sAi)n0JP6cT{ z^j{_0t9I4)RB^3nyR}6)HXA8$klUm*Pr3tHTNR0=YW)CM+)aD@Y|gf z#WqQGWC38)kp+l94@Q)g}2 zWOM0ATgBjgBf|ZaW{0k*Ja2(}`Y`1kHT6Dx}A6e6V0pN7Fwx_5nV0 zQ3-a~=r^aH3o{DgP?^?{g#Qp+(bLi~JHs4!NOun;sI768+^2q`L;vbA_33}*hfm~Z z^L+1cki!B-seVG{v1?E2je40e=|=i#;SHb+M#2s&S@;5t3^`Lm8NXrvoLa(M^EfC3 z^&hj2XS7cRVe*nmExu<|B6StOetvH)LiXVt4Kya4 zSrYIYDaf5;(MmP2S4$BjPmD4XVK*I=IZiGAzXb|DCVu` zF@vNUSl8z`gM{%L@Ct8zM9@17IkBc1D$zdL+Va|70#5i{^}`LFYem3&{#pTGK$lY$ z^X~@Q(wcos8&yFb(-*_=QsN={K~XH0g`Sh>%)j#Pr0BAM?xKLax~Oiz&*2MO0pkc< z!D|PU@UoCbV!dQAFvi+6G}dctt(ux`>v7Wl0oO8iuJZ0;mTspcl!5|`P!BVVy%_8& zX#RKbVDzcTYvz5y3_z0FB(FzFSh_szz7gZQ*DPksuxx7s&td+yW^p^h_3VsR?f-_% zfa5Y5{UD417TqjXKpxxyCBxlIJjNw7TJCP>4Zx)ZFy@vP-j<4NZB>~nn&_#Sduo$! 
z3XXZkn8C~J3)GVReSz0cUyW)MSfE8bxIL9AOB=fQZjho}(dnsyBfTwnRxw}l}1aB)y z1f?CK1fP6V*J5p8Yu<{hmYbKiRXP}vH&^2Sr{tuwU^2JnIljR27Sp&6$ue%%w#-ciNj0KeFGjw5{A!H@_- zWb3npZr zvz$5R-eZx(s#OCz?ydI3G{fv&^Pwe95f#pE=1L?L|zscM;R)~+bD{gKmJ z>^v0}K8k>pFZxsTppmuPf3S{ER+h(c{fcUD;1>2z@L=7wF*NUJ((ysWH9t&v2Q>n0 z5uj%`MM(#cio6#A&EJ18L{eD^hm4drxFf!1hVF-xK^yY7@-@`cOQc&s+kN6=#ZE}g z%q!jS_Qz8)vl$#>nY8Z?rcehu9cU55ohcFt7YVmc0+s~DEBTl3D`BV zfQTJfpB>MEyRR->8WqxT{)s3d9*ey$xS?7R7LsgUQiXYxwj`)thuM`fQEs_O(ecFY~j#- zC6Eh`eRr^6x4<&>6Rx#wXei2?t-6m~88;A81wojI58O4kD*_$k`pdq#joN&iQwyj8 znwyFPYxq_brfm>o3zk`*GWW&;&&C3R2dY*!N918WB{8%ph5!|9GZ9i>ZRm@LBelk= z1+e@l!nCHbLg)E)U|x>D+~>vZDJ7@b=hg0b-(#`ws@~kr((I>G3oUIV9W{kYil0ZO zwstnLrm_layh6UKouQqHg|UT~k%N_~p_8qN#f6@WyNOcRtHF%~EIj=Km$|pPdZse% zLlvzm9hJlXWh3N4cjL2*O)C#kXp54MXNq&Ll5Oq;T1{EF@c6>2xTa7VCwcSHWc0n`X;Vu zj0L90>G4)+y)SMj+gk?!7^B40lAMoeYLd9y^I&7?{cr*Jc_iM!a8#?8VD5ALh7Z(u z%y5ZLyVs925~2Q>|MI&d|7%Zuys?+{o8C8hb(5<>OFY9!)|naa2n9F0)*T&ZrKYhf zv$&_N*-7lY5z^Bfs;E9xR)$tq{idQ4#5;+46EGD;ONWGI;VQU&5?TE@b`C9)8tGTo z3_NkXQ&2`Zz3c|ysMy*?x0SQ5)}}n{q`^druC+p5s|Nh(fk(*wg_2=YXl%p(e33;9 zEfPWYE%GNAD8>H-S&H(^W`KBGaNifOz9n9+W(@4z`+rDc<5XCkHXAK(2U1wO1bB(a zU$Gyd{uW{>V0sGSLqw5K^B@KcJ&b6eD2*>6W5jq~SxPtPRm+9%y(1-=0o+KPw>vpKdo&{Y^?rNsMt5N z=Txw3G%p3Krckh8(-1-kzd(arp7)r#s?*n2(_P!x?Q7TXj&k2FE1FwsG`70UUtWb5 zTDDnyKI*Qf&QTszvfY_oTsmBRN*?uEd{lYsnt34pYo>+|?I3qT7Qm0auPH7(TJU=k zfcwb;&TOzo(T`^=md5~79d9?*9v^Cv-PsTvt()5JwM#N#@oF8dc;%LnG;aU&iA0G_7003e#eEJ<;Y02 zuc0*#W&1B!#2Y&)v=&(XYap9v?fiMz_^m{#eulKTU~jE3av}zfhT5VOc&d_qZu*54JQFKKT3M9anQFp0W94<38~-_AzOBGxK@xw z?M?oFrliC62H^Y|hFyi;2Y3-edbQrZER+3#(tHph_!LG%60vxg=U@_kbOO|PuKWUD z{oF*yY66DDVi)qH1ub~3;Lr$leEOEgSXs0s_9E^`_LMMfG>I9|A?h#V8#^ZM;n<)F z7{f!F^2kG~y?uVd034x$Zyg?X_}HJp#A(0Jyn5=7{m*c)5v4=Ttp2mIZr@&et}T0& zGqT)_c>n)6B+xx`PzNwIDmk+PJyrK=+zy1fl$tE=$U3oRH2Y|%R6ya!=p<&RS!#V0 z>zh2d@~>p|`ymuCr#YI1IW`_YgEvi0IDkN;iqVb2`pZBFq~7Zu*B#-g3qCB1;3Tw% zA=%we<*9cHZi9(U6oG=ku##`8a#&~%OGI1ll}4befLK`}rh66j{=B(&-DCan#P9O; 
zF%$6h!Y;I?>$AH3n;v|4TK0ZM_C7!gv(}&GY!9-3H%YXxdF}Vih2?{lO>=ckVpSEP ztdm2NrALEo)8950emF37fImt-U;5pkdG1v);S>SC+Q3Y;u+V~;2>a`Iv3r534X!BY zvOMSdTKN*NOCF^P2ey{0vZeUA7E@LakGs?Ac72Zq6K&S^_ItDa=BDC}Ev;TIP12^# zt<0_UY|l1!@VlG!luH{Om7a}?jZM0(&9UumpH83HrtY}MhE(-!3*b z(N(*(c9{jN_03EkSbBNA@Q?!x2?Io$R)v%2?ae+)ZbCkLs7Y8{iQmXu{jPXB{xPr%QYnAHN1*T?$;Zd~NB zqI1Q^Cn>5PO-_(5PGg$sLx)KhC4bnZCN?c_Or5?ub5PLOSw)v6QH z2HvZNrKSHRDl)p~gkDBxT1Djt)&Wdlw6nA{6{q592`)gje2Ku*{`4O8#;)JdUpodf2*nZdj00V>hu2j zZ&|nb$K%oR@6t32QO@Rij%X7FVi{YvPQbt@ zz`)o~PGM?Ulmo(43;Q>AWrsj8xQR5De``QvwfE7O8+!gdcUS?imzpAP#skGTptRe^A*EgrUo*swRRikWlAMq{_t!x1I{JPkdRaMk= zIrM>lxb@io8U62Y>)-Nq62Z2NwbZUcD@slyBJV#I4rDKoqK+#x-on&4A3SI!fzhR!v`~8};cV!HJ0-rsQISAPYP>V^VVW)d*hkLV}U2TXk}Q$F*fQs$?y?{G>t517zZaOFEyKytqal(N=$`| zjeW|RNqfV;^M@hL4QRBa=EgsZqqgQbO&IE1`X#@bI@m_2cIRF~Eu8gypG3qTWocbT zAvOYJcTNd$$VlEvbfo;CSz%9_U`eKwUUuRWpDe?%7QxAR{m;}vq`0uqKq6wg&?pN6 z*lJK0MvS9$EXadW;~jOlpKMreT7;90n7MZ)c1H>&C=X;v<4K0T`|EaO5Q27Dsvqhw zxMXEHO{%NUmbI<~YyEt+Ep6cgA7bdLCBBvO2m1G>)xyjjIejSle}2@Vp~#@&crCxU zF2A;}yzZc_-vQLVbs>kEXx#e)4m4P(dZA6NW>V92(UtuAmYP;nl&T%+)Dpf>MAVf7 z@^z|z6r9Yg{}X&&cyH{?ECfjNr8ShBnf!XtMxX@7xWE;QNS{DqFY%GKA2HxdsGDHw zmhz9VZ=CXx9qqK5IO^Ki0$EEpT^r9|o~@rdYAApz=F_If^5gDl)917MZ8>MrMwhme zE7i-1!K0SlrgkTBq$y;igJh>*Opc2p zCT=q_uxlPObS@t*EoA`W&`^-3-ECVVV|z7S!A9D;+R3`ArEAUR_0@2*A|O1gYEPAZ zVyp9)&t18v$1%~Kah;}vQdZ?w*!EiV`c_v%i_7(5#m3Whc8`zJ)2Gw&x@tjBkL&*MtoXdKS>puP8?@0aw}f|L&2^4^Km+~a+* zFq7peU&t=xLaHv?^qViE!M>R=o{dre;6+G5t9MWjEdN#(6v9*sP#lE{=p;yF$NOhs z&?!d!*yP=5$O7g!hASXT`q>zIImKy14W3Bw0ZY!&G6hMk%${M2!O7NP&bJ{0VheKhR47+JpNpZAKj(bBnG zWG6hmjybzlVrhBCOm02kZ*bK)Q~_=m;j%D_wdMIg^q~vLAxJ7X0DryW%oPs9#>a7z zOIjHQPK!&Mz?!J-gcIeUELp5|_QWDV=-`&gyISMl5MDY5(-D#IdSdWnf^mPx<8Q$U ziyx95kWB}?8fS4o4j3@bzQ;U@V^!?rwx%*P>vAyC{p?=-<{R{oEQghzu>5orw zFigc^rwCle{MMNz5raR*k7fdTrF>*Ad1dI~W2vdl5B!#DZ|&F)^4Q=R=4kv)xKj{q4SD3V)~>9#Gu|3HD^0# zd$LC-V*7|^Cpf|9J~TL5!%&xcWap$LpkbgNh%||<(c)@{M^*b>{zml(56dF5g#tDB z)(H&6d(zfPi-|FLL zYH!5F!A3{6XDw%CJ$pq1cT4S3o|cti{ 
zw(z-Lt@tQT>Dc7f9Pn5V#oS!~$+L3FzkY5nt!&=cD1a-1d*phmyD)pNqGb zk)@~W2#l120mH^m{pC2=9=9S(kK^5muJLQWN3S=#gMszDu1nbm_k|;ki6xDvzN_cy zrHg8fFm(6`oL*&braM5mP%XP>=LMZ4MJHEbi~SwjrnTE2u@WA6*gy)Vw=!!YWx4V* z-|!(QXyz1ma-U96ywkP`tk4z$Vh1x22F%7%z}6)Hb}=JhNwfPCaw5E2Ec65_`?-{I zFYsb%wYHUcZ-u$vc7vOQn+TpC6~_)%5mBx01V(7ah}Ygz<6j{S zbdt*mxi)MBf0%zCqp2{KQ0;}m8DVOE+m1d3gocB81xsmWXza*WV@w3 zT&#Q^&GK&NErCRI+r87PwM0Qpy%FL zY$nL7$a_bSaOp~)rpHtS&691UfAS&%QwL|aA~aJXP6C1c@8>e-5HmD(Ykei?!8$aVMPau-q>RY-}VR3m#+vj0m2Q+mqon`?kF*@eiog&%lF{13z9d z<@COV$cpM)ssHT%!jx;*{3ZUX;fL}3my=tzamqhQFU&L+oBT*E(Km`s-GZ7Kw}RR9 zI;OpC4iV@*z*6T8^dvWZsh?o5w;WMG{y+fLp#c)fD{wcY=ADomG*O>gGp_ z?$PJN2sCtQ%-8WY0r2|vbyt(M{mJz)&=O#y+k}6?c45Q5@1-)Zrxj#hZRAvja9t9f z@;fYHA^!Hi=|L6~gLF=2%tm0uz3UDM6Z-c>xy6&q+0)bdcHn(zdoNTzFwB6 zx4qK_ge{+D_#;2x2c8LfV!l4ldt^S&lo#OY zsKkaPOf2C3mmW##_bQAYFNcQmC$dH^M&qP_UrLRyJTl~X%(!8UD5(}AAbG=kMaPA( zF&!)l)okytH3U}tsLa9|@2KMSQHfT5R&_Khv_uB#bAiP(PZBEkQzG)CyPxM1-%0|w zob81{waPpuvN4AYT4Ts6+0>?|MSq4v8B!+S)J`E{pcs1p%l z&F+ucOTKkJ;A&H&;S4GV!O2P}!}^hABO1n#ad;kBtuTT#DgtplzhZOJwbY3#LA0F= z*$oAAtrIc3sQOT;F;J0Evu_D=AVYFjv#*5s!@>XD>=gZ}VMwImj5IJQWW85v1q^7W z4G$9bsmnHrW7+VqsewVla)yBd)+O0mbrddDqcMz=fA2H5A^>3o?QiP8K)$NL7*Aaw2&md_ zDyL)`0_%Ek`+aPo3JG?4dYFcB5@?ndT?xM&SI49?b$qN@#+ zmYF$JI_dOU3VfQXzljLW%_Hoqoee5I)zzPtbBaITax{Su-fg=%xS9pIrc887Yf4+dR7JUCf-c*x1K0kqzP$Sg1(u2MZzatQmBIAp_HO%qJqlGXke#ZqyjLfyoE^RFe|F6%k_t= zXIFPT+fUcqT2JUU>Q8UoJ!uw9M6iq*tDKo{p3}#8T+9&CWv&kf`VA7ZllrW)W+tYM za2ORDl$E*|?C?Ryt?!Cl%t-@snBjLr@$X5C<4sKs)D^!4Fx04ZH%C#!8(O8PXk>XJ zP-is9k!dmMVI`9xlZ58d{@5)%7I53>Oc7&If9?i6?{n{F*l^hBI{17nzuNHId_LZh zJbwe9gA3N-Bk*`jzuD#Pf4Vl&8OgqowyxseaiCWk z{S7~u|I%j9b(pUwBMME(dk_JFAaFDL!xJLka(?LHbIYHXQlr(uOyQS9$wOA?67%nH z6Re7NVDrYu6@SoA21_i&$jZgZ%!5bF1NB%x29mr6(7Z_gb_)M+4Z^!YNV*G){~j3^ zCYd}%uf*@tcj4I}GsM}oU!vR)ao7+|AUnmMndw2;B2rc<*3!OJShxD9SaG+AJAH}I z=fcGsG9nx@B4p#zp4nrdvYD&&+eU3Uux;~no!|3**lOmLJi;Mrda)^c=&+hdA-H)O zj{IoHQ`T3ZU#&D%lS*BUT2n2+*mgExM{n=o`UL&-Fa>ER1)2e@PcBy)`p_T`JZvS_ 
zHy$aLh<8@uM;jhwQRzPHXb~3*xvvODuRXSF;URq}KSnkZl8SHVZ+C|Na1!E(y@F^8 zy$FM+%2}ZnA0t!2_hSoBc^_YYBU>Fj8bJ^!JER=UYz}0+fV?b7qooK#pU4-Ksr+n> zJd%JEDf&vlERtKvdO3h3P{xJL&skualrKCwBObr2? zCX#lRx`G2i+dqewU6YJFi@#1E6gKCx0QYmFE&S*6f9-4ZF5BimudE5Kdm?bJ$$Ygc z8FOI?ch_tGf&NZU6Mbko>=(L`x@80wB-l#-t7I|3&1rLP3LF~glPih?ok z-V4HpG8%Lepx8}2n3<+y0;xe4Wl6RRk+IvRs>UHof(W&aAeid1L~XXcSpHpoKV0^y z`g~0|2>IdPQiGYpi>;w+gAzu4I*CV26(>1kP$xHweQ4<%B|qERW@qKm@o>9uv3Dp* zOl60H$;OMz#Ei?)fz&P$ZQ}6H^?l#bn9rN;k0%^A2M!ApGEdOJH0>brSeTfk@nGAE^dq6!yT8aV-a@rR>iHVAoa7 z4tq~rfgfQ=lkEa5>(Ce0E52oT$lGeL?Hsw=_E!< zlu6Wk>$nvk85M@>KSpw{WO-N9wl{S=8~;<+O~Zs2YpwLa6T=N~_mkulFUmBWrDG_q z@|2A+HA@`H4q%wbqgOODTCtSU8KmDE(eE1C_^Y=CJ1|NbD~gnviW%!-1|mZ!-n5V5mrhOG7oMA;SSJ6i|v5@F>#Vx#Qv)T!u+ga-?ZMi?XnxW zQ>;*$_29v2r5hHF zL)tH_w0;oXX)pXqU8uaCb(P_+=^5Hi;6F7-fq;ulF~J1%M)!O^Ib{|BFlYLKNUm(( z;oo`A;H68!ha-W<`hFYk2)U-p(6ey<^8xa_Y~?V)xFVPMu3()XG><}baH6VZ4xqZD zeoMbD40Vn>e}jvy@W3tvmG-3Ile$rp-uxmxk3;ap5uvJHc2<$K>sXQx2c)c1j5Grb zjGAZ}#ju%K+3EVInfhpnTQGRLoCh@t-Cg#upDvcZSv3)939x?+zf^xl=w!HTwj;0}H|0GT|MSwCCnz~h{3gA|i}8Vzv>{ONcaCZbW;mJ#UK|8w=prfN;?msS?Z)_U zi+?GmQLS7@7uRkVfd_>kLeg31oi!pZ_>BGB~AR zG6wOpQeiXu-}a6n!VV;$sgwF7hoC1Ij> zB1t&t6+f^uZMzAf^vg$5AV=J!I1!u?;_zw(9!cW|N*!sU;so-F;>yg>G*Gf&-y5~A zPFcsSc3d{oHa2nAH*q{ZrY3}F=b;*lho95nb-_usdetl0@# zm4G8^y>ZFkQ8EkR5`ST$m7ZWZk?^Ww)h!im=WnzME&l*Q%`Qeoj7(x|&Shr8 z8-m+JHU3*o@6HRMi5We~7&FK9D=;H0F(WiFrK_-^D==x9nUvvjybDVSlSq)7;L)(m5t+B+aKh2Vwn8Wz1QeI|~GuB1%P@{t) zlF#(|hIc)*tjpJ|)1T8#jGg?l*2A~5XrG9Lnp2IXzz{mU??Fa6N@h_X8WRZ)DU!A2 zFH7@$izA!!jmty$zegFaS9P2ygvoGUOVmAxd)$jf!#J6F%2R2dAH)9tdD+<;?QYMq zlJX;_*9$DDcq5o{abScQ_g@UHoEY3#3*bFxn%$redy};b>H4uy>|STUjAEz}!v&Yt zfMBCuGNdrfp+2#9{Z`{nbiX(5Jzjh0Fq(#|>H%A&2#?tYNZ1CLeyKAP%YHYo++*w* zMZ-iw4Dc+PZ2<*=qv`SP2W;dA;Js_>+f)YR6o}Kn46jDi-K=@i4{vqY-Kto4?Y9x5 z8Aesz%Us;=AcEhIoTZyu;FOl0{q)DOFfaiJl=9O?^uzfK9jer}Bd^Bz5nZ$LOLI#w zpF@umx+2Rnwft)yCdLE)Nzt@!byXY&zQQx-I^HKYCx}ROr z5imm_*X_#m=86lV$*_x)p3)^fjxRG)cIOI65;XZ)z#J1-mgWEtyRRfTuy0M0a7BVz 
zU68O7DdQ%y%WHS{38^>=PGz2q#K&>Ayl}nBeZt7F$I5ZW%CHA~!^(Ba&x^~#d(O&c z#msvwLHez(l@af!W%DoNdic@5Xy8afaangBY31wpu2O>l_^hd-zb)4+0&pkMiS1 z_Kz5sc2OxSJhs{JmSznX7p`o6Z1;MIS+!mpW2R|j3^)pty^@={g=a{nA{a*ZtUcQa z&JBCNT2I4*-P@}!6S>!BHg4?3S5bpiaAZ$@G>xBW_WmMV{}rj+u2>le#;7rL4QfMH z5O@&7pA@ckC8bndBFfORB{jWLjr1y_$MQD(ILMR-C6}X)_v*!hYt^iZnnO{1FHk&) zHt=JJkeLsa_#Nx+EX~+eAs@yqWq4)=Z2HR5EO%t6VbZv)L7it zUN~Qx|0-eMTxXKonHCk9o?V&#vsdlA7pNIf&3a}=efzWJaLCm0zn9EM9eO|x@ucf;HN zpljk{ZCYW|=}XL+gO`hKa*7*T89!0`g_TYnaE6V_ijT_5YxpO$?nuS_1_K7FnJYQ^ z6(hRXT5wuN!2@l1<`-orln@ycaf&MmbV(cVQnI1TPU@ zUcKTqwP(fO0;H8Z7kGnsEMV^?OVbaH)&cu$#!xkgCWMaWMBhbl&j&B*>_kR7Or7r8-~ zct90OhPW#-vXOz)(FmtHg(1}6IlWK}XWMXu2~qzV7D9n{yU>Ybt)KJ7vhW@p9Pe8h zR1{wp(G->w75%11BhA6$(3`rJAp|j?iuHyb;}nDF$MfiH@sZ`Mht=xS01M^G@dAFh zDS=IltNKXL}6aPd~1#&<#hSi=B5B*af38nf7Dm;KYFf3FwH6xWo2}%YI2!(@uh;g~*UB&jSXMXwj zvNCZD0CZDVv!lLkwY6^U{$%bU(b5oG+P*l8x&+PNjWB4((%T)hLwf+sc#(d*xh zLJV1QUJsU1dHM5ev{f?izHvJ3uK0V36>dgMX64cZcV1e2for95QQyv?yt{c-b+k5bmdZV_R&8zynU9eilA`R~|UmqVx-MoXKTcs{mAGT<$fiw9r=kM$CVgMmgAy$ z=h$VZje69Nz8;~9f0oYh+eyp>hX#jEUAq&m9+6JBp|pH~TLpX%DZxP*r9og^&^=pm zQGNXpv8O{@oL;v9ECYVAy^gaSu{#9e68+vX(NU)HdzF(DE!gClJ`2KBQWRtP(Gr(p z5~z)}fw|rp9P1o^KP3HzsGXP@p9b$-0#o8ix(}SGh}9p-hmb9w2Lm5HZ+TPk%SZ6y zc=UXx1EEYDLMf)d#UB#uzjs9ODTR|siyD|(msaQ?|1QE9oJOI4`yf|i!s)=7tzi1Y z{ZIk~?^OC`mcj?U>lcG)s9UBy9a!Iy*P_8*q*32VhFSO6E95jQToC=I2bEE_d z3{^esq(g6(MO2B6(UxyM@K9df=V?sPN%+-#{ub@bitomL7?L6P8V*s0XtH?`BNW3EI{#`6 zw7H-j>6A>aw#LEBe>=?Npubh#_DKB*S?VL*adKtz)N--MK=Pzy8$QV~&lrwyk{X_f+d#U-7v>8eg`*(s)1%e9rfVzKufe z)QQ+*dGpyi&i8c8RF!O_W^V~DXM4}&F>c4f=A5qP&ixnjsb7-E7(?ax3{K@YMT-$e zExPyX=NCsp78s*5+%m+EeFt+!T>x&Q3+~jo3u)j#O%9K%hS55u#HYsw;S*4 z<#~5j);TWj_uk~%x!SrpLY-<EU&#YOmv3e3R#1*v{THgB^IQeBKjo67rYx|y&PN?Rz1#L78X=`)r9F2T%%>WeAa z@4jK48B;|VDe=EPo@v_Rm6B65d`I1$yhpP<`i|+_vdbE(D;as!|0JsB=cMN0AlZ%1 zoAD0=wp&{~}|qJ!x8*-o1qz?1=fHKq~vvDrEfpLYjmDUyG#*t|M_db zU5;j4GMb!sq0ZA-Wue{6Af9FTmL)`EQ--tFE3wpZ>K%1%FS+E%HJa;X+FJthS;>eq 
zjk`7YREMY~^kam7<4r)7TVuw+zisLE5WK&L8~uNm4v>~f0|TkR4E%ObDRqS{!mtlFf=0OnpzrV$Lx zu)qd3^1OIje6!O~;5%5WPzWdj@2_i&^{+l_Oh*yKk@MIlA>Sy$;r}QNMSctlSTg!@ zP>$teMI50^gpv85DGsjhQ$Opnx9PoH@Vg-OmV;A%LL1Qhc7~gYjTe84pAOdFUKeJp{~uQz)f%IlF>H{^3QtL_V@Uy( zpDiOchzWc`aSqg{Ruia~IKl9Ml8S}XXPQ6AUppGQi16)y=8B=j9@Z) zMzi;c7rQc^v$0i+q8WBWtcaU2L!sSN$8fF+^-vjfAm;qezGG_+OUAVqGf zlC1tuWGCI-n$E^Abp}&8G({@)qVgrEH7u>xFK#EVt~V}h2LNq1(ec%QFeMC1OoUC2goYJLIB4QB zhc&s{nw_1!x>P!~rVo#axWV(3Ty-}(zcz#*gP`*bk;ongiBBPe6zgcZ-(P>vyj_cJ zb>VxyOIIw33##3xrpoDvX+ncB7CMHb5-KW9IRizMNn5O}0R;S{A0ZupFDwr$)~os=0K7n#50EqeKJ7F>Ts7t+5siJKuL9(*S@9RE$M zR=Z2QX3Mq;fNgWjuO{kgY%Mc~HV^*70+Q;-PF>~BvbIeyVE2@v63F6JMUouDJeP6l zH8w1uG9ab1;MmXCHs}Da%imsFkn>0_O~Dx1*5NRz6e5+^#(lT^ut+5YND4}(u~Sqt zj&cUkQZ!YU@WyDFxZKvBr$2gkZ?Qt_icDB0Dy$3~XlcCuBE*zpt(ao3o}Fr%ooSYz zYno%Jm1D1(rmmaoTi#p6fE=4d(0I62nQJJodn-fbjT{J0W22x`Rj_b%kRpVT`tNwT=ff;z1 zHU*C;eE=^`lfj|EUVm%_#xd@lismF~V?H1&wUQ3Ex{GD)1 zRe&$fvp-Js9qWS0H-v|NNR#RhnY3U&{H;>>|0ue~;L5rs8cdRj=gY*lF|j6?*tU}! zCllMYZQHgzv2EMtJFjZhuH@9c`H`yAySvxwUPB}PF)e;=l%(YUIV{mf2*^mLW$Gyq z)SU1K9oXVP`MJf`n0X!y)z!pLbw!SMXe?J~aOFpcFcAH)FS0SS55R@a4-+ZfsbhdR zM`QoP8rQ5u{UcHCq*O?O*oxjM=?8nHY|liI%#J~^&GJhiCLEosFDkQSNLpX8f9#^c z@4zGxQnDziZyVm*k9yNp?kt75Tq6M;J3E6{N_+&D)bmXb? 
z^9{n9?mA4tzUJzI$liQ zIt&jIOj|Lt$a-ykzhid-1k!M82Lw5{No*WbcT7`Fins}SMMai&lG66!?v!5gf!h1XWnCgi&4n$UF-Kqft~$V@*nz7c6%Z=+h=e8=iz|~IS;2Lt?zGzW9x)S2otg17SYLg-7A0{Md#)Z) zh20uvt2^7~hKY@9rmt5Tiw)Bcp55Xv$l|Xcvoa4e6c0j5Ag4=--p>gXW@+M|3H^J> zK@?!3^ax3uDtUHIz1Wn{OFX9xZJE3#1j$ACSvDm_o26}wrSAwOuxFR>?RFb&yBR7s zR1>Fe!EqAKUmOvI$w{@HQL&X-!08*ARGYc;9|Mhv^;pmM;Tp{BZByiIx!@czjPCD8 zU=^a`Mk;#?X+!0#juXY<7SuErW55|MgI1JKP>fnkiZ2UG2R0@b^Gx)U(Na%^bf9Ou zL#-w|Fh>`+gYHGZsBi-&8{R-}`m>HS{GO0MB2hsa6_jfO&Tan1paE`<@>a~p2J93% zN(}NBMyEJRC{|`yR$@;mK%)>bgmlFU_=!F^fFUx3I%t?!w4Yc^iZ=W*@Zvt$;-Xi{ zQ$x=?bEy+6+R=yWu_xV0e4f!Kohew6AxNv;N0;eGhbJf<6>z)^WkvuyTZ_EtbgO9h z$hVC}diAsN6r=o-z~mCc`XG6Go8StDJ%NwH);H?G$6;FT1b@fk!gXL=eQO*XbaZtX zkBgfH=>a7jiBeWO-_w(R+zHc_8E|Ml^z+i7EQBXdT_;nE&s#FF z21>~4)zo2D&|>dlW~%q-B`TOwfZgB7UOx}7tm9JiK;GaR+Oz}`GcE^{#nlZJ^}&dA zw7spn!)&<0@B|G(7#sJ876@@Mr}vM6Z0o+|?It$XEi{%bEHGQ(9Gs;aLFhC3b*?D6 zA*H7ICrD>+E}O2ew%r2dFiQIyky+Qavb&o&j!O4S% zhtsL0N?aW91t~Wt`TcvFJRkVZ?HO-D9Y^TIVhKVhWKlw)hw$+OkGG5yn{d!~ZvV)9HQ41GCNDJP2=nT(D1imq$-=G5Lja1*mecYVA=q@+bYTLPqa zYTygBd~=-6b|NDe-cV*T$z;NT|0-$8E<>iFeK<#k`L?;5sc2e|h&i^UVw9N>Y3VA= zC^r5|wNaz)u*lH%2rs-SmSXV-y44%*Z$pSav^uy5*{nJ$%ElpbGre=8hH*odUreXQ zc4b3#=Fl+85BmuZhAa`tEpF)nBAUS>IKt#*MaC9}sQ41R&?**lw@7FSzFq;h=5uZE zPO2JxnyY!?68^-yzl_;}A81h{EmQE>zTg*Mp(0paQ$}tTl(NQ_9!nD!)6$S28n@Yq91-q~S<%mITK8eq#oCqW9Y9#Yy zL!qy4=gYCc$Gte`{Wdw>ncjI(=}e#9ZC6xOS$HbosBUx_dRtL(+i^e3;?uw>pUbJY z!TK8mjA1}+1SF5JPJ@wSV5UU0yRlmT|J$`N?&Rwy=t;o{6~vc3_D;9?!yKYy`-d)O zs(O}d>BD;Bqhjjt9%OvRdmNs>LPm5E?v1$ZZCK-@c-^C<-CaaeJx`g@UZurBwb@y* z1%QK6>ytvGgG#f5^1mm=`a7inEG{ZdcJi$*3ypzaFDngBOHB`{tS?-bS1qzDkG$No zKSbY=abf42qJQ0n+un!XL2x_3T6IxP9^Xjb-AFz-L|->PG_l!EvpTZy?p~c|{=iJe zM`A=T%hA<8|0+ZC>43obp$_J#-QtsDl+%U*>Q;dLO#K^jTAOS8CxN_2ePOP@#H?kHE(#LkmKWEK+;^z$@ z7@b`Z*1VQJWQw0gTyVYE(tBxl7?f>W4iHrSOUzp9o=b`MU0g4x0%xv5Dbo457tR(L7= zD09oh{%xT=s(7l32u$5GPeojT@{wx@%a=L4}|K9j`r*ldVmvm7NZahFLtx6-ZXL`g}fDzgDeJpAQwIC^CVH zi@1NRKC;|C7C2aXh@PIT+HP`~zTyf&xmdGY9TY3i8Y|DvRbl0#*1um97L}27qwhKX 
z2PBF0lpmarvhdSBVXq!(pol=?pRx>JQJkk%cg=iGOJ82y;Ycc1bv@$D;6bkpcpEeu z9CXrX(BbGSrje&2tM*SQ&j{S1%N*+%8M`D;kt|QUraw+ZjsRbjk2;b8YWXo?ig6zITmy$^dRu)zikylNwOz7Pl;D~H9YXnH@q|>0XtJv< zzK`df6?TT^EM9f@IvSk~6uaec_yQ`X@bUd0Hv|7dLpNu7Cp9;kvD2pZ6HSEi!b54p zqUl5W(J6*dy$@^2N?|GLXJY4cP~e?^7Hp~(tn!;fN+RA$!v+P0>IcI97igR7I&)la z!@W%tA&lI_!PxM_fc6&IbA>6!%V!!SFIpcBZRqw=4D+qvRoM8=BvWdXHk1?Fd4QLH zn2Ud7NOxmYbz@c)ZLiewFxT+3%5myxyx;@aKqBIhG2tU9g70{g&othe(8s%726y{b}lvY<*R2Ugp&UV&-z#HbK? zG>!Pd4f4wd6JAZX+^(p zMSjD!+36;QPAjSZ5qqJ^TND@Afq&oVtL=I0B@V9b$(tk*gU6Al!1!+%Ln`WkiQ@U3 z+hJAx-D7mw@cCH@)iz;p=%gMenxMI|ip{R&U|`T--SY6-WqibCU|(vz z&j2n{olv7n4Fxs6)J#YL#XnJ|uy~SAXlgu=A;ti!B4eR3Ei!T}9Y>XYQIl<{y1F7u z7e(p%Iqzj+oU2H84=k7N0{ORKfJqvkGG*RPZKNfX(&ES?j$ z&yKypa7~78Gw=83FvAB_@6wPVF|s(4BXmCus$3~2+EjFL(qM)df#T~=|9q>9r(ely z2v-j+ND z=({EA?EVm&{rFCka_+!|(?J3JQ3lF=yGaG{iMy7`mD0^Lx9S>FY4|D6wx+j*38QUa z%<@Mp%Sc=!9)DzElVkLR)x<2jyWuh%uFE+f`uKuZ1L0?eiLb)gMNC*!27od~NyVg2 z*?+ZMu<9B!rD8Qw6$*O$y*ZS^mT@9>Q98z5>aUF9>%_?*VJ}~xL#3nP#wNiZF-Uo; zj?2Q5+-T3ae^`y3nxkqrnp-u~e4sHfp~={MHK5atrLBFIsjin2mmjZdVEdZ!QMqQ@ za$-l_>SWyZy!ucQC$VvETcG?IUuX>1p`kJOFQTgllzxTX=qz@>L=Be%8V)3x_UFxWrn5eD)6;Qp@>0^R|_`2j-o2sYO! 
zCg(rwcHCcf%D-$mEzk4x4)aV8D?)6Q2U=)S+;MwcaQj;D*q&mVA5|LQ=bPYXn&79K z?&XJFXngk^m&Y}Ghg8RCX6KPD&h||#PYVoyRu6UrZ1_`%*bNo6Y(*JWhgavmjQd3A zTH_NpfA-RcrsK581KN<^5Iv12Aso@~#q|{w?Scgsxf#}(AO!R}6>eROkdl~TuY>4D zWE*F=+v{lf-%iA(K!vwdIW{fn!*78R!5H8YPH^S_`zPNf47UkTxG;l|jfkz_KZ+0=v`*k@l~4+T=ZnR{k|t`7k0KI>Y<#U%LQ z(lLe&DPRZH^A6U9CEiB@rlwOEd@<{roE~TL4>K<`+qv<85`=))5}*%&Vh5cfuzw7d zF4@Xzk;mQX0}uoZb0a%7wIvDvMOfv;u8b3+(jY~ni<6NvD|C3p0$QqLr7-s1H=O?@ zBE_`+?Hq(Ag)c42UYuuaZFV(udOrM%PiM$-=JkZrs^;ci0`>-6@^*F9+w0UR{M&=9 zOzc(~*QMTvzioX-pvD6j3l8R^;Pj`ViMOe7s7IvNd!*N?aq?*C!S%Dp<6}Gg>?pk& zAAVoM$&8qaEUh&TB2*JVKR|1RnocmW9T0!=c^7`!;;Or@bqqljBr#=dFI%%rM(YF{ zsUuZ%_%kH+xpjZ$U(Dv8=yq+VZJbNz0JiOaslfhXOZsXlO6s3scdmXyK2>=_KnNSv z_*b`&o9vk?eU+nlsl4O{Jy4$33O=3yac*J?g^TtgzvlM!UpFL?c-o3Ed0}zJo;~e_ z$G*egr+!^|=J=U9jtXPq;xdZjJQ{e$#nqP@)ff?_Fk`hifIn#Sjwnn2`ePxAg=2z+ zapk}HCrrmTc*B2w9l|l%#UhQf{8a7VS<2{%9#Yy0u9JgmA9-5Aq26uSUvr!djp|?| zq@pM!z(*Rx(aBh8$&JGJWXQzy2px-6>-zX?f{adu`Q(9A;b8f=cCqRDHCzTz8vvD% zU3eluQA}}F#i*8TdQ5;E3iLb+zaAtP)0Zr^I<#EhjL$Zlx%oZ`2bEpkh6xBhKbD4p zA%tStY+jGoJzyr`bqAPBIN9>{d+Anvx}RE+4JMwfPq;ShucQgXkmS}ylZP$sOhTh zc-)Anhr(9I5@$&Xx9;oZ<5}GOkDoaPVC(8wvucHweeju0gzZhp;GLl&+Wi`X z;~J}P|MPZy<3~IrRF>2w`#A+kUZn>+xa!(nS)UPKEy5pB_$E(+BkSmHoe5U)*Dy>H zfI*hCNS60Z=yYA#b^TGDhGw``W9CJRJPgbS-rq>QLuJL7I4t@>wSi11PpIiGs1D6n ziC@)V7lncRVXuu99duo@pq+8}1(1A%@P`I}BA(|jX1G;DW0Br#__P-Oe0CYg7LbOJ4@`S2~0z|pp$(f^uK<>IG{POdZW0b?ev&K zijr^=mfHs|k==@c@#1xJti%>-Ip18~I9Xol zdax<~Lr6=mxOW&zL~70woJ^)p<^I@l0JJ{Yt3`ExQd9JBoaE#))umHXn&Q^qzZzDTOFZvO^zG|yEy{0ZobE377Kdf4&d*t_ z|FZt&X?wOQclz7uW>jh5TId0sj+JnjC~W3QsxoyP>h(l$&t{q_!MN2P?@>sWDrwIPPFAz`KwOcEUU8h~voNh}!W!D+Srym+ex$4k}+nWOaIYuxVutztVzad@qq zPeS&95j8}579>Q2$I#sA1uTbGmmpfIQFmma{;rXoHjalQ*=@_6(;|RjxO_-MZb&df zt@PLY=KN~vzniPhx1^iBr!}DRxqsT~`nW&o_x}9oFP0_ndO5i0{(N`^5|YoGVVLj5 zV5(mqkU~peFWK2UcrJvMfu`>#b1={I|4(49cmAEJ;-sHtM6#kM%aSOX0NG0Q@62R{ z3>4jrSoJK}|0{F?_ui(^VC8jhmVF#_M?ksA;Lo6^>*eYWP0I1UwH%Pxor?_ZsXmxs 
zm5}$9spyg(s2%Jtj<+;FADgAnifthl{8*ss8vWNM$Fd(=&ua5simAyw>dL312qgOMvjna=m8?^_I9g11i zr|NpKPjhfca)q3KVO4j9emkb|izwoa_h+!DvCNxghG9kK6So^KT#tdk-nb`mgnxbn zi6UJd$_1nWZ6`?tkH3RQR2hs2Cb`ivMsn~xYeCZnZJXoX{YKPjRMSw?VSTZz(Qx56 zj#mYke-GH`%C8rYRr+=oY zFouxE{3z7RDbRPdEQZ2q95Lc;K;(~r8m05H1m_bVm#K+8ga{3(q0PX6FItI!nIEL* zI>66n9s!8$WyRl%Ea7a@+L+oI1X?L)S}EPqDV-cVt_FIP9Uoba7VUrDczV)kn9sk^h;eOHtZ(v)8;s2T5Op zT;$;n!y`fg>H5AK85txRowjps=9%w1IGH){5-HOrv(pO!k(pWpUUJ@ewTaa6Wwrm| zOagG5oi4UVJN@M}abp0+c8xj>6SXwa*+T~-%Pkzs%S~bos^cbt8`sv7+1O5Z z{4Fd@*a;S`dCFGYlZhj z0?;*mmZbR81+Ml9rL$u6uN7$Pz88j({Rwqx%@}6Ts{>x~6EM(!KAB`~GTY~F(2=WG zW*cWs*%`1#wY}f1)ginx$EpivC5~V@6`4x~LI3f5fIqcu{P$GNPRs!VkT_HgEWRab zl=Sj!iXN^AvDRYKu0pw);0+^e}AmI7T+an5e=cB!-ObyZ81co@7ZJS|ZlRKE;*)!}7d$>mCTbBO>%2(F0 zm)Fw8)jRIb86S?aoxHyuUW)lPJzw|eeCGJJyk9@obiKea-%g^Py}cm#J|9lnfsgD| z^YP^!zw&+E-FbgPn$j6Id3(7b=RR+Fe_UVjZGFApl@oZm+kTcM5$i649k+`aP;V2NN9! zjJ$A%USWq_VY8b1YR{A@t~6?{U^P(H;CHly5GigWf>LZxN+MBZsrMVG5!$GPJrLAq zWqOBIrl-)$6^YSdk==2XTYt*Ipx%50`3oFZt@lVlSJZpkM0-2cKkz|5(k%VtX@jKc z!-Xq>6e-UErfsjIn(i0%wi{Rbjy()Q%xo*vG+X&?Y)v(55LAtbIWF zXC7+Us8**@*QNe&EZ_esg2*xumP}-~SY4TEWLr1FLK_+`LXdLJ$gevCb)(c)l|&=h}w1^KhwJl{q+QN=*jT zwF(1?)T|8Sr5SalgB34~ZC;S$6BGa*OpaA{R3;X{1{xi3;_WvC-aWX~^9U#f4&QHT zHwe#6ct;_;KsbV<1A^k-jieUI17fD3JR>^PLN%~7+e>iRvr04(Vwcr#w-FB#DOba{ zhz!&BgxilsRFd7u0)tEv$YWtRK^9n$YaeY+Ol?iuP;Dyw-3tRFCn*GrjC>W5oCQ|o z#rjTicqB03i`O9)<k*Zcivwd|JoRQ8pe&&QRw zHxLqzR70wCC$q2k1ng$^h(Tl_eRB;2DGgM6kC60tSVZdY7e^Cs3Da?vyL9 zR4dQ_*52^#;FRoOm1^y`uJ+0H4jFJTbAR|Lhej-g$t;G-%tnhdeh*uEgyTFuM?N%C z$$G2MdeUsWs()I(I;FWHD8Pxg%u3PEPTJE_!dB9WP0~$6$vodXH-pZs6>(bql??VU zTy|Dca#jM2_-KASnfM4d`c=EomZ7b8ugrU2VM`bldTwQxqYy=BVtInSAr;)7lgc6U z501CY%kvLFBGZAhclZ!!XW$hmUc$bRXyQDZL?3m#$^etHhX;4H7s1E@?`%Zf!VM0P zu%=LSRTg9U5ITI>8@&Zf@#1LpcUUApd!Gn6oE_AWquXcH0iJ?76+1W^89NgJKQ$z! 
zPk*>rkixhxWZ#CE(*y2`{r4ps;~q=*aThSEGxvcplC_OQ=Qkvl!w8~k;9#P&a$~bI zBz7?*#uO@=4&zt$Jw^!D6e1i#h-yQQ=0=EEy+_n^e9~;%sy{yE_RB^#v1LjMa22?* zrLT?|tJc!ASoT~G`v%`&&C^j!-8!n5NEa6-mSD-noGdV=MrvaFLIKrB?OPMp&lA-$ z$kg&qyQ#nx9zVKcRAHUr1=K7DW9fi^ivJEZ@q2*s?g2B3_{NaP$E^gXK?szjKCR5< z>go#6lTod{?Dm{{Iz70)EB0r|dg5I*KVVR{dkG{Kj}?pS-xZO^{KMgQcb~CG`YAS* zGm}Ywk)$~~!LDSz)eQ7gP3r1MX%r#=0;;q@0hiw0f!J1$^tP9OVB2&ZzOK^W*`7!Yhm{?p9kZ?*H?qU-(=5x0} zg;KgO{!A_SSA09v2)8SBD%ZbDvRVz5H^dC2)b{daH;ZNW5{-7sRBoD7jj_`;pW+5W zpoUPw8ifFo1;=qT$MM95aa0yMBy$bi`KBahXVp7c){#+Xs!|7nQUZnH(G$96X+uwm19Ja<!(XG*uAlGgy!SJC6(A+@0*cc5^125IANNJ-&3{JCk)hA;Vp(iDPeDbZ+>B+rvxTxdVFX3Rp(w8H1Og48`5q4mJ$^)PSMo@Y6 zfLR5UqgTD@wsrz$4r5M9hrnYA{co`+xh%%C2Hmm-ce9ukO4uY=1DKOSvU|NEe1RHb zg%}E^SlthDe{%%`zCU0jNVF`=~C&ly`V%#Rn+&%+MM0^3!pC$KdOmu!iq5HS? zPEHoOfNbksINk67!TW?Xz3#w4J0D-CZ%*5jnD5hEx~{j^;~N7v(4*Xc(#-~v17PtO z@5@DUargTfmCO6m(e?Y|*j%=^+wDg%oei&5_nXz(#93qWR+YC)^ULQ({?(?H=acc? z$=(6ke-}d+MeDN;DGDLb4*6cxCvXcU)PNE?;?Y$cYGEq9GDin|r7cjIb$M3{(+pEl zbCfv}af&(kiecZJjS$51W9VSu)3JwA&AT&sN zqXFkPJ!EhfAs1T0+Rd<{tnC!nv2-^u={mw<4gaj9p#V!7C2(S%c!bPkkub=+OS%&Z_aBvZ1AoX4mA1|81x=Q-0CF1 z4E6Mlzx^#6l)m4gaL;M<;oz>}VCN@ZKl;_KGSC^#=I+#dMI!iaF}53yD}xxQB1E1% zZC3oUZY)RB)AdJ#gO637D^fPSZxAWc7je;@CyAXaqS_e~F{*9izkZv$+_`7McaQlI zm+R~28Fc}USX?$>j*!J_W5IfT!TL9o*_2-JUBFF7A?BsR?;j}YJ!aA0(8y;~#c_4e zcSR#pQ$itLl$dW?R+LC7$z45GtG$;`bdZZavgGd&7Y=dCfLy&{Q=xOA)U{Z$x_rt> zZ{F2|%QOpf*wExIdDP{diLE$nkSc!`&MX3CXbusW^gA#d6BjWfoiQ;WU79R@lw+N`_WAu(VlBV&aU+wv zeJzuFjeT(J==x6l=|{xHDf>CWAcCyM`~`5i^xr09R0?u z=%lr&ccu&ppNY2@W^e9cYJMfVdw}|4hItV_t_MZx`Xd@)aIGh9oJzH_Z)?|wl z?V<^tzaf|h>(!i6HM)snSAd;8Dys&A@`%c!a;NC-u8|Bl?;?9!@??bM&PI@MpH z9F%{*-miu8dAUCV_r6aiF$-Gf2k?M^x8md4>!A2*DQ4g~nD3+J?*DyBukMeJ;NUIq z<<})-P2eFU!TV?bV)WM4=l$NE_s9MI@KrNk$JXn$7eIp`^L1@KfA;q`TdO!Xw<66yyJv*W3NUK&5gK!X`Q~!^9NOw`!KQR`;O-sFt}Zgk-fg4>^>d zrcN!I8C{MsZKX^4iVQy4Lzlr38~Zz+F5{kww2%pvQPhyoG{UN`RI{&QSLtq}f)kH- z(P4+YqlaCi=v`$BL`$mw{nFPpf+v82*VP+_05MrZ*=PI9EwSDKElQP!``G43<)&Lz 
z`|olO2s5(dHUHlfAn^+!eDN_-&H*mL_I}IytbS_MIy-TScMZ(u{Nrim<)Xjk${!5= z9C7kZO6zIm^&I%NW7xbM;)3n>8OQH4jdydkxAV<5>}>Yno;p83I}PyWP-R@VltXE& z>3xOTA+?k}mlJFbyr)5@wc5?}`Nv@f=cZeDN{N7}YH#F2U!(>LZcqpy=hNL_20elI zOgc=+@c4w+4zy+kzN|@6$3Ur&>1mMaR0*QPjE*Gy1g81m2XL`F)d)+HLo`$F?>akE zFPcf-f&@e;KHh@y+@Ynfp3<6|aI`x0mz~yb7kSzb&x$H9Q~;5HGgl@c#4NTUnsfOI z#d?YrAMULs@$6RtDNeGGqYJf4)RCU82r!zu{})!jysiXC7d0fdeXWpkeoy7GJgs(o z&9Q8mH$BU-u7yHoEwW|GrSTzc@@aZn${J%Tvgw6qHp~Fw14VTkg|~L z#o`L)`H<3JllV(X3#wER!xqGJ5j{P+qv!whwUKrdPw77_`uT5$*}hlEjx-*RA#UHs z<%JYZZ7(w=G>vRP{sc>kZMoUN{w^RdZlUq?VS*ZHLNr*)E7UeBKkb{%jU}`1-1e2L z;!Wu`@T~H7lhOiP3rr4q$Ztd+^(u6L1Odl+!P*&n{bI=BNpMY*G*RdKq0w%U@;_z# zZfw4Eb`ETI&Qx(y^t+Zw6#2FOhjw6?Qr2lG>pbGOI|6Db!1^K>-&_aCUd23on?w?| z$s%i`SMbbPk2E;Ps06D&O8L~aRYtsPe)Gvp8#7D!SB5iY_T?SW8LS%r^7z=fC}u5w z+({OIDsaVfPy9W^sD}L<4L&6A9vedZJ5c}%ArWlnla7$Ql2Dsc?Ih>am8{F+)2G1@ z^U+hsm4>^~1{1<@#$GVZ(&pQ0k;E%GB_uN%WQ8A*%;t!KNH%G)HCeT6Qs5SR9C}O3 zH!Qg%rd1bClCX}z%DBY%vdUt=_HKi5*t?$Ze@(2RRCN!0#2T4z4wtS{_*@K96L#%B zqD;nZCC2548zPt!6J^t|=gP~gcx90}+*Yse$<`Cs*k)|qYAGD|tTlK%&fTtJA0FD# zkE*bZj)IULixH3yL(Z}hkP|BDoUg2+FRi43Ee+l)neTPXkAJTYo){RsJUs7~o}Br* z8Qvd0Hh>4t%_2^0qFv`wUSCXif31yx*=L#@>%Mn$vOW*U}l-`8u_9Gc&yT)HKrYym{zv1=L0Yy^F?zv?7p%6#gsl zqSab^;>9O4s`y&*_hxV^2aZh#CPWA3yOlR&(*6g`${TXn6}aCpfeX9bVU$m^S}~uG>=~8pjqu7# zX}l-nH(lwt?j$tj;V5&NN0y#;%{>v#ugkQpdR7hYFXd*Z4)6H z;8IcaV};~q<)+M08Tmu+(y$fMaek7Ti2Rd^vAOR6%9xiRb1(Ec<& zay)TU3xkKJ3K#y7ZN%|QGINX?mlI9p+zz6AJnkP+ZR;X!6i@t~md2%)=GE2=-r5Y> z+Ol1U>V^7feffWF?2Yo8+z>AiOA1+z>Vevhne`mZ9+?DG3 zBfTv>B~@9a$m#03q5QJN?%WqSYi})+r=8cJoI1GS-&{AYP4aeaA_{4*_2(ZF|7J(> zPNvn1&h1dz*W;9fj;<>>(GqUWhfg&ax_aWZt@=xAcHlDxq_ftk*E)#rX6;~XL1Ok6 zN}@}#hl{4Z97^loEO&BaL6Cx?IBESZ3t6;L@b&XaiaXR+_ts{}7M8H+A)WySS8k!Y zkQ^Lm%3J(skbt5H^(_<|0!I_c=>hi&hhuHdxvm^0fHVXf@)@GIjLSe7FIR(qA6^`- z5gj24HnLO}l=e%4PI9!OHL*&Wbv~(yge1SYs5pDFYaCd`U<2@6a{pj9#z5Gqg+ zD8pV2T|H^k0q`)TbOes71l#na*!k!=_w))b(um!RP{k`^+)zwv4sO-7C2pt09mZx| zB*pG!>Hdh-wV~(xB2Rv^v8!-0s&)R``q#xrgyy%$b;xS&9#`|#noe6#B+p-))4Y{b 
zm-89L)na^4K2gQP?`7toF1#cXiV!-3WQuj!t};@-%G&xeDq4-Ks4SC)=;)}~I* zx=zNn2Q$;Nx$R4I4$}L*v2bg5hFqPG&xem_>$|P)E*Q_MDFUZvU-LvaD4$wgz8=}< zjyq`m^YPWu{kLY%MCI`{*dq`_poF4fA@}#$F{oKuhU$LaoG7TCwXJS%s0?x##8%4En1>B@_mu^uik*_z}0| zotLg!^^(hEr7cd{CDGftsB-!p5s=D4#TC=8XvRt@*WSvLzKqJ3z);RZE`Blrh-pi@ z?BS`g5tTcqlDwAhd!f{5p;2=wk9}{b@N+o#k2wE`wCs$e&2W66RbG(ZS8gq5Jc^Q|UHF%wG)lP0@M(c| zoby`&T0;2Q^2LPF#>M#J;d@a~$6`?iaEjQ<#FHu$OO%zj_Z$fSTDcro7>;A2&}r1v z_VnS$Ric3VLkteS3mr6hkDOt4#zBlqu&?5FTgFpnu5BJ*#VWx5ET?>?qrNwU;o|Hd z+U?@{@UUK)T)zYjy+>`aZV`yj<0(%9vW4uQc|hE>tdgeRU`aR5(zVnXA7bVN#vI_H zt+nL^e}v$zwL`i=`>k?87V37iO_n!hY4kqr!golH;xgUZV7V?wc=%@s_;(P0WC z|Gv2Qn80oO1B;c(bNY&)~^Z*ljDN*-=YNjv`{=g2;y} zVhXhf78Btl|BgXkYkz%7wtQMmea_MMjOs_k*kIzJEjKJo3j;fLPEsf#hBPRTQH&nM z?0gzfIYD(9zN~)gVYu&?uPSxqPIf{j!W3mF4>{&i8Jmfk`)}g-(Q!Ok#W08af%TFg zmkyGSuoUc1R()+dNsx}NGnL(-iw&sp?&3_r>tV2;!l56;$bPA?YrQS2^Q+YX(VfQE z?j~)p@Ql2eA$1^=orap1mrpeuR~P5w)phsmXeKk#7YGTF3t0~byx-R%8+$8d`^6Bg zDv&NMkuENzJiZ_>-|p7VvbS`+zqnwq-uBkso2v=By+3N&@9<~Q<|oq`+^@PkKd!5@ z!}+=UJ284xz?u=~Mk{4oIs!74=rRx>RM;-3}z+{yR2QEMtFfB(dk zEK3WFu7m2M1k;oi27&~kbZ(KO>gxuV4-=*yx@#iSqx0HMa(KJ-(eAnu?QgUj9neDZ#022Xl|zo{@J;2v=KVkJlgM61)LcdlGQWN~$M?VGRQR!6^gUekBdiNd;JruI zM5RFsMYP5KR(oYrW^NG5^YxOR`n3Ajw?Y?pw-w=d?+o`Fg)oywZ*G;b?+^JnLL!{%4oZn7TU&+~6c%vRqS}N}$tA?Sg?itYgNNu2w z!9*!c%T(OgR5#Sxw23^V$=c+D;5&IqA_g~F`#5d_LC9QOZrr*}RE^tErTEdo8*yb|Uv^vburv1^V z0$yhBX0i0%8HOE$a;SnKm8B#nYx#U<^+L4f*(6P~vC1c)2M#QAzPFFQn~td)18~pmu;Z4IEQ)@0c0LZxe+D z`}KW_Q%`H+=AN?SXL|&Jaf@a#5>9Or9^Lq%0Vd7>1^g&wpg$^D{`Cw4RKG@YI5*t7G`P^8;ZUd;J~HgLO+-gjH>=ZK5U2wI*Ix&&|vCj*X`J zg_=$(b#;2rH#U&Xab8L6=pJzEuuY}t<-6QAMzmvnbKJ7 z6N2@>juBf*YXf+bO zgX^o_B+3jOJ~|{wPc3_Wb-h1YD!p=_Tf@me8}m%J&>lf9`mB8L`c~r)JmMdH}cO^#Z{bm|3pV$L$NxE`CQK?&k~W{cGU8Q!*po6;HE$VMw+`GFxLQCh|E@&hh4(gK!v2e3 zzwlZ38=cD_cs~EitR~b|Ah}EK^(LhsM%Z>BxmzwQuqXXd2`?;|OO4n*3DT-Em}wIy z@JVUVJwHT;S|!UgE*fr7b74}qmrV{mNohjE4Zo)yav5UC?aSAh&&TnzfBXeG14uh$ z7e5JC$prczYI02?Qb9SeD~e0=D%#oKp*8Ml3ux?N_OI@k4CKpV3W`qF&UXIKO==E> 
zI;g7JI1{uFIrSk9Nl=mcVkTJ3oE$k%=D2|<=AQlpy4%X?dsIJQ9+C0EX@vQs@Xt=T z=hxAdG_S~ixL-q6p?Poz)S_Ay{;izOFXZ_~i`o$UeNg8C(F<_`Jd$qyknL*--5eyz zpE;8GkH`?i{Eq?|?=Wd0xMrhIt}w&Q(ZV)0uvUSZ?u2&FfGuf~31s&jxT#+T(L2m3 zkft&2Q9+&G)tQ(kJkFwsK#419O3g5x6^YOC60;rOb$vtq!IYLYs!<;F)b~ zfM29{NnW}kuGIO$Pxjel_x|^QJJej`EJt>6Ix@-Tlt_jH->(AN zD%eb;Bwb4x(DoQLp845FnkKS%8vm$@$y4geO2S|?*;x3yYYm)jSwo05HAc7 zE%zfVjYd8-tagn5qYw{fh%=b_Pe!-CBYR(_6f zMDw_udz}P1ivLS}Y1J56uFeLWhk{km+j%RUE-iibY&Elkn%Re(n}?j5$-6IQuB@cJ zR#MkrS%O@k@@HQ^jSQ^aUx$}gvbQ$#@I4Fa5yZFnZ^^i`8lJ6D%x+bOt|1|WN9{I4sGm<~jYiCPrPJ&eL6H8jo zg5$HYKy?H+)XW^~1Lqy^R3|1sQDoP<2poBcc275KzL*am|8pDQN2U5MjHE&aT}Fg# zs_+9&R(a@!a@zydmKW|VMr~|0wr$(a#a3fm-}`2EZeC_? zZr(5-^QCKoni6JuL>Rx%-`w3i~QdEV?uZh=8U%#*FkDbsDwIXSWKdQcc7xk z+>f9oMg)Vdk+bv8?`VIk%3te?IGTcpBTGabrbDJ!7=E7iT?jVMVR<%Rj8?jsI_rJe z?db|6@6UJC<5#LYhoPA4awVJQiTe&=)T7xoQ`^;xNUflhpHeWYK?3auQ32oO>)EL1 z*+|HS6+5dwqvM}k`N4+%&IHx-pvCc`#X;GwSRP;$K11X(A4@i)g%u*pSG}r6f@EdI zy$r&gTR~r+yO`fR54m&qX?uAiHTH%M8jy=Fd|JrwDz@EO~_D!gZ>Y(cFD;GH5HM=})#F+H_2Bg8R7(eZP-rRTrm zwDgE+=F#KS0#(&r=Y{w0|JtRjutT1t?w7iK(jz&B|T>)9UK<6hjaCD zwf@Z3bP@x@6-a}YLmu(s zdCn40_dO<_w&+g>?kdu>>jeSEWbz{3Qqe~3B z5>6J3A;cYDfU#&IP=5Yo;=M-1CUFnjeszy&;aj)xCB?!!JNBhwv3G1uc_+t)2Ag-H z*FOx(L|vML+>?6wGSZySv|eUU;YcqezS{ORRs=DFc21gL*em*H)k&=iNYsP_Sg0iz zgUVnhWQk2BwKHS#T8lHecWKUa8i{T+6|T5*pbo3Z2I)Y18U_<8&zCVG8>eKC<=90g zJEh^bKZM%BZa`xa!T+j|L@T1@CQDfwWfY6k&*Q%Q){wB!%yE{{@rcN*XYlFkN2u5cAj9nbGU0jh}FIUPBXKMvk z8Eh2n;6~O!3G)J;hsO#W9QAFCGg4E!FLaq3!l5lrh6rD+G`^9A!cj1=!HYIh1f_qi zWzwSaSpYe?o9yrW!A5?4ss8=VP^w-WN0Iz%$+kDyrM^_nc7_YlR<8X1$}$=$Mi~5H zt{%T(M_K-9O-XOI#`(k{kFy5@I|Pstuno4Awzi=-9y5ILw0lHkymJaxV7y={bjj%O zATvZ@_V-m&S-H(tLU6u7ye2&nBqfIW#e-WU%dw6!axmw;@*%d&86*3tq#}-AG-ocw2D-Tks#$82 zd%1JzGZkOd{TZ>}V2o>7ED}CVx3E%c4|}K-7X@-Eo?)bQl*Tl-hw&hXE{w6_h8+UH zy=rPU%yvt{Z-FXv1dY#>pPY?hy{Nz(+Wg=bCI3B-FRnt=oC~4~uS;O;8=dMm+4O95 zVNTa$k@*E2bE`;xRxJ+xULr!1hewmBuO{7IN^HC;ZBMyg&Yia2`OZ#sDF(g~`Yr{b 
z{=Pmf9MyL)Hyu%0pT5r}WNsO?atB_Y#p#17Sga(mFU-BEP3qILTT3N{Yf*M8+vf#m2`wI0-K|o2?7?*;YGjid3 z6jN!Jt45GB0!q#HmXpR8%79{)DM+YdlWPGg8Dzj16{!*nNBv`d-~$=`dzvrmX)xK7 z;SW#nLF>J8ql0{*Yf#;d#G2ylk90DKMIOaX4$nI$?6Jv= zvEc=V8AOLTG^Ye4HyKnH>9wa%Yg!e)PEjl`P{X#sU`@4AHk};0LDw8n{Eeq*jUBmo zZ8j^2EHtH*Zlqlo3b2Nq$q3_&fn5A~20x1V_~%8&TsHuIpW&0#33?Wv43P*XLDZUzL4 zD|@mxRg6VWvL31eOCL312N=%vjC>)(c)a`4SNZUDHnxjI+SA zdR6X#qM(57e%NxgnaDD$y~yPveJNHwDsQvgzK_pUTp&|!5$g|k)xv=u$ok@ z@{i)K_QX)J$#J0xuIZr;7RK8C98dx$<9hyzXL-EXtiL>BO-UK!1Zg)5+@INe_0c}p z|IqBd>T+@`GO1xKtlz6D*pXF}(`5L2!?j42Ax>u&oR17^>`0;O6E&kvQd^D{%Z-U4 z4^@bfR>4FGPH1iA3U>6X$z_SHNsYZmd4~LRKIt8K^W#A#cEL6;0FWp7AkZ{#(+fW;fgLd9FULsPnPh*$%b70PuW;$nikL+(tRq2c=1EE;HROvZd-|WS1MfDR#HB zU)`QA%n92j?4$m1t!t#Vv}^Kmw+2fEYp?D)>ssARUDm{3+f4mvdW5!8+RpLwegz)$ z%e5~L2k%pdcbiiKhc5!gVJ|nKFLmaW*fRz19q&%Q zt~QpAKDMSlzP4uWuZtV0e z6~cns8@_)CNFz8mZ`0$T;N4oE{J?9&^bugWne)@@Ak4~gZP53ywK?m38kn5A$bV~_ z!4opkcs%Ip`c+rgGX6-Qi)_70JlB9E(VzD#m&B0%JrS7t8~NV;fP32c88Cynb1qZ)VQtR=g(eWtX?o`6sgw;UJ@0r7f;x+KFPfW^=)~f*z{LSnd4^zp^ z#ebSFez)}PsWhSVZh;qMB&I|DuXV?r7iVb!RM z1;d|v?T1!73T_w^uJ`^Tsq7H5a{qVnrT6eS=g2IP)M&X9hEeQqQ5n3Jsl1k&T=-gC z={j5};qu8}B!XPPd$~t2SvVN?NPp2x(8gYP3Zc?2)?T$1b+nWv4w)7?OB1CyUfhl` zYG@GK+wrfi_;xgsx77`mhns5jKHAywFRc2tG`>q3-|ifOQbP}M9Q%!=RDflAqqW!8 zD)a8*;`Whp>jc>yEPPIO4*zr|+*PA`sw?Jl8`h(hqzh6s6E#+((|yIFHV19^ejcGo zd;VJW!tR9I^)&aS-bRjRJhgY$+~Mw+f+vLT zFIf{qhPk?u@!AZ*j3CCEpi7DTz9Kz|j5dK^Ws>I|a+pE~mjDMYFAEVbhbu~nDm@7A{!DI2K>^~3=6z8E4);ie|t$e=}C4B zl7$sSxT%lEx3<$w1 z{R@;D>zQl1)|;CaI6u`{KARj}OYh%xZ%D3>Ar5V! 
zvKLa%YI>=!(Qn^ex7_}eYi?@k;B5>~vp=aYe`=jTE3;=!n{aA9pY6W+KiTO2+4v^- zssAbXsU7mCnOCP?=8v&+^{W@psSFLSJCVVygqx*MqPp&kzc-ZR#p#wl_(H>nIk;Xv zW5}>xOvfwAuT%J6j{LsG-mftcc%9?rXS7d5yF(60jAv7j!{I8>Q>Z5#FyPv`J`^y) zL8(w=$z|a%Zh;U#@u=S1g@$%Jdd_-3Xjy&*+=|OW+hdL>giAaL67(uS^1YU#zq5~a z;V^Za1e3m4q4+R)5Mzugj5FwN@rW(~38b!LI}ex5rWEI@`iqr{uuFxAi*2bkM;D%G z8CG1GDb(^GO2s58znd=xS|zHvEfnMhP^nP*fdCFLsZc41bjF}+rovsKf_ge5pti@4 zaQrjp624IQi${bXmufIM*G(IaG#A!*uB2tUDbN;q9UVf{aOlo zsMq69fex#RiqyaO2l4nSK2#N$+J&7m{GIV0nV`Jt_NTM${Ow)B+`_!m{XaT{64iQ3 z%2pSZf?(tq1AvG;8!tXM0hq#dDOD)MM?TvE1Y{bDagvNsb7wt3RpjEsMw(8Vbe+tN zOUN=t&rUnU*3tkyk=ccQ*2|7(Am7UysPp*=?cA9{6-*XnrT+=?0#+tLa#*2uYb>CA zxv_+a23Sf{O!b3B(XQC#^^3XHIIuafl z1`Zn8-GCRMY%0EY7N+xi6I^l7z@7?FLiojvGBP8-g%#tzxT7uFeb0-}!Y?`sBBpJ; z_p2nVHQr0v>hx~v)VlZ7x$$g&ymdR8GCO^-dcRSAUe}!(u@Kht6-6CKTFD&z|DPl$ ztk+rWv=gel>nzKG2yiW*9bVxn7g{=(8sBe*R|uFW##1#%x6c-?h(Vmx zQoJe+l?}Z22T$m@3StsHU`?hq(3zVC57w`-)z$HN3bXw7@@POS z*gg6F_HZ(O@vnGsbFnbFx!T&yz`@*nY;#K8*WdC;(?$fCTzwgHGdR_xL zHhJGv2w?{p3moLfT`|%1H8-@2P0vbrjm+%Gj`luMpOk|v$G)z|NQj8UUcZ9ApvRHW z)1G=>dsqeT--*bF<{cXJJ8X6~KfwVo2iXt48k& zRub`%o(N{UMna!KLMR6Dc^(VrITl4VU!*HW3L1l{QB7A?y zE^HuGo<7>jGDLF03B^w1JCV<>tp7>I2&J(}tDn$e< zqA^&7SEPtumA$jxj=Cp&CFW;k_}Qv@^)5Z`djqdGS2x;Bd~DEG=&~FCb6sq3UIT-1 zL%Hqx2Rk$7Eu9Q)^T;n1qxp5B1x^nei~YN&{lS!KJK4^bg++8)OQbS;2YZ`)6i#0C z1vDIje4kJ|yEv?Mx6}WH5w#*ib|C^nWCB%z5IYB^W_hR9Gl1&HI z+uWZC`x0$c<*sPVok8x?hGpsiv`zovEd4(^X|Jl4r1r7zdzjT*8q_9e8EeA4ZOCpe zVU6zMNJ!Z)SER0cD#sVeSGR8Cpzk{mfSVl8*yl~(`P9*_@H?cgj>7~aN4J;4^9|u5 zZ^Y_#vzi^hvUiqkq6DXm?BF+n{u-`C9@NGz(wbcENDG8W2$lFqTrsGl9LtLwb1TRT zIJ^YcfOHC)w{AD|20Dy8dR0U^rZ!4ECM%I8O_?oGbkr&fauqqtz?jrrxd|ocL+Lg% zC!Rwm?e0DlHvc&F7?++{I%^Ah>z|C)R*cq%Up)Re3-9SJ!VzhUGSbJ#G`YswEh}Y2 zp4~!ruCJs_{z33Pr4_N3F?2UH_I2?!wXp8|JUu$TG&mBdjFMM?g?=5=m%jzf9|9fd zQ?fUQW@7l(!QN{<1OZ-{NYFhlDa2a>KAWDM?d-P)w{$LFQE2?$p3lz#2j-(2<%3&j zE@yk)azn#RTi0A?GiygP>(>-)ARPkyiNIyY=GtFKgeHT-!Ys)3=KtMQn-}XBpa~#b z-8YU#-A6>-mo_Gi4u-@nF}~uZKaC#4ArlX7$#ePdywvt%2bv&qoghZfW%1&8!pXJ; 
zl|l}X!SZ5zos9Js6n}R@U~p``Ze+ifle^vx%}-(vG{u~9C3^(aI;gFxaMMY{HNnXm zEOxblrn%1_&{wMSuo3>nIWP7U|ic~t9leEG}h+Apdfga~iTBw9{61V2ZK93o>K z`JTF&unakN?ITVJ@^@V{*Qm9Jsr!1kM0Eru;@~c|UFVrcg=rX8Fu-GhwN%rAr=uiNQs#>N8 z*natWS*ERlH5@oab~Jm9ob@y3YHDH_D@8L&qZM$eI$0F#qOFCRIgnHU+HPqSO(NGd zRFzz%Qa^dhR)_p&OV1ON0T*k++`M;V>`p@BZ#kc$6cO^8R(XOf9i*ttIEk@|X1}Y2 zs~vP3?PyO1!m!jKyjB+(j-|%7DFZ6F;We6c?ciS~2s);j!IZN^2r;gBXpQ$*7J41O z8I=B);pA!2_M@aMR643q<)^{g4aAGmDb4VJ*F#N**|lZaegXt|KLQM_{6A_l%n7qE zjV0wGU>bOV%reRBHOZU}YHV(b%+Bgi{pV9TfN9WC$H*hcR1zWs08-AV|tzE4(Vv=nKmYD%S<}TM7U_0Ec z&44g>ez~1Gy)-cM_w#3Si@)ZGkUn6wkMeZyLx!_W+HxdwX!$& za7Qq7vu%ah;fC0ht3iT3em8x@tovmDd|yMmGbA-5?G&`1wyWs=!Sgd;=1&fFQx`{T z6U(!sk&j1Cgu!H0h2g&AjmmAj{W`Dj(@)XmmF*sM(p>C(vR+f2ScQL&#RgB7pDnbx z$O*>M(o-^`oNKF9_jh{7$L(`-ehq+%pms`z;?36hd<#{@q`<|V;c-Vdc8U@Z;Tj&K z7M^}CWGWqwMj;x3@#PT)CW9y?|B~rrD!dz4hbpUohARPL~d8#|~B7#JC%uurZa@;J6#M z@vG`max#!QWL{IW-4iB+V+;-olXY_Al!tTYzf&3LqG{RqpmuXsy}EB`mVP#=i4!rf z66+{_ol1A`urtNnJW%t4HTHG3D2U`^#eU7L4lr; zq+n8WUT(z?IFQE50c#v-aQu_(qgT+$qBjj7M{IhPq$e9s9kxm1AtQc;z|Jdw4Qjcj z3d0sRf97B#OiS(H;P&mTrJ!MraoM4yyvW-P!3RcfQ2*#5;->X~3+ijNmVV5}NlfIv zg(6y)W;*IQY*WAT{jwWATYGxB$sZc&`+lfzj+`2jQ%{#=R@UDN;*BUnR$0h0Ig8O| z=6abC3h^cD>gmko&Z~cgJ-bs|f-W>c+qI`PkomzcHOwL%!L&m|kF2L1D0b)<{98tH zx-Jq(5|&?`_1Tx@jGgu7og5VAqKJ!fE-A_ly7Im5vsU-m&L@24g`oj~hr;d#u%T7k zU!5N#T%I6WpRWu4QB;#ak=9*n?-L)I>Q3+EWcF;YfAz6_&xKKlVq90`aaOy2Apd%! 
zGEv$x@GQmiv7@5fXI#I}W&`{L?89e9@Ra;ZT-~XuGT&#l9w<N4q|y}bz&q%NuFai~LFo(H50mbegQtVma_u^=%f7Re+bCP3jWhQE3Kt5$1gMHxM2 z7#db4^mV2Ie#Q~Gk;@On%KU7hB64vjIvM8vcbqtoyse9+p_Zemk@?-#9;v_>oSpaP zM6Ca9t*KIh9RCzu2Y+7C>cVc)#=c{DGj(+}^`nEotNz}}9D&c?FMVAThInf;6+(+) zjnzP~`|ZW^;dGNAhdH&35DLHL^quw(A?=@cozoxO_V#yn6%W<)uGMsic2zO{&wODM z0}#(Iduw5r23!80FGRVI3!tHe7r>W1H?u4JIkwvaTt5sg46W{u-R!@$$6>2lC?gSh|(?kJ00L!N|;$>ILW7EnN+%_rfMgz`Z)ph2p_eQc+IwSLh2 zI3PO|z+!Ob9(CwNph(;&c2B_tsud@b$5FXy;MP8fO2CA7IIm4 zDN*(cJ9(;(mfBgEb78=!aUPABCwrH``6VdAi1BseWg%*IuBNa+x^PR(mZ)UGb%;%vv^tk}A;# z1^4955dZus9P~Q;)<>@jxv@E(PM?)6gQ>i4bQXt(PDd%|;u7$XC;q}_8aZwqLe9#o{$8RB z@E50~hG)UWWuwP{X~W_hvSND&-uPaEbc+Yic;wJPU8@_WPj%>FNgQ)5NNvKc`*Yw< zKK`SguAU65Fu*8&dWL={9rQc%3F7D{$IdM4WEiyt+|i<`me zI(y-K3MRv*>f{JHlk{Kdu}oZ>j8(6thpCbaeU!Adbu2|FkaxtQa9&NX_=3sO`o*ds zfdCtTnus{ryejH>r58}%fU0hce<38pij(-H=s9&Lp z$0SR2cu#3fb%`78&g$U|-MkYWolR0;P$Pdpy;=`<034P|-Lcy{ zsR2_c_-DWY-ubt-^8RBGx$@S2@NzR8Td<*muBnQ_mCoPhnQN;Rpnuz;oZ4b>s#X}Y z1Bns9+iL}CTcds254w^D1A&-y-QM~?cU5$28hF>%@pX1RoXo$R#vrIL7PSq=m+`HB zC~Lk|F6+Q4xf6@8x8Ofq)f$){Tz~kXzzQ;X6afz!SYzU-3@^v$!SV8u*}SKS(m%od zBo`h|FVYHIt(g#8GcrZ;N;gd~YCEEVlR}Df;E+_Ng*;d+SV%5$!D^%lmBLJkD2USG z(A<)QH4KW7R~(8_I3?m>kz(kN21>8+4;rYYG(T*WXO#&N_`#XlL%#S3RrSfIEieT5 z(HYv*BxBSAbwDKur%Ty*+Cpwdp?>=2Q^*J4PD0Hs8U44NyFgG_c{}@i4PJ5 zf=;1pNDOyQz#UA-;jL+JezAw#yK*DulHg7VcGg|wi69DsZMC18fW`Uax+RZafuP-31F(KQ-t#b}P%?B(`EmJeZ@ggRL(~^BilKxL2OLDX(2XuLEZI9{QTR*z|maZ6#T1a;Dm#WVO`p8PzOMqyL_S)+Ljd~ma zS!4wYXtQFDqW@0w>^EIcck!)EuPfi@K19e%gZ&t6 zH9_dAjGL;!`EubGf|6btI5IpXWyh}QB5QAM1A08TUHDN%!6H@2GEP@l{XDE*%p^1j z!mxkPx)n1MNRR*R9kEMttsE&5|CcJm+4`z^J_NaFgV)r`uDgyib;xrx+w0#0pc{PT zneTgn-Pc#8Mk2SpUQ#GgMoG!#8hwq&XVB(k_&RS|rUM;hgjh=`Qa-MMvj>Fy3t1mI zUA;58N)A*VowQtG;%(VB9)h^9eB)#pW*JHL+3`Z$-eRx&6!!&3SOi(c1=Mwodxq8$ z7834~3jP-E1{NwA2A)auAR$aiEnX;ATJ8#dJag->i=5#*sjFV31c^ z6NaloBT3M3f;LH{mV%U+34!@hc;6n)-ft-k)n08jIiqZ>KCCL=FN$S|Wf=&58%Tu! 
ziH2B#8FiNU<18zf_BlXGp{nJEW$Oe@>ukn6;d0??NLZ*^1SI+i>so>(^tg+P{&iN9}Y->Hs z*V)_Zuy6k^sDO}%J^^F?xK6$7_b+10Cqar(QLOek z#&CWoKy#NE(gG!vMO<*K6r}%Ez?C}W$T65}d^gRkMj!!+0$iedq@oji3Jj|@Doax@ z%$aZ;d|WMJLL+au>WKgL!ea}XNyW4h#geAjKO8Qx(e?qVmVuuH#%wm^%nwTG@;i7R zwV^t?n+e?aUuq}QH1~}3$W`SrmUg`CTrS=Y|I`(>8V{RP)_=b@kf)fKShu%{koL0kxX&6J%=nFM zN2T9U>T;jOVgV{HHQ(Q{`09g%_(&xNNm{nL=v|g_8L+5PVth$iN+AG~0aei z&lHo$7jk%&hYXOtbXLhT*80*4<)uoOpVr2JyEvG$UFFGx+U?P8d%m^V0NP-yHp82u zuj-^Tbldb%b+C)9IGcESP|zXgV&Ixka-VIao7xIVs2XU0@d=PF)E?8 zNl~6PS+yx$xhYw>5j=DcTkR`8e4D5iN6e4e*uM$g7e0%j>MIrs2W*&Wv7J!O zL3y_}zd{!cLL`$_^JeVfTlb%qlUt9A0IwO@ltRg87k~-mYHT?9dS|!ka3O2zY!LZ= zQX6*~>lW$968n~(NUt1e} zM(30EdF^|Anj*FjVSH)mFUQRp*WaqE?N!#sBd`45BQ5+%wh9OMuVz5b*(CVgMI zAX~#74owif@XOR&ahwP6&WMY!TVn>1!j!7?F@<~Q(9ly`!hw$k)Pj$F5o~-fh4cre zX2l8y|01Ol{vEWB>#kLi($~cuAi47S0Pn{rox?!!15%L_nC=OSxBk{nC-j%6zcyxQhf zKc{rX_1Y>uA8Q@woB_g4Oq^)P6N`ZY{grO~+i~koaB(P54gmg&uT8$Fe{^+)bC|y( z;AoH3lsf`eFl|G%Ekj1DJG9@yc@_fI27g`2W{{jqH|Xn$hWrb&`x?)R6Uc-y^m%C zbN4nnnXht0?ENEKn13lrOG%0YtOszWdD@U3?+7hzt^<9IT409Dn|9Q-K)**^N3!tv zZU(W@jFxRwGE}U;x0ue%``5G#?DV4Ndq=dqkqzSs{o9^-PO0ZS+NAW;l=OUMfFqE` z|6_Unr5FcjBfmm3RS+bh$62XUsjpGvXygfKRH|)Q3ho`O3A=yn2}r9VmC{QgL5_qF z9mZ3dfljsp*vwEf?a;$jXia_*XItZnriipqYJK<5-ZyM*e_C=@%$ig~nrh=9CXguB z`Yp@WFaRkkx{`u&qtF@M@6DVNC_~Y7z65#(guyRv)}8*xBK`jijesV`N^ZbHay;bq zeq=jZtTsBPywUes_Gtg~hxlYEIRogoRZr1Q!Ad{e;+X2TeY>jieKtatK-Z5Sye0*KWb(`MI)fB>{Sy)yt?32Q%j+_zOTJy zL2K#w?_VyEs=r2yM#}|_9Mnc|?e{p!ZuOuBQax#53EULKTVs%fssQUH)W-#+H!e_= z4HFUQ=}XM0ja~`deuEq%K~9mNI>9eY*62v?_RK$ILvsSd5!ETeek43_qWo6bHEF#`V=pBQs#X_X=!{j6quY8nBB+qNUTzDEdFFpRX6ITvU7vfcFZ?;U}nC?obED!7K zgQsw1mfXL{l3mTZJf)L`oZ8fPonv=zc#SUwSGE?tk}I>as?%gR$3~WXQ-j3Wig%-6 z%Kj%OHdR`EBn~V?a|^CfxHcBu^D4-w$YX%76mKB=>zK3EC80sL( zqWw|Q+gwz}+eAgiLM_T(UEtb{?Bs-h(OkK(JHh}~XF zCu5HvZaHmrd6MXuu!W(#acos!4W!lcdTeTiXR)oy%Gm zdwVx)cyl(EPgj>S!or>Aa5O2FpLQh7U&r~6c@jb-Iz%rz@l{w7hcpQmm=WjhqeRsK z7x)oE-)Q^RU~9}yQ7U(EDi54QhwsC)R2rgHAB7=w!t&!jmOd%YI=km&*BW<-Dbp=} zUa~)Iwlfb6Z0guBgOF 
za8h@s?6xrufR$__R)HDo72fOrCjWr?fxX^O*Wt|ks4U!~)dRQHUy)WEAJN><+MJkH z-ICOuDmNn%wyo2;%AdLVv;{%u^R|VQgA|+gV2;iXj~ap$**V1z8J2iTAfLM?b@Ya8?u_`GO^+Jcd1?l&#Iw%Z#;2Kyg4bW( zHCly-z}4fEs4boR)2&uls1*Y~KUySNm#v63_+ZM=)77ngHzGReKD2cZ@0!|a7B1dz zr|%=K&ptk@;BfpPH}TKR$@4(~*$4yFf)KxcWhLQ4;7&N$zw|XjhHPEyc8E-4?%>c) z$Rd$^H&*9B8RA_HbyloR^RHiF&_=LAEEb=>qH+z0e;52M1BsK7{#{@HwIRYH^82x2 zhAR`IbdqMsuC!E(oybC~VI6%rc;6~?5KS^zA2KuI&9mvQ)CyR6f-_Od;)sS3Cao3b z$+eYBQNCfHz1M@Q`J=Nwcul+FER?JBEZS6p-t%cfxXU4|9$Hs0+NLcOUU)c1!p3(<_uV7g8poMsrDy)zi)1|=d4dTdW$g+&SrQrupjz( zS$p;WG~V0#TzRY|)V)sWgaj zv#Vdb*RSlV#H$#@1^n?|KTLP3yTMKSstqJ>hbD@@iM+)?f5 zQP8OQd%szxP=Zhwh5x#n!$6(=NL`Axh_NzUy>Au*654^Ja=&!ftkf4;@uI2 zVQFR$uD~tMni;bB`y)&Nwbuwq$b5#39Qu{er2}`zE2-XIguLfB-RMS5vxbflj z0Ga*llM5*q`L8f>$5(78Sl@pHA0ymvalYYK?-i$LG@m@6f*iI0VTQABi%g|*vNT!p zR))&gvNT=I<*hNZeZc0}M~oSZS~>bDSFU>POm#KLkw+1u9|Oc~`c++8gj;1Q8ztJI zvs9%EBMj<;3u{P-!)NDgnc0nhiJD124R$seRU@zF+uO-4Ha_^zW|V~1mqp1EFm<;~ zGBkdo;C{z?66fNOV&Rry>6XRSE7nsoR@BVR)G3P%^GeUEj?JVe&StNd*pHiybN+(oFR2^*# zVyKLln)D_}VTg~VPFJG~ww&ojMNF4!`%*{L+8;hzOH6cMz)Vm(na)ATtohA>*b^?a==?p7dUOLjw_7p=Yuv|NJWy@K z*hWDJ15gk56Rg?l>k4MB$z`z?y)7;#BGWL96+9$r3f>%v8|T;0&=c21tBi=AUId_A7i$Y#|qP`xk=u=$)kB)_8BQkJ|m-Ux4Fp z`|)7AGhTfBXMQtSZ|o%u)QJ4+uiVX`uFq++J*)Zf226WF|2t@-@^F&f!N0j)ufA5j zy)EdYMz^j>y{R>HL${ykLP^lTx!Zr|^F{6I+-!Gk@+rptGO!PTX4M8}e(x+XlHH(> z2jjgXPsiu?wVkO$l#HacarwX5(;I8Fo13Es z+J4PzvYn8r4wW#3IdLpvb2O%L?3r$tPkKUuvE|L|_- zg+yX833lUMCv$>Ax6gTHalj0NVEWdheqqjDk()6wje`)T>1;qjYk&y^s{(~HBU4v; z2Sh$FYup36t`X?C#|l(-3(N};3aP>M81L17g-8pO15?C)GdR?goEARW{=glX^a`~c z=%{C8_)UT?vqSLeO!77I_W^Q3D9mZAuh_x^lkn-EHcCC&d}*m98~rpBp;lGL!mqoi z9@6A%CsBfH$S~E!WVu z)5MLTF=;nlrvg8l20Md{Fo_E9FG+Gbt(ty*RMp}%wbC?AA|sUwBh|dzc#6VAtjw6Q z{50_AtcY4#q6WUkLi%<%mTDUzY7JsCMUoYTS{-I%S%DiQnHcgsiGeZ&_*o$pGa#!X zgxbw|%SNc$@<|n!4>#HOBndmyQO!@$C{Hmc$uTBMi5JwzMIe)-5r+;1p<^JL9d0)x z=f@LK9)XEjdj)#tLPK^JO(q1l(ucwh)?LYg;28zJaPm`D#FGF`;;q&hzP>8aYN zt(PE#u#zb`ORjtuf#D0Gl_Jf0xwFjdeLnKnt5Y|2eq>)0If_eRx@%K#;@KtY^(`%Y 
z!K2adL-y@1!of|yx!d5yh&_QZ+#*-=D0$2{!?nmP2s`CHt$}cybG4p9_nprt$kBNo7_O6D<#>dR zxz-NHdaEBKbyUhtQg{B-a?_ZuNlbVc7%8HzJ*@m6Mb`ja=Nd#~+g5|dwr$%^8XJw- z*tTt3jn&vjW4qz|-^`hBk~@<>_s*T&Z};rkUA-Lu zXMYi2Q_~tIIB93I9csQPHd3poO-hbsy@3d{T1ITZAmuKX$PJ)xzDuaTPw>Ch;l9gp z(-+-b_PQQEJ^cIcq%2MpFzGJcX3#)B2bdIzQ0Fd@X^oFMT{o~w?GVK9L1lXtv7Zjd zgFl7N=de0xVAl@&b?6q!lypjGtD=L>C*;%L7z>PUHHcI%(D$V17*&Q87K=MnK>Y** z4QXpzcBF;z(uD*`EU3;YB{qb#|LOiJWjw}KCC-<>|d0j>1uOa zOn)PV9@WhD2VW2pf}9~~rc)VjaCiT?y9Jl)37(cx1FK zOs}6>jOhk&m_eoYv{nnu3Ulz>@IRIdFYeSbf=yvJn~-eb8EK+KSVPa(t5Rz}leB*yhOA=*m>S z#o0>h@hU6Y_vk!9@QPP|Y|MVDYXS0-QtFBqH3Xw-ynZb!d8X-!vrn?J=Uonq<(I(5 z@o)JDjeyWrG_#a&XqNz<=?Qc8l0@>(@7Uyu!fKD!>MwLPPlJ^L_~t}UO>PyuKw|bF z*D8fw?OrN2vy?7>z{0fp<{8@6AQ{$AH1J9)&OJSymIm$Esgjc=ii>TI$Ja=BI7+{) z_b2m{yTZ5q>Vvz&`#UfzYk|@LShF_Jz_qhSw}&%0t}L6?B+;|crVXvf^T|L(;;>#l zWZ#}W|IS^5Y)3|gpZ%%ctYI&ULd8>tej6XI9lcH54P2u;YXqwsXNCsk(eQVarD#W3 zh1-mybo9XnWid6$k1g8-{cd=qT-<$ z8YUFxB2g^z#Hm+9#|PlUYi}~Vqg!sUn{KeXMuEP7>KFczpua0Q$SWsQidT6kOS1># zdE~|xeu)yfD&aRm1O@6P(VOOfypy3V?eDz8wfn}(^qV>k}1dYBsWPQzb| z0T)-EJU$z4f^Fem?M{ZBSxZzd7!;~X6?oPF9@_X!ryzu}O%+cD5whp*Wx`&9H!gO+NY2$?qWmhq?k!GuRK=;&o-oWD5P;l zq+e2d<;9Y>xMcB_p;QcrMs&M6E+zP|5%~&*VjLa@wt| zfEanWPpquSrfN1~r?DiY=fIFJAC(yWcKLA#nNX>30KMO(yTOjzlS(p@Z4*URk-+=d z&KGt1PZOPWW~B6@U?kt8zdp@oYtnZZ|1r-{N?&_=ZG)DNzb=4hJoun_>!W!1FG=sN z`c6D9bOX}^5+2%y@CPWJgu{=7Ii@tW`ee>}G-w=~S9qmy9biLxkD({%bU-Z}fz6Uz zGRaNlH!K}DyMTt-`I1_CWcudkI6-`rPuKiIp?}n?{?vjg$txJYh&FWH0tblo_h>AT zmum9Lbp?gLuTk~u!zVrv1q+{~ra&@PU3*n!NWK@SVEMtd3Ri}f1lnq>JwbgO2KYKn z@CcR=Unz5}I6hnYLS`dJfmp3*I@-{t$F%4u^>%AFHfFYUCpNWfwKs;WZBXVCw)ww~ zb#@x%0t~=q(u`uoO5Vv%-;>|RRA4AZ*=X`0|HP;L#iA7h=goTiO)VIF?7o?Mv^m=Gd;2jvIl)34G`-qhp{4eBvBtoniDmS- zPm?D#gN3d{$QZyz{&7gj-l5@_jDNE14rMwAu5%n}l$+7`$KktPy?s}QO+U(N$iiE zPpGO7U16LxvN@x8?*qM{pZXQ91sS3HO4A9M(IXF%!Gy>TIrNK#r5En1+o%HpX8&k5 zp;_z9$dEQI__UCUo&Bw#^1(&9a?DyfOUU_0Syyul?}4tJqu4^GguYxb2V9YXQmvj+ zsY)z!Eg+J`I=47H(OBw~Q^d4)SSG$ei$#I~l7O1&N@U{5^y>a~iL42pq6sfipDO;~ 
z%ZV$G5@v!340xUeZ6;Jc0bH52+r#DtpJIV-;JT0mg#?WiQcp0v6_yvT_%fsYutv?sL_i0`l%x`iA$ z6)%Xw>fXm6EpTFn!|a6^CQ)sq`+O2YKTiX%fUVoqA)g^NXK%wv*^&he;i(p|+%A86 zNj^S|UYY7%Sj2m+o5gXpx{>jel|Xmc_zJ0b_C_`4Mm3I>1Q5Nz zQ^rb|5o_G9woq)GACKwO

lln{8~icU9k{S(&m&NoiQPbc=0SS?9$?8u*>BJ8 zDd217%V(NP%!>594v>x;`0bCF7eI>Og4ouy1|{s~UF2(to*k@zHlQ#%faL2?W#bUl z(YEC6%;oIM9fgCgVPBidxk8zS4cVm|Ecpih#+w}aJigF`dZ0>FPvQXBzVA1?ulyh8 z>^9bvCbncSP+=S>YE^h<#@|>|Z;}@aWWgFpElzYks(rTl)#Q)cWQ;O>i+0hf-dX#% z;~Lu-!!^3i5x@r6yrXIb(4KL;28lm&H-3A{KF$O!6CIt2{UG%Q2i@x zgy?sv!SCIjXVl@t0akd<1~P*nRMoE1h(;?(Vw14$#@i{xWU-U){S_~X ze2f`$n<(V z;BfIQG8Sw6mSD0a<3{4amAFoyB~6*L)#@8OqwVXi)=AL=?HcLEB1dvLb=HI_Cm=~+ zfK)#L3PzVzrI`Occ}a>irnB=#C^M7Q2=w>Rh#@Rx9(Xh%izuE0;&<1_BaTp+YPN3P za_jgS>v-QIizakF%_+y{eskSaJ#0_U#&EhPf#OI5)uj;zh>y7^D?rybTjxl z+kCC;-saawhKD{&LWf<@p)J1w=sK1_s$IU(;>I1IBGjL<7d*5zJ1svxF|qh-=ITF1 z_)wD_J$3OoMd(00O&F8CC2!EF&Fy{0?QM0wCR2_JAIu_Ys>BA6hLHg+*x$nvXPv;D zX2VgW@Xp#1HiwM8`mShEf3UMWqTR>L(~JFslb~_uI4ukFUD4c(N-8+;9RVzMLgju@ zY;Dt(K43@m=+v0lldtsRa@o!{nV!x}ak_YF`fp7wn1U^E-?#60Ij{coTJ)`#-P$4} zx^l}VToA~0s%7K`o~MYt8E2V&e~`QI?dVoOiO{xo4@A04d~@T9Z`+da%8mGf3E7X8RymSZo>I=;<8s`wuSO+&-?H4ldmFor3CW3pxgI#p&0(Q9Wo+ zg(mnR5wBcSbauSJy`m^xF*>lI9fB>^QElt5qdLTxgI;U z$Y0(F0kfL7G(eC84q zu$wK*etklS^dTB$@z6j5fkdj}4y*v!u58U!%bdL8{+? 
zTFdzcK2;#VqvidW)b5L!yB`b2}Y}PknQpVVoYJ@!q#mx!{tSl>XrD9;TSGZ8yya_Ih<#l{r8tA zjiR55All29t}ZmwAe7S@2OxMt(e!mmBdAN|uKHea)cb{A0xUS4N`!CxX6n=9?old)t5PV)P&yq+R!E9vP$g4Gmh@Q{n!P@2*oQTUj^;(U zv-5^VEZ}POn5~b3%s^d(H{;>t0OkO#(pAV=y-Gd)+s_y~lHynHALB8_{RQZQ_@?UB?vwzzfC>{qSfi zzv19^mX^v^mMOgZI5-;K{l#vO0k(aa!P~xt7cVo&A?byT z4Y;lL*nxsw_h3FBw_p!xl}AA0Y8xb4^fr=Qfi%#Q*z0MB*XF;CXg(pC{-t}z*kw0E zlk52_-}9GrN`z&3{r~CQh(uK&0i8z%GDe$^YAhaoIo>tCnEh!Yz5G!^)LFZu8i6FE z{Zp!A>(8Lk{1AG9yi(Diw|cp4Q#CH($pB>Q@1l$lsSF7viRqz{!KoOl`s=Z8r3=ct z1nS=Q!Q*8?WV^T?3VNRgdY6?A(5tB0LUCp#+=`etsdE?|>06hys;sNk#bcbTN{rQe zwB3S#ipMBBl~}k(SeulZ`DBVo1UlnNa5517&=%be$0(c%x68G4SM&F$i+Y4oyqTI> zP@GVrsfGBbK%w5Bt*%p|rc<)GZBB*re2wo|WivlXt12^rCI1I>4m1>#MJy84q}$D| zjJH*V?uML|{A!*6g2-KWfW>|_8YHaNI@=Xpx+1t>kkF4Z-4!?QE>1nWn^zmp)RQKsb5_${ci%B%95Q*MY+@5L?=p*cl;85dL2 z`Y?qS0QqsaNG(VsGc*m@^EhqK(jx5g6YH=1>$n=`#yvr@FK?wrrCaQaNn-Ldt5;L#P(_DWYa~x1eL6 zJ&LM13JEwa2$@dt8}|{olz#j|gjj+KvKG(r5zpbP)DdWsB4bu%qt<0>Q|l%`*N8-t zA9wXWqO|-$T5U;M=x}?O(PeCw?V#Z2V=S$wEbT0+9McFP?^6`H7S(TJ;J9C-=Fw3r9 zj7`01zgMo$(}S1Cl3mX}5APlq)0WGN@~?X;P!=J$TE-5lu*H^?bz%Fn8>NZ6H<}QN z6F%+>0dBJ(`la#N>806e%ZU^k5tm34kdq3)@xw z6YI4XRmZ?ZoWKZ%@)8mULid7XOm;QB^p;f>%D5tWY?)kQ-Vb{t@xcX-1kUF@P&|1U zJ$iEzWuMAEkA>J;tJVoxs?5xO&8)XOLK#6}9+iKdY=s~>Aoju}SeS8$@uaZDtNdY} z&5~_lI9$~}L?i0A`yJ~TH>J2fB#V~_d(vrF4OfVCE!Xnwr_0+1Jp(pm_-oAj zm(@$p(lcz!148Q^LgPK09~REjzFUZ;g#sN^j{Jz`7Pu{OQ^q72g3I_I!RsoMchF3B z7G>PsIS>K=C8m zvS(8h5u#^Gq~ZmEq)0>t4&*E1_t-b%;JSFXL~+{TAep8D)*4(}UAzS}4);B2ym~TF zC*tpIO35euI^aH0d&+MKPRAA6b+^>lp9Px5?y{~m?mwc#qvB>+6-lXPBpJc;+M!{8 z{)!A4mL4uC7cZNDl#ucK73moY(-9UP(v2$pCxNzdR!sf$IIRj(laKzp<{xVHiSJ;% zy$>Q8%{~!RWYRYUGSiDb+wgSH7z9KZeTW5~QQdAa9}yBAAH5+4Hd(dEnM5cb zVpOEEgA9kzv55I5^9g%QCB^Jg%==fD_DNEac}H*5ai> z`H`$qn50A56%&rc3vrxYxRW$zU}}8FRtM=}NU4=JJri=ZyM}sftQQRv(X=B2g+q!& z3CDs!uEiLJYjjpq{sU+W*5l}v{ml{QfyC$R=W4cCUI+~oDJh&9!)9&6G$Q{tMFfTT)3 zFy5~|k79Nmb>8Ds{dR|naL?(Kb|1xdc{!X-0LQ>+QOAfa;O`ZO@tw}U7g%qL%fEUl zC`k0_%>T8q^4#Z_@#W6?({0Ybe&f@x*ZQCD;~vp>u+Ptw=SvF$9TUOa7~fvJFZZUK 
zzg|ywJ{O1a9`%FkTV{6EEJz+}*k$ik@SmfspKHv|j%htzYh8V;uFW%of;sJ6ggu=& zU7a{UR`Gv8u?~LY{d1&)kNDM#_4Co|Ze#kQEo+={C5bcbAnn-nnCjq|3S3CvU8$=* znTa2tnTLoZ{UJNsm!1EbhvQ$j|5!uOB#cIk5#AvnaxrI@iO`PwhA#elpZM2s2ZJ3{(imO>;&5CZ|3)X&J&Pe<}pX5{!rxI{-7^d@A?k0up(tGu6AU$eAg z6M}l|G=IoQhqFf14m z?Tv=|LC&Z1AYbTd-=N`FVIdRy-bJ^zNH83vk`sxis+QtppO9)8QZE^=WLxQ~9ID~o zU@KQSwt%RllvFB=IiL zs70aIHlPi&Qme3EEdOndp-FR`GXA$v?QTL%hGvQ8oV`-`M=@ydM|apkOy#otlvkoI zjbYS+ZOK_Yj}dbQS%qjrD2*h^WUJEO?`(uLTM+tX>51?!ZD$gpJl$Xgm(+pp7>X(m z|YG<$bE8>Hm;4DUtF$H5k4WMT_9VO=r(265{!+%;7#z3NLMo zug>lV){cWiW8V(3d9!dwb7r$pj^$=fW#G_e10bl%K{0~vz>)CKhHgpTRvx&hELNu3 z&;{{qZCi9jJ73qe2Ea3RqF{}i^Q&+{iLgdJ{`VQM=E{Aqv;2`wh62}6+-{^FT(Bfb zYxIm9<|W5vLBGg{FD3JhXIWuVV_)ODw=8yee=-ieK7;wv}9><2%-1D{tIN-ZPB z7Xd&L@xFo&WL#fBVCC3mY1!lg6!L*dDt_D+_N1;p6Zo)=o($1OH0bp?^0=2}o&yOO|BPm}gOXVz`E9f(5EmfQ#Yygj@Eb zS?-0?0G;6It!xFQA(7)ftk6e^#|3c(RzanMVKURG5QJ7sB;4 zXdMDrcsAL&CGf-(DbDbLB|Ojese&RHTt}(pp(9}bekh$3tu(}undF7^V#3Cu|I8x6 z&HA2@&XAW}nV3))Tgmq0M=KSWRTXRQi4+O{Xd4d@Iz>((nj{qiMUN&~HTVfObXM_# zS0G}C7!URZ^N?1Sjw2a?5(j4#0>lf+dUJ_15tM}rUnu3h;^Y;%V@T(aIaY&4x^65W zf@|aTCQm%Bq|ajE=)#S~a8DRx`#L33m!13sRiL?LMtmXj|5%tYH_N)91J~v`<+#4% zts=hv4mE`2_uTse*}mv|z?6|w9j9F$7p+KJ`TN-39Qfr+B!&B)4wefKnMiL$JKl6sU?cq zWe`J^8Hax?vg0zX0{c`Q2>%cSvsc@Im4vl;?!H&rq;_Ms?CS(xRAv-!$+3(xsz%89 zK(xm8Ha430uyGcURdv6S_N$;8&vqW-V z{1#^DtZPf2%h?fU{1q-!R?vAe9DU_O`OUJvFK9z^;EcM+L#@8+d{iQ$NtF=Y)b4{o zj;f_QN9~if-#rQ6yGv(OvQx0z^3oo$wyysB;zc-@jVTqRIy`FoyU1n>h{XV)yMdiI zqX88`xcZ&b0#e`}67i_W1no!ohga zN5Xy5cvaNDILiacw{KZC{8=_R-WxexS%e$si;u`Pyjc$iAGr&Iy#QG|>Efv>=;Frg z;sj>^Oi6EVTQc=fE!ik0bB zlgG6dk3h9|K-WXgj5owIOZk)0HiMzgC zky*E|zPVG;MNv7>fMr4_w&LA0FoScB0vZn1Rj+G;)d8LM@7g$G-s7Q~oQyBN{sx-! z(`e&w3CJv(n5XprlquAhE@Tk6K|ry0;JFZl+DGwKx`EXgc|RkVn>OG+I6ik*AuHCQ zXg44$L^QDMM^S!W){yL_8p9|-$%)4|ihAu%YtAv7OL>zjF_}0JW7=Gq)#$! 
z`S8WXs1)qj3KeoLAHdB&Z@!<*E&Us8C@|@c8*Xw|!_&nFD5|x5iLyZzw}0$T>AmbZ4g7m*`C?Q$;~~go}OB6x2PWO>y2%ioV==heCa&1c>;Xt?7t@o z@T5$eSkj}}D8H43WsDnRKQ+RwR!93O#2OWwB^yo{r+Lgpb@*Z?nUaG@Mm;ply@>Zy#ASgE}38d6X$9ObKV2S?A6H-=Vln*1H)v-G>d&K@+iTFn zl_Mya!6%r}%|$543l-YRo3&Z<*qa3Y^y&Wm3g>3uY3$T`J7{tb$iZve@?Cp6@Lrt} z!xx$k<^VtRi23xJ?x%fbcefPp=7H}k7o@7$*OTerx8vb1wZXNz6&FJXq0hY)e1V__ z5D`VBjI?7qocU0Yi`fCB1BD%No>d2(&-h8gyQ)zepo})a#YL_t+E^{o5SYKcY!53R zej18qw3I>#Dw4WS1hg|*kqQ!LArR-mT@bsTpE+Gzv^tRCm0^FUII0*js|>ZYfigRL zDJzF4u6!-F5~h4o@1BTS6+~`Oa@)lry~Lq*>H61sPpuUuC6pfO70hdx{-B7(1dC2A z-QNGY06HZ!d-)QnMPzm*g0V-AV1j8Z&mGi|7%vxaVvSw?1H0Io81DO;cLQkr$ZVVeJBomhjtZ7eNz*1LKAXo{evHf7rLHb!#&nLj9QeUy9lP3ab;2^jVfv9=V`vO z6^^SrYin~)3Dhf+kOU|C&PJ%{dRI7j52ENMC1{_!NwrVFTkPsnBTP-MY9$1cOh>+4 zQ@#j4bO}Mq!}ss61rF(sC%OsEw(4}-nKB-h&yXVW*h!aEIB{6fl5z4w^75lI^u<$u z%H(D!ZzWB~ZvH#-S;D_`s0>aksJmPnt_Fp50qx=YwP&oLxAOz|G(*0v8OJk;4 z?9#v}n!zhmbm4C09il(*&Sg&eNgCQQKBzi{rOIg|j>WSCQ8XBVA?$<@mg**dbvBkd zwJ4=#E}G_?>N-Mi0{4_;%6Ll_dTO>>5{%-0<0R)JTH{~h!OpcRR zegrP^=c?>L@!Ny#>WIbB9;uf{oAb{OM`KcNdjS^<4qtl#Uu#MW9~S3$?9FK}b#(_l zgBs-054dTdl&nxeU{a9;8WS&W5|ObhiIIcgcj0upA6~wi#)~0CE*P8{wl706OkE99 zbm|L>#3v-KR}(T|O?5m5>Z=ogauEQd=$W33h=$oHd${WWRCU0-*6t=Sytx=DOAY!A zV|6yd^>?BTbRzWhA`SE=^zs7`qB!?-_GXY=G>}j+|f`LOFRnesFL)_Pr6SMH6v)~H-Q?o=DaVcwshr-> zB_vfLf}(}(Ig%0vE&d`QFV86KVIRW)H_c`*3rduX=-l@dLNm6WqQWd4!C6^@pMs8l zEN`a=@584rBQ#N~nYXVsEu}5tjTQ9(D*t`cdV!>ajRiy@#R(Vo(n?^DDmq^&#kG^B zqVdO!e&@@sDSoK7v!fNCKHxm&e(&xN2P?sV*Z8<(Ec5?>{~p%Uk@@Ax@$c65?#O!Y z)%odQZEAdPb?p{VioXn>-V7+roS|76xB6LBx>^(PbLuyAYjyPqZ*H6k2;lH>L)iAe z{al|Cd)s9DX`K4L=lgtRF|+Nyb6gd14K$%$LYHfYTps2L%5Un$fXH4e&kQKM9nm=X zLUuJt^KC-90@WN;F}0C(9fut~n~m)s{Hr!=J9Rnfkq%|N7@FH7W8Vhw;`Vs0tWs3; zlMJ+Dtes=E)aHw3wQe@z8uR@twB&wC3UT7y<}^NUN75J z3(r)0sn%NmU#s?0k1BN}Kd^9vWwEHV@ zX=7JAbsc>-3pZ^YH(VTT<`Y|(niG|P&XdGqAU^q(DFSeIS(~df)+eT>uiC4A8lwIV z?BN@QoKFqR1n#6s31YU7;w!l1=E1O3+>#7EN~P^0gu(1$#t8HGm^0{GH)}RSiv94vq;4$wt=u3;Akd9Ia0i>$m>2eWGI9<2>20Ehy&8@xm{s{n|`H}U89Fp&&>zhvxh0(?Xh!E1EDXY 
zStdbiN2b{6NK;p;M(qyK&H3B8vs+~pbA;rO3uhk0C29I1G+Nx8mT%pu)9io7=cd+f zAFh^XH|3}r*uQbDOHAG+L{c+R3Ta@8O^S5(3-7xp`!d))ptZBaY!1n7lJ6a@_sdTV zsSYr%r_VTiZWsz>?Opo|e%zjK#~C7jern<@_`ZCc<7H>`ZPR_cx_z`c;7cXFu>8n^ z?Ad-F)IT~?xt~%yO()5kQQSj5z6pbW87v>>zWe-hGr;g2*D&4AUr^Co5LnP4l#wwc zugF%*;6ZeayuC!`^YU)Be^E|6fsEvSa!#CyZBZ4-<59W~L_K>3D}R3q;@BgvdT%WD z*rD_`H)!y3X6rp76?(U1S;8l zMf|Y6JhdG7_Q?2#)`CQiI`aeshKM|LQxq_b3xfejhfDuV)4)y;#{!S0s**l3a_Fn? zU`$=V$sMcm1(DEfX~Ap3LAeIGpO77&G@D`7B)mEYisVV0QS+gOp|CuHX~otzY;Gc0 zl$wq33vcM;w)kbJ=+^1@M~mvYaMY8v!p5s=rUJ`K$(l;Z&{cCksVESF8Xy-f+Zgn7ajw zD|aXuci5}<;Px&>2zenPV|cK)aLwafu|eJNqoT^Rf|+Kh7N!6!N^TXf$T&W+EIpkX zE5|3ULD2|}#5STQY;9~P%*o0`$VInA8$3jA-7`@@im_Omj6E}75Qr9+q?m>{@RxTH z9O*R(^@-Siapt}tteC6}KkOlevaB+OW88OA4$@Ss=%fF@GOagkd;{yk=>=SkSR+4{ z1Hg0TsTaR97~bmln#csoSTv}78~DYE*U+2b@2J$;rs?CX;^(Rd^tQM3W{?Ag>kIdj zlg1)<0nyBQ1bA9CxCu<)q_Iw!v{_<`iB#93kPGI^m$;RA^n7eZ^5wbLcIZ1kbbYz- zNjBm$2{_&ubh07k%|CbDF`w*}_S*lzo9)XT5ZQx8UVu+_R=!hm1qH>*cp3&8ckU*0 z-CaBE?9BlRrp2xMyGYe>;h{BN3(c7ju@?~>2%QOqUv};n4r~YHH+hY@q?1EQqY7*W z1P7sRk^Vw@9&NA{lephMs4OCba(7HJ=4qxoDYrT)gK4UPZ^NEmp{sU0KgWI;y%+w4qkk_&$+;BCW6a-=`F&95WmAO2VKIhvcX80^ zY_HqoxeMu*AMzkz4>u11Hwy*~_X6F);%K|qP$u0l8a+A;;f9YJ+&;K_cVO=Z2yM3X zQ7l;7eZ`C3RX&x5Nw9}6t4<2B7kKW4TYWVD!ql0YM>O;eZs=U+<&*+J4rg2OukMD~ zMxSD9vTBo85U<`{f-2)@tp$z!R^q*m=7Ol`aavqecW@8Z%h0$u9I^L5zQ=LgF8w87V?p$5=u z@8mnYwe~|dyDmL*iijW@e|O}G2_0vvBFVHpNB+<09#i8g-s%<}5XJ1QnaL@#MI&e` z0KTOAnon@g?;o45FLxlu!t8S!E}&`$dPZk1{CXrYBdq;0!5287 zCnc{tY)vRb*9$AM&QE1#+qAkQkEJ`nS9kZfB4TU{il+m%LHPPp?pzC!nyX(85spn} z;%RJ!e{h9D` zXLSyBL)>4M3{qoh?HC)M+?<-+e3H^)V#HFajC4<_7|!Ueq_Gag8iZ(;6cj zTlVG3*11Ap$OeD}Bgw+_B$CSGP-d7S>IPKbu&ibyo%gci*fLIhl!l?|uOIn(9e&IjkMHD5np z@VYQh2hZ*hB8}C4rNt@R^_#IQjY?B$iEm|Uly!1w`eo5n8K~>kmH1P#vrwUbGr1|w z#yddv!HPnB<4=+HXa=gsaA6GeWVSZxHa8dr_-F+A#A+N$gQDBVPJaty8M{}wy;;X(1$D}pPq-E-;$tT7v8wXdoD02ztx0gtPM_?Jsu)B zD}(thEkgMD{Td=3Y4n=l;fb7NHul@I$fW#TInr0230tAV2XRL)rjvh3QejS2VoVRa z2&rInY5sy%e3;=So#_oqgxndjS?GH5Ft12l*6Xj`&6l^nL8SH7zqw)I4+vH^+A^}L 
zZWjgvqg1!!9PUYaZVca~0``ucXQJ}zO8tHE{ykRh?lPxp#0h*H5R( zs1|q^b5(S6)1qNkRdr&lTdDpPJD-zvnac;Y?O8`QOOo=i132F_am>j23}B~`lW+!0 z$3mQ&$(s$g*kI*kI^NgamY;Gy9sg^WtKIH#BpvD=5fXCVrjs} ziVtl@%+H|9Z2rBINA;*$OAqj4Yax@y-E58$ZM%Ob5gV`rHw{?qq1_`glJ_mS5J{;H zw%62I9=HQ2m9|5nU4K!{)ozg5VDr7Q{u!_XW-@w^WZQf)2=(xWrve%wGW=@Wzg4hC z%U{&RNV^@QS~vV8v>ZS;%o_>&%#WO6gyZ#J=_9|UyyS>ld5B}{Zqy1?0I`un=w|5c zGtS1Q)~Yll1U5_Wk>lHZ#|P||K59*IzXe%3AATmG%q$eZB0Hd$?5koHp_6X~2dD`! zsTAkSl>trxrzR(*OjXYqEPYi0+1-#S33GMn^7*UrF;_97%%;szy^XslD4ppzIx+y2 z*%*Xe21}XQxIddoQVc^`SuWkhc0*x-pOt^h+j*7MTku#d2f1t;`Fh9c>~kxYbw;4< zj3KI2nU`U?r`D~e#hQVo_DJ3;Vk;O#CuLedFT#cynk$(F&UXIZHd1SutfEWpl%dkO zBA#11aF+V ztUg`h4>v!WRH=V&T}-8KW&8`SH*KnAnG$3U>Rh4Du|<;x%K-+FVn2b^*P2`ZXCV9* z_k9yAo}~FTIfiiJGAc69|Mc~-?JCdj%d`%AGg2tBobc8L|Lw+W#dFB%e2M{J& z2eIk11&6CcN|&`8F&TOI*4k-riM)ENaiUBF+bd@YmiBHaD!#qds-7Lp`So=y?4QB1 zEpYnU$D{xwND+%bdU8mZA;;5)(&idmXaKS@mN(z8HMZ*+za74_KHb~H_;qJdU+CF_4qo84KL{1c0cMM`Sodfu%Xzg$>{y`Oe0_Q&aJ;zo!Z+>zR}dYqZiBHJ=^<} z$?tv%$z+!2-{<{N7WCld(#hIw|8T|8-tYrMTbCu3v01RKt;DcJ#PtcoDHM!`{Td36s|Cap4OXi1?_qD}@= zL63N-{P!IB30MZP>j+r%--4t`{k(kk&{G&O#LR9q@3K5ZzaPc%4Ia z*#H2kCFA>#qH6$hWQ(F>W5>33Y}>Z&9ox2T?bx<$+p}Za=FhLX)#*yo)u|-!zI)EO zXc4YHwM~0zmINA!bqn~H+78+f2&gM-npi!vFkTE!7*8(e9N5+Hb~4Lr+X$hpFI4CG zewn3dqN8aow}Z#bf}lUeiCv^sXrZmDtjuh^!Pf7!=;$fu>il($UsvKX)S*W6>da_= ze}bxAda@p0-eOW=W7zHzG9{^UMqcHd`Wr}AmBlchZ7hvb%CfIYna~AIwS!WXL$uos zYZbr9c)mI$#rCPXZBJ66lYs+&0kAk(&iKpWbM>V>-=yvjLSusiwhLy!rloT zf$YU_r#_jy;(GTn6D+eX+6HvcA!l)Ybu4O8Np)e{f~j#!xyRKc{(y;~7Khu8zQnQm)Jsl$2(u1fm+zFP>r=NuWp*L5ff*!OJQ*fmPW-47;02Is@0Wf#zlpT4WrBi z!nnd3d4${Z6yHM10?sRh%5^BW6l#7s*!9XS*v_JNLsbKf=I#8|@)AZ|hM>n#7{^?d z*y>uY-rB`(XS(BE^Zh;Av1-@e`Z7$fUr$#9CSKaL1=?Z}v(B8JnPWyE55&Vp2R;T) zs|)-PYVyG5;laDohF{-J(A-F1tOKjhBx-w;V5iSOpa)A=d#2r)+$`I0M0jwU?CGoO z>?7Xkq4>F<|N5=E{*V5Zpa;3jdsy_H7&dOE3<@C)KX;x65?s3zJZ$t4uxys$f8NC$ zeWtfbwOXuJS8-NWE6kb_w}H8`QIayxkV0FZHiGy4nCmF^2X&0XgQSLj!|- z*I!!cyTAk%F1&4i>@{{|CT0RJ&;BmI_-y_AX4v5VyRm@z@M|I+<1ld>*otm~N-V@1 
zY&c|9?gmnWw#}soB^M8Bm6`%LXw5R>s`@H9-)jABT9FKkheR?U*6YwDlpc^CU&};Q6U>Ec#m*MUzhy&5+9F z&cF{%V6|r?|GT8$_Q;^sF|V?uSD0^3b9sm8=0?T2Ro&G^#l=PC$&Km{#fCZt%LRF} zN4LkIj+I{R(uHQ`JfJAEz`IS8C7C-19U$FF}vB$5+esv7E0vP5KhDX|s9Whl7oy6i^>X^mMD_ zZ0mSoP<~6_p4qN*+jEQv_Nt@&Op(6t^4;RBvEL`-N3EZ{-;3_cvu#Gl zs{YmqASU7E!rS0Kk6Y?_MU)SHffJRolXzfz8kU~NAV{v=QLD*(_z*{~$npkfH5W|b7T+gN?nI0sqpwAy zsYR2slms1wCQ)|ZGEz$;Z|m7AtOwPteYU(DQK?k87mM?3cKdWeaw4_$NC&YpyYW%`n%k4&`zrN%4uA0(>F6BF z&o!8E##2H_1& zLs^R{G11EJVPipsq=SzAsm0!I>UFKK?|QV{3qjo-g+ZY~-Jz^W1$GR*m2n+iI_+i_ zJp4ACeAYaC)|(s18ES7yi%om=pAiWR@d?tOtAV#mSX8dt2Ss7OcJHgRoLn_P<6_Hzsg! z&o{a%RZoqAkqM)2*F2kR>o#&I8f)V@Rmc8vFvW|Hrsho^a-`%ks^%@Xq(-J%4L``>B+<J(Dzijp6EGlR)t(qXRJl z26XNM-x~q2u=toStOt=xMPMph|ISNizeN&H&htd`K^~ieSuUQyD z*#xWlC%eKAwKxaERA&39NLbIN9XnQNRXK>mPnS1^VsmoGp=j_A1m4t_H~LH zn&}zxChz-6#=SVSzIeSnVO*cRMYUp@4prO}t+l5%3evAoIipbp>4ep`oJ;^F41wOZ z_X|*hDd=!dkrOJ0)iM0Nw{ghO$0yM~rnY?4YE9+-RdT#|lFEK_UTbSYVb*n^yglNI zHjuPu|ED+E$9G21tMa5{*-6=jucxyj(4r_e@+ z&^ob-WvUDEI#uUbP4daYIzeNH9uH%dTD8z54v+tU^3ljIzS!1y&@~SIV^O8@5H3|{C?T8zKwG(wwi8=Gb4Yh$cw4)>H zm2D)$7luK{_~z!ULVaQ;YMsO}HHUH_W0VLr;*i-x>qrl7H$yCUDmEI2)S#g);e~6W zv-6hS3p>)>-b&9GJ$EN@{@#hsHoCTU>W+@$9DT)>7v*VgNiXrD5B1@j-ovi9Ko>84 zZ%4sR{)m745!~$(oZV7<9I{*h&!ShNziFbgbv)?gspxF4uecxSk0(UIW`bkTq-&7HoDcvj1@>U086+bf#e-_BYeS#v;<;s6on1)P}5Ji%|MO#l674p&gToXvQF@A_EN`YYZ`%TG zT>>_X=Wr-;jfVMP>d96Jwm5^ixQx3zjZt9x7_*#9#GZx8K+y%dHhojd1>h`HnERpa z@a8^tU7)JE-GSIXYnVl(910&o@cw@yTvQo*@e<+z)~ciy&y@W*9`IZ-@Ynb55OvZ4 zZ>TDm{*q6Mrr?pl4~qUiOjLcNs0|^cSo%Ay!g5k1T^Y%u1aT|YxBBR3lhMW5*{ARGO7iS!W=)nhTIb16;(JD0woDI472{~F} zoz~l(sc{wMd2pROgH$KNM0?gLFBQ06pF^Kx#@yM)p{o>6AsNIoi8e(flQnGCZJzLO zrt&aKEi;E1GYfpIKGeAYi&W)VrV20;{asg3uWN1{=3T-nZ3Nq*o7@Ki+`OXH125w+ zPT4}D1jK5z^|sf1xM;S_-(&N3t9mdA`F4l3QzxY80ufoUIVn0LYJH10%zC{* zzJr}o+raskY`EkEIGRkUN}9u!7LQ2FXC#o(%o>KDE1QK+6dRxH1wpj=CAj%Xgr()+ zTpQFyLl?5B$e#m5Cvz zhc&sm{ic-^Mw6kW5Me@3JIu9TRnQ5JwwVq{zP!@J4Au`aUEEJYnxB$kRQ=*GWur$^LPh?C31k*|z^zA1YR! 
z%GC{=8@;E$&+Lp z(2C8G6pcH@e#QNR>%7v9ukRT6Fs`_I14c3^RUaEC^WgLRm|L(-6`Exe+ z#qZ1ErW2rW`#_2r@c-C9UzG-_zeJUe4!W%-)!vla(RIcm;vhbs({Mj|rFIN{l_U+O zUPwQ!L%+IGQ~ymto4J--eky*Ca{J2VNA&$%`?>Bue-=F|2`O1d@d_gmNEeH;Yb9^6 z;xI*t?%-0>f9DNbW7ms`_~-lSycxPxb+1Opm(&ay5tmOU4}A9k(>vf+!o&YCK9=#!4OPx z!F(LWQ&D}tpC1PviLwne=SsQeiU^EB?^#IPdXR2VK4@XRi%p+4Y%Py-f8i@Mh1Hpd z|Kss|Kca;Mgf-H~JOWJXNZv_{6QPN9rot++)WKRj?04Wg$AlBlaHI~9eU1{0dsWeh zYK1d~I`u(_*YIH5;9vj{{&s7I%#+Ba)`@AZ`w-&zs8=B0sCohwi^s0@a&s zcHM1>Kh7qK#gHlkSRkd?DR!c?EkdEG?CuZLyv(rGjWAakXnHo;ii=J|Ptyeel6pDj z@?Guww&Hs)|7f}L_#*540Of3k&djIK+Loxan<94HtvEUGHlu%jD9CtwL#)1ozRc{m zy)Wf_SGKRc%+~rWqn+7l%xjNg9*}g3YYg7ZV#i!{_hIAYKu{70COxy=V}@?uQ;g>v zIGYGep9aTU!GlgHg;I;FG&^nb*MUrU@NK3YCm}IWQXFFJEHeO*Aj&J#hEaBkzkVep zcOL|Q8PB;p+GiSZN^I~(?`%W`n4J(l?}GW+X9nk4z3-hJqzN&VHNM0yxIHr?%E2HA zmU*o|>y4j#)*Eu#dD1l&BI*;tO3XIKk@LgK`=F;j!wt|NkAOFUr0W8CSgYhvXs}W_ z%TP5<8oA$_e%t6q_z%Sv$H^s-oBek3oOp*gFwl2&tZL&>-qLB^+G^=-yXxq)>E*Br za&I#OHXE#Bshxy}sP`4*;T^QTA%cfHqN4{S{*a|+zeg!gf3$Lu4%+{m+#vdWoqhIx zw|&*})A_#b?LJ9g0l1bs!`XF*Fw;`kL7ndAeE0WEoa6KCs|^4q8uTF(N}NP|uDrI@ z?)hvusjTJdeb@T>7GC@c@?KP8DI{rnIXHyR*|(%9h6%qNvz0dQ-e|3X-&)%Hw}CRZ z0xGsZgCxPL)Eq{K34?$blFB>tN!;wkP+v&j(oPah?J!LX>fr!ajEbXP};pV~K4(!eMZ%N`IDLP9&Hl8X>< zlrr%R>lmSPJHgU2>t{d{a0D-iM#%6`n}9Wkd#Vwh;nz>Y`)C34JZ6AYBW(a&7S;e- z50^bM^oPku`Qg)Ov0K=v&QMVUlkmrn;8mINwQmzy$hh#;A{$-B7e}0Eq{X>}DyzT} z8`K@yU=_?lg{i->1HY zt?v3R8&ih8qh5!f2u^N{<>jsQ`2b+;WpQ7t6Ik{98-p39 zm@$ru>EKnFLmf{17qc4ya%_x}8^huscmWr{44i+@gm+$B9jo*Te|<2gDyvsAf2DWS1!l8*PGij}KbTL0iRIZ(l&q5? z6I6pQA!Dd|4UMqb{FL3E(!9L>1*3mW4I&*=Ns~zcpzq`CGJq2(bQ-ue{CxHum=V0( z(;OHVowyUNIAWOSLTPS6tM5Pp-qP3ha;U^%JifvN%jjR{CAGOiZh2YwmGigV4XnaD z-e@O~#Dpc3G*nd1JgEsFamXf0Af;>Xql$auOM0Vcyz|gZ+TZQ$uhLP?Y9OKjJi6Rc2@IAsjeM9dC$Ai z_r*&T0Y8n;yXD)%s@2WTjIRDOJ|4@Z^(yhNX*O(5oIixDH(Hi5B~n+o%G{$Y8C0`! 
zIJ0sWafN5oUM?hDy@$FwPVzIFB{5TbyazbWY}tDeO0y- z+M3iFn$!%LjxJG$Sa^cWscV%HX9N*h+GBI{sIxeZX2*+=Vu?#z2^B4KBP$Z3!i4(A zBMAl?j@B8t;%J49uLj!aLRK(~A$^X4Kp)A+@cFCk%|N;u=y7!|S8DDLjS-?BhY!h$c|u-qL$zqi z+8@?6X7Is;h_2&j)Rr;!&bbJ!{lx#n!@^B0#6%zGHNvx20v ziPEIKAOCW3vsU&3az-uN2ie5bcw$nI2S6q)Yg%KV7aa@Oh>|g#1>0Um zscY=Gw2B?|#Xt;5=FPrLp@RM)6r7Wh0v=O4p}lbR11Lb;jxV+9UM}##zg62D19;ol zu;uGx$KBDBfLjui8mge$+0MXS6&n24(no=M3lDMo){VwzQ5LIjJ@P8Z$ws-@9yCox z8V_}>94TMF)K>CO(i-|D%yR%gYZ6tz01*L_Q2t4;9}Qr)Xz~LySZUBV*BH-PAT!5+ z5~m=$>8*PE{*a6Jx}EGhncYIHJufw*ETE!MKGmReHId%3%=%2I?g;!6?mue&d;$z` z%Bo0lsdII3Qhz#YeE+!d@_jG=0X+2n^!%LYL-@YdzZ{4^h0){C*A~N$TxR}Z2uD@> zH%*&}D4*^XW}1Z_>5*l=Ew{BnmeHzQ>XJ|)hg#+Ck3+n`ReHDndlAB7F|oNj?ZZr_ zV73d1xpH*5vKwZy`T$yOj!Zg#ojNOs%?)O@R!WY}@g)`|r>)F`0wzM<8*CQf9tIax zB-B=01o#maI3V=VJ35dow<8*GOD^${*l1smp32ai9-sBFg?u9FU~FFEo$V82;Rc|z zJA)xvzJd5)^X)k@Rd|^H+S-P1FSkqLluRxGUn!dQbP|y3Fwz7J1Vz! zB(LgeFE}=Ac+tH>S%3BU>`@rE8$u z#&yHRsYHO+M1WTY;1%3jPyvv`iacR{Y-J{ej*dx94QQ$+L!{&F9>9zS5Ka88xqPj= z9=2H>^r6tP;p0T9wNXk_TXaQ)m7keZn=Yb*m)qVYtaP)z7^Ka(2gK2k;+u`I{a3JO zQE95|Zu{MX4FJx$qTSpnsH|8Uo72dEhw$~1wWqjUaM!lJ9tNp81dOE* z)`USOST^k~Pz0FY_SYf@Va7SMfnm+1ab9cR1+=lD5!_+DK#OTW+#OjSOTZN59E?Ev zh3uhc(%ZI4&g_mMTf`nhIu%aR#6X`JVRZ-``b-z$)ORcAYVHHmm;|s0$rve#dN z34}i!9Rgi8PW%i!A(QaJ#1~M>`ftT&Eo9Mkbo1a zEFYCC!K-ehYF#4U%=CJTzzSZ8Jo~^!T!k*xKVJl=Feic$WAyZ_u!M%5`0l;GelzTD zwbM$>2iu(8-K+!yXrW?n|3UXi7oCxZ(<-y^bvX+a)C5cMG4Q)BO3szJYm9`}0*r+X>)Utue81RwgF)BwT;Vq~A9`7Rao_#eH1JfP~=l0opJ;QHJ2Dk`m^ zP7`7-R4Jwqv0;$&jw+?fYzc~p2L>i%&&#*xC4nRrN%1br0RGZdTvRV-FLPAC)$f$ z4lgf$Lnn4eFMh{CWB^w`*IW(t;2LegywR0p0)P|i@@zN3IICaIV$n0ExonhrxnVVCpEGc%pYeqo;Xo(e*3LV`P8mti5s7*i~$)k0IYizl`!3D-z(&A%8 zdE(vpKz&5lu;@spqzO~~$obz-Rk=^*-(}VDBM<7e zPp(=%o;}*?M2hA;6s>q;KTp9$I&wDGJZjXId#T7zYKWmY_~b& zbAe{(IP%a~TF2rmhR$RUsw^D94kxQwG2!+BU!IFwLmBcU5*KhnlG$LTfJ3mYbpW;W z?$BPxUGcW43?lL}Iomo_f=S9>TUkoi<8bdY9=MR>zlqR&@VU046X0w>mRNo0ip^fa z(h4^Q4YmOtf9TjUqK?xV%%08;n8yc`?t+XZbwQD&&}vHhYa2!?y$HqZezVx6*lM3< 
zahn2cWVygYcxKA`ear-AlicbCw!-q>L^|Le`U=t=#$IpFsmk9`+W=C49wk2Y5Y7sD zxx(KY+xNxShgaYXHUDg zE`TtL$}x6Pmko*p*iHc?!v#C*j_>NjWNpyTr_bw&eyHt@MXSNJrNgPa&FSF9!2GrRC;m9|q z0!Np4w6<X80UjNQ_KL%mGImY7o62b#|e&X~IZ9 z2QmW}666UGPZC?B8PKzCQ2_jbNSfVH8(Ojb+@nNdxw;N0KC9k}{xYun+i5AMu@%0) zE&Y!hM540=t-AvWK>!mPUQbVcjxIE27DCosxG5@u`a)zXR}F-n&TiJBqdqFZG* zibsnDDk?^m8u6AslBgR%xrRSQV7)|{wSl3my&f7ugQjDM`pAx_bw3oGmv;z!A(}HPrjY5y+L}?_1 zMT#hfSE;bS>Smm*96rHVNJAt{I=}8gCsp2B&iNaex(XxsHeev~BO$b7FQY9VUxh!M z#(9XUjnV*|Hbc{tpW1ea&ix028;Bb{@R^frrGP5AgC6R#YB_&+`Fi>aIBA39K&!(L zt(KYeIVO0C#RH@95_wF`@byIS)r{!X%y3+7uY49zlt&R?t>jd_5LLwt)7MIfp@C8c zWDM+S1`3UlYbN_!WWwg*N&-AqKE}YlZR8h7^u7ch_lFUCO9uV*1)s0V&u8QD>W-M9 z7GN4;ZijrXSM$T06dFSP2ynTk-B?E(vi{BleeEH={z|v!va>JIR!bdy4e4rYiUg?p z(^XVOl2eXh$i_SYK!uI6!plP|t$;2*A9{=hf+^LFUc%|$zT0;!Fl03}+l9ISEpf9(nW-m1KOsI0UnJplEM6bJUc7dS5UnGZbcU2mubZez08Ju=onR@sUG zd=L^(G-Fk$C58`$i{7H=4QEj9HdH3o=!uTF+15xC;g#!*zwQ>))@IBmH}NGw61!`7 zXiBiE)5ePJ$OgHj9T+bqPO3>=u@&&9`#6}^c0@|n1MqI8?(S%tnBMc)J8Ys{5tw>yugE3w>O=eP4*P0-^FS!=%7bp%uK_llN&k8 z4B)Ksu;sn)8jb5blRFNlR;)tmg)r zPBr?K-Orbx5nlgG<<1l#qIl+)u3-la6z{H=8I(6uNKXdHk8_WHC=d{9x~0R9BPu{} zNILW8|NRd@rp8Z$L`E6>fj~@CMG`pV+vv+N0lRh4=!u^I$(-3Io8{3Bz zWP`m;4HF(R%yo`Mi~u)a%RWm>4-pMpYd^ST`<+f zxI~<2t2;aZi4ug`st_!t|IM^!H&jl9fsLRmFZf_8tze_mxa3#@uz5F>n;Vy!yySWR zgge*|1#X4hmIBno%sW6*)%CpO@MKuW3xU?QpLu{{ai5b{;o9aOxM;D~m(YRW;C@AK zY4!Mm{@U~lAZ=4=j+tnfTELz0x9D1zGDX1qek-&+$BFgXVq;=6Bc5X@x4w*ZhMoQM zI!tldFJhBZ2$36Wf*Ifkr6itL1BRY0Ow27c(K?#%AE8!1o8or@2S>L_UB_~KwGGhRx}r3b8}*Zu|$W_*7!e{0jQ7Vm~>g`g?F_A z2YCPdnO{sSApygSfKRj`y16g)Iv~~8wC$yI!rHl8%no6rk=XjOpth3a5^8>B^mvc$ z+mDGT9nuulFX5B*F$QO8GU8+o^EUzOF_})lEIfT7WcHNg&m&`$*uVz)oJHJBL)?r2 zUGY(#L4W$~3~toSfm$I#+mlt|o#i7h71 zYnhDRCZd80J}otzStQZ$3L|b$CUgwW_-r|=;qz;wo@AAwXh}j3u@NuQVsLhLb!*$l ztt}cIeK~^bms~u=t~RXRc3e(gTW$T?OaQXKmanadN?jcuyD$PvQJjiYNcdgUPGaK@ z_Zzf2yPnF$#L26wgS(}Y-^JuFQzdQos`}K~kZ_^sfiYaX5Kk^Ut9Z^Fd5l~bd@Lkf zEaW3X7>l!eP+lDQO+doXQS-pR&W*`Rsx2Ww(PI8)I16s0HAU%w=0viAvW+B5RYp!0 
zv)>h?#cM+-Rf!TUk|uR3AaH9sh1tpp0mm|g znG~>icRvrraGt3hAanz3p>o)qJ&>~t0reiY)G>@m2q0r5Dp=58yNLN} zKzw{NrCqz@yL-P22lW%&%OL3v{i0UAcskomlH^|%^k?rh3|%~1&l zk$~eoMta_iq$(7{P*e<|HY&CkuYeD?G%?-2fFz`ji($-dEjU_BsAO##jeaVr*w&Lc zHwjrJO8_h5W5b$M-* z)ENOxSz5;^sT$-UH)QCa2r01lvBVZOmw-u4#lKQFjv|1hZpF~lvYtinWUAKYuO!;S z3UbW$>Rfr+bTV%t06bH&Zcm2k(Cik1TjcEq9}1`l=y_YZJBTyp|KYj6uq*8Sa@v2{ zG4uO485)h_MUN^{!lEfhIK7Oz!Rm8+7JsqaZ+d?WA+uEW;)pROsd7$QHEc^U=z#PZ zF?2*Qa&S&v{zt97b@7jJdW$frt{ir8DdY^(-e)4t=E5Z)0sPh(q3tE^{W&Jo*&N-(QoOv_-8-&EB&>3bcMfHG8^MkCZN=~uNq)(OFlw4 z9Qp9L*$6pvVpFsuG<2ouY*5sHx*TXdO?hXU5==G-X=;n*ri)itLKQVnZ=JLHRJ?LE ztZK(jatvodm2e9d7$pJ&jCFFvg%X5?0%&{ILdJxS9_nc{3B_^6PC`%Sy(3TCD!2w- zNhi&#mgvV=j6ouZnOT`x_@>!uW=Kf4X(?&jx^?S2ncuIUHomq70A)0LzCX)vbAUYe z;j`U$&*fK>gbLo`0;c%kyFGh{se6B>4*slb>ayqxD>)1E2rFB8^9!k9pnu%ye4Mh{ zEKpMy_pC`qhJq|?lDX*+4BWl(wd^Lc(?}zLc_1SKD`2N%m~h!kCDoZx2=U?bN6Y)i z&*s>Z;Bx3WdDcTE>c{C^#!9r& z?scNj0t}vjF!gy2S_~nGyN|~~k(^^Fs}RPMLQPbGqRH+Ert2xKXDKGpPrXdrcCl*Y zx0-*&cGWv2;Ks}`0XY3)Ui3SX?xSeE3zTc)hJ3cCj;CyLWJ62l**MRuM>4nmkAd5xaeg4S3|!l zFE1j)6c0ojA2p88XbE)XaaT8J{Jnz%87CJyw(MJXCMMAs-L3UiWVU|M_4R|ioT&P$ z<8Rb1Z~qJ982am!H1)^_Sggcf#5&saS(zl|oyOoy^r=wJo4>WawL2Unh>(6eX3vkx zn1jBL&V3mRoM!3XYB7P+R=pC9$k{Vzr$78%@w$jf2u)erP9W^)V=L_AiU9k=UY^ks z4WnYys4g7lCfunFurzXe0~&7wmX79C{rTV&c6SoMIGbVX_~Q6XQ@qq?8YZmYt)vz3 z(z~>3VD?2l4cL#n8UuR~yMt-f{GW3_gNQ-=Uqx@%o!j!c9eUlioAq^&;!2e`f#MX* z&(Gi(X#lvrP>3?W-tO%d$ITF(reYjWS&dI!W^m#11Y$ES*)ZVUCuEI%kB?>aMyg((JF5@Z&q}gubTuAbbFcjTF%4cYm zupypNunJ-1VY2!@zVgI^LaxD ze8GgAHv85-h8L)=*0#v{DjbhC{hnTJrVOqot`J|r`j(Ztt_6m|lqMI`6}IBKwtVMM z#6}+f8F({jBPx@65R2dX*jhw=zq5!tA;Y_a&VmLr294j|!BahSed|_Tz1Qk{)Y#c1 z3Gnh~&nDE=I22N{(`7+T3`j+KMK+s~^|t+k4+jq-=gUn@oq&{glU2d2<4LLN6avML ziMbhQWwUi8MJG=;9Fa0ttaX1e3BSj;|r}3ejf$Rt5S}Zce86%&T6n? 
zT;bLA^$a$DzLz{d*PuJE=%@&efpcByejTYEJtMfe^9Yj)I}O*k*lceFA!Xj3&RYQ0 zNtj3bS;a*)EZ4VBwj1}^m_V==zKDYfuncKj74RoN0fYqEEXtsR{Frkf$!ZMoOS0sT z3N8m(T}+Q4W=Ht2y|p^XQG^AFmZcb42O5n0N%!Vzo8G6OIy-EmNrE4|m zM*#M=vt72=PxEC@`so9)8X)1iA+9}yT5}wO{`-2+1Q7E(-ir@$evjX;2e-Aoa=t%b zW9+%_`1siDtr-AtG4pLMTA0?=4-Zn+uv~2R+oV6e$COf!72|(VhSeh#R#(cs4~VA5 z#qv@_wGy{Nl@M2K8>k30y|l>MN5T<`#^0;0)2b4o(l%$Q6_efH}MbJcWp&m^4uA!c;z6OJCPMByD@+b9d_sm?CT( zz(Zh%Q6hAh2hH!%0M>z~MxAt=k7JvTVS?@V0vE@(hjXGV%{YGURfdQ|L}|y6a<&l+ zE=ts2fkfzK)^vjd0?QbLI)un3;y*?cYe>YL0VFc{$wL&#pF$+{K*ohz_ItL1iodaw zmj03KtY7nQ5@e+(vNxkqpEszUgScRH=d;$f1c))X0v%|lV?>;!XP&~PTo7a( z?_%ulpdAszZ=VnzVW0%TM>t4-ZH|H03{>O+QZJ266%ao;c#sd`>#F4IN-W`8|Hl1jnV)_pB5p|Fvp6ZrOy5RfZxH7|MR_PL|- z&yJ3Hf-{9Yf8cexuSXdmb|-jhm8AC9^OE~L7f{q+dVEXS*^?Nl_h>F02}|9IF&8+{ zkgcTgEJv#=Vdd(e5($Jr3rlC4+~<7d4?^G45ONp5;0rjQ>g-0(apM#x>z!d(lUC&q zp+ocK!Tt4^)9#6Xh#T537zIE#ME2ld@k^lUq$FdgYmrsygcjefMUTpqa@f^z1y8_F z7UnEaM@0%$i5837d57zUHa8Ei31C{>GMr#D001mvL0034xKg~?8QjJ?ZEK7A`_AS? zo?rI!c{W1dFZ=U^zts23<-RnRyytt(>!rYVUL)^1Hh8j&m6k5Zn6U(Y;t2la6M3o& z&>8%Rky9)83tW)a_*1n= zHzVBHqhuyKY0FbTKjf`Akt$cM?GSxU@I{OvfQ|S|B+`k=T^OfcKA1;}lLhh6QQgL* z?ogf7)h{@II9Kd|lMAn37J z8_<5e#sa(9CuEGoOux#Tf04WND0cwuUS>vK5=hoqL`@#MlOZYQkJrqP(;-F38SF>+ z;iug5Fm4fX{$u5XMa#0n$TC9&k$bp0xZeb*`E!P|ydYIygVjb#f0M@XgPomu`e|vf z(U6bvF=OGwZ-+ySK?dvFe$T$5{&em6PwP4IE6 zdphd)Ip~?K8x!%cnc61QHD$INOf6tBjg~@x4c^J>CdlNA+m`dPM4ByFGn>Z}=9SMS z=$z*0MM{<$Av4bbgi+vnIX)s=00VR*K%WaFzhfV_tC6;*vZ@0iS0!OUNkYkU6U~GO z^AE3uOo$D3TI-i?KHkMFV=9Ko28AY2rtX3Crjkb4XapwWR2>uew>(1@8HhbhGYDox zJnfKRI@tqIBRIMam@yHmupw{<(l@fj4{nm+0xS`R(mI7II$v(TIG=8~&ys-$$F20f zS5o^oX^Wpxjy%gcGaG1<%+sjy7Yb#MqDgI^h+D3Nx|^T2yq}qRH17!F(e+jEeM=Y* z`qc@(tEP)6zk`cw%V<>*65Dmd%MD0sqTF`kgQ9ug}zDR1< zeW?h#G*|LLEqM-e<&V>#nm{WgznwgIndN@~!|_4-By86dbiO zKm&sHx63ck?f_Xc0Rv)Wu zb$hCRlGQuU8{)K!krUd$p(*$AS>Jg>+}vV9TTd8`?EsdG)2w1rpC^&P>PorDMqF!O zuXY+3_kw1sDP8!`WQb-Wyl*6=z712a_Uai9MDFrYnn~ZDVNBnMZ^wWevcvoM?goiB zUv4V_y_rr>MnJkCK@lW8?fssuz3%P(UY&!uwmx)Ir!$gC4(lMn+w1U@&!o;5O0~n2 
z!JUI3bXQUnL>G9cWI4v9xke;8#wI((6CCgv&IttDaQySo6sstbeS8)wKHDYs|FHEQ zUTvm#_V`(lgnIA2cL_;Is30VegoFwzAdmo2gs7tT-mAgg#&v9u$5S#blgUhGCdq8F z*<`bmY%-f{&F{bYo#%b9#@~Fu=R8N}MFxz)T;2D6?&sbh!O#NOtUgT9`pA4E8Ys6~ zl-5Qz*(oX`Nv6h%WJIZ)Dphl3N)`lIz-Dz6je}$JiR^x!(Ls@E(K0PsWdu#WIt$L~ zqPZH`v5dNVpmt<32r+i&uzPtu-c_(9I+TTyd2y|Ab+ch-KX`P~`rtv&ox9x^*VE@0 ziKCO~&70|W-|he9FBiZ1y8PmuG}4shdiJvU?t`Y;RqVG#s!%`x z3l2SEWLb>k?gA^i09t!_@B^<%Z=QQaK|8q6*#(4%*PvPvZy}Y1v=#^jIiWWvg4hmW z7IIpDkJyY5n>GmfdMW@}MO2i4%?IKmP~SYq*sv`&u5MCB^UObLO`$20gO8kYbu z7W!uOb@Jx8xxeHsO*9n8wOxJeL?)Fro|7$bTav_v5MZA0K%_;2PlaH1Rus0T$*mb&t`AV(-T@X8lY)V(QY}ebJWGpq zkP@BvR31bkfK-G$F55#a&&qG#54`_j*N=bF|C65%e){8~k3KHE_d)*hVf^&M3o66j z&B3AA*FPWs`sd@HepWa>caF_*rk8oMYdjcY@PGVn4q*$UfXc$&!1;9$SUo;R7@ftB z&jVhxeN2aQ?d@ac<{{(wLU`?#^zn21(-)3AmzJ{|+O0$J*c?c=?jIu#P0?~gc(}J) z7^TcEDJK`ixj{yG&Tx1V>>pLSn$d7C!|DN#`9aC@hHGlx+}?xF6bPVkdx!*0R=Gh6 z64%Li{4D;eO2li&Y-{hjyP56|jXcN8ZVwkOY8fA-br zFTb69{4`k@62;n?{^->=A9j~_1N#@FS*}*m5<^dm3Z;0dbKy!d~3`0}owO#4f zp=9$=uz4t0-{&rEf%0*LGhg+j#5*yWUQAaJymm}3l8~+kP`10e!&=$l&aE>?X7H8z ziOEGEG9$7x5KPL8#MNEa&M~K=(=KgFOOu?g0a_?YZfGIv?Kp)EtFsZUZgNA27|RHn zVmzZAF9E+YnA*(rJ(U7^Ja3L`JgMgcoTsJWaJgiIS+%A|+a)z5-i&1JfT7rE|K0k%sZ< zmhtKE_;fheUzg}mX7akBaqIYueSF5gw&lBao;o{^!*g*x@$zNvC!df0<~Pf~{C4u4 z_tKA_`H>SmKCdl|f~1!avVI;)Uj6dI&2(=B>xq(FL87r9qq7l}7P7VraONM!J1!6ve|sxixI9hCxe%G(3_}3#-+Sdl zfZJh!rgmQ4@ju#@uj&w={-#TQebeo}t;uybM6wy|Tq_bBw<2c4gM zTKMUggJ1lt^vS1%(+k(s5_f8aJ-f!6SY(gP(%@7G-W2zafZ@*YG=6dsb^(B2z0wp8 zp2wfiT=on!6PGCzy#KIBVS#MBD5 z#tu&F_Kv*m1zJN07!n#Iz^vQSMk$VKrsj>kB|*FklN%t9OoOT1*sP#1N@>dzU=qT; z2a%uuV(O27-1+mLkN^GO&;R}3&%giv44&WqZuQ4M>A!iWd1Ft%b)didp!vnCwy(Y! 
ze*c5Q=AJi@z=KjG}E(lmK#OR#&aJvgh~JkXEKF!Ckr#3CID9{}GFsB%CO zTR~SCbJsao1Jf8%JvGbQ8qh5KcJl56|N4$$c!n}CL7HCSZtUrIj&zetyvZd-d66-` zK$%(N9h@2VPwZ0*nzk-B#8|-LU8qQgGBJm=zuf{3u7hj5wLKb)qa2?nm8S5i9t>jO z#KLx)gEQ6X4c+Mt{m!vsVUs_(#6Vu!s8FQ=cKR_~d67E3LWM28bL-5N9q#-FYkq?@ zyLLr=q3$s@2L^c)i)`fYqAqTV7dH8`>zwITetC)4J;;e_i*vfW88NbS>S{qre!%LJ{ zxq_kAv-Bo`u2w8Jh{XmDUq|MuiCi_2uf}uLHDIQuK=D-I2;^v<3MEpa`3j6!i2^G)kL0(A~I3sb##q`qjWH&7NW$A6Pc?;=4!dU%9Ef*h7_%% zGKh7Rn1VMwvB)+S1$KMASxRe30)v6CSJ;r}RbUp%tq^7_t>SQ(;fmb8@yRRuxmP^4 zj}M)+kgP&bh0lP^vd*jZG5C0;{UXo9KWgaa!CBkY#4Rsr>Xv)ca=C*-6Iauv7=eMJ zakEU#JVOvH0GdPW+6b>U40>#BL6OeKmf9FR4T&qI1IvLJ$C8kFau(EM2Ztl&*~sXW zJ(cIAGK_pTaKnOpo-M=nfenfV;65}>NG3@wZDb_J57@NADROT~I$YNDmBejb1entT zEJkHsvCJD76Z8%9`-+T05!k?rWnO7gnD3*+JFgN8-9g|6X^fyjwnQ(QZJK3$>K>vs`lBQ&Oc;#8m$wkBEquAqT(Z|oC z_aBCKj*K&_g5haaX^K2LOD``7Cl=X-Q7p_M>>I<5%@HfVn4_~-06Q>&g~K3daDpDE zr7bGF1Km4gT-=dgKDIo6$MfogmRBEu%I{|{n_qnpxP0tb+7iII{KO)A>ri+5e)G%s zQrB*Hrk7=qyGD|$-It$pjxVO?Paj1j$YP-?TVs(t%q z&o|#J{NuM9zxsCX7vF4t`T5!>AJ4yfXXyMQvACu$jtL5*fI{yaS{^(OfA~@F&b~L= z&UEFq1jP>So=;M`;kKvZr6#lCJJ5Pj8s-T)MB_w3nB7 zr731%6aZTPI2DK(bK>42zom`x1I9W~ku+^$PI7PtF;;tY!QD9E%x|z3HtBQgoS9W- zX_{P`CeN;M56-kV?pp3#)?Ge!+_|(JU+C9%rE}}T#ZB?tx^R4+H9SKfpJ&ai@|Lza zbL-5B1@Kx~UZhSf153mFI&WVJs&XVE3YkZzJHqV}35kPNSX^Pu7%5Uvp zxx!eh2dlB-wKg)m->>&G+B$i|W#jy+qkq`Z5MYUvXtoR^(i4>qhCaa3hgjMmP2EgT zH{sNc7`dCMt|x2js7fnYrYFfYWTl3#F>*{+vEIs;m>A$*QcYwkP;3>Rt0uBl7^bS4 zCr9z*D83p4E~IF1t-=bFc)kK7QKRKXoZ3p%*a#X6QER8_9W=X#(Gn9UJJnqUP4AF$ zcG-S#6n^Wi&UfGK{^*mQSMRmIcqjJqz3@k$Cg1?^@^Ssvp<;GLQWypjK}(XD%5%~^ z++YIu2fPt%pp6>H@B(R3V}x(^FqKv^Uq=>c5upHiMF1}c+8VC5My#t58L^^ToUj%v zFk*Q6YLO1bH&jWjRjxQGTvW7_BmiT3Se2Z^zu)TW`av{+0DEP->S6hW>=8gk;tJ<` z6#jOlB`Dw5CkOF9OfCr9I&F&%5}uVb*(~kkf-%<6n3V_L}kl~ zOevl%p>m~kh_R*N#Oi#0a?Y2^N!r`!eI+5tGn^11SSKlsi+cunv36!_nv!g1#^Tgq zg5U|(G{w-_UcuCYb$-P%I;C#!B|+|5ngV;xeIv}SK4Pi|*y%e9#C#tGpv@>Z-$zO0 z0LHq5m`W&C_Yl7{DJ)I!0Ez0ybrf)^Zfd5R)IXwp@UZjOzuNryH_K<|v5DE*_FjP} 
z0Cr=25nL)yE6-|Hx2!uy&YdIY=DulmMK-kvGTa76Xs}=rv=t+}_8tA;7I%J9qyF4=AxNAAOW}RCx_Kzsrx`;g` z=Hfc=xFTCWH88R}k%P59l>RX)D8!sol;(wfU2#LtlZp9-PK7u5Xk#G0$Gw zVr=X)*Z0Al!u$qjd7HJm%ilRt9A9Y9Z`0kHd-+4dq-iP6Le-wK7)P3!y zaraocu`gZQldkT_K*iXRa_307v@M!jXO7Jg;E%6Z#`O$ii<6|uCBW4u7HGvXnClHs zljqh&b8C|EIezajEjPmK8Ruq(*wGv{*v@iCNk%_W>!B*_c$I@-a8c}yv`~hf8<3P| z^^!miJ~h|Y&nXjK=EWKz7oxq*MOtIa+Nr~g2a{K z*>W@o05V{?9CfuogBNP?dK<$Zl_tBi`2l_RplN=^eS8{y_^|7vk4Ap_#rTVNQg<$W z_aFM+deH>7>Yg=RK6c%{Uw?MPvba&dc4j>oU(o~qa?d|6IdquupzP}*w z_UPT{(gSB!V_xjd3xfrDuvg*FD(b^hnUzElRMR9Vk(r`& zGR@6=YeZm)@GTKOjI%IDx#lR#(#ori@aiI>+7Qp==PByxVk?=aA#)WJrAgwCT1QKn zo%P|hwa!ARu75;3wEzsy{lkK`E?hWO6VKGdQiNa<-5ka>gm8g44(zB#(V+yXv!6RS zsu&uV<_9?4{jA}#cyOGX@29uru+c2IX@w)9bQVOc7X}#vBdnf&YI`>&l0kbyz^)H% z>Kz4sdoMMXt%+wrzE%KXxeYCtrWmd`rhWRf=kw1NZr#ofjOsnj_{yGUofqwG!G;p# zM3x?HCn5_DxZEIVbdEc_BAH$m4o}nIxTkLnWN{Ef&W?0d7UL?b7 z2;rT6tfL8Ck2v2V2$c1dh#R}6CvPS0Uv}KS+y3Nf_w%p$XpZv32LjxOt$sxLy0?x%bKQz_XYBw_XG{_Vm-s;6*Lf1L`j$8FV;} z^G88)PKC$TI#CTFTvtD1c$x<1`_R}hI!hQ|pv-OXH}=Ix=h|zxGS8f-#`ZRW5AV_*U|GDCq@3Zxz#H!{mC z%?tad1OwyjY(FQSV+7jiz7*3PrPjBSEPk@Kfui=3Hmfa;Ii}|A+j}h!9~9nyfAI2A>*;m(@wxrRoyG@Gd{1Bap1lmd zd@uaudF$zgYivfF$R$Pk}o3IiK24JkM$`Hi5ySRaV&;X90-&K48%p$0S z#5w?j1&dvvoGV4QcQC!({EF@hi6tbu2!8T{MR7|>3Qw>ohOJ?h4qSLDxn7mWH)3bF z6#@22TzxGLU$NQT4;;8Mack)2ft`iCB+#R1%*c%);ICk+&=fw3si;vp=(+|rRAPY` z+sa2&Bep)oFh=OLF&;eDDBIG?H-~wqW}d>q6c{MbS1?-29PZ2>92J*W+E+Itn>+rM zO~=5fqO}9;*|lbf(5no`H_!)*1Ycu;K`YPd=my&g!;^wynV0VeZCtG#S5erh9!z0` zGBm*+DGNs?C7}0kT+~-$r@E-k2{imQ1Xj{0!56_bMldkM8}6RC14zJZoLDHyH+KBT zr-|W7V`G>Cb7t&~SX%=M<{{KNQSiDU1d0_#rueJdn)O}H)CwCZgCig{tjt^y5ZeAR zDy*kLghZel%&d@RR%yunBXE9>&vWND#4B5%l6B`quy@KoI+tx7an|-|N9VGW8vn*OKXO#SwE(|`EG%Afzd`LF-F z^)J6)|L(ip-~WF14}aMI{`<2(|JyaB6J+_DpU?j6XOlnr(ZIEf=BfGGOb;j2hJ!+i zx0P(J2h(|zyV}`=@yGBmu{_gDT;0~cdavu3zg+z7Z^0-3!yi|F{>#3%-}Rr~5N#cb zw+*^b)ernQSF_mZr^u2eGz#1ZuH|%(_em-U)fSDt}CaP#eF06Y#%Ax!*1_o zwIqR652jUAFt&e8xV$Z1+7c{oQYIHku(YCQ7(2ZJDj*j(Idkibg$>5)u4wl}aeS^n 
zyJ6TpQ7vw8rF=;H7CSc3<{fbd~X}W)yl928RigM7a*w{scIiZ<;9trtLs}y&1q&wf!{YG92gO% zd$^G{uE9!W2{Cj&9#5~r)2ldgveZV?cv<=YOXsI(U1W`mCbxi55nEEj7LnK@5?9O; z%UL1~L#(EXv=kt0D)CGOfL3s1D7p;AMA$4x1|HdR6jzDoD)AfzjwQphR9LPG%TNN0 z1$XNj97}~|t1&`Tjm}Q=goL4_GLn!++N9~6YGTH@e;C?7c5Uw0!hnlsFQe~$7(=Rl z)6d_DU%T0`ykY4d5w*6Hy%B=NiPzbPauZH&ph#;eVq=ZafEJigVhaf9P&unKt{R0M zK&aSKEit1+wJ4#fN@T7Um}^AlYOw_^wc#aJl++5e0<8~KmtpwfEqoWPzMY8d$ueM~ zEQl`bV1mXOu=+}=?96)doFCMH0Z~O_h4zESUS42OSOM6;pfFezSAuQ4xLGp@9a3wn+VT?d<$O{8Kil%OPL%Y-(7Klv*rV_j+GUZs2iK=ol zbq#Fz+G-4QtZ}|60(h(;%mCqRah5qwuZ^>6W4w9_ zpP$U^?d3K$5?k9XV0C&z+t$TyO;cKuLq_*V=7$(?R_<+DfaGB-JQ#(n|GT}F6y@rEQ_m#$$42{Nf7U(`yx1GHv#8r#=Dv^ zz6h?RjU0#ry$E6~;;KdaTZxHIM*oO9*RS=3*>w%rS|`@xB3WE0c;MG&a)6757Ia~h zvc6;3J27tVE0(vx_S5tVZEBf4Jj2Q&_N0+n?$V|L@ppm6sQER)%!+7ij@3Ot&kZoU z`f0tx^x_0#Ze6@}sMt7QEdU+`#$-Fk?4t|r{xupBfkH*Lig-K6?3ooXU^=~FdGaj& z$!ER4_~q1ZezW+m->v@fk86MYTvhm;lbM&{ro&VQ=pZx7_=l}87JAeD%`~UOb zmw)@;```cR;vc`=`1v=>zy0mzci-*)<3BFG|Nii`i^Tkjqo*is&Ct9Ng5HYK)K%NO z7vaw7TXo&*Y7zj8$asPb77Ot12n zx5Y@zwf^>{?cvj=m+yr?`El~guktgi!p#H4)`5O$LpeSx83&6hie!!kUkKoRipc>m zcBm}bKd~I0Yd7}AbL;eh2|QvL!;H-_7dOS@3zWhrE?30%jp4`VX;Ulgxpm?23>Cs= zv5bRrx^(YVv`#pEm6u*DGe>5b2*8q{{a}27UYa693-Q1NZDK~YykVJNv6N!WO2SY!>bj9x0lSVI733)eJdXuxn-6cvWpy+ua0Ul4B>=&X1O2Tfp9Q#e>W zy^6-eG37Xkg{o~}8G_;D`Km9E8;V1F8Uq$ae2<#r&Hur01mQ=kZunuGMkd-E! z#7LB?xgsrvr^oS408!<30I+&bmBv}6v;+KKsH>AvsqwNVK{sM0O$$l{wiFqPt`!zqi9m#KSC^r@8SxF#^-U668GgZ? 
zvNtgn9;nBHo`uefsv6-f{ zv$PE?bt6?9plbtksKmZLTw{b)8)MhTS=Jcc7GqiB0D&ECB1D%Jsf0&Cm#u-It#$Ud_u z&-YQ1&@$SIfo7z|P4Ufp-VZ;{gP6*X2i|&-Jig{$-7=IWWa%DmC_!nA5JO3xH$n(R zso^BGHA4k^VMxMuL(5f%i#rJ5H{LGD;liqRbz8H%shwR?j+CVXW0HJP z+|>{Ef&+1EOBy{iMVng}&abm82FtlM^2Py(SlKVrbSbshE&;LCCum8FZj9CA1>wo_H*8lgnhkyIu```WR z>5o61eED+l%P*F``)>E&zrXfx|91HGSF0cXXzIqzWO>dONegrql&-GY6~s5kDUA_q zsEslN4H@P=e@e!c=}P`2p_IrfPi; zWX#U4bBbj=%vA3mr^3(@IG03#5jQ+dLv$Wmu}mGCW0j_;#WD$2T|h^13<+#Ua%o7# zGP5*A9hjg&7xZA6QeNVLWH2!L|_WvH9)O|d+2nj^-tMmUCMy0L+% z^^x^{snOcb78MP}8|_!z#9B(0*xXv8EjVaY9Pi6RtVt3~ z(*nd5X{WWNuQ;u%3v7hJ9$T1#_@)n`5(`tAe7%XWaQO8m##XQzeonZj@MZ+fo6!2^ zsXPfyUrhy}FE44%OPw)^(aTiV)5I1sM}y^R2x2ouZfB~T0An?c0AOKlF@#x|eu=0A z?AjR5)XKI-nXoSmbSy>%wy?<6VoqkSGGR`SW+BEdFQ*pQLP(~WFp{Kuf;BMgx$3 zCal#>hTVKno3%B7^xD=8t#?Scb7;AB*LVM6@Xn?C;+E^$P5ZT*_R|}{x7jraif%!% zcqcJ8$m<#A^^`=p0pZ}7W_HQ4w&h=1^A<{Se=D{*fgYV>&8z!YGvvqmdxwh%KbvyONPv`#pCm)D|4Rs&^J`j4k{88X!Sl==19$OYSWWzJ`-VxmJG^@PC8=2*H4g%dE z(vA+Kf2iYuHfER`)-#NSrql8w$a%iF)$sUP^xY3q+lS`kbNj_D@7YE3&Y|bxM&#M^ z?Bgfdt-YqFUb z_&jfF82}wLS&hxHdMBv4QEJaHu6Kf27^Xt412Ol4#}out3TSjnl&BU5G&(O=W8?94WCp-k9K=|G zf+x`kBwCJGPX%LoEl>aeeFej;YNi6kQejytKwsGvT-D;C^a`)}CX7H!;OTJ!1DN#- zj3mCEAkbliMxt0xl^UpW3t3?%)Yem5lakVed1wNpa%~+rc8;8fXZ8Ch^^sH0SR-kP)kTvVas1Z+qOem{Zi2>*)z)KG&MSIwY{+9lEn{5lNof3uA29aLjg@PsQ>RCB zN5><(huM)yZz|6T#0U^$Z4Fn9(cuF#{oY29&f>OQ;fJ2-WN&h9}_Bb(0Rayc@{ zH<*`|7lb3T%;9NTd0xD8V7+_`Jad6s(n zRp$B2%;n>jJNNz9Zg@8LEE98*jvfY5D253p2>wl>B;C+eYk`$(|5%b#DTLAhXJ0rM^ zADY*;?Tf31@o8;$Q989?JG~aUc{{Op*fKU_4#vq)#<93ip=1s4uuXAcb2?ZtLvS^j zqqXJ8p%nOuwk zrAhw6s&;AJF~6qo9g}wy_~CX|Q-%$~Khs=Ko2Vhlb0q}MD7`++ut)HY7P38p52T3c zZV=Qqzb1o=*qaB6-DB{t>nj=ktvs=wK;zXAm{kyCN$hGex01j5LSSA6 zek!4!2zD7vRGFD%ZsfIOjH6SY_3g&lWyjRKu{>)Uol;NC>c?ix={zTtAPtv|F!XzV z)iFL}Pj;&7d=!-tCDvml7P{QQ(7Nd=7f$V}QPmhMM{eqp@4$>f}L3)|=*42!L>9ppq^N(Gsex6^yNzz96T4)5P^A%>Et5 z!sna%uRag3Z{-F5w?yzJvjr6ludkc?#j!C(&xkeGUl-3PYU`;YB^rz)6ptO=z z4!YXIPT!^vunADTdxTErL+SWIkI^J8j 
zbFuO8_U`4K&D*!8clQdVaepc&fEep+!n}?s#CTh9v36#8&bqYj*gNu^T?DrF>nCTG zy#vg)3^vt{?JN)nM!6%?f?}E4H$oX0BNT>ceI?r1lyH5=w0mS<-7<6yQoLaxE!O+7 zt|mfDiqSW$$_+^4Sz2p`1gTUc1G-~cGt~A1e{9A)zv67mO090Nfr#89p(^Qlz6dUn zWtGahgOkSF_k)*@TAw^i!OX7r-|xA7FL?b{^ZrTw;+kS|PL=KD17Acdwk1Jpj9{Un z>j~jP3CeKUxOdocevw$;Zd~4Q9iO{SZ+Hn>{bN2o-Lr+a~iwttM;nyK-G(2*3bFeKX8u?|gWa{a<| zp3@v7d7AO99n9jGZFb2yyA0NmS~Ik(kVAKMV+7sa3p~HV+?xF0wC*+t=l2|4vn*@~#^zW(!^C_En;9f`4H9!jT&kxA zZVNW0P>~GSdFdYF4~$6)!;-?VbYN1SA5}#Ag}yFMV}|33@_Od<{2}D|+*uW9%`2sBsSko)6ek}>Ssja!v z1YSQ6jj22xmZu@{bp-gP$V9z52Bz3dgV#b6NoJ*4eB#`ob9FO&c0RCwl$c*}4vg6H z1DbdzJJ!VsxAPp0SaUrl+NS6pGz^Z~2S@GQgQh^6*yx}DtAGisbWk-Os@6^VJ5W{H ztH2tU3(;m>Se*-}^WZcMWUU9U_2KjlWR0I-Y{cn36kQ`u+f1-W86aGx<0_P(!YwPN zR(Fp5<`x(P*f&+yidz68?VCUffx1F~;U7Uy7gQ1|6S1pyo9@?(56DhgWu+I=*9zn| z<+*__QCmUZKkS10beVju)yWgdu^chjypz^alrFl;LsvIaHGZ0=nW||5R94%}fX&6P zRaq{?Sct1YC5Q=LW312-6FZ}-hM2Og%dxPMym71i_`%lW`#X2?}ySi>lEn!5ViE4V@D105+B550_0FJIyC&(VMrDa7FR-y65zU z<={*SS1(t0Sj*eY*){6SDr572e{g2pIkZj8sImoyKSHduEx@o`e``&10@*B1qL7+J zOsboc>gHwp1n~}5B+HC+(gO)xZ8OT^2mb^|5NmG%1}$3ysxeCJ>Jv`RTc#Imqm!oI zlA@!JmLH-ntSN8b4}A1-_vc@9|MbhkmtT#1_4UvPA9ZXV=sS8St?k%I2Bh(T@bCzz zX=n&zJrQCl;R{n5LfCpA&Rkz(u%R4YY9uYm zq>XQ2-MEEM^4uW!@&H;9Akkj4^B8Y+guZ^DU5ykq1;VwopNAK(t3{NVi z7qlC@wbyPooZs}EUUwg!**5mob8C{RWnp;<1lW$ua`PkfSQZ16jdp;su#PjF#lgN;)C?c8V|e`rFtxaL|~2h)V|f^cq4 zGPf>V+G5Oau$HzMTL=2pZSY?`&?*#b@Q}gMcr`>8AhA>-imk+hv5|_x-~%O=%&H+V zuJl;3iYwHy#d`kL(J=uvMa~tL>JhmW8ENr#uj#9n^!ggGR>)W3kyz-`S{Cw15t}JG z7q2;P9$!eGo|m@vx(7y_*F>ROyskF|Ihj)2e^5V%7^cSs0A zZ%jC`0V6SEm2R@Wfvj}bK#Y~!uUM(+h!APLhmLvq@_AfM94`J+tUsUiUApH_a}Ei(~fE zgmq@oy}8-CvEy6cZWx(Vb@Y-!?N=uT3T2Q}wRV7}MJVyX!o;3I`pA@|uOyDOGr^u$ zkk*!!wZu6s31+MvoJ=0HH0Jtg$!@Sa76$8MoSvd=V8qzfr}BnCfVL9k&LOU(Fd;vn z+}QCQokaHzTTZVsII3w7N8*EKMjo3VeSn z7D|P-2Ho6*~M!w_SS&Q<$o;{7x6_K{?57tCvB*Fl!- z@DwxCOK3^rd=XqAMs92+*c;J0%N6Xy00+2S3^&Bx!K?O0le4&p2x>G(XaFiJrzQdjlp2}C z?Tk#nU~JB~wq3up;h0)fk4{O3%95#h$?~RdW6!*GU|HWaEN|$hS2UB$s_t=7xE<@u 
z)P!@CmK>|8jo@m*J3<&w3)UAU1Trjdl3AZ&y4v~nG}oOGxiUgmLh6jNEG<-X0NAvg zQBpX=?kP$p=WFLztc&aFB8h=7GScA@ z=*a>-nXe=A4FtX($J3Iy8X}Nebwq(4c{Gp&deRkTH3FCw*V4qbG>L^St7FJ)G`W?c zck>!rwTYZ3)#GkS8STw{vzwu|)0GyA#6T9PaY7YUtf7lF44IxS)3c?3Z_^Y;n$$=Z z=?Su0vZ9Wta^RHCYL%;6?XFTG(=MqE9C>ZE)Pj;&szkNbA_I!A2S;G2<{PW|29&^5 zEihG!&EQCFRkFG&U1N1!8_nIp_Cku-&u{J*1_#9ztCg#hd4;P8{{-jtegSf)&xfl7 z-~$H)O$biHjf1OaUOyiwilB8Ceqd-w5+0U?he4?gaxDiH3`I$FP|{rBHDuY24yL7z zW{Q$5NgCXJ0R0F#Nw7y2%gIw++Txghez9$Rv3GVlm&%2V7LiCs#d9 z*yV%qhYyRV*ATnWAUoep$@Y>nz1X%qK-6dlI-bF|rm((Nv@cQvnhLuxnO*WGT_VL#baFfL++4pPKfv!DVi$_6z7l6}R6IVbDbLoH=PZ50hGdrS z3)3B5g4Kn!xKXZVbgo~qwC-EmY95)Y9htP8op(HZSiF97U~x4%y%2u+_WW009NoN~ zYwu=edl_R>+R1rK|A;cy&S(r1eJ%L5PR6zCEsx(yT)*9TaB5%OvaIduj?e4vUb=7I zvz}bno?KU*T-P37X!cLlYrFF46;ba9DcS)dj1Z+7?QNm>!wg@9wLb%<)Nq3^kOdgi`cS8?8A;@kA+3gwuLL zm~@_uv8Xw z^x?n{s{pce1}1Qm%eX*E!i2bMke(P8gmd&j7unyXb7qpIkLqB+a+rI^krxju$> zN2#_175rKynbsr|lsBbC^+~R!g{W?*(Re_syETZeZvi(Nku)ih!wi z1y3xHW>(o7dxnkOx}gbEASTe6=xia4$f_o>z~l>P0#Y(pL1c<>6b>3k`v(%U22ZQ5 zWHn1Qe5rviF$k0vvCIsP!X%K}nMw;!VrD@61p2Cuz&GH9CX&EF1dm1%LSO$61}iqx zz_F3VR*I~SDz%ZNR*JliuB>Mn+&rC=t95Xdb!=rFTdZS>j10h^)og*9A=dMyI`CzP z^;90>)CKWhW{S*8Q`o6WClT;cWXPp*&G@_0!SD@j3&OqT6PF^nxZZ2+eU5_KVxu7zxfQ0wB{rcP-lug&$>O)o?@ z=KD4l2bX4glU*$)s~BP|SB4SS5>%cm7Xh?sT`hLCax3Us!B}MA#e+hDJtnM+3+fXh z1Y-rZn849?^`tW*jb&{U<<{Fb=3YEs-q~*(pRvv^Sq@KvXXo)-x7!~*8hq>N)WwZ~ z^Bbe@zO(Vpi=7YO-~7?L`yal)`QSlmeLFZfrh+9k`94Z_A2Z)a>F6bAd#KTNIt-y| zjG$v#T%wcI*+(gqB!gqBu08|&$7Xv?u(l%*VYj8|9a;9!gl28qa&+dsc{gzDZsf-8 z@Qs^o`-j1$^@fF2XK~!t)u)Z7dFh;>IBwk7X*$1X-#zS_UWlHam!3XZJ3AdKjR%XP z-h1~-pMJD+=Wcsfft~FmjZG={kNx-Vb)KC^Pp^fqU2nPnF#f@ZUGIL7y8FO){kHAk zw03<5l#V@n8GY;R@V$r5TlefY?$lqq={!CMVsddp5Xm4(3#|kIXozZIu19%8gbV|Y)5 z;B6&4f*{vOgOn+>BuTv`>DrEIW6v>&RcHWY%9;1yNe>2AmNRpJ*|N2!i_E1 zBb@E8K^CTnQ!DJnb>-l=$Qwqtv~d?#n{VFA!ks3Gj3JUTA;xkgB#s!(kpPS}J4lX3 zhSh_ub>iz9(19eoz1z?~>`3Qy^^F{v87DnOzNf=9YfQGbT8i<1r=%b zW|S*f<8LK~+gX`@L1tJQ859OPK`wMtmh5k*2YR@%lC*759_?2JxIt4-^&wCNoPl 
z0AST7vBIKKTEr@gLS<2C970u{KxSh}Ygq`!QuszJ-vHcM$ao9D><IGUi09cuo4L+@rAvSP;0;~eVGO-E#UC`SA-b4Ua z<)CZbbhVo(v!kx&S%^v?GJ_&Bp{Yus1G`;9Lp8h>=&Jb!@FwsW)KoZyf|3H*F)bNKu;V7dGX&o;I>Ig4g;`vid5S3nlZQ6<(aO0jKj3>q5Lpn>5#7 zw>aOizSvX2SiMOgl7XKoPl^^BND3!I?`OXu3vjMxTR)g%uTU2r5DpUqaskBH`lQev z6GG|0kw(OXR$+6SzN6Q@f7pNjvb=v3pIvrr?D)>k?4!<;+VX9fYUn&z1)JnVOdX6=?fE~Pco7L z@!iQhwNR4e21KbYMJBH;&xOjfk)APqXVgmc668>vtO8dXfD6^O2u@IrG)m<)3~r`tJL=hfjmIFYSA$T98R{$8-6( z<=M-`$Dj86=;MyZ&!P{XhAtns+L7SPY8`v$bmVR-ilINfN;?e#3yt7 zM3z<8fHv7t-Y`1eN$(q$%q?lxw(Z*owuM#g*o>sRNKW_GWD10IFPQ%OlPGT-?Q8{| zwhdt<&@2e*7r|}56SRuQI_RYd+2+1&b=xvFBk38WX1Z}e97tUy88*jo_C`>PW3ETp zePmZN(-k7y8v#n|Tvhf6-k+evI@yUVt2IMyia}JtOfS}5yJ#7kQTLCC+H!4P#%-U{+B$XcD`cAwUULM7{!eWl8L6EcGAov}z&^ zNV04pO(NDBv8U`eRWnUyrO9kGO+5n)$L&;w zoi4YM6m=w-gQ#$lluqDrg+nd@2qgd{7UHVJd}WIjnX4<-0)-8+95~Qw2YB_@JF7L$ zDmB7>)h>Xso)iOiCc;NZP$9;`2m*MZ$qGmW$b}afX}#$>fKot(p#B|?ef_4exnF$c zmF*QYX4&pGnkz0u(n!gL5b>65(iptuTk5uo+eIL z!Lm5t0eI|uAHdjbP**F47z=o;nWAzt4PGuZYrS@A@m?EZuMX5>A&&(B`&x{xPl#M? 
zlKO0eq;KYzA%c{#eV(^Q_bmL|*tqw2vi)!>+FXu>c$WgVZfgxcs9 z7r1#DQdv4ZuUuN!?w?q8kIkF=n%PxxVH5{_p4mQjYX^WzUjhY| zt>R#O1F}71_=YeRsIqM|#6?VsbrKU@gbJ>Df;I3sn*gI$8BqcyN~8v8Z*Rgk#kk;p zxfxyCSnX&5XqoNh_m`E$2~lp4(>XxO^kOFGm79CDeI<5Jk(KVHhSOKk=vQAr1RY8O zPZSI`2LabedR1nC><(fw-NN&W_LH-uKtmQO!LBG@K@=!STq&6;!?0x-z6z_bQW|2) z=D6JIWg6;fdKb>(BIztBxe+Bd;uL0Jj8R(2b9aMHyVsXz#1( z>L=&=d5Ij)7ia0~X>1vW%Ei$57={qTmScr_qEJU*%P=$%8tAd?YC4Zd&AZgOdKZjs6XP*Gx`3r(cT0nl0t5LIjek8(Q`per)!lG+$jD;+*5 zwJ~H6W342)oe3QB4z|w8)zow3cCc|KucIrR40%1}|EKFcnA^I}uF<(fA;1EF1tf^x z8_|J--h1x=Nf7LmSVW4VD%GpiyJX8Mwz}1uRc*_194AiV#Jwgiv1Q42;w10)-CuBL zpAFG+-nlb-FaShS6a_AxLbtKw;K+$SyqmI}~R zu)~F7EV#H(108^`sgh~|UzO?bQUgw6#KF;+00GZS5dgE$ejqXdj5X$(eSI9?0Q+f6 zz^r}izZmRn+5k-Uo?S`!l~3S_U<;pd_b~kfT#(V-3Cd?-I-)v8LXuQ%q+aR4Abz|h zLUG4HCv~Qu74KkJL$z`%`X-{l&=okgp^A%C^K|tRU9Hv4%yb$dz(R~oW@{po;SXUlIie*jjkbTeI zp%;#g19R!7;NlrO{FKr8mg%LAdDDjOL8T|r2ICMz84${y?`34m^k}wOZzqQ0+%4OO zUOKb#=+Q+pOFOn~AG&sV?TxGJZ@oNo^W{aiZ%^F0H|Ng1IX7+sj6HpNaP!v4%qqv^ zBJ0q&cJ72>#TxhG8D}WjW(n4M!_47v*Z5R8kW`r+ZGk9zYO(3i;RLAO*rDD3obBY< z;Hj7VhhK0Vd_J)Epm)_e-Pn9yd2U^%TnQ#kcUKpBi6iql>z{F5y*zR4%G9=99nMgo>kPmb>SqJLt$OhGktjBf&6oAf6$Ii9zBsUxrMsHD`da(u0MF8* z6+Cq7U%X_Rm#7uV9!Qo&KtuV01DueQ94y<;1;y`n7L~yf1G#I*K z@qy(6&~xVm3D0dJER_T1p$cQIIY_aF8cjaD&IO9d-BDbwpV>db@14i#8E@?x0bbL& zQ?z*t*_{JuV0d41D1)~>`Qmo8O)r?PU@RK^SPxpU_f!#qrL2)TscL7d(n3+$NLmX?W3HBH@hlmZBgYH$B)O$o<7)FK z_`O5+>7|M3rHP(ly)#F5CTZqGn z8i}_?>Lp0M1c|#^=p^v%c%B){L8>YkYsbi)RVp9W5UMf6C`g#Ba#zc(RqzT|s>O)S zsF3Kw=sZ|s5Y!46$_?Ffn)AK&aKRzhL(LA*B3*PxtW9B}u-og|Qt+$CQjoZE5?@0S zs%p84T9&wu#;>LG$g|BXY&o5;X%pz#LVXLo6>8Wh;cNrwiqHgF0Gp7tf}4;!Vsi^0X%d*4gyu$}u~vYP z*yyGJPm7yf%L33kyHKF1U@V9uP-EF@l&osN8kZ0OlvRvWNfA_WLT_ii2hbKPH-YP{ z3Y$gqx^=O7PniuPwY|M8Uk?wo7?hci)56`Zr`Nk6eT9PUe>=PSIN$@yAY%}U58%^_ zbVre)OEoFNWT~%C?5Yzxhyn*e;wDP%6}`e5s&ysFsm|7}Ioz&(z9-qBvH?AWrN**U z7^W7-(&1TZtVC01@r!famRXFQoYysrv1q(DkX8f+z+=^+Hb_&U5<3gF5K%Soeu55n zyMSdY#xlfsNP>kVm`DO(EIgosJWaQzxaI`gm*K}cMCl%WqK_FbGs4}xNQv%Gx4P4G 
zUy^P~wK`MH@osv@khoaZE*@ExL+x7c5R}-#M~%%kavX()OKmFJFA-%Ei@J zFD<+E^3uC^r(w;(oqKa`-Woi2u5Z)kQ12XZsbAJTC|tPAw`Xtnk`;kanql_TdxOke zw|aVMV$Mi7mX#$7;)!Xng#^xC-JXMnXAc_>96~m2mn~Y!nOMvinWPVlH+K%yR3gGq z+e5qqKQzW$ztMN`{N(kk3-|3Wl!k z$zn@+n4j&Yf*pz^h|`JWh{+Bxolkb*I|s-E<4yAx(U+{^56^D|K~*_i?+A6?0_ns8 zL9rJMo?uN3@O;LBUo>5!WxA>G>fZ6BW5R|~O>j}~2XxX9+nWGlY$#i4tLYmf4o%d}nQ8|9*ev)P`$vS^wx^b^aB|c&Y;8S9Lk5gXO@Xc? zy1cdxT~lIfYDD^ag}F{+u0iu9>eZ%tsg5KuHEY~Vz;YAvNQbP{uj?H%lt&E7K3Skk z;LGuxIfgCGG{xGGC>1P#hntk4MybDE?5P!bh(Z^EZ^MIIJDzU`cX(DD$AsmWuxtdw zF=BXTEFWdFa%WY$t4ie|8bURuNTbeQuXbSMs1N}~1E43^s{jQR7wBE7Zv_ZIR0$eZ zAVn||V-?f^pez8vR))f<#0=0C%ni-pVFh?m)KvzEwhmB6;O*gfdpI7@#s#Wj{JjEi z4-Xb^K?H_ex3X)1^4MR|-7*`kwPo9j3|E%s&a{A>+YFd#T0->_7lm)ca}5N*(k>#BQlzQfL@>)UR57(UrWVK6Vz_F8!bI`JB&FWU zzzg!&SlTI8vgsVaV;dPjZ-;@g`Y=-)X3Po#AeS3PY(Xj;h3YHYko=WD7FA`Lh9n&d z1+zRB;;kjkcBSdg9K)GwapqcVsU{$2rKr|Kvo+alic!ExkZdW6KiwM2(~|{prc+Vs zS1(!?d}eFck;7mjcKpTpJ9m|ruFMXMm;%W*g98T_#6p?oNRH}HH(PvoI51Kn_3#=W zzM*X10xPT`uer`ckTj=y?uAiwd(_C&vmX|=Z<6>ys^4mnZ9{z^7xDM z_wOGbo%Ce86y6BO>0^vfg;%Za>K;G>vDWf1f8$o;vxfu6PlpaZ4-yPEY!xkD#Xutn z>IOz@%fofE7(2V-G%%O5e6{1b=Q=_0{g!C|h%}l7skxzKqc>V>_EdPR!iv$^fK$s8 z0SWWz5-m||jOIb?yf2B17wY4M`bdr#NLN~Tdpv{|w#*F#No)S%`rtTTp#%TRC$?iWHEBC{q;o#1Fj&;u%7OvpU zTgdL4OU?DvLSCIJ;bEHqn#xk&F+lL8!M^Rtq;mU?`1BIJ+|irCIS?yMa z%-kk7wg?Oje1t4Al0-IgyS+(k0sq%}8yRsl={$6UkLQRgd?{6+pa^z~!`-4tuPD$Z z@N{y#S%xD^wMDT)0AS5<=>XMQO;r+O1z-W*qHwBq zVB4KwM$nEvRGNWGAVz+HZh^5%?I$>jTvv(fDYF60_OhJCR#z9p*T?aexxkg$$@G-j zu=nu4Qdwq$Vn7aU2k45mW!em}7Gto<5o-%1SiuC<>8DxTjY?CUNQ0MZi9$VDV5;X^ zYI$a&z)qCdG3{QQB|@=BNbYETri)n`;Ke%_I)4rLR~xHXdMs0iVQH{@EkR|X_)@A; zZ(C+Bs=(oTt-Mq@X?R@oLVa5BqGG<}q*3AfGav9kcH34^1J(M?GJu!b0GRxW^J zFMS-`F{YRRW>G>5gn|smmgU;=EL(wN$usQ5Hd~%<&$U`}G;0CeTCy!js@afgG$g>N zTIa_hff}2iq}do)odrLmrwZTCl{haG7;zT%PW1^~G!5 zQGzdC>x+>rexlw5Fjnip+x@lvM14met7n*(?rM(Z>q8kLEPP3IkXT4b)3a%yd)py`TdTUc$o(R0V|51W35! 
zUlVW}k$2_*R_2iWOO_ zgjS5uMG!ghA}3xECLx(tTY=-q^K1q13O8q2;Fluas*h6*;d(=utZ)NiLE)&<`UoZ; zSP3x&2$m42!$yM5>HuhK)i`NNdyB%{BsDe&wIs2YB-YhHRhh4DfxaF9CPES#8+e8W9zy0L4YTO_ z^Z`iwgt%4!78M8hhDM>OL1?Ozx!Z=rd#_iEgC&TjXw#1!3|r zTme=f$_^#Cku)nDqFddq23wQbNNGpNVk3!fsgqjj#Ws@Cg*W?2wlK*T0}k-Mxq?Cu z2MN^y->(t8Y*>0MONZy^>WnsOqF@{xiO*^U3&uO1U@R2?EVBW$amh%0eWNl!g8(~g z(E8O%SCsC^@!{$LsuHktz#j{4O$mU#ki-J|nqvZV&2qtuFvqbKIG}L6$hH^Rjv~)d z0BvR9=L0$%sbG?u1w#i-n4t1u)h@i!4kqAMcdIwZ>>e<}O3l$pZ>eABkFj-Df=q+e zT5z@?#q1-fjDWt1!0ZDnR)XN5R9?PfRcY_O$z9LP9NNG7>g8QGuI{=0@{TJP*Pl7H zWh;tK72USuy%v{#bZO4FD^ZOa?#cu<%LU=tJZaH z-O<1PnV!jok)9!Ba*<*C&iMX=$-_r8+jrZhXLu8fI56jVaw$lD?i~T!Owg?q%HW>x zx9ZNmy6!^U++tR3M43g!t`mdCJ@1O^s>emXcx_xd*@1Gg+_n8E|dY0IN4HD zXI~qdZ&T+_)Ow?pOhUL^;E7d3P7676X}E4`8E?x@>zYl<;y}%8iD9-($&>-J^~7-% z@gYkLX6gWL=jww=QXtg?`k12i24}TYixFzjqkYXA`C7-xv#I546@`96vJ+qIt?wVF z%v;D_yo$ekP5Ywdvhiu=$b8PSRqfy#-e@+andUUxoa9(iTuYK`OYv+eo+BXyFQE+A zp5s_EZAhq5=c-m(a9T47aWpw10(U~{N=xi%zAewP<{FVyy%yjqRv!fiiQp|syd_RD zgorvnUK7A+Ls)eHFZbfbPQ1W|VF+#7;cl zhUZ(cB3HE{LN=sn_5#=4DRy@7+(nTqCvs+m&OFDKWt%fhONxd7C4poJkad2F)Hx=^#q+pKg_+wC;D8GxHeM;55-gxVUu8Ye&{S&p=pEhTUyH5_@3NK4Xs znUD7L3A*lbHdz zl^CmJs9Yd6fe#iw`QiYgja6z7-j!|h7P-zW&6#d-X27zlud~frpxKJ8_5#h>)n@N( zvlm-z`4)JM?95QD=~`2oXi5`}X)KbeMPejvtVtE5%DiY^tCfy81ZEem7D7|R+7=vXz|tiW8_4E+Ba9)(Z{pL>!l8?z=q=(tJw&S zja2iEB(tAWD4RzoBeNJgHr^RY*u+Y91!Gx_t(;n#kjPb&B`$QUD@0X=X<*SHN}oOY z7*mJ32;lHb8>OS~-5eyrK@v<;l4VRYkQ5!f#xg8xnq$eZY&niS$F=5BTrKikoorVp z+u6Z(7MYF$$5CL}3v62f{GeNKGmsSUhKYRp)plbpy(hDP1L>j z&0RnSEAZuNNYBDqTm&U=$u8i%hr{Ja9`8%yAjVp}c%uskQ);u_t&5kN)@^i7FGXT` zvLl3dhH;4wa_=1O+zH|ExFD2jum^GWK(#Xj99f7HXLNyb2YZlU_2J#IUxrE}3tCof zlx*CloHM@(^cZ&Gvpx02{+d*&1|mP|r^N+Q)xIRo8Le@JiDuMP;0V{c;|-2r9mw&v zVA{=?b^`{yd9~xkY8+pI5vnoXIDTNfW#d*ONWD9gy>@-*=FQ2Ir%Su`L|!~zyn21; z5sv zHd@`St^muHW;)UgbEegpA{Y`_ZMbsuQLH76O_!+oLAEzhue4(zYm!*2q)rUHs0RZM zJEq-*QG4;40AA(8%H80!dx1D0apK?+Iq^`im3qmlFwL50J3A$=4m}8A&TCzHl_#h1 zbxOT?aNIe8D=qS*1;#i_?rRp9@q9y-+RfN;!lkKsi<13g!Bn?3lvQcm45^JOv$aTVREV^E1fHjuEz%@F 
zXhI{EkASQaxq~WkHb`xCA`2EJu2`W7D==2^j8$Ai6-WOI&-|~s6<1e*M@VjG2`xA% zSryx`vr&h$L|2S{z|&L~e!AFPrEyieGcAD((;90qhU;~nIwU|sA`~RvXiPQgGh}0? z$&_j`#3=AmMH{Lyf{O_70zjfTYlPrQH@Gt`j!df|%G7umG7C$FNE9ZC*2K5Ed1eRO z?PDavt@(nWt6NV}F}8)-(86qBaKTb7M+$5$?VgGi782UZt^q0z zXrgVisoh}Ul>`S#@MbkwbB<}wF|B#_(|0U6mZJdaD`*%8RJOoy6d3j*+u6l&b^uWY zjt*gnjVVLX#A?9R7OILPo*GZ0&68-;+iH~v@aHQLtTRaVN9drOPE~8q_ zuUxuH0j#I%9E)bmOIDb-@5mlGJm=)`g-4GJtzMs=J7MhX5+p)R(P%4p`|A`Jy19vB zOT18%F4e$F)_4a#(NPo5f-7`1hz18~B%1F;L^DzR--Fu^( zHit@sJPYb*HM#MT94L_-nXl^X=h^&3xJC-o1wneiQ|UTz*%o5#-~@0+t=cGMjOTg zTn*Q#BK4{eQ5~$-1VH>3*zGk}>#VhAPqRDN=8AD_8Ja0WH6-!6c(pc+)dXHzf#;d}AozVoDI;r>lHe@Z03V zK%|vAF%V;AehS1`SBJz?YIk`IKJ)GK#Ts6Gr30 zIaAHvOq(S_QG2j()-Si?HDR(oN>QS3yBdGB(gTK5PzeXlx-gjRSQ8{ys==3~`m$74 znr=@BbU_xtSc^<&m#K^*lT)ZgSb8(v;c1J-c!f@RSD&%euS*uhfhgPLq#7J#gM*AX zi54#rBt9ib9evEfF=@J&Z3xxKomDaiQEsW%xmulZL9W{}J{6k9*cH<~BV!#;V=Rpg zFqXjw2~-kelgdk--8J~tzzfCLS+fC>V3|@ZW1Nkon3jaloMoC)Jad+1&9i}PE6)Ze z!?EVs_BCNLZ-ng^u$bua_z?MXP;es{MhQ}pI^LmVSlMV zZ1qYN2Bt(#G8!m&FH1joFeQo~Oq04Urr%oT-cEiJ=U| zmm>O;1cx8ezW!jE+G!YVj3w)FcTO3QW&%ItB=pGVY$HTsH}< zarIIPJx!539vTeLP5;W)gAxIrF<|}-5VSrVux$An40f_yi-#DivtkEEh3hvP4<7RG z+UH%d7Af>Fy|FsPiAC+oSXTt__RikM@^E8Ef7{3ecl|TS@snWnyzk86K@HoX1zYIt=IDX1Y1?rS0dD%HVi2)5b?xMK)HjK%701c$G|8Kb%r zt+ou!kfG|61a*X{3FG0H(nqnzG&WbJ505KGr&P%j+vp)EjG#c=o1%8~^ZQ4{!}BEl zBb=dmf_Y2jlQW9)oYp{r4E|~qW)*HQ=W2IV30(w<69+KX*QAcN*$QlTm&%scI*K}1 zr`**kNAL4&d96Du@nqDFjKCHZ>O6F=mI%gD3QW7D*%aa;ewN5cV#=#IQt%@csA_p) zLMs>7K*zUotJ$Jvu>u5fv$zzAhLtLK)~%lZ+TAOkfBO5czxvbHU;X9lum1A=!}q>_ z_};_s-UsL5#}B`I_v^3z^tUg5_r>RLeev0wAAk7j`+vFq)|)R~xpw%OZF4536WOxO zn^7Zvp3>PSv$a(0SmwrA$OU=P_GYOIC>T;#gUneA&8u7^u�R0BON7V?j!NQU_aAQZsMivP`H6*i11SZj?_birx}1tJq(X06o) z8+FkpnV&2|-w2-O#%d?t8m@DunuFPTPrAXAYw_k9?1?5zgl`J)l`fvfDc9H)Dyv*+ z;HwR-N?nT@X)-(7A}M~pr0N|u_YRwThIHwoz@MZ!L!hC_7Ob`h@!n`HnhPx|_6i&c ziq?Tu+tI$ZuppY(4UKvhP3M+Pch2(IS&XG~nn5%rw~oaFumgCYg{1N}SF&N_pz`2p zNmdf=M zv6Z6(0~%L=K0M~$zH?;nzR4xaGr_n_rfq8D5`h_s4Gx-wgVkG`I(phytZ^TEp>XZ` 
z;I$jQr_Sc~Kj%GsG;`(h$#lH2MY^A|Adl2+tIYKyZ415uHlmG=-w08^)ynHcI zp@$hSH2D+tCO62Uk?Fy3))fI%c3`Y&dWN@fMldkm0y#6l-og5=p(bz!X_;;cD0}te zU16}8fVc@t1n^bZhUSXY+9G6w8)xv6Y(a`ELNd82DkFG1bBF5}EHP}^7F@N?J~%GQ zcF}EqqRI$<@6FWqBMDG?Ln$D)of2u%_*`rR;`WURgr2<1dv!m0%!&CVr}ODd&vsxse9awa8T~ z_fvHVx~m{?b|_H>%Y!7=UXX#8H@uUT+7nVkfYWZLF%`rnA+}kBkr;ss0`W3L>Ng)5=7aH^>+0*goCNErNfurRB(NHazMnw1lm&K~>pHh|`91(Q zD+-cU&;>J_TlmW*Kf19cH2exeD}t^0mnwu=Rblo(bOpK%3eFO1e) zopCYZ6>FR#2(t>4L}o%`84QgIBg5nbrDoZ#_JI-e(71MZ+|V;D2HYk=c7%Ye=Lmr& z)}CQrd6*w9wAjNWqr1`Rr{znA{yF{y=z7=Usp9|UvA`e8tf`P#F+rrjwi{~Yo(5&O zMHOw<#Hre3GnjBCfF=M9v6@&b`VjiE1OP};vO0UfcncL^XCK<~960KNFzf8(0Ii@1 z9BZDEz*`c4L18D$+0A&`*yie?TZ%M&rb!X1mAHvQD+bmBsU4t9DxRn2O1x}0JD#U{ zqa-vb1rKZWR7R}H*^n=(m#)lCEsmuNa+$84!N;|*aZMatBcp<`JTXCIBIUaz>o@w& zp6$MUxA*ql-fK6yo;w`eyxo54WdH43i|@WNe)pBJ%U8;0&UEeFn_05LJUp(RS{yuZ zVE&bhyI;Qg{Q1)d4hfCodm=U@YWTj z*a9S&`nY_xZP`j&d590*<^4p1gRIbFL7+!F7XIF;9T-=b;7v9eoYe{w#^lCjx~Qu* zm~P!3dGpOBfBy4o0IeTx`}m_>AAhv#qYw9e^3ijD{Qb_GH- z?!|*kbvIreL@_o|ZwL}Kwo3Dw!9lPEDb`qnDPChtRs;5%z(a^OrUAePa=6ZcmZ5R( zrp>;iN4qy{3Qa83&R=9$wa&eBSK`Rg?8(#llV=kzp7NhQ7kS}iaL<0{veokPTuQuy zYzyO{=AeyK>w?v)Fi90AYh!J;EEk1S#jHy3D~!#_?WuNqRIYTk35`_HXn^(=q8Q6T zd~B7uQLHBN6-1G`PNahC!*whkMJR3+E7|Rc$d|B|2hwZTFS>C0$ZsFq`{tVu9zFW( z`|m$}{P=T-v9mDy1Zlti`R8wc{`p&gw?BUI^FROkkDosO;qk|R`|Dfpzx&FA`)5v{ z*uG@>V4{eaLR{d;wYA8s%@TWKyR%X1AS;|ikqs}jfjtX}9gMdsE&_9v&;m?rP@Cl- zRotiG@o7C4&bfY(4vfD7*k3+O{|n5*50E(kz{2wjx}sn!w}ICZFoXn1Y7a&2Z83$o zwkS}u^*)Z$4g?UWHn&?E1cn-c5v#D^4Su3E*5FFiTVgfvI8)7zB+VHYn}d9fi=%N0 z5x-dPl4$MV9Y=1aDU1xQi)jzCLuo;>AnqBG!2Qb3eql7*>z5*|=yYs}Hq2}*mQ7ASSDD!muyi;*%V8X8c*&33aB#g7XT z(=d6JsI?N}Io8maqgd7=&K7|j%M#*R_}C^6u89SNF9si^=Bkagy~FK$_9RcA&0o4w zy7qGM^3~klgWiS96kE4P&z>2)a&_?HrM}Z=^GA+m_v}fo+Zb3hV;&lFtXkK3{Mg2; z7mi;!_u|WgTK~h9zY%lh7L@6uO&-#+mb%a2CvP7B1&5 zUMX0)UcF_f<hArx4Yozm+vKFlrt$W6>af@a32F;uae!NJ6 z1wCju03nbd&zVpynlX>gSGXfJuzwkrm>V28Z>%m>Xbfj4Fw)lG$NA%R@s8GHXHz6s z53OI;AP!bSbo4Rvy^MIC8cI>@0iw|hga~V>R_DPf&A_i~_7NP>dV2&g^L&}PY^7~< 
zzBZhpXsuWv2>TiodMsNBGL+bA@EatzR+)meu4I$JT_e(AR7Pwp*SLC}``*0?*yQ-$ zdt2XsfBT0Y?D_D6Js-Tk=TC3%zI1WHuHBj4d-8@*jmlS1-o$pS(2C)ifEWnO-UxK7u)dL!u~gjiWk40sw5dh${`(Etui&-tRhcEPmx$ z-~9)Z*RGA8I#oV-s(kFl%&`;6Gv^A|U+%ebt?Q)==~FL755EvRbTqVkzjOU&D%0Ty&0LuYSR8z~ZPtyJ46Rki~CN=W4isR}K};8f(hg4=hmyK?@;w}1P} z=b!!Qn{PgN{P>F>e)!_iqt6~a`s{}vzWB#KzMh3ycz*u*+yD3feh<%o{`2rZ|NiDb z{|0~h{lgEx{rg|O{pOv|K6&%CyJvRnT0TA%w?;%tXIs0yQDz58(K07aY6q5kXuJo< z4*m<|s2^C2avPu+pr%mhD|nihLPskP%`oI@E6+=;zqn>3 z{N=tKC$|$-&Ki}oK_9Bvi4l(g@kkMuT<1|K9ZJYZ5syIU;fSqG0yAD>s?oTKmI%cZ zuQA1I%wd8hR%4BU$(JX?aVJEk0NWlFNAtR5w>w_4`IAOpO65zb91)2%#BoKKo_I?* z-IOTMbKPu6;VR`OL42av2!#C!)xfAUS>(ra!cxC+*>va9r_gm4V@Jo6fw)m9X=Dj; z;`SPq0gQee9x!D?9JF?8lh9r-a*`CGX3&R-Zfu#-G;@k>NPxT-h`JDFf1$5=&eK%( ze`74H8g~^rt`hJSdr=1K>I6?#Ah!qXbj)I`r;h<6bHUbIiR~(}EP19WPPRlS-UMkD z9MJs>dU=S`GtBFoE9mH@!xY(YswGii866GKG|Q7$a}~AB_G+38Eu$A;Su$+9o=_f= zY~3C`dOY^xslbUdzLzefckXkPhp0V6jK#~5)$80#mKzr?Q%^2zADyq5Txb{?GlY_Y zOc%1_nT6Lb9lU<=*u}F)ubn?}``Sx)Z=O5++{R*$3Gi4GVYcGd>Z=YU$ejc1MKhYk z%cWC`#nVfL8@8&SJM2AqHuchl^y!xpM~?@dKjt`gB5?LX^zyab*$dI*r@b$n^q)DG zI)AZn@^p6lZd+GBGgbgCDzKYuc%FR2rr@Gwz}yRqdz1)J3>11)Pw*3Akt=WxTY1O<4BLR99>l{x$N#)3@BC@& zd++Xe|K0uXzqk9&t>xQxq=&|2u{<@=K~o1oehU}fqv7hS*jljp-X_PiN&$w`l~|4r zOgGeSg2qkOIcjwdkg|*T@y1ZK35pDHfUdR-HrWXR2A8fD?t9jH^+wPA2NQ4oZqe_4 zzxe)x$vbz)@7dLF^1+l##vt`8)V67Qp1!GeZb6BKtvbee?NMak& zT0@qqR&1bXZB(tfL1`eXktVH)tu^trCcaF~R%+RLvmjjzu9z9SaryLne|hVhuiuBb z`uOn|KmPdFM~^;z^r%8*XW8t}KY#nLe|`J!e}DUb|4#*D|Ihz?_pg6__wRpw_2Unp zJ%04jqemY<`W}@IK7DZi)nkVaEy;CTkO;fo)hu<8Wlo&f2BcSrv5?0~9KZk|M7>z> zmV*GC1iaLag~A=ob*Zcjpj)&&?XL)Gg}$O#T3J#+Jqd6NY?kOkPz9K5$3S!y+ps_x zaMmfEWVNFS7>RuhosWeCIXb6A<5n4bh~BH!x(x;&qI4*Y9u?ve0iD@Gl9-4J4_J0K z#cQkyk||zeiB~%l6nCb@pJls~Tvvn{%BqG&JlbZ)T&6^u zw?H~F!S9>fIx>&5c9Z7t3z4%I66Y@DUOeUBbHH}skm02Zkz02<@4Z^Ob|Z7*jBo!T z!|nt2BQJW-oXfs&BC%qPc4(X*?*Lc&!4zR)p=#^)*plTIyT1muwW>`(7i~u{XiFLq zaS~y{E?BNHRRO^k0pEInV@XtqD`;=N&ju 
zxc}PB8xK~#{rh#le{0hpe!uy_{dL!`E?K`RnCk`+vd|~Y))QG85V!$B6{2g4J6_|$8bW}_nv+;}7H#7ip-eC5Zr-VW{+Ruxi{U%>O22t?&hOuz z{LPyauihVf@W#Y%-kf^y#=JXs2d=-|f9Xo;#Od6@L$S@1>%4i4qHdFzj zfrD%0V_T(owyIvDBT03nc0;|@%MN8UoqfJ|F%U{vV;N`9P;i`tb7~fBf4&{_!;g+yC=F-~H!55C7*s-~9XE0B`^GFF<1d z@zdvzAAkD8r%{`k?ucR&B^%{N{@vGJMS&VD=MXUJS+l?!F6cHm4CS};Nf z9tr{+q^h!%VFdRpqbPLWjidin^&zbJ=IJWFg4v-Jq_2>{a*-!I*7%E(0MS+EsMzN! zJl2K>y))isZGfip({+B1&L_}$L^``nV^^R6tJb;=8lP1ka;iKQeaLC>A*P^4>1+dS z+BSmPjW>qtA&*6(IAgNf6t8h-TEbbHFVA$v8OefpbTV+@!2DAuHk>)N_T-6W$6s8s zcmL?-t$nN3W(G%Xxh`qEAjp+?9euo1k>?Bpl?)C+G9}vF3B|l=(0nv+fpfvahQFATx~Wd!)Rry4NRr3g|Db>X5*;>P?0HhHzN_YF-iv)N5II+0duZ2 z4au|`vuzMsfn_TNyj!y~FF5UjR2D8BxH`F>Zt#KxbFMCyw*=4I0PabgF}3OA1}}^2o4CsJ)&4IJ658{x>{46t?6zG#8^;KIthwJ zCZ^>J7E1?4g@qnwF5lt=R!^Kzg>B;jeV~lLvOK+r4$= z{+;VCpFMWw#NJiQhr(&G+DK98D;_h29`xIKV|85v?5=)6x}zc0Q8PG3Tfaqp{8aS9 zrQAyw(l4Bh@7eEs_Hf|hmEzr3dS7{M=+>RimoB7_o$&0~W8SgHeBe3ju6@o`>$Q^$ zMde|7rn5HRO<%aww0lo}#cHSBU!yVtfJM7>2@u#JiqM7eik{%CPIu5o z=Bwr}QjATBK=a@Pqhq)+TdqrV)W!<6!EAjfLvckaRSQ}N*5C$oQR@X=Hf9fA7p#Ws zX$l80K>@~V0e=x!1p-3(+A5=mfVe@yL_2~>mpEIuN3UO92xf>^r_P_7IB{Zd^S1QJ ze3LKPZ1hw^`(Z@>NMn{PgP`0x`{H~8x3pa1@^fBpSmKY#P{&tLuHA7B0RpI@Qs!B_wI>2E(g z{_OjQA3pm2{l||!eDv@yUw`@AzkYuI=8b(jc8#Pu6?#{l)PV(s1%S0;`6hIs7@+GD zfQ6`N!tha*fM@unOcvx4Xn&DmMd&gDJff$}t`&K~_{%s<_yk~O4pfY_p{jzpO6tJK zop`B-Eb}#~!ZakzH-%)Dklq|p8@%l*hfM9z>ODHWSBr#fNXVuQI(1@8_6mOC0&rv;TsxRJF>ep`D(S7a8>U+00 z-M_cx^;fq&cy;q@cQ;+WxP04=^5W&eMazP#)zE;HAh<~wKXgK&87?k74x~aA}EVAXPK5f09AO*Stc-UWf|}xFl!aKmK@8Lhtn>O zql4q>jFvJbd z1&t4(IsEXTAUw#6^|0f8tN>9#uD(zoBi_@RD${d)^zI>6?;OsYdE$v_#nP3A6>IhD zH(OS&H7=UbkIq+g_6eN9CIy0L%fR+MU4*4cuq`4yTY}}wuqq=ikR%V!<1CyJEL|yH zvQjd=R5m)F*U?K0rYLrQy)WA0@;7QM09|=fU~i-IaCC09%0LSz4P7NqG-JzkIM=Kk zTQXC~bZK1yx+lc6xTq>4aFmJF7{pE}_JFwlKoZO-yZWhXHtG%?4V}N(aph|F%r3+cu2EKKtZS{J?)DqFylxY4U z^|l?U^_!xBB-!jCAkJ!|v)1CTv-s;wo@$K+V65JO)!0EIhY+a}8!Cw$LJdZu#TZ>R z#a_MD^LYmO2NedHZ`Eho?TFI)-SxS727YRA1-vo~&LE?x;=yb^x-R`&G=Bllk) 
zzjb@$+O@G$rv~>Q$S+!EPZsD#7qQ)p73(oPJ*YbYxqKRscgob_IC?w}!3it`k-bLh zY0yMlP3a1q005h311g*1g2`{3=ZuRDA(qa^5+ih$qNzjJtMjp0Vh-b<41_T$Hozd+@JzoWVV0OTKkKn2^MKL7cjfBosl zPal5w{@=fO_u;pHe)#R5zy13=U;g!tkKVs^>B8o%+Xth0K3Hf)$6KJb0Og#JxWWs3 zc!z`9V%f9k3h(QyAe)75pIOX=8mz$hi`-h#3ef!p{*$nm+1VEOcpFv>Hg@YYE^>RY zNgZlKfZNlVj#@gn{{@g z)>UKhR-1ygpe8t0ZAoK&nR;KIRZA4rJW(A-+Q1Yy(%4iAjZoi$CAVQI zY;21d%TQIT0*0BN(Gas9j(?CHo(p;(l5=^nVW1vF2SEEncz_-2Vo>*XS(XyPa>QUv#*>2D z2v~x~lj2442EMdbqOA9X1)&5tU62ipx<)61t`I}21_ns39Bc8AK;_@4G?b#G3Jo)B z)Q4Y)o;_E%c&Y2!^?_S=25;OPdf|BHOu|^Oo44m)zFInSE_?ZE@s(Hm zUVkv}&b{GlH_FFPrFZRdEnR6CpHgnx9NNA!)78%nr=2=uUH3;}{O(P)oRO#T|coHZDk5&Zrt zOyCxARhz?gp$x^5A{+fgjSGxa+RY#gu>Hv#S>XU4RLECbr5KtFM?)J)Xfj-@xEkCM z<649OqX5PV2~<9~1Eh5^fM1RZg9K_j`dEjK2Cu)|apzw4-mBSHUd!LPm%VW_cK&h* zoJ;AOw}-CZ7=HG-{E9WM-eF~`lV=S!$n+rhhNS?19bJK=EAf?EaIkbGmZ8M5G+4F) z&odK+_IjDG39jdu(x?|P%?Ckq5zw*(?5|*li^12lFy)PHf`$slvT$TNmQ2SsaIrK= zja1uc@N-JNiOuU4UcYkktv7Ce_Q~6S|NHx2fBoL)pZ)IBPk#H?FW&z0Z-0bH`}NoF zef`y6zWMqu5NyBu?vqE4K7|{_XMO&%Xy=+4moP|Nak;fPL%Xckh1n z=5kM?uX{uOg znija#R}Ms7KI(u4B7+SlLT_im8DOmS7sTc~nR5Yw7XmC?pmWtqJq;>b-+y)8^Us%u$JOyX6?h#ZxO|>8G$x#0hOAoaS-CR2 zU}19cRB`@98UpOln61>W>&S9rF^0|6sx*+L>S~^tz!4JYqWU&rBSX|mV^bU3YKRU0 zPfb%5iH2!l;hKe5fsw2ZfTRLnw*bzk99=A12h);g%)%^Wun=8mcfQ~U*gIHo!wbl; zWudnufO9VN8GyR8$6po#>9r4FEKJ(*_OkqaJkVS=Du|B>Ak2maIsO4|WRM#j0w*+w zAMa~9_43O%=ia(GeeuG)zBw9=r3R8%z+*)OmJncU z8@F-$!Pr&W#&mmo5+Ne>QmRc=Fcmkvn%s@4Ye!G4}k0?!zxcUN{-QcBB8b`|}>WG4<+e zqt|Z?9zRjoyxqBMm1*a$Ha zXhmsj$I{!Y+vNB*IRIFOsv06KcwUKTsPQn)j%%vsnyQ8NIz@nLPIJJ(D=9QZIi@HF z334Hm*uylf z1ON1b6+}Pak#iA*77W)=i7SNMm8Gp9ElUsdSokDls!}^vY{LpI08XDKv3%1nk}U5j z;1!yy;A9PK&$)pzpzycAk(MziH%4{lqymXLO$nzt<(|b@t=|e@7A3LzkQ0fy3^5P< zQ*#7p1&*lBo6z~=VzZ}SV*~1z+6rLS7zM(tJx#C|8y$HP8eCuPjZs1=O4k5;*-GT_ zk;Lh<`RgxtzI>~A?MCLp<>dK`oi}fey?%e?AAYy}%?Dd9UYNIPt*389)y7+RN*# zz4qD^LxPU%tXP*H3PuTVJr&Q4GwU_L)_$`AU?{A5Af0Qh}@!Hv_3M7_VRRBraDU}mI-FJoAw{}@7m|u zcObB9Z)DwP?(U4-6{(VFKu|@bRB0%-c`B{Gf=H?~GfJIW%{y|!edT(?{fFI8KTkjZ 
zV(jhP>6b5;uU_YJhif?EtX9C_4rncJV#H2Z`|y7_I&K+%cY-xzU1D$!7EogPoHVJd1Lh1(^Y5B zbZ(mUPfTg++iUH?d~p6pNP&1+AYNwj6e!Ff4uB4(%;sou#E6iCP+ZqqIlWfBW`k^c zMm)V%y>x|mc(Jg1kXF}TiiXruou%!4wY{TdJ;UXlqm>;)<;}fi(H@GYvCI)GGWZC{ z#-n!UyXq>yYTH3|B`f4sn8-uM1Nc#QU={^4=4}9^&E||L?0GUv9u_!;(|YhZjHQV1 zf8nv>zjVGT1<TI5^SZImRMIm|{+_ zt#O_y1`HO4WNi=_$dH%U@X1tuc@4X~l35I^X0h$wWh^{JSg16y>pGlUH!Z(+>-?`j zzx?x$4}bpg;qSkH{m;JxiT&l{n~y)d{P^Rmk3YWp+b^$w{rTOWe|-4kpI`s^$9n+V zKY`-@`NxO<{MYvYx&Qq$#&W;=fB&EF|IdGY`}^Ns{`SkWpMQG#%g4tbe|+@Aw|76h zyLSER?2cXi17pEhv(lE}s@xSaXTI35qz8FAgWvF4D3QYxmyxb8mR=bF< z&0}IW{;+??nFk~m16WRO7R;1ABFEQ4jD;^@&J){lB4@tTU!jjNY;_z{h;0l?Z4sp< zq_xEDsLUy^sm^0gd5sAV(p{Av3nHJ0Hkf_f>6+fjesF4iyb|^v~V9Ai3Z^zZV(dky?ke)Cq#-0iNr_xg?;tzWW2+tgXx zJ1p3hx-&KXBkUtb>+j!Pg0<#1e)rv`pMKi> z@#Bs+Z>FzZ%bY&djxBABtzPFETcT?1WBFoaZ>-cDEP|O+JM_ZPG?rL}QyK{>Bgx_+ z#v00chv}2c_=A(Q?o4&>AiZ~x-ko7K_tb>zi_ri@eMe32P|e^ZZE#7=z+_d&D2y-! z8;VUKTz38rTPG?r+lxV3ox$|M2x;hW-77^>=M4-(Y9!zec$(L^l6lt&&*^=!!+3pmqL}F+9(QZG%!`_9!P%Wd$_lr=rSmh}pu! zET<(19Hp{AwK7Q6glI}162E(g!^rCUzy0$1Z@;|$`NuDQ`r*a5UqAcd+sEI3^X$j(o_+k`>CZns{r$Js|M>m& zKmYO7-+%v0{rSg-fBpkTX#ev+-u>6V-u?NH7axDP`_21nAKqPk{qoG?hX*fT+I0Bv zVl0_cV-NFm{%W|#C7kxSCD350Z}+WOliI#3vwKfwV9er)lww`fB6G0R5`nTODnQ+;FNL_6 zSb;57Y>SjY{&uvqzJofoTD<>=?b0>>l^gDBbN*X*qxTvgHor}N|OD5-FkPCUam|6?I zEHoR7PO@{dUD=>rL}F2SxC18|r0(GBCss z4{>uVdUGcTW<9+$e@}I|t1R4AmPj+2hk2RBlD0uks*UbXRq7q(JSmRG#jA`3u3%-N znUNWjtl6O3xJA2jpY7;L&xI?#TX#~ApLV|ZqWAUNv3KvMPoC&#>E_VbqmXD|8= zANL$T9Xfw0dF@)qr7NwQW_=y~oKRh9Fj<-zRqWZ@c;ZC=#S4QcPj>9vUpKYN*fYrU zL`cCDdDrgv-P=pQ{dV(DKW+Wt`%PbeSo7@J_~k2Yd-sQT?TK%i^-Zm^&Tb9t*_)VN z>u&6%Iikf@Kf&xP^2E!Xb(PK_3HEu_m9_WD`bShtR_NEPH_dK0ZP;R(UMuSztm^Eo zYV4?vHW35$g@Fb#N{p#%qYR8oCzi1sSvN8p;tLT8!S zU5RFn1SUwmrpuw*Rl_ZaWzIpQP*AL&nz5n%BcfS4l(RUvn|M>kQgttF_2YCDYZ?Av90S({PpLTKYsV@*PlQC?U%t7lp!r-E($*18t8H^wsg zYPFtfg|AHRF5;PS2wK5arTa8ZfB+VjaH;YbYD{$1VU%#vKbQ|eawU4*HLHpH@cgNItUY}@Tp^;CO zwLtGAnS5o|7^D^$V?-2%VM-9J39=_eM6SjcmgN;!6rlC)3^A1{tz{@mC5~#C 
zqHSQmD9SmR*qC(9-wGcl6SLd|?UpEKYWf#5%=xb+IiN!vF3n#tsmOwu@ z(8q)7hqb)1A%1+62evM3OD}>}q!^h?sC zvfcqgmulK^GhC@e#yY^c*Z8gt)W?H${*f-3N zH&=A_D<-E*E7$tQmKx)YTr{PX>Zob$r8o96x(E5IXB?+abzHtYcKg=wt=s9Fb3JFy zB@Z0&Os|!VEtPln(?Y3YXQVKcEcYZ!t-gX-Q`OLfe95$7$+V%hm*I(#bT&M+tFZWG zz8d!5NsM^01ur*(plS~jy~%PUBglSW6S^$eYpd|PjQJzqn7jG_^ zS}i_$N_XXk_0l!V#%;pJ&SJg}N0kyWAp(aAX|PmTevK55fhyEOq@^i{G$oOyB4in? ziU3<(G=)qZk*O=fibz)zS$Y!3Sj;t+VFA~bOl28eQc}YwSFwr!V==7&k=PnunLlAZ zaCrL<-@X8k>;LzU|NgI^{`}*^cOM?VeRb!}tGn;t-v03J;g8=v``fQCfBpIS#~+^l z@a^MozrO$7hld!PW&!rQ4^Q8IdFQLwb06Ma{rJPfufMu_{p#%QJ%j0Cd$L*S3f0#Ec^%2gh+%u_6NKwP2FJb&|!VJr$3;N;L%f$>;vR)9rHEK~pKW?x{*lVJIi zInMxFzlgKIhp=$&09pWg1i#P@gK`mNfpU9_8*J2gIvuedpQqdHZTC7_{nkdWt=VU7 z^rAvqfvqWIPI*Cq#r#@u5_OtWUTdA%-{Oq7+FAyDlT(pRTf66Otbg@<&%4)q&z$b- zO!JK%h}VGIOf@uo6^;Uxmn3t+9|)xnX0-K*Vr#6-5Fl$Dkb~6RUGv#i&B?QY8@J;3 zA2z;xmHzVe==-mizI#9Q;tQCjzd6^r|4?XXoEu0Iy-`v)RS~KyjmApr6IF@2a&M?Q z9;iyyvpV{WnGxUMSfsPp?+PjT;+lm#mMvo=9$Q&kQe06$t{@?RrLrmL1xy)*qpuJ< zDy9BPO^oVm6NkFQz$g1K{OtHEW+IIJA7Goif(;$8eO=s77cmV1odBGsxd>VTY13@@ zDrt7OhlAW)i9u0(l!u81T(s3Us~cc0udGzt&vkdvZ7tQNI7uHM=EyZ77oA99{N8@YBfb>mj*{=@Dk&qmIlZ#{6Je&7Do zp~G$a4|J~G(AYa@v$$($Tzna|fWay9MO7EhY`=f!#G?lX?%v*Y?(FKN)4`S=K`05V zuRUG0VTAtBo{P*#kshiqbcvTD~eWIpvBsQjKmRSOpBQJ{v>;*q=+3naMJ1X?EwG zb&<}AGkOSWSAo<7Ak3EMVacdit5FHa3{%FwGHa;Z5w7+(F+=q%qmQgGLohzNLq{AE zFjo4h=LItC0*tK^;&UP`^uMqI(Lju&$@7e!g0?>Dwq4p&=j^A>*+-XBT@gG(h5@oD zpCZ9iklu?hR*s{}@{s!a7l6fzt}2951C}y%MHs*qp%ZkDu46D;f)ZI-YDm1V5f)YR z0AtG-_)m9TeD(WpuRs3q^vCak)_(K;;m04I{`M;X?}v9cpFTSK=>GBNPtSb& z_4Qjfw@s}|*0(Bj&KiZOP--TLjRdigAhbX}p~#jmw88#@tXshF31iW6E+DH4C>G{h z%XYQ2*tDHKYY9N$DX?I}w&qXtH5Wm^UC`JK~4G^h7*i15ZMubE^@LMF7i^FsW=x zHKRNSus~uLC<$y$6|Al>lck<&S2Hir%L{iXLIW@wfaFs&kQM0Tp_6R&{%=T&=?Shj z?n3PKbZ1#>a6lFu5Pzbtx$1}4hx>W)j5s+2FdG@-egfEmtdlmF!D;~B?&)IcaSfmcK`0hCl8O_yuRbu zv1MKT23Qz`dFmRwsyA!~L>*ryPPM^3cwI|nW=ynnTDM}gacn8TnllJnq$PSREgzeE z1=~Ni>A(shW26{}#)6S0(&0(w;3N$aX0VgqVOmF;++;7; zTT2ysoXHIs+t69n(NEp9#dGi8(6`@ieE)944?oO)|NVxqKTJJ)K6Z1ibHira$fRgs 
zOf@v2?;Dn2liGAwwA2--Xz9|foM~9GrY_YgfdpeyA-0T$C^r>I_4!a{qEtr|>+_KR z7TF2praTTNBXE@XT5%p#0G0->97I}$InU@LTl^IefE=V+eN`G)k<6B_Fe6C!7ZrZ6jX2v$~pO1;VOCgX1{XdwpDLl-TlvhfmPtoKR*2J z*O%|Vdi3)7+*hw}egDmqpMU!N@4vtKk3YZuk3YZu`|of5_UnsZK7Rhgw~wDcx%~Xe zg*UG*y#4ah%jakB+}eBn%Jy3~wm-VRfAvg@+F8ld;Q$+9AvHFurO)-7aJjiHNP;yi z;$Y(j2wj1}s-aleKmkXG;jn(bbkIp;X%;bcAO#35pb2m=^)>tXe_iaA4KXmxFEz+| z4DxvPd|9AMAE!AQq^?%4yEEwS40^gF?yiul1JKpg5pcGLU7aCETfpApvo;3o%^`bB z&{FR$_HT^%&*VtR9(+tK6+wp#i}{nKkxyY_UPJu@(ObL5McOMo-H8M}46XX9pf zeLL0YEZ~S?@`x&cUTUo@JCY`H;3O`b+=p$%2og+@Ql$?Jhpq^5Xo9!zkn85H)>mJS zeEDYR#mmh7hblfEdWtw%LD<{4ca zUA#Dz9%^pu3R`_jq!loD6uJQ8u~HUG!mQ=gRME*hFMk1smWn1N6BKt zmf>=qvBDhYc$!51ZeF5K9P1bOdpNERy1Nr#*4M)ZTI5fcZc5+?q97Cj1U|IzCt;$7uXF*dJyFJZ~HCBe|%DOVF!EwRJq+n{b zaOFD5+KuXsTNSfAbcc`o&R%G^eQW6Pqp6o)tbX%m)w_2yUwt+6^2KMTPmjbK#9~!Z zFk#xYb@lzZi=RKadi(m(9ot7Uqi#=-V)qr>{DfFz)$pin>-Na$Q<*(`o0d&Gz0oqg zqrl=Ru=|Vb{z7MvWbqcj|1V3P&W1C(2s%4XY|Imz7OCwJ6yS~&L>h`>O+~4W@}{n` zrq1g6_Tu`^vfg24OE=`on%sCa(&deordmb8ILqXyQs@h%TD-=RZw?f@VkM3s!JjDV z=x49n=(%%y@w>NMfBpI3FF)`3_;JVUFIV5cKel6MthJYI^%9-IlHOs-=n_p=MiOhF z1>-dhZHkpM4LkP?jxP=M444PT{Jn$LWV6f@qU$W>LUkTbmB&@$xhjmc%5Y3+9#cGj zl;yFcIJP{Wt1RHiaAGx1W6L*tNP#4^sY4b_aKt(STbU=+;)GfpU%x0zET#Ea6Q7W6 z?#uD>J}$Vy#~$Wk6c-r}Fu)WSU{(ZtwJ0AM2H3hqj(BBzpQ52lBsJyHW!WZLtpa%L zCs_d0fCa==MWAa6fVFB0=$ZlyQ428jm4M)sr6;jV%ATjPV2j1{SPtfQ71(f}7_2@! zS&)tIo{!Z+g9f~Lkv2PxYs=RsArLauWOcR&ob3TuM--5@D{O1_*_wUM_JF%9?CuIX z+ak{PfW0MbZwWfvB1o!569@Vjw7R_Q-ljhP%9)1!2YN4G9KU%ZbL&>${fB+G@3!B* z+j#M6-R^y%^a$S@8js4BvAIefSIOh4_#7pZ zt)vQc5Gb{X6RXRySVD9wx*cJFO?@%ik>9}oWc{hqJhZg_Bi*^Zs5wqCx{h!?8! 
z8oL-MTr1X8W%ZPpoMpi{r!#F_vLd#3|H#$Ln~xn^wsu2HXWHVAbHv)LAi$D=F^nnA z1M7tZhYIYt5}edfZ1L00o=UT)(iNsQw9C3P+JRA1xKS+95x5$HL|equE@CS37GfPk zOQNem3$2wE)=2S4G6gX~N<{ETmE~890br}eNL+7?Qo%& z<l47SPpD%+wWON^A+&T#{WYpeyHT%b1#C zP#6>>y0VC&CNtDzy0WN7Tv*A^XDUj9&92>h)_?ugLp0L$pa1&y_upRs^!@!WK0p8X z!Kvp@&Od*80o9AAS6{uj`R>iFm(Q==yt?=DxvdvY?z?$?$Ai0vKYw!M#45O!MQhD02V>yyOP85Fle#SR6}(V`*_1r^Pcd7Xb{rsj_`5 z^j_q=Li)g`K$!XUUb#H3zYNl1Uz%ma3tgr5l+4rWK@1kbDOMpQ!#Z01=(?*Tx6C9%b z;y_v!>cM1z0e)yu78=ZowEi^PpJs=8naI#pKOq3H9b|_wZ8k?}b1)kk;Gx_Ddm~+! zDAPyFVMA!3(BcC{CE8FqvRJ-rPwdLI);ssQZ{O*<{{Y6Xo;+E6{o44lm5#+zo;`az zE?$_pb#uw12h(?NFFSQIGcsxH&X~7uSaRvq!KV+dzj%86@%=N0_Rg$Y-Z`?^y?#^E z)yu2y%&ot5W8KB`Q&%sqx_))l=~Ii>Zfu;KiZpeo;JzEna#5Rc8cTu3LYAltkOfz& z$%mZ|I_MB(^9*D7Xk%HTr5gPWMHeme`Pd2r&K)gp?h*xJG!SwvWN(n}jj&A4N{zV; zsHQY))FHH7P3>u$C&pFT3w4e{y}MBFpxDDaS6pTavbl01LxRhh zqXZ_RJ4%P(qxx!pyrz3VGO}2o88t_eT#lxYt|$;1%VhQnrGc0OSkz|9DxwNfR;QK2 zMXH2QD@SIk{9L<=BEh3)YZck2C}Oa-vQdxH3hxg9DczX5a^D8eu zzxMF{se88%T{*w|?CA{`&aJ<3W$)=ztGDmUEM6XU#hBb&?+I(`^UPU1%&DmeU^&K5 z06VX{g2KSm0Io7rIHo#}rTzr4=>B@)^wz?~UZ`cposaBZ2(W^A|1Hm)&$p8mff`qX z+T9t9_Juq>v0!i1(-U)dg`wL8b^^7n*@rL|;i|hU;pu_bc)BCGUKpagZ9}2ywe_b@ zk6ynzHa9nT@lwmFbMbSR!dGsDFJF&bxt=_Kq4~2d_U3N7RDezI8@trz+0B)&SOEiBYpxh!=#LsUVRR5Jus zOlfV7%GPpfsB8*SVu8D=1!9vzVo`}r3b9EcG)P5S0Z+%|tEm9C=4z#nYECgi8JG@; zkMg1;yjX^xObe5Jl4M#KOAA7Md?cm%x;Z(#Man7!7hnSjXtwLqvwgf^e-;=+1Mq?I z0X}lZ0?f{*#K4E70aO!X{Q3z&VvHXg;ABS6%6WiIVCE7sUfo@%;vtM9>s(MON_A3qs+{AA?e{S}9gba_K`qpd2OI(TU8{JH6~XQqxF z8``I8RyOmZq#po@=R?aYb<2B4n71a|12T)!^Xx_un=9otibN2 zj7(bh?Qc48uzAI5d$Ofk?|{r$vnM|r9oYqjgm_bxKVISqm8FMGM~{u%ncK8?|5$q1 z>x_#{KAIy^n`)Ibb*MVi#>uJ3)XHRIr$wYJq=|4jm_=!awqTVtRPIWaH*|6b#`JAH zYI~4YD*+1@z^uMRWXcEXs3B$V*-;ZpL9A8eGgU=Qbs?O~3E15|A&a*%bezBI2P`F^ zhqY38BS%N(>MQALVisU!7`IdvVxX5}t;O@)_1^&&@l~c4NGt}bpX%RU#xay|^khI+ z?4#%^3}BV;?dAGqbX6fuS-@10Jz?!<8f_}%T-FVEb&edzK1 zBX{SH&E42PcWvK;JI9|tJ^kj@<@ax|zIk=|i_b5?57^%6T|1WU*gm}Ov);+6NPVY9 z;je|gBG?-7V*#AH~_k}XQDY-WnMO!L6wcB#2#Yks)Pjx$R3ad()mn- 
zY@xV9>;qYt^a^;6VLSFkfmgGQIDxfH9blRwIzZ8`aG*aGO-BMf;ZWM?ZTH$+wALnz zr9lH!w#^0@+Y|HlCVYKKf4VN1sSo!jKtKR(0l+!X6Yd!e&1`JCbYWy}Zt&(Dtcp8+ z+JEk1^yaOWyZ754K2G1c*Sl|j-N>XtZ3YJsO_Il!74S49v9VO)D3kjsT2H0KU#4-E zN?oNgPpLLoWsB2|{u;fHX!MbN4OQu3?TXcoOZhVDSwMv6Z6s(Ht%8V49a25~TV?sUbmq zw>Z@=X&g{A^vdeeLLifAjC}TT{Ao7oMuDfRlZ)g6#9(vs0v!940Ey`=NCx}m;XY9? z4VnO^^k$oDfj&;4pBo?LwT&oR#>J^IA(Ceo3bVkjhk?z;G9tJ>uz&`4(EV+-(Uz*o z<(dNr>Tk~V-o4icQ?yS;9zK{nb2c+M<>$)?l{7q+OW;XK5;fUiuQ1psW@ja~(Z;!E zL(`^>{d@NfA33_@_=)9*_OIHxeev>@4U3n=H*f9OINP*lePC!pKQJzBPYW8mY8pBj z9cg|jNwG&u{qY)D_2MWLtAJiI1=#^KnU*NkFcfGy_6 z8C-=8?Xre;nch|=R%Mq+LL!Xlh)t{>DACU}CDq6ZBV9?^r zH~Da`NI|TboNBLZ@1^t((=wx+z7bZ>AZu)?X4{U$?!EAP9ZU)})=Gn;qNPhSITe}t ztY!V?w!H_JZ{L+^>9O)<1v#yNu2{rSF5;*c0Vj8o{dF`*Zx}YmnnVgyaXAx4_$zq8 zG(axUfIh%f7cw;@mZ}JlR#gZ9t10HH$=SPmV4+0x)B=Z9mYUL30J#`hC9-v7v=b1q zV!DbzS0YP5an2r)C8Y~%0e-@{KC2!;PWb;qaTm~99U0*^5+4BP<@nsB80a3@x5`i! z+e5OI8~PsIzwq4$&|1Iy@buM-n@=B|cyRCVtsDDpUfp}^=AK(O4&1+e?9shrFP@$G z=KZbjzj^f4>$@+WT|0kn!`d~Y%a+vl4*OcV9f2fY@2o}>1PEhMsy3pmJX1C_05MP0 z>t|=Pd9wh^P(m?P@UT_}?g9?>Q^!+-dn9YY(qugbDE1I!UTl#fvcDp>oPElK;!x>N3&(vEb_Ujb~1GU%u3R@p9AA6X8S0JZH}}-Mt6P zjNZPTe*PT%mQ$;&CKmxko3dpELJe7BDN{JBjebZ?RC}t_t}3~!LgA}2`fDr^x;4r) zVMlwI6K~`es+BiBiX_XH`IiZRDYc6iI>~_WV0t9CD?o7<=$k4 zH_mh=AtMOZhhfPT20uq_XDKZVk(MgdvL#xcNGlL)nOr54gO$V}Qd*@Zzt$WyTB0^A z1IMoOo77IV!ln}F#C#q0786@$uQ5a!u4Z1MPgs|hH1vq;dgZM>IuIz*%KCm$eYZH; z&kqgod}$_Ft@^VbEyQEN7Td)|S^?4)082Bx;D8L{pS-MLV1O6M$iTGF!_G+xp?)5y zvctTFF=g|(ylG648dV_18(E;uLQ@RW8M5bKLV%zHUz*|Xq6V7EdWQwGTLTv^bX>VS zaC5G6Zf@}0*@4B&9cC}1ycSgCe=X-H{$(ucwYkB)fFWtnyHCq zsxMZqvzAEI+1MGD5Dz3)TZADk4g^nSo(Mx)bhFQvW_LvLl^_BL6*!x>qNPinYLy@X z3K@VAeMMNu62tZ=DhE&D;;LYrGf%88)L02d55XQHxnpE^l;8;AeF z@{ix$`T3_u=w*9%PYsXPH+EX=VV>4alUT?+9jxa@V|V=dkysuUxr5sJIsX8nvPdPM ztMcf|MYVDmj-x9f^%vQ}J&l8Ri7r-5Csa zd4qlNcqSen48;ahS&YR@2;qLf*nx@A^xDMXBORwswj4j1IC3nw??B+x>4rOZ``^Bu z{_5SD&p)5Ia4xfYoy+Ef7%2E#FUB*ac>;Z*%v_?jS7}`ob&!T&R_(9R1*zsR6EGIr zWrIS1Jqbaeffs1zdYW0j7J8tW>uaKW8)(5K)19Dr;?`<;$Wi6AE&w# 
z6u{Ux$C_XO%!V02XhUqRho!ai)OPG>5lQs5B7F@fniY40_*^r{*%qGXyI%kaZ# zX4ce-u(XSZjIuc@i@+AZFfBwvfxl0NPOv+DZqCrqE!VH_MW@y5& zYR0{0y=~bl!{}m8*8sVxry$XmAF2nK^+rk2hRTLcZmON_@YP7v**I~bnkdzg75XBT ziKw%YG}i1o$c6jOQK7fUv^ZC&rlU_=-zLumFp88osV1K#24gBi3QZ`g0MNC91*>*g z(tMS<)Z{BO`--d~61F#-5Uww6?q;V)WW7VurhaB?hBdlaGPPQcWksqtZBx%|Qccfj zI{G=*P$gGVK;we4!Icrk+H$p(YVk6BF=aR@*4e9&G=UHvJTt0AY#olPhLKxmpt^6! zxol;ktKVXF*VJ(HOKNcyY{(p-D;CjI1Yoe*BC)N?5Sov%@<9ADS6i|`q$MC|oyS$; z!cN&dlLWX%fSJX>d8-wood{7mx^lS9HAu z8~X=h8$Rnfe{RFm$EV)Ez4XodYcHRlJ#cVr#fsj>E|V$9mfEUVszQzi&s0MgV|D|q zH5(_G4G=JA4YAC5sWv+*s|00$p<0A?Ow?i+i>PeYW-vdPfEFB}48w(bZ1#MC`YN(Q z0xids&$W7E&Mz_Dk8SC-++AXd+jWS4;c#FhN*x4=d5~O~T)K?;L zSEvHDfo6H2%ThOF4E8yEz1BcaIGCvq_QeA|z+)qsx~9R%3TPTQ`1uJad~Ayw(=#GTudXU;UQUgxs=Nl1lb%b_jHQ7sav^X0}erL|IRuTnWH zwH}JbSEcn(3_%w9VY5cWrijEE5j*2TON0xRWSLV8*x;7rI}==2oaRb0U2&QU{>KLx;3ku}L8{Nu?&K#woCdq^&)c?mH&=> z%!6$#4OuY=QNx2gqzQvKFz|`Q!o!6ai@hCrX#M?6e=o(~Qym*%4K7nmOlgKDXh%x?3o-)vdCQMY_8XJVS39;<-C)&831Zd#(HI?+sxHP%@D zWlCcaT8opr(--OR3VpuXT!3P?)n>>UFgU9t^@4Dt*cGTXI4X3u5}l2lXci1j=o{LF zQVk9TFv_$1g@k|s3sx`&B4e6On50l>l3N-Wm+P_*cEH)I{NH-KZ`CV(S$^r zgh-R4iTEOp7H;IZ#xj|sM&hiN*s1|wvD@&!Sgqy(774Q;BV=s=fPn~OG2OKgG*T@Q ztL%Ih$54V4R18kb5VUel#avS{$B406U1=79jm3bmm;ejyt$Ac-=}9@W!N1KcWDU1D z;yUj=%NcqZ@*;_y($M8zw{CdXw&lxL3}l8I>pGm?n9<W<2k+k6ckI|y-)JyUFIRhMJbfwj3YB=KCJ&I%n9U>vNfs;A=USM@0y-+z)uPQ4 z*IGrk+m0|6B%OK6%A2s-DV8QRLm00P=rL|J)s#G~E z<*rJ#hidS1%mJ~%FVu&420sTyfvG}NWe7HLLvUpjiwI6|JxPu?#jppfFu9=69w2&S zrQTR2!dP>d;fOFyQI(jZ4D$S`<##-P+35}N#4 zXG~`afpnmDs%18<&>$5Wq!NotZjo!85_jC#H=dkW);O`OZg`@8a&i5(?L)hEjcwi8 zvvo)7x{ZOM2~EQwJK4{SV@F`N{oH6DKhnEkA`H`P5yk=xrg=G3%uW_$xRC*PG1#%d z#|2HU#8|c|7RYc>5MqwI&Wi@Jg*-MZ2>0*;J+--rgFsJhBF&8U(Bi$!NUGN0DP-{R zr3|>4MclOlR06!&P3awU@7Ou`ppW2jA>mVn#Nr1k83ldE#s5zO&xxdODH$hYRqsQ z5pwuf$n)6BMNDNLM}wCb2s&GdH%#xz*e9nV$yTk#QqGbW@zrFJsgkQ9a6peOWU7cD z2&jk*6_KeTGBD&s@+KYAIdjG;?3{pA;ev61BD#ivu~wKGU>gd#7Jx@4*sg$-vW*y8 z8c1vt2|(3c#4(aMW)d7taLzT7v4>=~5!_jD#NNU-k+6p(Oc1~nS2SarWvciZSYw2% 
z5<+SXHs*lMQ(z1hT`cFoVlRr2$W)eUY`l1*Bbc;k3>>zklEEj}ut`-kVtFmToR(kB zA#jx?)&Q@m+decI?;j5*+x03(t2|xj zLgBth(_my^GQMGR^Patp`wxWo9B^#juAAAU*f6V^-C^9l-+tboyxSDBTyW^~Eae!78h-z!E5QMN8e0a%;HE z5~?)A5(lO!L@|eG)-cl?WSPSpON48W0nECSe0P%Xs$)BnOh58o5m^vnt_e)5>jXom=CM8;8a_Hm)7pw|(vY-K&r8-F*Jk_6uic z&z_z+c5Hh4uJrgacWb|}ZjjeJDsC9&Cn0uQ5Fg^#4~grCq>%w0^1cF24sb(hK?s{5 z2xPEP0x$`PF;y0TH_{^rr{R@|$|4>c#yVmswEz@}48~pIia0Wym1lEo7Hg|E5-c^~~v!o7cBI zesJKyy)(D3AHIHN$MIt;H*f7-I-M9AclHcu1Mym`x6B`}=*sX$m#8O~LxXa9tzzXm z)yziAsu}y>xYXn(DD*gug=lb;id6)}LOEhc^fK7WJYjmIp3~MVpIQ}PHPbXO=I!V+ zwsdLgnxs}QRbwtx86o?kv5PmhR3B+(L)w)B$CbejcdfNds3Cx7Rf4Ar;kO%NqUktt z4USCBE2ZKpX!+#~d<7F}F}meKL1bjD{7Fx4iqNJS7R z@&zh9SZ5V^VjV$kE^-BGn>tmEoob7#R-`P_*eKql#2JOgjl_y$sR~gq9up0+{UZdb zIU7Lkt{-trtcVNR{e=t-0SykYjYN)##IumOCU^~oq9mT7h+~2iu8G986!A?ZTuTwp zTna}E8D4BD!k!|bmmw&Ig6qW`6DivQLskJyE?6iFpwY7mKJI@O`WsipgCM(F37I9W zr1MJ3s0Aey5d4d)^NOmnf6dEk^ULV@)!YKErc7d`D4bM*xjZ}XV8XHyEbtw2t%W=* zLF~X5c4JXm9PAc8hpX9kRZbR=EkXrB9FLVc7b!e>Di01VEt1)YGFPF@Q^KB_cETf4UJtlzTTwQ-Af?Pt;zGrTn$MH{zhckj0!JsCK4E_U=}VEZ1+(v`eW zJwB8w?#S>5#vpjGGp!k!40dM>-Y{KhD3)sA#!9Rui*=rWs?a2+Em-Obms&!_ra+}BL@|bd zpc;a7Btbw0fXX_OP@WXeQO|QGxb`}hJ;6ikGtg6xB;OvDSwm8zN2&A3V1=1oF0*Mh zZoSf>(|Ps2gmq}VW7E3HWBX>$9@%yA^yc#?ww*t*?d0**d-sp6`>d^Jz#Qx1M|uQx z2oI4`bPheQj4YVpMf>>R6G#gX#wN!2 zu?%MJ8iptXB>CnXT)Fdwsw`-~SmbODV0~$Zw~r3#UfnE5L#@P1sutyyv2dj{&@hGS zVt<^|(Wf0>reCqfuxg!o-A4D;9i2yxj2=6-?9}l!7tXD}c4fnf6Du}v?P%-}np`B2 z64O@|zzI#BLSKv=tRpve*7gnaRsDYF`OnV;%JI_dNTS}`4;##__h*}yF}zFgmU6x>7gmx-a@-y z*_vzwK@MFP#yO&Z$69$F%y6l)EiDvifE)-YlQ6d%k)s7!KyJ)4y2-JWaPjis@#9M$ zKivBE)xp32?ec&8^ZK9vyz<*Gm)^fQeEa6+6DO8zoK3I$tatIsRL^KA)+g3PDPnJl z#9Jux5CzVB#9a}<%3Of4h||jKi^NtOWX^bs#C9Clf@2x+B6pdmNz*hK=^IZjUD>u~ zebc6`T|0NzeKs3dyTP<-oov}k{?sb*>J74uTh-h5SoR-r@7`~p-Ju$th=d#t@8iw3bq(0eT;(MCLNNxdM>ZS}HM@2yA6yBm6h#TT20B zEmab8rNmsRG?yFPRJV`jiC24Ll?Y?)A%Z(*as(eoz|>K6(~rv4$EF z4IFWnBTly^V0{KA`LgXPE>aTAVUEEs)OrMJ4^QpMw!_qRx!$8Ocy)oKb0Nl_I=JoV zz6}R=ui5W0%PlVQ8I|T7RLrp*lIp?--XiO^6ytC5>Z(ScaeI!*aH}Svegw 
zGR#F7o1Klt%&wrv_5&}?$_Q!j&?W~3NP`VyZ$YdT)x3*fL7D+*u(Ciu_uokDCpO#7 zvbQn~b+rO}8BIwnXG6NO+*s0?Hcqb%Z<_UO-V&VM>ff^6vuTUxv)RD zcI_G7yMJiIY+Kh9Di{njIof&TLuqfG54SQp?u$%>~ zS$Z6TWw8pUx0OYb+>Sm~&ww^PWF21`>P#E!8fBq4G{|(=o&}y1N}Ji5XXH*if9meUH$ zYl##V_`7n89t842nU<`!7pskAiH0E36gb18iDii+hqpbtf91uqThE`|eD?Upqx)Cx z&Yd}XboZX^)0@_h_Vxv=ewIX6#Fl{S0v@TsIS;|KbKX;@gV3mO{k+ zvPU3+7|dFW&`Az;=g+YZH)CjQF8PGO0J9jA#5gea)k*Uj!n}^5B-YCDIZlgqf0wZV zW~;cQ8a}y_Lqu>|T%AX%T!i!}gsv#fyOfTr;6lm`O#v*OXD$}l$^^DDp`%n_Ckvcp zk*8GVCBZ5IC+zM)p8tgaTPp({;D2H)qOvr37KBkti*L#f$)Y_npdqO8vd?NDc~WZ1 zbH{7a!=~*!JFi|@_u;D(|NL$4Uw_Q~>yJCwaqf@5U;f+23tzuI_WbGYo7Xp-JG*k% zo*@v0x)t(pkFXGY zEMMKUbYy^WkPd_ zz)~tORSHdIB2$G_TP`8 z4i6nWmN{~y{n+t_qsNl_4>`B*v22(%uUKPPw#u-2opW|u^Zo<9Yd0j?d$kTz zELXiqq{)}-h{*gP*A=@0?1m07__g;MBkfXGGY{1C?{mR8drpWXU3+q9{SdaLhU9oFgDAAc_G|F?)`uPj^rE^z?L3 zPfyRxUGulxwcq!vkMf+Jd)Inbtq&-|kyAgO@7d4ZTUi0lfWikl762P4QTmJ3m1Wux z-4JKNi7_>yQcb9g1T1*~utBJX!;K+4GD*y;u%CdWBaGF`IRl$>dqCw*dxaqPoA#Zvp?Fu-nx9XwX;{z*dg$Ri{UlprkTAgvs(zM zDlgj*U~4N`Kw2-$=z(MKvTa_bqk`p+@S`=NWWAs=RT7AoV!I%aq&Z_uM}+5$i)<0G zJ%Td>QfEZwixP2JnEm!(s64qw<{^zh^kniOxKb3;jFCI`e zu2e%M3ugva=0#gqe_O6vNdPvrLWO6{R`(O~6()G2Tme1{G)C=UxcwoY)flRYl`^bg z@t8y$;>CVKWQoe0t-pFBdFp)C?gO6T z?~I$bI}aVLyLGeg_U+*VN4i(6@i#3oWgC={l*ku`zkSg(cgb?q=Iw#4yCBfDezR>w zzp8CndAx?^tfJT}h+zfpU|B2)i{M+k^!2UUsszs!EOPjZ?Y~bACUUB|P^y-be>o=Ni+-$vet?kOy)=QT=Z{J?^V0_(~bA27leWnVI)Ci1) z*RmL1$aDdEIZKLIVn|H@U=d>hwmHla;1w5(m4zZT=v#@VNTQ=EtejBVyLZ>Rk?Ut( zJRN`cdiv$F2NPpgN3Ncpymx(i;_~Rtll%7cxA*w$-ZGuD2)qI(2z4++jMPZ2uBR`u zZd2Xbjn&K6L>rfyD(h5oSJ}@(r@1l~)V%O;04yd|$Qng3#I9+v!dDEKttf=(JGcaZ zQ(c5+jI+%tz9G)iM;Y1>=ub`Pn*-PkmSMmGV?%UZ2yVuaxJ8Vu0u8D1(~*A0UM^kb zr)vTXB5Y8x(D{|i$HI}>%gU`qc+5D=tI?GTw55D46`KYar})ntOA31x@r+rvqL8Bo zmq2EvseN2skOP2?GEH$loD|QI7TD7~N1AC*GOei+3|L|YtgV2Y1Y2$?D7OMy;Q~>Y zc)u+-hyYc@@W27mfQ48WVyx6rfIDOzk&@J6>CzRpyLX5G`FA6K|J&pL_$#3Gzy9L| ziP``9kEwtA*Rg;3!|mVy<{F&eUii(AS0^WSZrYZPwu_zVLZY@6C7^0gQUJM#vF2Ej zH40G0YMeZ#$^uJ}Vyc2jn9@(RC4@Cgt-bxx;qPkJZH^3X2oG=Z?LCk^c5?C2<2Cya 
z*X-C6TesP}ZnJmCo~i?fqr3K1wRKC9b&R%dY2RAc%0Xk-O6}kV@5$437caG+J=bvj zWbLUl*>e|aPoAz_)@Q1Sav_RiV;bBrB|z=v={#(01zS_W)VY~@7enV}8r=+lu!rFZ za$*^1MySk`frw3UD^n#E5t=K^aD=($un??W$Ylutvmpp;xufL}l#NS#aj`E`jy+p= zGgS^JqwrNre7OP{&Uyip(`xuoTmy_vSY!1S9bL8k>$-;5EnPX()X`O&X>wOp>wRIR zy+UrSkQyra8ZS#(QKAl(s{An7(vhaeJ7LE`!&>d)0abIKrfHRloTHUmWg)y%@}gHo zFg7>s5YP0^^r^6F%^-&XV-`P>h)%6iC0EKZW{EilxvVTEFu~j2@&%t3M6xE)2{Xn! zSJ^gh%^W%2HagNj@nCS`;j*!N9k=e*U%j3=b0M^2pJDS3u)B90Oz;V6<{1 zrTL}-aU?}N8J#07705~exr#yoO4y22MIsGVq%Bn#*^%ms)dL-uPVbwUy8G<$z46-@ z9*&-WJaK1w;_}1M^Ea;U+`ea7Wn5--mB5@sBhf7Esgw>3*X`fed+^}ut-G3*^#y{d za$|`3k6T$$0ZZ2!#1LZ|lWbFpXNWO%5t=^2 z)JN&M2m>x6Ond=00x*`Ttzsgg!UcW;JSW7!$ijKq{ z?T%b08X9GTmhBpgd75Ij4B4>Q77zn(*-8pWS;SGnesI1Ht+{bfl|H5}$T3BD=BU7w z;G0u?Yewj*5je7ZR}IgXVOiqE`HN+V3YQJB)KVa~6-dy4G92E6mQok78wf2oM@_QM z1-jxDwD7t10&Ewky|B@;wnM&kXXwzk z*pfX5!m(PhGeFlnOEfm7+R4zm7#a^<>tSlWERBn)@i5@g#WZ@EdN0!+;8Z0Ai3UkD zORGvTg301wl3fwwxWXJ;2;2r>Y*3uTEY86S+##_ugrO=U(2DA4K;5VS9#iGPjGRn) zRc4_l*B93iCuR&J%{9#xb*;h1wn(PQ9j&+fqGn%M?FcCBl_Cstu#}@5Yn*F?X$*y) z47F-8H`XSs?p4+gK`U#XZGRh~K9bm#Wcd-u9- z+^)ZHC3W^f{O}3)@D|mEZHleC0G_8#FW!H6Y2RSHt=rbvu5a(QuN(+%*a%VK<$a+< z9j6|a3t09aPMkQExo~Om=`;0v4qm_W0rLd$-O{j9q*@dH&wr6IU*6JA8E2 z(Aw(Cc)8kILJad%!l;C3R=ssg$B`rbyY?>W8;n;s>D^JDG00N*5KGac3`MXMKEfYR zRS?oNPhpl}TYw}j#w_%BRurl$i?rcVL!4zwvGg&9He9L=ml2o^v-A-#X~El!mce6$ zfmw>di!bmK;G445_V5W@jorEUlko2=T26kvS%?ri6IgxT99=s1@51JX4ZoO0vumViF7P z7XWf1z~V^}QWIgo;*RD8_tmv9?}eCZNH%k!4`CZY=~`q96r@`OJNIO#CO3b0d*UyD zdhmb#KKnoa{o;TA`|02Red=$29slc}N8$YI!~gt`hkyO^*q{I91}qu;u>bwrosXu5 zw(pKaT6wNyp#}K>SgB&zVguGz1r~ul3Vy5wiCAMGZx=aiSIlh(^A)IrCH^K^%Zg~v zK%#9;tYy$YIGosfu;uWvCENG341X6L+z=St06KQxq0s3w_2(}o1~(d8y9BG(>JA=B zZ`&Q{>C+5ua9z02c>m#&$;qyVQ!P`EIwvNVJf7(}ccHFni44Lpt}@J61)A-na?{lw z1Xwp)?PjVyG_8xFcQI^UW*`oM*l3o9EZNdNQ{XGNTEjubtp02%qqvhh2^rg|sVZAfn3)^PgF;MmyS`{M`Bo%!y&9gQ&GHCB?UXLhbou32y1wkx!Ie|-1; z^oFheY@@;-W;V1LkDXX~=gzh}ceWimwruUj>TI(joEFAvIZd6yHS3-G4<#>NY#teH 
zgFnId8z-h4pU*A@z`h^&{AJVEuRFj0y#LLsJuhDzn16M4{?(b6FHSvvbo9oJ4ZHTY zEm=`v^hku-GRO-_DER!0vGDR2eM?<&F-KYocd}B*6hZbtP8Y~ZL<-opqt$=^U}cLQF@#B>i#`fc78?i{&bkOgTgk#d2Cly4>TYECw&Z7m ze8E7l(Z$9xj*7?_u;qnf16^)sE1Vpqo2&A0)jqC10IyI}oNtSZ?HM?>tk6{>aW_bO zO%i9d$Pi~~s*0hR%S$0S39DnR1=xBe0V^?qngw&fMmPowq;-4)Sdy{C$Zqt}sx-5I zedWmARX=<_`upEr`S-t!|M%aY{P*9V{`+6X|NO^0fB4>Nc<<5UH7z~TigcknUTBZPk_*tNAWgw>H3#89UMosSTz!C8 zk4p~h(pN4bd}iMQV;g%aI{ULr)+Cm$iLTw4-M*)J`<~{FTQh4nh1YHhZ`>B#xi@_J zY~7V>wU@4@H*K?aFX!~H)f_&WJ$NMDJD?lbV83vo@!{l>Co`STXO}#mZGJq{JpH8g z%=v7~Qi;8SYIGEK|WpwI#&#OxsfDu)4kp|>~0BN&@e1d`HFM(#_gf(eB$ z2D;Z7;@c}__DYe>&$s&cHV@R@j9!+}#W8reOxOC=+MKg$%-`-!+69@_7E-c;l+Eo&z}2qmej{ndqM@=D(k>u9+kD&M ze>;g4N{m_1v8xE1w`HxpdA+W4gK6{jz>#C=Gv{hAUP<1#*>w7Rbn|vCUj8!topIHW zW!r90p|@^*H}T-`^O=kDuO7UAGx2n45DNU8D^rMTfJDkxLw=a zX^p}_LhbN(HPcfkpG=>aJqAaCsSV@s^-+TiMv-h)^GOj+LtC!$-M{7 z6VnaPU$(t@yZpn)RUbbOfBm}iryq}g{e1e}o3n4`&cC03IREOw%NJMYW-mS*KY3v9 z@Zye$Ohf0x3O@>=W5pD<`c?^wCzyY3|`YJy3l%_VX9#fdtjFT}&m4w$i#T60ThxVp*W4odtHNvdAo zveeny`a7{i!g>7z7lxJ)0V>!XOS=+O{Gk%K5pA0=Rm@Jk1%0&?L^W1NjjW=wsAg?Vd; zfi zeW~2RR(ZI(3cjI|Z>Zv%!U9WFXh{gIDWNkZb=64V)QR2o65nEJutn}|l$erim}fU8#I%~}`bV;1ZMQpBPbTG)5sfJ_(khyvG(Uuwfga z3Wy4f&6|q|Z&niByU?>%Np;qUGRt(GgUR;(RPVZ)p-nYgb~bL_-mqy~-KK4I+jrOQ zKNLT4viADTy1S!Iw?=CA9JH_K6ZG~=_8tiDI~eWlSFc&8J#i{|>u&A%M8m_WhRNyr z2a}mwBdI+HJ&o;rS7jj-ooxk5N1@zaEO%0+_7a($rgXvYNabLtoeY(;OyibE{0Z!h}a$#IVuHKzrgJ0SOZ*h1>fRhLnY3`(z-cD zH$&%Q>D_FDn}H6?Qo%M=u=IY8p@QXz2>sQvXq!CMrK;&xrMs2sURiywth!%S->0Y< zP}dD=st1T0e3-et+E_h+hi@CPQmr1qVFM%CgOFOG$)NVG`auK6Y)!w3=OOyCQ19sW|2v&iTuvbg9rFT>?$xAu6?o?biq?DF{7zKvTKXB$+}v?QG31e1*H z;_{kiS*lK&t}9>Kt3G->d*jyPsp;0&^WC35uloFD`2B|sUp^i9^$(}Me!2GLM%r1`2Eiif57B@SXRK*)D4IQp*n;plla0>uVf#Q0Twu-I~mTH3~2xwGN zyF#7I52Y@`lC7jdP9RjGsRA=r6Qlw;<5;88uw+G<(lSg`Q-W@cav+TvEk(R7Gepa< z=!H~mh>jVXBP|PkH5NWd(86=DxWzx`LL)<9p)+^ESoXk}8kL8QcYZ0HurvXJmsJ9D z6pke(v?PW0q}Z8}I!&~m7IIdJIF^Z! 
zuQ<3xn1o-(CEd zKV13UPXO3o|9I#(KkfU&?~nfT@6Y_{PpAL%$MZjYIdE@m)y}=a#tuO^i%`=evGLY37pm5MC+}X)*|gQTbDwwRfO6%48ZXzoGg>_| zo*lhkJ$gTV?ox2mHhuR>S#2}7Dp~5PEYvz722*Y?M5Er|Mni7nDIF}8lcjbtH1;yB zlj;hvV;ONg%MGQwV#b1MWog`?V2vJ@&dt=hxCRe= zU!9u?HnOYC;3~8D%bXE@Rf8teu8uE}#M*?hPEoRxnO-iz1GgL2YB3(kh%9-?yM7`w zfc#iId$wjk-7sh(pP5}_A!iq0`AYUOEKG($j9q|j?hs93ztFe%Ti354Ce{vaaqZey zwfA7<)?JW-K6Ko>d%u6GuxD`y_Be?5ET{ru(cKRx*RdHVZLvp;-#`~Al^AKtup zH}_=r$^AQ5PWG*7FIRIwoeGJX7g2dZ{s8F`fPwI>dw|C^5P1XSEr4SnEyNdMB@yxB zN+~=kTw`&oHkIn^%&N4>7nYD-0gzTp)tbs|9!@Z!&osI5-1vBdS?(=V_~5P|*cD+J zqincO&lUaQB1k3OSIBGSFbj;uL9D7WAY`}{n*+#?W#{NhLwIDsi7^4K38pbd$BZ>5 z8Kwly6bBUR5NQFqah55TN7tV{Vyhun9mD1=5u*(-kdr_s%VFR%dtma6u0mk)iwu6L z$uF`5CANsz78hHRFd)>ODR*UouFj0qT`Toem;0I|fhHN8mhwO=oM5xU*B~<`x!THN z+|NZQRU5%rY!~2~S2;?<;%EWpFlMX<^~wGcuy`~#-h*I^P-g{`ceM73{ zYh;+Qw?=Bl?l(S|Y=GS~7ed?jSialg*|g2uJ78>DB6Cy}tDTD!rowV-k<3~qx3gt7 zmeRpeIuOL{WomnYE5HuMgxN-MILWR`(r`PxJwP*hnc50A9vO$JsPh1I^-wH>^q0;D z%GO-Tvjq9hN&u`gB7zeF8^IRfm@9dpT`SmDAKz2~7iJI7?BQ9w9L!j=ho^J%Ax`Gz zV*DCBJfn-FcQFVn!&#FIMl+9!IB2U!mMk_6Ff$jFZGOQE83Ncnz^24ELlJ+$ShBLV zc%8O$gQ<73v2UY%=sW4|0}hh0@8@rPoPYS??bG+KXFkl&e0=-t^M~hOKE8PM{Na_0 zyEkrV$u=2dNvX+M0{ydks@76m+hjO!u;=vI{=Ek~*KVwC>vAMA<&l&im}I-FAhLxw z8mNqgj*7D7s~ty>3ou;; zGI#<<0u^&M>R^JQyC_m?urYO3IxcwOBhwa|9bA`RovL?qbVWM4B6V$^Op`m;oK*&1 zXoQ`gp(1TqY!T$wxrne9RqeXJy>X1ApRWSUI`Gl+Df(& zwa2=XGH+H^QKRuxt37orCAskT#Y4mZ zND+(air>udL4b@w4zPGcjLcIY^HA`#ZJCFn^b$)AFar(#MXoeG)?U82%eZ)%cJVT8 z_bTnKec|)xn;+gEn0dVM#k238Jzf8Dc6fGn?aa)Yv3pROZ|;^=)X>N#SyQACVp>SY z8vMXXavu%8*7z2gf{W~^^UD|@82hu83b>+2TE}YdqHvqCcTHsYyVS<5nO*x5M~~N> zJ%>jfE*~B3zCXTV{NC~#H=C|qYd(E8xnr+qS)ZV9NVRpBuWv}x-7DI)-*Vw{WNf?z zHln^*K6bDD)VbiPv&nm7-4hc%cki~JJY8GcCUI00tDFT8G&L2;Y_OnBZfDBvz*wb& zrg9ZJDpfx={JJxqg_ZSpXU zK4>e04uz-`=!%4^J;<{m_JTR!=U^Rcs^FM>0#gM5){K+7Zun9-PNqd%)i_uh8+>1l zjj6KH6jqwjQmVF=XzeBT3RWP_tE>U3=7>|gb+qVGDQv4*t!)~F23uSO%Lgo($U+4y zHQBHLlVnz@v3$j2VTe^<16CX}AYB5iX36QYsb1C3L@fS-Sk2~(XR`GSTgREzu+%2q 
zr>N-@*Y|Q;m$Ui?#p^a1hqw4HUtT>od-3DDrywRjzx?`T?uXAWfB5wD>zBD7zkmE@ z?&gKl>l&BX!f~-b!go}#Oiqd~#9P0)`p~f+*hJH9t8djdEirX0tLPhyw)dC=Q7*B7 z#tCZH;iqJq*eeI^yZ6^_+*;AvEl<`lQgwo^Uf;g`-S@{%&A)o|<>QO5U*^95H2dz& zu1$6#V2~2If?OsxzY2yO1BB3GE>g4Ha)2sV3Pj}~ z@N#AF70AXVE)ukSSs`Bzhld92a3mPBr8+auXcyW2N|#^ltb}RCM5q;&^}+T;Jxyes zfMhHg*22wP_z`y#=)!bV+oBnwWl$=rli0FCW1Ou6QHm67oCS1Ev5ZNEF^R+~z_!$s z04Zxu(v1n=FacHcS(Z4{f`qFn0iqW~EOHeHM>oeI3a@C8RbwW#=Lon7^KsL-` zz+%SY3(Q#JJ-JW;9i9`7YE&%Z%XyTe>XVChJ^fTe-es#Ts{KpJnBs z5#>&^=P$I~zSa9^a^3j7-n)0(&z@^Ma5%nxv%RHD6sQJs0HSeO#!3|8iWKE13NY#y-gGV~ZM|<%O}nyhFt&Bq;{Asj4;`sHdAjbx#TFQKcxT!8 zz2ze#%dcJQID4+)U=TV_w0G! zDC*~OB-}1vQ8-zGgVPYiWUW4j>BbnfWXZBk$ zb#cak%yE&!xDX^!d%`bDV0eyV-4N+K5F$2TPG{FB(|wBC)sp&NUd?iL_iE9=uz7IU zdFk@N?6V6W-p+jeGWXNh*FS#$>eoNK`TF@4V(hcI7h{Kytgdb};$KrNDfI-onR?aX zV=Hiwps~Z!+T~fZHo5m;=ZVvOeS^_ZQe^eetez5wzrhqh3N*Iq192+u zO{j{OH+47;?;CnLHS!UC_pe{(KE8W8|MJn}hqo`C-nV2$wOCH)Lel{-mWUQ0*}32q zNd7Tn@!S#|>}2OMUC1t=aHW7#zPu29W+JY59riCXYu_?(?`it?j0T`u~a|7_eUW{C_u?s6$m`mUx z))~NzZWo}n++Bd~9VnV$u|7<-W;vcL(^Jn6wU)OG8224&J94~Z`<|Bm_1?}^w!RI4 zflZF#@7&vWM|bawY~1Quz1Fm3g{;1V6>lsJR2RBah4v%`H*gt4$OJ%+0j_6ZElMUC z@P*Eg(gnc+h#UkK+*$*<5zM_PKX0b@ElM`a)^1FmJkxXO^3p3;+OJ>lym_nj&Yi9s zH`*^{`?W?)X z6AxD%KAJgnG&VW8=7%5leEDJ1%enr^$u6*zJH^3di8D~BcNNNQ1uAo~(o(80mntoA zRQ5uvhvtv))3x$MRuGIcu~z^CI$R938@Q_WfbxXU7gWk@$J9H#>8kpHv`Vz68 z%2QMMib9^En2pQXvO=!3P^d1H=qWN&fx=#(_ESwEraeNl#o56IQOz1n;~E|Alf}{z zUo@^ZKxwQ`n@7uq8d-W3(S%EWlo(u)gDM%OC1VE3TsRifl?-ly9v)b5fPXe>CMR)K z=|08c)%=>}?DiFco>{V@OF`%f=_`r++wzkdAn_wzq~ng8L_tIr>v zPu;t|Zhf0CBGB84y;baRT%4|vuO5osxwGT?_04;BFI&5|Vdw6yim zGMbinf*enf;|p`WVOl83Ua_WX$DXzwd)k_px_uFW-AA{$=vEJ{BBWT>o4Rr7;N0^E zKYV`q{l~cvZ=St*0gSzI;b`Abhf>F2%b>9c7%L_iOS%CI9kt(Pw=iJI=6d`o;1x(I zgcDm1A{M62h>5`_EHPosa->B$kxXGj*Fymwfr7pr1vivIqf>d2B`q)|d3jZSMxKtP z=OwkM3SaRz@hv1`v5qA_9TKEzjycT+oW_{O7!4s5c?Bt^IY~#(039n>L{0|wW6_&z zQ;KCyan0z_l)`XkqEDom5_D7xqY;2bWwm^J7UVQaeC0KZF^N+NjMWD?mXOGukp=5D z?pl?n4h&dttzv<$buz4G9d&$rEzerbvQ{%}wQNTn-&G?pr+BJBi7Yo$>t`+j&c~8d 
zwlHI1xR!?iEL!}H8fM)9SVO$TTFv%0@Pe()Lzm zkw#vymJw~>WLo%jZT#wHcA}0J&KCJnaGYtd1#IyGW2C?kDKw%eEYWcg0;4vwEGmr%$nKzh`8${o}`NZ|Ap-j`Z$7 zlstL5_Qh=f4?k@C^m%x8u4npj_m!)S+jm#?4rsH@yilqX+jO95%>{B(vC>kkvKHxG zR8NqTZBWM3qF@YOm7Xdl3>3|YRV-h@TX2>r9T57G+vzepRbwyHIG|}4yjH)^?k9Wz z9U`+Hb(7%~E*kIYgfElI} ztbx*4Q$IksvEM-6(c?+o{rvWaPjBDP&s;x$WNA;O z#a)V4#1;i(+{PB&-h-WwADx(-IC1ml)~gqHJs3OwV&>ZXtDE=l9Ud4?#xvzGkvquo zNBD{A@{S(YiPL=##`llj-M@23fAf+=G;0Wi#euNcSEXuQ8a#Dm`{ReV!3Umy@$S{r zH!mJNn;ILtd2aLOm1;AWEh}V73JE8+oWR{Tj_bDvv-g{}dNjavhUy@>R1d|tYb-GiIB)K zr?|#A8#54-lfY?;X-Xs5X6U9g;%Xx2uf_?+qQgOomw>PVS-1vF*96^=q>)9mg})0C zMm}RTVJeRGBEYh=l}v4bqxG@OVZJ>n_GV?CYPGvoYR@Q~)nacW!B{M4q2r)hV5{a@ zvK&j6Wvyo0YMJoPG#5sGd2+K~5M!0r0z8tP%*PU5L9Tfgi&*Rr$UMj*@KX$Nnxl^6 zZsJaGiBavsV7nBjTJiL6nm25 zOoKmQPr#&?Uow`|vA7dWl>;p9n^2+JmMv6Z3l*7z3r;M|8?X4L$(Pi?!7Q(DD0cQ- z&$VmIu3cMp=~DaglQrA-fMi{{-nOjY+dbgy9t^J-j1LYc1~-Je`wVrhqRy4_m4n8f zKE?Lkfjf8GUd?TOIIG-LK03vJL&^nbpdMmGYJAZMSZ1dj9mvhqqIo-aY&A%bTyC-{SMbr};N8CyyN1 zT;F2S+lukZ18=ynb*b+7${Ghs`bpPX+l{j5QeXgwRUnV1KD#@x?EvmvAIlnt~6& zM_X(NpH@N`4uD)q5le#ft&pI4{-h~9+>e~0YD)5eX@p~t&)Bkj#^%$N)Ug?cg7uvWoLVHqS&xl>sP}1@>5DM0o6bZ3gO zh{X-Fz}Q@`0m)bcHUWMS6@#uc)XZxLF0w^REg`}#z=Bp=ky}3Q{N}S9!Lr6SegC@X zhAold?-Hvw`np$H8-|Ow~v*V_gBZ$qkGxw-(9mbftr-v6m?w zG}OERJ3!?&s?16-){HU+r07*SX(}f?S&sJFW66pRRc@h|n`z}{n$$?ebQNl;0yTxF zq3~1`0_S}3k`|J=ca97WB;nDEu9Ulo%KB9)cM*Q|$y79#Uu~`AD~!clCCr6` zB^|C(Q-odFB3rz|*3xHgT%&`<;;Uem?BaFW)*(o3;b3hhmz&Lx0sO;5;4xQOBRyVh z7c5NT%8?cm7%^Fy&)9~w+QxzM#(q`JfVz1=zxQCdP8}qMj z&b_!bIevNX&Vj~`NGPSOYc_A#SbzS^mV0+kJf6Dp^7+mASEKWH9CAJbW>I|JkFl2X`+Y+P}%{QS%ffklo5Lmh=k# zA!GB^EXE`L9bp~I5*4tK?ZcKA@>E3}c?m~W0&nKI4frdQlc-50Oh(9XjDaO8GA5X2 zgf&B)X^7IbkrGnJl6?o?qBWnfbmR|kEkIPDoDO-gMrb?b7~=SXg)q({-b}I0X{IH^ zfs^KeCy?ZrQfy;_jc)&GhBzG_!5N@q5ewIeGD8A#3x)*Um@EVQCg{clJnEyg++m=0 z1_rvI=_4$}UKX4XipJ zRs{rDRc_L_%C$)2B0d)^*&x-Qk!D&n*;ZY3hd$CG57$d7Y6KNozPC=|$;doOIQE3t znGkz3B5#83OO#Y*O45z2hIW3>YSqw2>y{m!9eeG24*D)$jolmXcs<|$_FezWxxQ!5 zS3G^TZ0_~y`FH*EZ`aJeT=D!x@6)G!V`EDPcl)=uHsCiJdu?I 
zV?uA3;q)^s9=gUwRagL$avKBc`wkGOY9~!$qsgr$a!V0rtR8Z$42^@PvX#L{?R2@V zRADWZTWNAjDbUqiDm9l#&1L1L5{aQq0*@skeX&SO6@hjw&g-j_0 z&w66X;6qTVGf@SqLM95sQ`x_wvVT>=`BfQj5skk{BrjE4SUe@nF=ol(z|ag8Eb@?h zN?g_AV3RDcSmMrd>}i(2SyH`P(>iEw9x~MR>uUNn>6NNvuM$u0f}IFC>$N)XVY zeeK%jkvj+Oj~$t~e|r4x@r$Q-Z(QHev%Gr!hQ_m}Hjdmp_;Bp}=+;$mSs$EfU&s) z_)0@j%$(xm_drQ2K?rH0Z8i)k1sH1(rR(EmD4#$hbSy9y>Dwd-T7811j}spSCgV>* z9)&)_G=`BU4COqIF$#kUjZwBK!ZXGBmblQ76?>}7{dEdYozzh)CVw7N3h_U~vt+oo zYB=^OqQDihK*TsuJVP`1uFLv=nb=#LO+dh0;_hx?Z`HPkBKlXqAGW6;5z})K`HtGYz^ zG2AV#>JpK|W+7BShQ!cmUJuj4n+6Rl)_M;eTXys2*6GPJj~-r{et3QA{+02OlV{Ja z-M**!;F0vfBk5x&)2GiRA!Kqa_1zB7z_8=Mk&dg^)?T}|_2Pxi=Ps;0dur3=OIt>7 z9=dpD$LXWnubw?La{cJo&Ew;@FHGIPI{ona^uz0q9^9Il9Cg0-T}a7_ z3bBqQr7W&|p-x+>NI~bwO879ItAsB{)=*BklCcD!G#h@uSjrMHhH}DNKzQWj%Jgu% zjrob^XpxvzSK%mZi8h+oZh-7oYm&I%&tVojtBA1)bdA78dadR(*PH^mh?$1Fx^O;L zA4T{35eS26LM521_z_>^Tmm?lu9(AyI1TD+-v($gU?H6wga1Ytr4>itG|o3C__nOj zTU+i!buL?0Xi8zg^2}*A0xZv(MUFvM=%^J~vwUloua7a56-6>Ts8-C_{2{S&X8|zQ zvq)J%!D(S#s1(8jQM#)^2(1k*lFAlwuuUFl7Y92e;ia-jry{&mR<)!&*eUY23q8%; zKnvU3ROYBIU07QXMi|UvEbcOZkD|!~dXx|#0ZR@dz`<8lUPu6AR*_qYi&rh+0R=>X z3Q0yKL<0;>iy3TIO4Oo{Aqfi1#^H8F^;CcjAS5vzS{jfbvw z(bX=h&RyUPP=e{QR4uomL(tU0>t0#DX`A)p)!5wYzRzDa{_w-L&tEpq&kxVe4!wBU z`|;D@=Pw(-d_VB^-Lgkd>K{zi+@Gv{@NmhYkYI7$?@Qq=6glv`-!2CCG;lv}e@^l3ne~~A}O170JTUC+8GGDS>?_o2Ah1f30 zKc!rvgiVyck(W!7mb}#!=qawGxUSpKzQ)!vU~V2X66smOHC>1h;A=!+CF#mytqVP3 zxuG=pbFo)|8JmAFZ==g3iQe+mDmiuxFkoRqBg*tP55W&J{9Ohxdwuu4k^K+uo*TJ- z;@Xw%2ahb@wSV#6gW0_YVJ-NMJyn~wI#&$nnmUE8UGl@ny6)WGdFS@dn>Th`y|C-z zrQz#Wc8=dUeB;voYnS$nTt7N`{ml5CQx8YaKe&7O;mFmA(QA(%-kzBpnVGuv=)tY2 zdjw3|I_aZIp_&t}#|>j8cs;!uZ9F4AwZ; z66cs>9GLf##XOw>cqY#NmWyMphZ&EEDE*@ z{jHMV5-~8gTN>$>;{VCLO=Ye|y1S0*sVOE!EQr!dVgljMq^vPgV2n`o!Ge6o8jvm} zvH8ngw`9SN+!vPL2!?B38uuc#9sYvA%5rN_xurnkqdUTArb=AmPD$`EE7)Hg;^_kd zjgP1ENephW+RIb9AU6eRe6;7t9W3(2sL?E~wvN@&!5JD>968~C^0epG>w&lLHoSg4 z3&&<=FH}fk$ej5Dnann!I~ByawP{-0^iZ#=Ty|n<1OmM5`DUt*Z_{z zY+lG}Vb@He7wS`FKNt1^(tQey*}A-D+t_kRK3Xd~IF%h7EM*fbWq5TsR>CA^@o8SK 
zTd~%=Xo;)#d@zU`4$l-@07w+FVJAUWX*o||e*UyaII5&R# z!lUt<&mY~xj_b_i=;McXo=)9<{^;J!#0b3q>4VoVrasI+`SkWVvW1`GhOc9Xwp0Yo zf^zo407IPS`hV(JazxC}Tv@^mKoJ6goX(R2X<>8>tQs#O{dW9W5iD?Ffoz95@TCYixxU4{)*6wt5K+`#mvPC zI4F_D#S+m9Rwo~qmMj^pZ#kwU7fV@G!vf$#HXuy>rGovLxFtDkL{kxrG3aQpCQ)Dv zVU`$xkbv=?U{fT8#wbT0W}A{+M^@~}imjQPh|R|=@(SRm%?K?S9>OeG9L5AkQ&ooh zyzo#0GDV>9EK>Q4%qcF}%RqKCR4gWX$NVkQs%BBBLmcW5N0vw;OJEyFbg4AhA@DU7 zyBkVzU;tYI#E92CuK=@j!A1xdU%6g<>#WX;M>>)L0@h7K-#R za~K}=g(59QpoYUm^<``qV9bI*QbHWd<}Y$a8I?7xPy;uzm=|jngldFVKSyjR#TEfR z1Y>h0Fu0?X{@f=dYBXXZr65!lN{l5UW3kjxtO?Sp+LgJji+KsIfd$6q#{&Slh_Ue% zQrsR(4p+p3yQ{iH*d@r9ve9Ld{KOYoKEpb;5gEK4>x?VbI)}fDA3C<|%9Tx5u58-1 zzpbvx5Y5s;>5|H1kv~>cktp)U3cZQ4P>NgMYU~?~_7A33tf}f=>F!=xv2wr%Lo)X* zop^ZU+041g`{(c7I&u5Tk!$Do-Mnyg?8dq22e)56n|%B7$(z}!xtWQVPae)aotS_5 z;O*SR+qsGPm(w5K;^wkfKYst}H(%$!et!LS?(yv_CmY*S8k1C5PUn?_hJ?|DD0cP# zWo&-si^ME0VDV&ho(y(@31lTad9g@QLfQo+WAlH}R2KXh&Hv)q+eFBsYQtq<2!yGs zN-A~+a-gH?V{|-}0Hzm4%Rs>43%gi@Fu1)Hi~m`1r}+gNX>#{U`}y$A)4I2&^(L!XA6{`B2@t9liRCB-g+U~ zES7%)tx|uh1T!|=DU5cBBCUdWcX_Hu9ByU#7Bf7x6jwDRpRt&kcnmCt=QlbwT!5=x zhS0)9Zv0F<>BZnF(0btBUz3}5TaM~z*`JdE`id)M{@?4ax+zBEY#WxEpFIWkg5?jbO^e8MH{zjFJF(3 zjki8~zVhW<|Lk1P)8}22(@j&48z0X!KYm*O=t=#fnVQE>s~^o|C#S0?rW+nkwTz6m z?LQK2=@MlYv+7zDwaw~in(K?ejuN}C#N?zX>=e1B2s0LZR1;NZLX7n?Ej|bvsB9%T zV*vhunF=`s3!S(&(G=!V7}9Po2GW`-5(8DNgH;-04Z&D}n!;B?##L;9HAdJn<>?9( z&PDcMu`^2XBnly1m!`ssu`pX~Dq@HVNV<}MrJ~zO{MuxR3xC#jgl}4L-jcBOP%w+MRcl!d#+uRqi$=mi zB;fyN7vQwj&lrm`254QioG!|F1w6!9(q6zK_Lq#!A6JXRQJ_&VsX|0b3Y)Hkh6Jay zR2M1IR+T{aR-9o?vdkGe=+YG1nu8~aS)9?bWH{zDPR0Tlv6%qx&$19>i92yet-x6) zBtuii1XCZS;U5N2pHs8gE5Mi~wJYX-?vePTfP-FSWS5wMfGTTXN`Q!kNiyaH>^QKb z1o;X!54Idrl4DH(nQ@U0NSlG5(-7q-+!S&jqRc^2`C(|7CC-7R3AI9)Z%{A7m<9K> zwLI7+uWDBWTBT6!Y8Qq&#L*5(sz)5_6vey6v35aa10B%>rYC3sYoqfegYDTKI32Tj&}u%Mub{5sMY9 z#?RGO@O6Hk+Q(IUc^V&&1gzY~1IAid3KKX6T1TnP%M8cGHBFM{PX4k!!KQ828+WQ7 zO?SS1KlJ&_(EE==bFVum9@UPF$HyKd?oR?&r=QkL&(uv!XYNm?CZ-#vrdy{UcaPui zxPGJMHssVcKq6j|IwdS`*k20B)5DTU1!h7ze6O8n4J;3|X4owY<^ 
zDFz5bE~}{ATBNYj@a6^RK?n#8m3*_GWA=dDRhf&WW&)>TeSr{RR;Zx}^#ux3q0+vH zD0J%KpHbs1FrdCUXSCQIr#gc0M@^^&AzH>G?jMV2aPk?u@cPZY_ED-9_N%gpVI~En zYMK#@004aAE=>5^!cAFk$BW(BBwi|Gk6 z5;cm3R>QK@p26YZ@sst_kJi6_dHAOvZvN??C;#a;4}biA{PTx9v(GL}Pn?^$cj@uu zl^2i4-_AWiXobgjuVy~JdkV7=KEM3(@dXT`dH>|g#~1ke%f}b*Uq6EhU~gt-XU5MQ z-_^1-Wwgs=N}fo|;K|<&QZK_yNt8|hmP`gVYKa4Ld@*t;OceMifT3ApPg#1Y`ZJ5`U`%id^lo%C>T#Ym2C=Ll9{dgqzvn z7J8(K?r);GtBJM(=qx}bEfSby@D+<#lCh*$fFrUC{w-dESow|7N|wFw>P9><3`^N> zRw1~F^<**=AVpxLD4k_6>LDbshJ+Zf)&N)UXKTD1wTC=FT0D(VX5}iZB88PLF_+0~ zFqY61faL{sEz%{+#eGAn-3J__|MLCnH*dS9XBzK4OkTU?yLvNl^A3=9 zVmfp0VRB?Va(gs%XDl-Qux5N>@z{9l$jH)5mlpT0w>B=3Bx-qRnQ4*H1r<6u>H(1% z3nls@g^?mR6yeBLxfOINxXM!3){T5x4->|EheV#39ApC1(glPci?di{ z4z{{qjfE(wX31V%{G5dq;pk1$^2Jg%-KQXPvm|3X2W^X2Is9>v#SL3o%`S>FK=nuX z{x~y`68hsJSAgxRWK_mDtJek&9BIFKbM^i4!57cA{`%{Q-~aZ?zy0gOzy5jVFMpc; z*FR4G({CRA@OkXR{OJ4~oOiFsKfay%Fh32Lef{Y3`zL4$&D@V@8qDYSPrrPe`8fZ0 z{^g@Lvs161PrjXd^#0Avr}xi3ynQ+|HFD(OcMa_cPsps&i$!uiPgX`+qrViiM7}HU z-}xKHqW!iU8E{pkDix_|0yP!uScZ^7KL2No#Yr&o>*UiZ$1CzCotmQ=)flH)QVeT` zZqKr8wM;}sCMeQ8#u6Y+LD>sXT%EILVIT}BSo8!IekRhfd5o>*g6_>Qk-b287cg-# znsY7CYDEZxq45R!co}vTunxzyHbbIR7e~T7iPK!DQ8(8VW=t}Gv2nI3!UfF6*`^rR z9EZpO4v&4SV|k?X#f-&jmSAjUnbd*C#<~mCm8JS9(^f6SuQGxJ?ji76hAEl?@zk1I1Gd$D1vM)NAtp^7S50O|5U&@H~>x11Y3HNDm=E zdM_j)BoNZ+il`_SRP4uIu!{{9dsn3Q-Vw!yy?c%xf93t&`Tme^?tO1I(eunVb5CXy zLaZ#-Ue~(T!pZG8h_O%&z|652o5fZPAg+RNE%Psq0!)vE82j7U3&Ys?t}!Km2v|)E zUj}t%z+=;K$ z`YY-y*Nq6Y8m`{dU%zd+amRM!j_vX_=f$h``itIEXIxbq(*eeMNg4~t3`tYqgh)yc zpq*lnS;|fYRxVaL9_RxZ0Bn;p@H}8ABTFn)nLQ!hLJ?aM(z9r2ed6wf+~|B*TqOo5vj^>p%llL0Wb#}zDT77N45Zj@*z9ODl<-Di^oR2H#-hn6harr7-1w1o+iR&I4RkSLF}Px z6}n~cTPHXKPw0Jx@~msFauJePta}kEv{--)lwq-%8Wt3TyoTprlZ=@SFpTw9q$u=p zJTcPBnvN4mBP42^To2O^knbjse$RqzUj%nsnMRP zv7X71?#a>ik-oP6&c?xRpeI1};PKSJz#9F5ANQ%)?H*%7*mDfgakHlVWEDi zf2afyV?$SKAv;A!lL$UjIe5^hD2Rlew!gjdH?06pHnAI)-vBnn7M)^?oLostDU z6x*Yuu6U`7DEE*g$d*=yD;_z-4CF(ww}ql8AuR`-3>PsyH%{z~6(h73d^RUOJ(rlC 
z6Q7n7pPoZZ&m{sUstcYt_$C>y_;kQ^!F~b4rK1JbNWM85mfIo@F|sHN+5E~ve|vK$ zDkUp&{zF@gC@V&k6)Vb$0vifhh{M4_PP0?sI-Agp)G=6L$pY9T{>{oeH!MT8BpeCp zzz6~KSaCK*m>G+-n1u_>(ZCg!1Iz<3CQ;=B=&JT7X@g0+f+Tf5Rb7yv&8O-M609XO zOBo}xh@ub1gc4V{6)VjO13O&E^RjH5EE_l9dx(s^QVi&dfg}s#u?wIQn`WJVCAzR+ zMyaV#yFjQxn{L1+f;22C9kez;PcPuHx&^6v93-(E#ayzDIE*ik6lqBrnKWpBO*2qH zCAW?w)Di{2okie)2Cf*EGKQy$<0}a)c^pShWJ$;@37MTn0`@0yBvTS8)Dj(@WM6U8 z(p8*Y`(+m{*_zsdE$#UoUB%69zI%IWNA206(zOOEpDDY$hZ0hCJ#sNVriU78fTI=H&K_%)a5ZvZl=}Ev($jwdxbpSjuK2?U3YGM_Jc!e%bp^sR#RyR3T z^ZxCn&mS*;{&@cLr}}SSuK)eL;Xgk+|NVE@&+nbzzjgloSNG5Fy+6MX{`hm~(}%&2 z@A_WP_Pu!8`s7j949MjMDYXyB`yP(>O^yP9eK_7bG1NUe&_2-BG}r|&7Rim1V zIRZX4+}qOKa<~5M!EM`CX1TPD-tEW(+B59$-{*RHkVx%shre?SZ z;Mx-pnJPj`V=`QX3>N_-wD7jrQfmY5#KlY|2q)Dx_~h5yz4ec``EUgG!GDFq;8JW;PEoxQM>Gns>BTf8&n3qr14Luc)W5sJ*kO zt<&GooO|B1{3K^S*H_H-6({?Osoo++c7S4XMuLn8QH09K zAxvwh<3 zB8sbxO3lP8+(dl=)lf=9R8}s$qsKU^rwS#s2sPzkA}uxu!;mv{>lRzNb*<*+uHs<~ zcK&VQYVBG!Sgox}4pswcx3pRec`RFugMBOTFJG#O_U4P0uQn}PX;`{KS$n$Z_3VW& zpD%yd#uv+v&2ef@m(uWxt%^>^#fAFV%sfcDRCpBsMs)r7PycYOcW`te=Mo7tup zPa9^Y?@doWn3-;ydDt^G3Z&QRiQcKvp7Fu1@xiW!{h^w8$O>{QQns=VBR59njt43Maxz>5 z_y{l)LDzo*EC5nBz+r%~Aj1%PXd(6jI8_H2ssK$Dph1`g>r`GcfLS+^6@WMmU?Kqw zi4|I+ML=04qFpx7eh@PD3jKL8_lQUz)=1=Q!TuL$wna*K1x6TXKuWcdQmvHKEHb!& zH8*p33+G(2JO;#ANNzEI7feJ%<7a&gXxUOD&!__obnJxytUgG|EM+*$80LI}+8qH= z6a!N1$Q-~|pw2}lS(y!-P;EAJ&nr|Pz)T33uOKaCjK$Y3Kv* zR5h53aS)7UrQrn17_@K@SuqfqC?W$nMF;v;_(*#!8w9$NA=0vy1RxY34u1%+EGZFK z`o%HKj3^9at2XhDp48pAP*=pQWW>08p6-p*z5L=;)#dAkt2a&O zFKKJ*6ld$@XD+BtpHtM<$&a6rojPYedDeRJtaZx{$-;`{ynKqu5uq>ufK5{aD$A9C zrh7p;4&hPpavf2kBPlHuRW8d?AaIljoh8DYGC_8+z*@+$1=vP+qS=*f@g!z?*_Hyj zr-1J*6&5WP6_)dZi|Fnmsv{6@&H)i;N()Y51_{y<6F^$I9(-tU#F}%7dV5T|E|#4R zkP!3mK;nwYuuv|D3UL%*fu#^*(Xb}u+cu7l5ZXC_jice{9K6g((K;Chr1aKWL^BnW zO~qu}(j;dk`!^i`ySs;23WI7fjD;Qp7(@WOX)rkbXKeUCBgB1Gh}(8eGJNNf>XiKD z=~7cPh(}970X9{J(_5pr?8v!$r@G<()*ClA^>*+1{PD`4->!W6c>dknQ?F;wfBAg* z+m{=Ef4}?Jw>w`xUH|;?%Exz?KYzUP?aPfnzux}y>+P?fufBPC{n^arndwV2)Ayb} 
zYJM`^4r5~xiRUDwjE2T0&^S?)qI1-uk0rPwPW zgH`%z>U;>W09{o+?9ItAh6Q4**a7mpAc;k`cVl5i0<0_t^3@o_|!&va753FwK zd{ko)R69A#7^Zxp#*d1wp^p$z<#wFPiH^M>z=D!)L>a)K6@oLC7!&%bU)Zd9ZWBR= zs(i;7FvG?%XF>*_T}YSWI?rqvu5>^m&jxnHpBKm4Y@D$F>kAxxqNFsm}-49+;SGfriS;Yz}& z0`MIg>eLFIc46Ofp*)rt0JWnOMlmc!LH_!{@1rDj1UC?iW&L?ubXljV^dZ4}=an_?ov21XQPv(U8#2(ZwsCCd#> zGXM-$>H-o0)Cd0e<3o6CfTs0Ob#9s^z;F~&ZN+q>FGAx2Y5-=1g@_7gUMeTh3N$V} zL{X&!={Rs8Pv%7G@(8*-g2ojMU#4{6i#+V~=p( z5y8<@@^cr}7cMK$UDO;uBde{G?%Xd}zddoq`h=C$^y)v@JN7E}AJ%QzA*om+$jhe~ z?9q_NO4P9;1=vIo$?;N6Jj{Gnnc`$7vd#lG?5qXJ_9Cvmi0vriIf{7M1&PjlZf-Hh zS0X4~qF7WRFIt?ws6w)$N?BehTv(CpEvDLic#8|Ka{wNzvI9_6nL)uHAh8Az+-J-t zXfqRpQn160MQlM(oflw3c7HAuKnP;$pz-4JjR)2kkY(T6y@Wv#q6iYU0q#=NCS`yYujxj-b-jPn^Xf(0vzZ6e6U~#OZR10oV}l)|gRNtOZDWHSlcU`aCwd-^_e_ic8DVU& zV`KnW4*EMA@7%tywz@1$DPXWkm`Z?cykc5G=n5^i_64U`@OcJ@z~B%ta0Zz_A zK#Y~^h)QFeE|+5PF>U!t*+t2@#T-|uz+I9YC`((uMzdu{*6#iOn!|-V_BcThZk1-$ zdQHU|LE(}lcM-wn#%Jb+>z!axDT>l9i4qFeK#zF_xc( zQyF8It&*IrD{OjDH8!&Q@pR40=XGB{-T3c4z!`u4f&^M|gtuR3R+ zwMg+)IM#tE27s?3r*m=?G~`Z=^^6U*5BIh}mfPELzwy?^ty@=U%_$5vDS<^m zV+62Tgt;88`4q%CsWz56uNV*+;R(snG!_A7#l+EYU@jJtQGiXsLi2y%Gw`M!vR1Jp zCf)f9p@qOG_YvjDy*%~KVi#qQ zJP+e}vkMFs=8O_&N2NQXG2_;+Vk`hyer7mW0m}rXy2(m> zvND3Lj^ycMknCy z5NTQI(L!aMDLXMxA}X(xY~88Ydr*D!l>F$ajN@k{M^2{fI}Awdo`b1}Pbg~Z)U|ce zx_aZqE5@_+@}sBZTlcc7HYQc9BUMyUsy4E#Hw(A!RBYQRUs1*P7E_TmgvfLyz}OT8 zz}Pf-v|LA2o5)g4tVE5I8i|@5vffR06|q;W(d^mp-n7lSrdqZCV8Q+S>j(SyO^hC# z8b8+Cz5VLdMSBi7Hf+_DR|*T4BzX#n+1^M9vxXe79IDR>H)O?RWk;H_h^8!x*`A=( z6BGFnL`GQXu6ihUjZMH|&!YCX(D?M6lpvvtG zdm+zN3aY@}i&@!AIUs7doCh)1x-=2GtwK$(za30201I45q#?NF4=Eyt7z^JgcNH(R zLJ@LTxOxzny8sxwoUhD_V=KZExq!q9Ga|f&{NuHOwwCI#(QOZ>!Fl%d0Kn4sNOS$O z(@$n_Vv<_zixf|a_jw@n;+j@{_yVN+gBG}zC3~?1+ANTQuTOd{fpVn zFJJC?|NbOUXg@Z6e&71>ZR4vK4<0{kK#Fm@Cr5iHhkM6|x{(oB=frT^)EHPsfJ!XH z*uIVjtqs?XAO6FdqhYYA3^oakGDwU_NR9?8x4-Tqkdn~EE$CIo1OcQ!F&b9ZVvDp8 zQGZMM2Cqk~1=usc;G{DKl~NImP0NXe3|19PfH(3`H4SwXAf~L?P$L24v8XURcLCuV zscwmvBeOG!J3dr8Kp_{wTe%NGU{6AZ3kjBSfVD5aIf7@x^UdLKd)(Hc_;;|_hWV4XO 
z0?O*9qk)GW5{x~B5r}dR4X&*LjP)@TKC0XqgWa}6U{pE)LDlC0Xf=9pCLhk=B^bT& zU>4>;Yq>H)R$|fpRt;KMfTg&AZCNAOFhVdE9voJl2y$gJLwKw@B!EkSCM}>Pk7+1< z1$l;Yuo{-3kC&Jz5;Ik7rh;jgi30wv^d!EP#8VRmYEYg9!nM?~EO`{z_fL<4pqea+ z3&mJwMl4f0kFhJNv|D#+_tsbs9#b7Yp*VU44hi-j6&xgn4F+&xVt_+vx!*teIqnlW|RC@lxlE)8^4fO50b#r-7_a9Fm*S>pu>h0^Z zuU{Q|@$AT>M|+zdR2@F*EMJjayoBK`CglcW>^{6T55TO|6=inB`f{kbP6m85+ZbuG zxCADM)xx~5*nMJX3KJI79je6shOQJQ4)Rz`rzIxi7%5R{Mw;0J^3QV@3G&KQTuXRx z^}xAI;H==;maufiRO3RLZE4aRkL8E1V!;y((O1#^ujR?ERXp#n12V*-0AvSeB^%~$ zK@tlw7QR(rEeA6jxK|2wfh2)GiX{SCfkYL(YQ3@X!Rm)od!Iht_w*UyqBBpn%|1W+ z>SgWBbnW!y;mPrR52yA$f4=v_htq%mxb*Lz_kaFq{r+demyZo!KHmQF`SOSN=bk;= zHT7`))Wa2%Q%k2GRX%^Q@y(lkz>x6n@`tzA-oLs1X7&!)Rv2rY8ta)F?Vp|)njGmv zV73#Q4aSGMCWc!P!L|b#iZyk$-MM`BP@vGB#33fK2@E!g$sr2FB#wxh#3KXDoC}x@ zO^<`+K!D29rQdx5}M~7!?@N9JyS4R+;C_o}GlOT^3W)h$zi+Tn?f(fCS{3@V&M#nccAilHkO@e1Eis6&>&GVwASfNq5yM9Ro5VG_%{B#Uh# zpuG$T#=^o}Hd5RGcr0S4Pf-DPfx-|0ZnQJW3KIoltQY}Xp@9scb>3Q_iejsxn9?Yw zBo^}$V2=QD0l$p^7@I{8l!%tC0vNmhp#AU(&B0@;gRYyD^CH7v~x#aOxNU7%!?UtnJae{|DRgHKQR_+1z05wRs;qwV2lj5JRACk`Rh3t zt@W%DX8I{<>Tp&Xz*vKgwznoYJh120?D5yHYhKLmc=B{JOt5@7d0=Y1W@@} zseO;955IqZ^7~(x|IdFK|Ifc$etz%x^K;W*Us0HS{rd3p7uy~^u6j7V;?d((&t6nN zd%k6McK?g#hh99ZeLZ{r#nTIqrfy7)->3>eeuMtb2e&SrEM4s43n&~OMJS>gY+`>&c3!^KU=t?`i9`l&UO@PDw+_SDXv%_6 z_=U0Fq4y~mqQV1V8PZ?``Y@YoY`SY+U!5bd6kw#!r>gx4$^ej0CGL3aHHdKwa93#4 z%5b3v170W>`~u4^6ni1SLID=#uIRKYUXn*lcY*o?#7-79zb%^;iKwr!qO4fBD}n49 z5a1@4$QC=V!CHYWmT!*c86r8F2(~Jkqm33BDH0n)k`t` zg*pUvb-%!DDE10<2-qUDHp9F?nu(WK@KS4dMiwr^ydd3-yfg9|p$2V6;K{*C0%#VH zFAzw>L~?M%$}oj7Jkv$Ay2&O-g26^rnaOf9MP?w%4J4_Kn5qCHLEOHITI{bu4-mM7EsBQ&YG~GFy&FvWhsCB95(y<|*((MYti0?kW&1U8UamhhghZ{q{Yo z{fE`ZPAg8Imz_E%J#j{Hq*k$Q54U0+Y2nhik|nVlw+T+3wVXI@J9yN5@TmD%ZFb!` z&-n|1^XK!=UkKD3w*RqPx2ifl#|x?uB$^nY6-dL=ceCYU0xrK&O?c0nB=f zlbV~VUcEg2=eJ8gzF+$B(HnDnieA&oY1vsN?diz&gyX8J}K5Of) z^uon)B}?MhZ4zwTsongCa?J+mva;loU_!u2)>+~*R1xeH95*#wWk`}~s4P(=DG?W& z5Eg^nCq|P|^Eq^W=#DMyx`hYm1FPrLXq}ZD7}Jf=TAnv#f|kFYm%p9^-!OlTz_FO4&Wd5B 
z;P`2{K(XlBwUv|OHLqXRzI}V}#fx3jkG2ettnKe#H!;5J+03aYkIp=ORQq`9_@jr1 zUpzbZ;eFk|{=WO)f4Bep@9w|0JD=L z9TUUtBmD@*b~Uy(T-&*$%9t(BP;%5J(Xz_?i)RnszH<8L!L61&brLs@l!!Jagzkob z?GIFX1z-#0g2X8Duih;Pt#C93e}JOWE(6_V2-OPaZ}8`K3gD1S86>NH0ArN_pcp`{ z0KPa>7WmC$0A*fCZjrA7sw|wK!52V)Rd|SyDr1fUWgtQBr9d`|tk%Yf9R#=@3l(E@ zqXF5LMavPg(0nZ-hwI|q`RD}i(;uF*y>n;ftXP39MKdrq0A?dx^-{ou2Ju+r12=0hX>dkyR#= zOdBQD#ER5lj3rPLc={N=Azol0^3(*rhR9Wc)B>KG%mVvg1dbw-tB4Xv!%`ICY75oo zn>5ZGkr_Xx#)?`&}R26s=^YW4`jzn7yefb*w z>S~L(AVqB;r$}OwcsLR$iN;~yfT1LofDB4P>-o^;DYQ=kM;FC#B{6JiOsXM4+GuR>0LQ7w)^Gtx;HN`zI=Y^`IFO+rw>1wIXwI9^p{W9 z|NL_2%g6g4-`#xww*Jk_b00pOe*5;&t5-W8&#Zntvle3PqsLX_6IGK_)sqw3A5QHU z8`(YBcX)W9c4V-Au=mzTKQL&G4|PtCbt3?at|x#9#Qt`;p)lCfG}za4`|6qMO-ubH z*~MkKJ9nnhh2NtDdF2p4qVh($&uu%k&xD0eIsjXHz@;Gt42_R|w% zzfgR@KE(V02=v9x`B#aMe?oy&T|klhelf>lvn`ClVvvhIi5$pRfsh-Z$-G1ibpe(J z$q=-`8bT1t5ee`p{0RylWlrh@H|7|xmitKvT*KByG=By)7$8l!XpaNzdDO>nG%;W) zNrfJPhMZ;wTk48jW`>oXW}&8Jf({xm@Ul~26%Hl~Ky-yLE6W3BEwzUZO|aTPqM;}m zlp2(&;=5}1(FjBg18IldM{CzN08-2pcRL6n^{0FHHD$#0Nly? 
zRdE$zjj4Nw3@gwgB-XHuOu%f@O$#y% zU|&hB1&d2kLl_*Fo1KwPKRK_E;VI?%iiEymuBVXg^Cvnzlx!Ex?jqUU3Cir92HQL2g0P=UZh zttE2Vs?9#0>ho;)-;Rx>)hV`y;e;NZI9 z;Z37sRYN0{o!yIi`j(DOteSebeqw4J0NBYt9!~9;7~eHCu(PXUXHVDB!Tt*aJ(q|2 zZuWKF>g#M68E73FYKMNU;oi2Po)(xG0}l99+sI%`cl({Y*XpjFKh|*bLTAhEuC^Pk zjpujoUh65cawKHBCQi(;ue9@AH`*I-gWhKryKP+&R=X6^zuEnv8%!e0eQ zz6BI@K22LdM?OS-vk0dJ1QNqoRgj`Vz**xZ%H~)%nh>51scamCS?J3Gx3tO_rZgJP zzLRCKd{w;IOieS>QZp08Rthi`Sjf;+fHjW6<=J_EECR4#Ro2VY`B+*%QyT=a+W=;Z znVH4O)*_~K_T&#jJ$*T34Wfcl*E<kb5)UK}1*uF=*>wp9} z3TiSApU@mSuHRdu-+e%{`+#QsW@+iN%6S^if(35mH&X4&)^4JLu@?+V=MC(f98Uy?wjq z_3PcQUT>LxRN2!PZ0mG(^kjGS+FLuV?OnE}wyd^J=dF8|6KB#+pU-G)&FStf=OHgL`SZQ)9m|fM)SW(WXm0nnw3Rfq6gM@M-Mw3Jwyv;vv6!10 zfw{0^5*8#d@hl-o$%|*8x+_LvVPQ>DMubR5QCL}8yFhA91cg4iOwc-KPf*yYDmz2v zV5#gJKyIBpty6?Nf}_r5>0C^ui>b{6EtVP&Sra51i>XMq<1%dJROWk+Qg!pc`kPn`6eJncPpBJb20*X`RSO^wSt zI#v%3Y<@OV`~J<%&mZo-d3pWC)5|jtYp2F*#zuCJ4DIUg+tk*!s;zBlOUvS>rn1(y zMID`G9i3(1@3XnMqiYGME$*r8?%FssuycHL@8tNtp@E&<9sBxw4v!4h5A|K{?Y!FC zajU=Q_DFy0=wRzee=`X89%=%(I@l3<#)mt{M}TRzxAT5S^R2d~3-@lEI&*Hv=07S+ zDx6x2P@qXj;77qp7B;dZvG57p==u0`fQPcW8riR>aiH@gbWjas*eHb zrNT!At5^UM!F@fvwVxYHp`$BQkDVKMVb&~c@m3k2z=*#&qb#Dq&h0W_#xRuA2572$ z0KA$4Fga2N$#O4Ml83A+5?79lU=(poc_d#I&(p^U3`98j0ssrZ7TNZKdtTXbh-C||8=$LR zvOI=5FInYJ()j2qUy?Qe{$_OfbYl@s7o-^r6Ct5RN@iI(*;2?1o z8POZ|(4|*w$OU(@enD4+!3uTr3>MW8bb!b5baOlwM!u-gg;~_8r2_e}Vr>}2SZO9$ z5YyxU6gB0=LeH2iCqkADj@%xlbl`Q_;f9ZCB8WSN(`~L;u;Qu)V=sk1PNt1l z>J#KTqSj2$bTTW~%I@7=^KfeK+qXyGyxITi)y~=3ZBL)C8XnDW@3J?wncBK6t(}>T zt@;No#s@9NhGxT!yXx9H@r5hWwoYePudB7w-rSzk)aGjKaJ6*gb@c`Yhf5zms(Si# zLuYr%g)90ScP%X)&UPeDytO0H-dWn#R(Yl_nCoTn(;^d?_yi`N!NJK46iZH$NE%5^ zMr%rli2!mb$>DTnn@L9l0{~c#2gE}$`*z+lPNO^WSKmv zJyDj)lVl1dCXOPL1Ntef43#rUnZuAd!9Z4(OV$J^x&lg85yM%*!iXq@M`tA)N&%tE zTv+rLGC1SquHs^qUa$n=f5+I>-0bBni6dNM3M*bF+;hNkw~vo>j19Gqf|Uet#)rXT0(5ze4YncIUE{;;y`A?PZeBQjYRC3J%9gDR1Q%O# ziWP<&vC76{%Zc&K`4n03a}X5N&gXH3mbpWb7CK4b>#>)i7|THw157A@Xo-!eFovlN zBFQcOg!yFwuvAOL!~ux0Au}u#JmCwVRsi}|3KKDm1*wL}!YmdZi()%e4}uhkv8n*r 
zx77w{Dj!AXCuxgl)}oq^eb9j7rfF z)6BHoe3iRUZ*@yV1{y~moh*;$Dq;Y}YGZ+%Xp9w_fN@J?0nOk-b39C`1?sMiB(eel z0cr?Pqy^qBSE4MBp~#~tU0}ZeWWOMKtfeq1YZ1$_l$TY;vMx-{DoM5#GBjQyFjHmW zAfl#OP*BAJUglS^3~0b0W-7qC352?N0Rhlg9l%(g4yR0etux%gO_Z`qnKP|qch6n zh;aJiH*8bgyje0axoK*8k9U3ryj&8yZ~PoA!9 zX{wnRJ^N(3{>|*I_iwJeczU6~XaBkLE0(R&WI31?d!ofja{5XBg$ceAf~z1ZFQ4M{ z6Rf%M*4%icD?AB!U)^|Dkho=srtX}tt9#ktaK-pU#ni*qj~=gj`fTIOlWi@nOSbIL z1WIWd3tp~|kn0I*6UCHCQ5$JWJyma|=6KlKc4YT-ZF%|f;F~x5UccTw`*QQ-)T-{@ zqB{-NdyS@sWFpa3Y7Cbfc^V5x&Db&wIK5JBRI(0byGrHs5a zJeWoZ@AUK5@La3s1z4;c0s0E^P`fM_;&ABV81%=&*XOTgdDaTDmNCKRh-vc%Y?uQ{#hmZ7tQ^om>0+wvLQ! z?(ABA`I@7y-g5q;wf?g0;#KQ|MsPdO(Xs08oyr}1vdWfA-2r}HKu}((+Ofxf<;t@A z_tv$y?Ct6}*w=Gru>Znv|NZfi`{Tn+U%iRGcnvX z*xlUM+0fH=ukFG0i>GV$?q0WWrN`s?&Eiz_lyn3CuOK{$znR$Y)d zxC9>P1)JaenzM32g*H?xz=96oHHC$Td%K}`_s|ajVHUDV2zmfl{it~r(+WaHU2&LD zfE^5eA$UT}oHKF7!$?CIN|2FDmgJEze=LHrR3OX-Xh1*+Fm#298h-+at}13{mnCH{ zW?KqKMmJFj7ohBTr3D9q=pCpDUt(DxGDixO@o9QWpx9cxA}^;%BR0{6%2?2vr;6sQ zW4W4GjwXh$CkT!4uoeN-nj7Q5qX{&RgS@Xfa~F&?02N?J@uCtGfzgz1nl8XF6#=cl zT%2TE%yljiI+pTGrHO#n`pE#tawDXUaG3QfwuZsN0`vm;S&*6;1|?as8O2zb<|>}M z#`vv32LKjc@I#*D>Cn^{blqSMV8zA-Ai%_qgBuOH+!)}(brTIHD7zUq9Xc^zPQ{*^48C zC-xr5clo$THh!!zH^y5;_LtIf0d6*+VvdKs zd!M7PXWQ)T{x@&-J%6!zVsdq7_mWH3tf$VYFJ05zYt$q7skq;qaj!{yuStIMp5*3T z#m&2ldrjJg7EMF5^6mrm?fcrh4|I1Q=z$*xBQ0Xtp=D+M3!- z&F$va&dlca-1aU{OIyjUyMbK?RLfS2ip!b#rSYrQOZL`SE7vNs-E4kZ1d|V7md?f} za`C+M2%#)0T^$E(8|pZ&n#fW{Co7^@%4n7{nx`UgRnY=vJd%YD8q?qrs%fC7QcD+T z6H;}c`X}8?0jigclx88MWJZfjVSF9(zsnY7DopfMftak4>iNyD-!K-c1hCB(k!QJZ z?1kC4eCy`cY}bJ3E$E)LS0qbv!OD_7z$h-4uHT~Ee<$hzCL*AQt910 zm5mK+?%!Q`{(PXmK6w4w!uxkuHZ?Bq>{`*-Tv}IeIer=>QJgwwK7Y~L@SwE4ZEb(w z)?2r#R#a&X7FvoVnx7USRmW!KFbft+SFN+|+E;w!_|p0dTkqbiX=}RN+jYOc=fQA) z%h*u+M%ix!Np|NKN9X;!_O? 
z+8mC~4ODKGgOz5b3AIFyBA%lFdyB~uVD@GRqe*O3D1cNJSv|l(;tFXk+!2FN3_25& z!V_3v&zi=G!fx1sy~;IjxPm+u9%SZ4Rt0Fk?RovLJQlei!M%gDoLJNf3GMT?_^ZuiCN9U!m60fTbxWG5}bx zk^syJ$kZ%WU?d=q$n$0bY)=r|N$F0i$_q+lKuX?1US@H!rG#l;!gekb*q5+XKH_gN zSrATP%Ikdb7J|Cz)&)W%kYv*=s8owVD@J97IyBb>DhWcv0-x(-zRR1*k^?jPpEHYcQd>JZa}X z%g_LDkj~8Pc=UMN^A~^2&VtA1FSfjTwdeJ#!|&dn`S#_;x6cjVzqS7Bd*A>4Z~uS( z9{TUUI{y84$Il!4Y!6jK&G*U88Wu(oX1w6#{w%>3`;9~jDSZgU{mYkJUPY;M;h7;A3nu(x)&yL$^-+VgMT_QPLv<5uB@t&(lK z)Vub%)^5xxU8c^?V`)ufjWJ$rj8>W>M7@j_ot46)hOgSz|0=D^5^a~80xV+`S$^~H- zZWH_}$)Z|8&MLHW(Y2b3$pWZ%3!%XpSivw8tL52%#F~Il1-T}G-kisum|4l?nz6cl@ z#uQq00YzIt*A!5VMKn_hsLsq@#PF3TddukUg=B{xY&+V0cxOSBqlE5T$|zjTEn6?D zST8MJF3R>NnX>3ke@0n_Zux5KvXz;OmT7D*R)#8yuZ#p1dNrW3JVPu>RuR4mz&0}m zj?8$vIG#41rvvsRaMxx8z2qVrQJP0J7O}F*`0$uZIHpB(`x3ATZZA)kIwLS3g}pG? zHB8cmJT?V6SZz1v;{xM;(*mg>OpLHuu{I2&vLb-4h!4R8kQQ~->gOvGB$fq$xMt(Q zUB8D6q18ACvsy1j<4*v7Gho|57bW0I8gdYQCLaXe8vI~Xt#{FFep-2@_~hx}lbM>? z7l&qN_sqWB_3G80xr?2zU+;eN=D>&dr@nl?{Qb{{pWi$F-~Xro|NeLAKR>(w^RxTk ze|P-nXXlT66cI`J7mL+N|0ARCnn8oE8 ziz);sPI(?rAN=sH?&r^o-@l)J@^l^Gu>(211NNa&=lEps(ahr6mzzI+I`HAc>6z(c zjSXve?#-@T%3rZkyK<#u$x=yqrF7*g(UD_$on0${4Sss{joW$awy-N!Q`c=_Z{I7a zJ!`zznAg&t*VJY~^jKL-r@Xu0&^usl?b6+Al3u;dyL6p*?Y8*V{j|G{DnuyA?CiDl z4&)4vy9S0`9X+;&X2b3K+Ix-qhGxTq7GqPJ33<$F>2P-U`nq}o*KeDy-n7qP|{Qn0R>P)QbRON7_(vsi=|4D^QiFp$SumnOmQXk(C@Rl>_DNcI&c zmo4XS-vcoA(9xW|HP!=%%|}l-PMonEJZ9dt->`j;cKv4Q+D*!;jp&^~}Nj8y8pl^>&e5!)2$%qjd%xJVgkK zFOy`ETpa;~oti;iH9%JfP38ba%n}Reu;VJ@6Eh;HDL_t-O~8d#49{26z@P&bK5j16 z2NZ#EK+;zTLPD_=QBeJ4u-Fx#C<6fH)CClUKLG|6Vk-k+A7ReO3chx3c|q(z7Y9MvpCcI0{db?RvE{(C^5T?VJad@v*&|Zu)=~lj73<+-Sx zUSz?gm=>g3a1fAFVSm9K(u$1WW?%H%EDR=ZBZD(BvRROpiBML1xH>!1;E6Nj#T#7& zlZRw*5p@7pNg5AH_t(Dfd= z(L=Tc5|^)$o;z3g?CHU`Z;m3<`I@)yYTms&^!Dw3Fr$C9dlqbz*M9wS@!vo1|M;u% zzyE6c&(F^P{u>+svp?Ja`>*!zf8P4|{^E_BYZfk(nd~%`DOzK}`-N6?iaO0x zD>t%Fo;9C2XT5OAdAFgUZ(#ZOBwwpx3qMaI(lp!J+{VH?TtH9aJ8g+Dt7S z_V(`Vp8lMHk=*OI<(IBY?mnh_MMP0i)^?ib&= z@4s>*@92q~qQ%^FHOTKx6o(~dgtO!XV2P3A1quRR9>Y<@BrBql<&oH7$^hw2lHijh 
zc%~GeEW?2#4`V9AlBD1;O6`V!uQWjQ=3)ZE=+yB6PEjSlB zJji36m8eVBUB&le3t#KG#T&Rq)m$jgx>obFg@8U83RspxzO|6yEMgT`0E|6y+*^Cn z11Reu+x|nD`wp6S?bmJCrl?pWEiUH=7bfKf$u>73#}8Da(xuGR)uPSYRmW>{E?+La zaec*}1NmlW5}kwEpU@Bh5;y?j;{PX#v;eHskr7>#8kxY1N#qjPBDzG$(%WQ#Lfg{H z!fiXMwrpR$YE6l+Bui@%@iP*bDUnG69F2=h-~zdoEWkmSrHgS%(r}g{LTHSa+7fjx zj?TqV=deHnh$%s+i~}tjDflElo|HHjVi^y1`cRh_V-91J!--6Ek%7nr1qunQNDN~! z1`9^~b8EA}C56nMU=-nesz3nkEWjLGSY|9-Vue;%Wq<-{Fctzcx55C5W)P1nJija^ zfZ-Zqsnr!G0w;e-qP~P~E&|!_mJ(8SX`HPnDsN#->GIf$>ZH{hnI&rz^UEW$^YN}i ze8G~Kl4VgvOR4#b86eHRkQprHFJGfsxz13!RGRJPC=H}Eq=kwpjY^hBvs5u$4S}lx ziBiZE3^CRkKnfyP2OPHWfQ<%QjLM&ATf_sEpvwfXQz@@J#a#hpSY0ty9R#x^Nmf{B zg~5D!wl$>A+lq&ezf^P~gjQ@=fHB)p%R$Oq>lOrIh~XML-k2L>@{+8<1ZyzC;3b<} zWJ4}dnM>5VK^BU}Lsol00HX@PEbvbP$_hYrp25NxMwo&NL&MO(+mhtGQr?;klJgge zUcEf_;r*!(AArRaL0Vumee-4?;Ix3y)_nd{|NXC9KYuhJr|G}{>iEyUTEOGa?tlH= z`t|dziLshJ``u1=l0GZkkrP)OAeNVr*KTGmt01`jk@~D~e=+xLUE$;D+Q0t1`19w5 z@82){`RAEG|Em4=?d0ss^#eox&K_r5m!)^WJ}~SWn<$)^S~@zuxU+l7iIet&N9>m` zmk#u=fBb0A%=EG6PmevBshOGCF*d%wqqD5DyJ%pjbaZ^#&`9O*D0rNHwDHsD1AqQi z`{wP|v57^EEtVU1)el;XBjfJg!K_>NMOSaL;kj`yQV9J!^Mwg zR=jw*;pwvtQ`41wgT?J#-nLG6PhYURx1g<~blV=8){0MoX@O##KoKL>5G8t|(v%=l z#c-7ofaPZ30CFmDh?sz$wBTnk{O_c)<1_t~ zlBJv_>ryM$D~i^o{aRMbgTpVZmliI3Vd5=}mMz$jvT!2@nhk>0LQ6UL_S6?9S&I0k zB4&0ezhFtm)?Il=PZZXkDn53?z4w5%YNK>@b^4kOniXrsff5n;Z(ya$bf70ls>Nxo zc&DFGxFl)mD&EFFH2V)_ovka{cd$6yD@^2uU@Vq~{g1nK>_bR#Nr^aGax6cEmagWj z%;_>cH$_foOGwG$a4Hw9(NValWsb}T7)up_IAvOD7+V@Cu`=v_NuWH-Q>+5&qLm>t zfbI`Y8lIDe81R_$AqD_rDPf^8iDabFhL{+RcDo}_VmJFCIV}K1(UDUDlJe@KB6)-8 zD`5iRZ^OGCgvNk5#GcQ7!IHROEkfvluK~+t$gi$L)pc4@qCVfdnzkxSRpS8iY|S{V~q6rLHtIfHmlaky_`cwu?eq803lHNx^raoKWy zVOfT^fNRT1QW{8NRUBUum7Ebp7e&xg!xPhDnDR)rG9F3?Y)u4P9mUq*Iob%e7SA`v zaCK39W3<>2rw%Y|i+E_^@-n1+xssn#0W?^1DNXB*!s-sObivTPL9p9ppZ}~l3!iGm zi><%Z8AwcF*g_WCNNYfQV=*xy-Lybq57#)NOg^F|kYFrK%=A-Cevp}<%_FI^DN1L8 z%0*G+lB9Vw#Mw)OMhHnRh+$NElVEBM77>P3IjB5Ltsj`v^!|jB)$)T!g7p`d+`YTJ zvwiD_cXb~>oc;Wz_S2_RA3hvItWXDDz1s8a`HrVgw?BWr|NZ-upFh=q{e1QNU$?&h 
z+3??g-$!7!_1}ND{`IYCu$Ae` zZo=;USrZdG-oHEk<;#h$UypzJa{9-Qx*tF4K7QW+Xr^LdD9|^U*Eg8gJK$*V%I@p| zH{&<&=9RCYm9L=Ho-sGIES#R+@cK1C*U|B{!y~JQN0;>t6!i@hjZG|^d|1)lm;a#I z+tOb2=<(W5pAY=_QTOF*&CA!-V-savz22eGqUpzr+q*26t_v?-XWzP?*3gpnpjF=5 zX}tSDbN+J5{$tcbwTw&G#fZf#{qha&#p_9zu8SYEXj?iA09`xn9X+=0e*54k0NB1E zQ&YRNsa@JTU>g{A4UKvRNBl!0C68t*W?!v)@v?e&H2;26X5B^UwcE$5&{GW?(S}dLcKttEl{JS6e*>+y9NjZLU6B8?@XsQ(=+FM z=UexGxa-|-NI7%Xy=%Q|uT2P)kv{zP^Lr$= z_MPPXV!pedzBNk~=%v}e7&$5!G8GM(iu!agwHi4pAZZ>-=B^s1F0%9_3oaaWG&!Ld zPKYkeTg9IHi=&4MKLVGONh!X>&ppqH$hU^3EeNyFk^n&!8WUh|0*vOC$8d!gT1Mm> zLXuXHZQ>e7a*8B5N9l8;jl5#aq7og>T@J3fo7&iv+0Yn&?M6UOAw4~pT6Es4=)6~a zD#OKtKxONi*(;h*Kt6`603`?`fT1Xs5SeO{oNaghvRhg%m*R}U7%0n)a{vG7kun+q zE0b4gU;ysn43x3@+E`OHf~h**Obu^_Kr3QcMj0#V8UTRBSSaExK}(ShO~c*KEIHfb zYI#h}o!s-~DXCe0!Lc-smytDH1GGF?shV195RDZzv4EA8wG3kvPLl>Gk_ygP31g@# z*VD=|R<31*;(r;5iY!E+z(D?_FMt2(+%0+JEbv~PM9$Jf(X22$oMRcGShz1^2BVM} zSjWak6D-A$7+B!dI*I%O4ut?+sK|wJGyrK4jAe(YJ4c{-(P-}!yniw-B-bE57n78u z8J4Ib2v_n9SMrNg3W-yWOw>-zHY_|(yKt40ca{~KYTz4;cXCHj*}4`EYQ|PdL{nu9 z@~;|YsDv?70kKgI$fAL&3IG;4#*S)Wec+^G=B#1usY4GWabrxpQ$YL?2tG=(@J=-a zO06UV=O`RANSo#ZRQLaUQYnq3LqbA?XG0!1>P*jSimaq6#iBgbK0r^hY(wL zK)134K!4>PiuDM`0~1*=2BIs~4@DK|GI=_5e=Jpi1EC7wilZW-i>D98kLcis|Czyv z_=15oIKY4vtj`J}IE4{&&bf7WpFi4fd;Jnb&wY5``r+MU0JE>}93S6)_6+a_dwT$5 z!Pzc7JiPhh`TaMq!8?Eb_~gsy?ysMFKK>r-l*~{%8a;PuM@q+1bmOoC&OJw7OGg+|Xj* zJLoexmDJcARC0+{TETn#I9W7*{rO@0o0omBU-XIQZl5{l=^t(CB+#V!s)7Goyp(Od zl+C#SQB7Txko5=9qRZ2m!C$~gtaB8OA5Zj5G;j~aGQB`#kh8aXR1&$gBC7g!R6z+B z7J|zoyCEMd@BT&vT{59;P_7Y}W^)Zr2gLx4O(3!(39hjQ9#I7Uc+;pfyW)$!cWN_R zTQ7EZUAkMFUVJg|(&fmy`hq*PnMLRQ1wkfs4hn|Dv$&evAe0~+D+tGW27!C82>pn7 zJ6@oPk(CPEG0E*ze@l=2Tj_wT#1Nw(H@C|7WjZ7f4FPc#_&$(177nolGKijIx?=po z>`E?#H9R^$(0g}msHLx~w&~%u^Oq9>B5Z7#+MwlutV*U}$PNTU6I~sGlA7-SsOkQq zs`K-Qffx&l!i_-f7i>&`+O3}hY7iQFyX1>1nM4*j5G0}3;B=u3%n*=Iun;ZbcYqiR zhT#wtyuu|K43?403{IEPQ2c+^B|yI|KA+I9v{f~=QYD!v;Sh91 z>#L&;)i9;KB2LdkySUEilIt%zpwqD9mS|BfrvFC!N6{HRzz{0By7rX&1 zvwYOtf-!P*R%jc(`KlOZg*R{M@dFNhmV7R5L7nb3w$oJ8R3en9k 
zvbzyEOO*I(=8#O$lr z<*{B8_R>3gfDCM6+EXaarup-t0Eske?=bb~d7*S8uA_%}uioT- zgV~dIXJFl(&FJlmo)|A&U#ocavI%Swf9?JDW#ssI*YM!kTX#b<^X+|Oi~#)}h+>4H z86jFw9?1;Dvm=Qv(S|OudYovIODuBY2q2X!#n3MS0HCu#%aN~2kO-@Tu7 zsf-VGvnSG7K*ItfSmu8i!Q{@bfGjD2 zoprxn9rN_*`Tm~kO-)4=rQ$?nj_Cg&z!1k?f^{OY)@65zqWT6 z);k#O!dGGQwOxHkR97s)ROxTE0y3~g|IdHFDBp|8Ew1ohzW3MBSCj!P((sEikpV2p zQqh{B9Ukv^@j^`Vqq4D~C({!>b2I&OGyS83Eq87edxvw#41lq=G+jrog`2M})zRF@ z45LF-lD7*fse`K;>VH6{fu1r-9|2edX7Pqv^0`!wv0x#BJl(??5@@Kv+6xQ$*uOP- z$$Jpw)d>)Unc;wmWrpEojt1n}AN*nsMKgm@@Uw7cKHXV!x=cZy4KBBsF#~kz0lIeH zsvw2cPtiS0%R34k7LN@~&<%=Jj!nVEryIs3Ew^*M(BCQ83(6X`6XL= zM}cBlk2oSPiolJ+yGG)ig4E>`tQ=!uQw)4veokYoyfD#@_Y3?gsuwV{Qau$skWyB7G%aWUlPUlKVF9cVxWmE zn81lJbP6{BmE{0h^;iO&lYqeV*K_pKbM_~>1sjAWSykVTm56KJzHa~cq5Z@ACvV?8 zI)3>8REs{pwY62gh_Ic*qmjb7%&o195P09bsr~Tb;jh0oy?fiVw_8(H;~Nogzz@_8 zh`{pw(Hu_@b{*-bo1Uv*QDxKJ=g~jxbN>?M=%c(wdugNka(?faCz!GN{(+~?1) zynA2s@nbCp-Z+P6Ty~m-x+33zg-0eC-Q!BZDm_0nsd)nz(TW?d> zWZTih9vbD2PjlxNy$45GZC$jkKK8_Ppm081B8}SIiQPF!+}uf6U5}8iM@iSCHntP? 
z4paA!GFR8)MkaWjy^P*Lm&uv%MRCI1LW*>)=L^v*B@pIQl8S3NwKbQ=M;?t2H+QsMzjh-o zCW+2;!H}uS6gDXC;R!T@qe)>2L|%ZdlZUbcOPOe^sH3m&-z*T5*qi`!r`CY)wLU>A&{H3ZMgrvO%$5r&4( z^-wJd%~>Zg0+`dR!s%{AxvCbH2wVjys*es37<=m2c))gY z&@y&9%0`Z>|NWx9FM$z63P`ceKI55I=oXb}?;dUF777Y+9Q{zveps(SW6w|mH-zXB zrk9e%x_3WzUU>EW+m4SPnqhtO%a^qWdoqljm`t6ROkG^c-`y>J_N@BVt9$R>-F@?> z_W1br(NVQjQc-i;FFnsXHq|hv&>l)*o%ta0Iy_S2YNh$msQ!`G@bYk4JYZKGgk z_x#p&(ULfMd@`h?+pD$1Wm*`swvlo8Ebs7H-sVoiiZoWV6fRv)kVq32#c{&f>g zW?GKBiwA-3tmWj6auuKgBaEYxY!cFK0-_9^{j{xFU<0M2rzi)^|7b=-|6GCnt=mP` zH-S+&4ZJe46cqj{0SmLC62TZ816yTFy0%xa<@xjB{k@g5Q}v>`C&0pRa>j<*D$24s zo<=qlEjxxCYe;X{jRWg3}m_sLOtlwcHvg%e7n7AYE2C z&MA`Q7>S3fRPgA504yU6N?TE<^|5fyl>5BokNi*@08In{uEWqWW;GNI2_kp}<1vEg ztZaE7{1nwcw-3u7jkfZ46rRc<0 zbO``YPoQZrg5(^GVFY3+z9@Sylr0F$)wSgTZyHFxb-~!UV{LhODC$G52*7$9z#BUs zJff@y_cTu&-5cc`fCp?0~!SO$`22(zj*%e-MhN?@9N*btN-xf z;qme9?d?koqMXU8^v=$>hQ`o`4T6XD!Pl!i((|kXBGEj5Wxq(}vPz5bseswJkiilD z(@yt?O^(g2u1`Do&8^(-et*$Y!uD?dix;IYUX~pmUOGIy3bnX<`&Tx%iZ(V2);F`) z)^j&D3$}O9?CuxtADor0Wh^Yl&j=%@WX zgp9S3NZ5*We!Mxn;-`!-gvBZC6EogDYcW%*RZqZ{!3ch4(H+gj}X=}Rx zDj(jyyZ!EM85E&{pED0o2jFst&cW0!N51m1i@HSh_0{@fuC*| zkfp~@Hv~YPLhwr`LZO!++ax5{AiB`>>;?PFWNsOojN@hMKtALf9 z1z?uxp=MwY${_wWto|(%8}_`MZko`<{i2Ri06Qs!DV2}1(0e5>O2Zi_8(OK+UG;*a z7?(=JpFX`JnrmGU_DWYe#0z7~qCt^xSTr}>*Z$~Cek8+L*V0CV#>NyB3m!ETjSkk0 z4mS38K1@yzx3<#LWP|mouCK}u@IuVy(yOZ*qrW|ZeB{7WkS3?cDA_NUcsmf2RR?3SbaaL9|mqcG$G@`frt8@05++X?URh z6PSuq(MF)zGd&>7;m8!bSv+!a1b#zI)56%EmzfPgI<_|J=xSF_(g!@m2h>8=^FbLf zAS)cj+jKc_>T$Ein(VvIt6v3EMBl?=*z+&Ii|t#tK@>zzcOWC@pfc&MtaB*tAyg2SHTK_kudGNq*WhQv&lrH0^ztu1RT@!Pz-(%(R+^j4<>?Id&e)5ATN>O%r4|#hZ zlMY{9B-z^J=%*aWs5Ph;X=bi_X0ESJ(3j;G{V6)+4re#iOkftI#KF7A$=->UzxhK( z43L^jdK<%PYkR*sT^NnMxvW()=(Hk$wm6;IpC5Nt6%Sxdt+bo+m_|yrt|o!f7#LO+cGQ{$Kzmpz{xRCfXy!clT~MVG|Sy& z4g-VpCUrq&rqK_1U33^$mfm|?R;y~+ch1En_`A~g`RXbXdr^D+*z28qobL40b4ete z;y3@C0M;c@7{=En#$yZ2g%xPF z7>4(JTUwy}4C{0!>)_#m*-UUhIOBM%Py;~2IQ-MMa>{nzTd5O$em$5-L}+^T@jwB0 
zhtvtp=QEu(UT|-yKrd)9=n%0`De`wHqjkRgj*eg69g$i&7y^}i>$eYUH@%xz&1c2M z3F|FzbmNKDDMP9%RO;#_$!e0xI(HLvAgle5ln)R5P8tM#IR$@MRFj^Yvwq*CHdOkZ z--^`N3Nr@t!nNi5!CCC8!j~Xme`@$rT5FRDB{@CW=-8_-PO1!y8*P`jO4W1Mb}}>Q z;bdeIp~yeC@Rx^2?Q5+UT;N#o6L&?74>m7F%=NYR<%Gn4jU3OIJMmw#cxmC`KvKG{ z+JkEDzL#yoXn8=YabtPfNU6!7azmW-e!w!LlnMEpL1-~+b>Ac$cV+!^9h-z#T!IwS zHQ8n+kR7UoaX4rlhk;B0*J3ufAdPU!hmAq?aH9ZyBLo?SI%5@MelU0$@P`2RuLE6k z6U<1djwrT~zzH=1w#GSNSWf39rSF#=^s5`U2Ye~UghqtshWP!lvk6i96(Hf16Ri*k zDkWU+IGo8kD~W^yjvtl7@iRASiTWHbu8u-RH$OP$Lc3yqk3^4?z?I8}~WbFJS%<^gE1-P(YDDl0{fw}+s+HHggBTCAn zJKBpf+uJIN+Zx*2;9=@Sct_({S+RBLxso_w5XFKj@V6o&q13rL5n~O`!&`3qwY*8{ zIQeID_IJS9brJM8Z^Rq!GPe}8j6*_{kB*#E)Faf@BCu28QU=YBuW=-swcUAN zzMAO~`u%zIwkCP>U~}@KGTQ6V`%m`cpRAK}g}o_e)dz1;XT`GC!?d}^u(QXux6K6#-YJ!ViW#Oq zrpwulRWnF+C$g|YfM>VA^)CIOK+@7eB1+#8JSE|3%4l(22pQfumM~oBYXiJhhqt-= zGu$%gd}<0h78dz1l|X>IL2tEr+!Ej58L`_Hv0HW;mh2P+Xj-y3qs*RKk>|u-1*G{& zKz!kcPsPMxbBmQF`M$_Pe_C5G;My98iW--Yax>j&kTbJ#8#~+{9Y?6TjwSxGIe+C3 zjeZU^)NJc`KX<M2M|QJ06M+{zG!sQW5#3M!p2!h#x{kZtNgG&U zH+Spv_SopyWhrS266%Tbbm!$l&GdJU8R8z=;bwu5XAdA0F@V^;gj_RGU_@`O2bD{=DDiIqYmI356-O^xmjWk_ z51G&v$fCWvrM}EIHpsNhfMN-PJkEt}k|{cXZ3BA$6TE8m-dy1QT(vyq+ z={4i!dD+QT`}}-@s`GkHL)->Kb~Ut zhhuhrnBFoEQ$;E7iI5J_1lo|3fc!QlzW!2(jo=&ywmC75IPM&7NBQH_Ep1^F`mV&g zl`3fF3xyF-q2UoC(f1~W=df+-gph6^3zv`gii!6qu>hL{hfZ`rfx<%UB?bp+&E*}E z3f`^RKPi~T01@ieW*(Ny>$_EPzf&}B1uHal?0dn(2#)+Yr!dAjDPZk-#d_JVI>L6W zL8}=f7_Y{z;LC=!^e2L?o*akA0P(UmvJ3k(LsTYf}qPkg`%Z!G{!=eo$Uk6O& zD=M-lC9($#S`mVdfeF(;PKV+ek5qm)nqgu{Gvx$KqROctWz~;^s9%R6Bs5=!WSk3+ zm>>u_n%pDQFx!`dmVc1#ywYMfAI=ulEGrS;9L3c545HF6#g4|aqZ(4mZ8BR}J3Sfy zgtyUcgygav2Va38J+%G_B-_+Y4@XMSfb^F$Ba@!%Z$;wSI|>^EW=A_xbEPbgsj)#i>sOjA8kl#MJAGf|}zdd|> ze7#E?G|?gzBVn!2$AEOAVt(Uvb+Ef{v@brb?DU`Mj~EMDQ!ma=bAP{Y)Wc!h`}3CQ z>!nrKr@KG*yXzgAp)>a_Zu3i=36vE-D&`qH3ax{07-XCVC)y?wC_Kbv<`zV^Z!N18 z2QeAn8DzjItw|#%x)U3&2KMd9(F2EyT0estphMN;$a?xE9PxBK9+8~d-)ORZQ!WLT zV%KffIs%dnT;|~^tYJT~hW;AxG`UH(do_J2>0Acge06u`Y>hTg_hx7B4GdJgzhiT? 
zM7X=jG&j%4sNEk)SmqBUNjTn}Y2BUSrBxv3W_vz&9|2v|&8~##sSQ){Yc2_bzoINp(t|`0&^9Y!JcPan)TDkDCS1JCPCe#?{v|U0%ZF=0t7jOrVRW zVql(?MfId5eCO5r(V>|Wr<%i;ZkXd31ik7L}$J#a0SzljG5zE;T--*Si*e>p&lQ#o+x!ctY6R z*Vxun-i~MPvY;}eFgb^T|a%Uu-hx?-N8#AB&1L>yko4r?b z81v_gM!azQ4u+h86qbYyoh%ws9ZKlRPTLXtA~|hHps9S z^eKb{Xrj7LyP>3D+e^a!q0>lYk-YiOto?eLKnv}Rl5R+Jdm9DzqJ23U3S;Pe*kB4V zU1)~+hZ0lS<@zV#I&ep#U5{9YP8kxoB**tsB?Vas&i}sS6)ev0+_h6S5+U{Zqw7%2 zY_o+^xsoeRE=s0idp)I41;|DIVQ%VbE;+&E!zasSsmStV6w7N#TgHUY23GHg#c*aB zGwO)cRJS!WzP&}|rO`s0W? zHA`$khFULod3hUSi)-HJi_iUfn0M-5^GXrV5MoJR+fJQdPn_S*J{nViOUSJjJdc#wwd*Z>TSer4|J&i1q;0%ckh4 zf&!V)&?5ZIV>Jr%BMIDv24>a?OqO2WC&amR?-N~LhOoLswr&zXGZq^OypA?iGTkm- z4kcDLS64BuE;?FWJzAgJyQABR@__1Qq1( ztE=k~=&x)|Vb@SFMfAm2n^xMJ)!0+n+fXtvhE7j*$WC%2Me!m;E5|oNJO!rpwC?e- zTzb~z=y;&IKC$jzfqSXfEMs`kz*wE{3EJ!>>y=PrYI&LCkhVqysLLbZBwlgZxF2-xbLOOH1CTdN^lTV@AbS1doM% zIqxJ;&fR`+#pGhYgh)O&N3&AGreR9im6(kc^>M38kdR+016b!Yruy+Nk8E?((YG|TSJ%~q zHkNre*4bAcTPq%~Hp?UDX!#=wiV-CushQ~+VC>3!=C^bfKMU<@-*-oQ3(DrIcekiX zM(Krej;~E@OYE&A`tdES_ApcwK5eW@b8#jj2I~uYnZ%EAdSgTb0(5ZUm6UR3ABhhe z!g$W96{U=J_Yg$SPdzdBN-l;B(g!06gN-GOvh!hYpbaF9a1Gm8Z&zUB>EFeC|6K@R z5WMCg4{_mOqrjAldJ5uDBb&CvgJ|S|g!hFGo&^t{K@Opb?lCKcd&OHqeU%mKv4q6@ z_IkP!SEgF~Bg*O zaDx{bIDL!$7#r;2k6IK1g_8d9as7?ohVA7}FXdMO+_?_m*Es0F+V6sz6hei*Vb!s~ zDzLrQ?4*D>ggQ|iKC98H?%5~n$pqIE98#`&LBf;}7UlJ{GG8OarC`#7B|8m#_1>j5 zCs^ROxP_CzdZ(pkr-fSgg`%5T1O+*21ws+-y1YMg>)37WDUJ1TI=Pwa>3khS^a*VF z!T0da*oE|5+nrXyeN28n*&qAo2hTOeD7rnrEx#{Y-ni`PV{>vL*^fkf8)u=pGsQuH z166?$a{M5!geXD8?gHK`2cir9x?0s3ZdpCof3` zK6D`Aq0tzOLyR)OszN3Kzu)JJk6=yV12sM)Hxruw{h`!4D_ zts(-gqSI_$C313FV`EJ*G1?&Tyhj5U21gq*O9SS=74&r_&`Bf+2vRPOkOG#^c6NvV zI9ij`m<2vu5g3yW2y04+d%xFrDM#2nCrO5daV2}eIs*RUL0KMwk&%gUBoUop4CB(G zPN_ySHkHN<)2;?uo0{ivZV;W=5S>|CRMpUSC7@#%8eis3xeX0bI9F7gws5GrH*H*B zTM*#;u=31va4ZS%tZ{PAZY|#J$UhbZ<{$h?GC+#8F$#cU_Pz_s&N0@Em!1G2Yjlp7 zg(d*2zYo-aPSib&P-_c37h=ZJG{_#-u)0xj5$Jm2jjnU@fKGyRLf&?JlOa(uI4&}o zQ8w_V?RceohxiKy=Gx`kn8lxjTb&mfv0pZc{O<`?;{A^iznF_a;J=NJf1BVg8$qs{ 
zfh-961nH3+&Vy-+jv!i=R3xY|mJlpU8}SxXP&{S~#Kz`+Y<6M6?pf*Hg@U?y|8WiE zAYsLc(ZBYH=_}ZgH0B^3Ham%C)e?`vugk0 zcvNoI?nFRDYqjUu>dZDvgHdEhy4}u)hm9<9wM|b$!eN7@s24Q5H^k)V^72;#x-s`3 zwO>-iP(6NDK9B7-KrK%b4Tw_$oI^pcDhr~y@vyNmdS(he z%V%0==Pj5weVUC){Cug6B;)awM*(LkQ}>zLL&BuB%3(BGY{yvrP?9gkiNBTE{xVyE zAob2S`QRAP>rUv$n^a1(0!_w=gd0!P-W4gv!FR-!3}{{sWS6r{U}`XJFB#^~8lz;l zAIn`+uK|2pVBJ_=JS-^4eR|kq^hNYkpy`hg2SH1>|19;AT5HqDG^f^AyPn=u2kePM zgnS9!F9&W?xBCKmG`G4ff!0S`hqEz21?(CgHkdDtjI`)?RdWme=k2M}-jqn0g-o3J zkedOkg%JvTL`bpsD2QUScUC~Z^JhI470~SQEi!hYCTpuA{Wd#ApX8*tBD}QHQ}mjBf*`XHOKs0md>%SnMb$9QdQZ$X zi2&Sf6(zxuaoGZDF@`S)8`9o!!=<>VNDlQ?$`}u{RYY0nIRD29&`+2T7v}LmlpvF? zUKJJT2&z$*4l}Q+2;kqMbSl>+gV}n2c;al@TCp5mt;f}_Vdk^nyt!W@xY-pyK<+3; zJ}2*RC&komhe4r&yl=c=zU~-sPqqqit3NrVFwkVFK=ay)W3` z`sn%ZmQ*#wPa%g_$RFx?-ds?=5FTuDS!Gm9((tA1sD;6py39k%vpU&hEXdK0OdVNU zTOKVnU4QGUKXiWZ3(fXt6!+ay9Adu+bov~*_Nsspf zX?6Scf>8JI@t^m;q8nAV;cF0@YB(ROChwop4Af9e>a2VtDA~qgH@k$tnxYh=Hc&xM z!Fbenc92Dl4ng3}mBONNgx8ty(klVSa3{W)E^BI9{gcpET32H6ZnZg+^;AClXDkr^3(i-HJI^lM%qBMwR%SZ+kJI!luvLk3dL9?POv{5IyDnu~ zqn>_xI@;nG%wTCsn~O7D!#8znZsOw1TweCQvYL`ufF3-evV{;u4>-)9`{x(2*Y`r* zOrN)=hmR9q>P$HRa(F+nKMsUu9;KT}es`xOTeBQ`49^*+K#<-s+^>cIhJ&Eaw?LOK zH#-3gSg)sQWmAAU9_#1ww2hsfu69ptqe^S%uc$%~-7O)0a$N;oji?+fDEX;CMj@OJ z3S8h~KUJ0g7`@ko2vR%W^r33G1a$1{DJ&)M=u`eYAvX|l0w6w#pv6DG6`QTqa|#_8 zfX?ONZuN2SFm$fKsq;M~Zw`K-biC^~A`?{`M2ZcLmJ=r?-uC#u+FfsPkBupRdZOdv z{Hb1atftOHK)Ce#tLk~g{YK%lhPdMU%lcAZNbUPk>|_7^!k)NeyVLIXYyH#fTvsX+ zQQ+>==jVQEOKePar9H;fWH55f8&a&doUAv~f6(FTqM(-ihXRZb`YC^Eqz8)5d(� zaXBbmE(r^po;6$_v;r_Sa!s#EY#shRT4Fyg`3_{dOS{Uc$yGJSu9vz-H%3@Z{9%y0 zrg7CP#8b_)*)6V=5=hsTGX<%zeppD3ZBm44Bfw%k^Eapt<5H$b0~f@y2epG4#X^Yf zWimmCN{_(Mca5d-{S7xs&kQTGfMJpeBis-LS_zI`hze;5^;feM0Ay|2t$xK!2RrTK zw@le@aMqO0(Rclz>-y0S1a>83kA0-J;-%v1zP6&NrjG3zz#3AQ=Z8H%k~Y_N=K))t zxW0PECxFGthQT4aC&bxBj&k7h_FT3m=n8U0WHiwU&@lip(wG(I0l7}*_!e6#3I^}$ zwGfDl31<7%k3(-Lw|Ikfrh)F-S@++XIIeYm4CgQK5T*n)ejH=U!0?FlASpl$h##ZW};{Qcu~BUaxXkGBRdK`xH@8sr(rmY5&)AM3?Iz_7Xc<7P|$(4 
zDKR$&o|HggmH{*h-yqIGP%#w3FG2=2$pxxRQWB~8;i_i4D!w?e$r{BeJqx%gc(GF|KGH&4b zEalX3Hf@kbUF1;^hDXx@#Yk7&&LQonsuJ6Ji($of;)~4r);J4I2&Y;oDx_NDSMkfB zy}#H=Qv$`50(9eq^hNa8^UCoR*`qBohE;5pLsEDEVNq^2r6UIaI{yY&ei1EdwAEmP zi=(&6oZ6`Pge2zu(#c z$5W0oyLz8XON*F4x-9^0tP~-hiS%@L)QDEm_!14UqZS^e-qAna*3kZTyP^kk_YM9< zKAxWDEhe^I7WOqRwx9j|qVXYWLHFcAO19+Lx=y3BQh(T!Ec z)eX83bJYxZ7Pb0}7>1z!w@{u-frnkk7M}QA6Gj9WId^|Cte*hv`m?!NW@*WJdC63t zM(;-sHs$m*Wpp(46w|@ySDEfKqu=}d_wDBb_6**W`&ka5;M?8%jlSRX=XvPMwqMof zwvhYI>;3xpGt>S5D-w@l@0rWJ*|%St`&>2Wdi*_29qyJA2?8AGyqu?OJp1%ID8k~9 z`IVY}yFO_il!lsU^uV;h+K=@$ImycIZgBGd1i;zjMA^ho0SDl^zFomp)@{BrZ--1~ z!E4EQSvjIWSaFr0eAfh)pCllPnf^s*;*QygNTT}}lsE^yg9bz!z}6r%!b&|FzPUA= zB?3=jx3tEJH3j%)4aFZ99Ah~jZ%sMf3o&AhK=qWZcpt(mo9cTb$~Oov3i2vp^Hd2LCw_|v zR)j68&vPlWHYm9oSSsVt7}vv%LEs>UVf9YK7G59mdN|89hO>UCL&yXKTuvy$8du7) zt5a%~;wR3<@JIe>%sZ%bxF!h!2e%&)m7rp_E_dqCx|WaP&%w3Fo_efeK&)XvWP%F~ zUij=U19oXn2TW6i)k#p68M1U2oA5)8orLt@k;K#$INS|{_YHs_ttg9?0Z~>vDoV_M zx{Oxk7(#3Jk$T_D>%U;4KMOeUPo~s3=LC_dJ&CC0Q!w*jAeg{Jn8?p5Qfjj}aI#uE z!Eyw3i4#`oN%k>58HH)iXZV5Ew{q65BpY3j-i%p4mSUQWx!-jTbG43O2NVVjmKGGU zIS|dF1c62L<6GFr7?O7}Z1*zEci|-UpS{G;1R6v_0}%4Y486b9HQ7@LB>M2<@H5fV z11`gHQ~H&6AS}E?&|VCoszp4Cq5BdFtYwusYn91>J(I~8gkllR0YNFKstX{rgh8XQ z`PiWIVE)0+24O`OIKNa7Y4LUhFjNo9o$!p~xh!xN*rRE*irICQGpZqV*Rujx&GRPH zs0$e2ZI8kNNM}sU8yU69nG-ZL_y292y%Y{y{eU)g5U*`Z{(e&>hgf0CJ?i$;j4uG@ zu6=O?*ZgSEvH|8W18DwZy~)C6{~^1Y>^%O+7w$xB@5HOG#b9TAz{PO{EIs2EC|m`G zKAU5^yF~)EZ88pRE)ncW(ptv=9=GqkH%U@vhaU+*$_^hHKf1(V3;=}yc>p*A^XL92 zBoOV|1Z_F}qea-Gi{AdgVe zAoar>+D=I>6_YwT)Nc>(7lgVWzh4Ajmvf!{KHFY^AKN`KU4HA1ADyoYHC?0Uktn*~ zxqifeGD4T%SKIq>%Odf%xsV^8$9wzX=azvb(2vM!_@Sl>yT_KXb zyxyLVa#?#5lDD{}FS159#nB3l;iRgce~5)YUL@=P3HnonM8vW9hy7&!{{zn0 z2WP;LW_F1&#X}eKo3|5!yha0z3+^&m_&^H16YWQ!3b_bg9~UN*G&uye#vqoa_yK+h zh4qMO!Ox$F|Ec06ZL+ujUdOSLu-k0u-Zs$R7U%2JkT$v7We)A;4E(84y0wc=aNqdnvn-Iw$oNFp;3s zjyxBGi>#BK%nKRP;Hho;K?Tgjxr(tFaFC@JNFQ0`LptF9MM!pekiRoTiITuC--})h z!#V?4JY~3$Bdd!e>xmoY=AwOC1{=v@edU3oubB4mxsm=;`8l2KdJ7K;b}~Jjv|QY* 
zOoA-D)0COIbEE8y(YN7@S}#fPclbh>od=jdl`vbPQOU*ySs1}rAYsvWmMK_K zyhGq1QKo;a7Q`A)NVOcglXDk>s(td3JrU)*q`o-+d@bg`SJXn%b)&Yfs0>8$zxUFR z$CR)}lQ-~4QRAB&tRG^nl^kn=_Bhw3pzEjiD?{iq%0jLzSoJB@9ayCES_Ni+Q}@ay z=sGs*aeFf*WCK8sKloXh2f0WBEfayvJ?K7t@31ngmS3i^;rp(_Q}1U<*aVVezvx0r zVr)6k>Z9K1rQYzz|6|;4rOB2oYYF#;6!FG6PN5Cb;KhmO#kr<91LAaj`S0=KpDB7- zPzb5SW?Z@5NuHgwDqCJDp5kIOVXryjCFpQqMOXb_fz|-&$j(wYBuJe9qG)){*lmIH ze(@7p^KFN@X>yhK$cB)?>f*%}qf(eAwO&if>n!$|xZ3j#ul~ z6F?w;9U=b3)BE)z_?aL6`E|bf{dvBVEA;gJYWxo5^*tZJ(5~ym#BuaY>{$#FFjP?`!GL2fHT(%J z)ma?;QIt#)&hBN*XXQ+7=*;KoOKxflE==E865zWb0wg$p8&b8k?Uj}-ww5H$PaJKo z&G;{kkYa%0>OT7Um8Qp}N4Z{#*ys@?oJ_gpQgi6fbI+Z`t74|g-yHJghHhyv$EfGLdmYiS)eh7B& z(Puxr`*y|O#K<8)Vh&QxCSJm#(@wh6=~v8AI=n#!V~3@V7y7E7E?0TGwdeKU&jPBF zVXWc(l9*`5sCUFCRre{o4bC1{;;7c=Gg|jEf~$Y48)XgUWlgQy2CFMZ+Yf&pU&@?c z<(#ZyETs_u(5T3s%9`;HbL0|utJNV0+>TDCikQ)jr}^c0HQ&fAy42eF9L#OVe$V?n&wu!P7=k#`M=?wX@Mfmp3Wn7^x{p`TN7f``Q4|HU zZ1/eEgpFz1f&r0ie>0ffWE9pxU(be%^DPL^^M!Yz)Wfehrbh9ZtX|Anb&|RPGX*PGxqaZuE55 zrc*Fi?Cyw?9yJ4+X{MxugKE1Ol}1AvX3w8neHpO0#XP}oJVQvHLrA!uDTcS)vBH$i zC9IH2@9V8rEA4=Uc0qiOrfwmyvv*>I=LID$j1Fx`Dz#tP;Jy4^YWH&wCZ~^=AqOow zk^Bxqj4F+ z-=?D2zQWF;#>k+?Y$uPdgVzFwn+AWS0(adq@L9Pjy0o3jMHjt2g++C9P$c*)!u^TU ztlfZsz}04T4XK(&otR80Eh)>?w`=%S3BBYNwyA2u){y0X=ssgx$o1pZ_HM?0VY?^n z>!bN$|9vm^dXIQJ7$_Nh?Z>`e>xb$Q3$1N**0;O4IrEu6llXcIIa(uUJyzj}w_=j( zg#&bsu2zgr%HYq!9{@=x;W(eq?JJSc*__kR;Qn;zc-rB1m|js^>-YYnrQql(XQ229Q|2w%o+8u| z*F?s1e|Pv8OHBM5U?!J|8@}IT7r#G8&U(JrfD1$A?@!+oU`7K_#_aK3{5;(sjJ=Ko zDhTh}-y84GLa|@hx$ECA#;5DYbNkqj>)KpyZgc=u{B*ngPVCp!(;WX=n60^N@mf0c z;FzhZ-D2y_Woq!x^EZow5YI_W9QA8}mFsuaNV{uE~T5}aA1{>jxi0?`Zc`kdGp z2g($DzgIA7XTRfDJVcL5P?Bgzq-hMSCsx4R+J{5YC`R%I+GLZ}{W#wJ{o!E#L}>ym zP+NRcb*}=lkhag>r(xR)>&4>H|75SVntnVAtn;}xu0P{)G$mzd-9@h-aWoaX6KgGV zt+ThV;uEpiEw1U;IJRnu#Qxwk?=XCnO$KBFaC>oS)@B!90ceorq|`)36pFJW%;7b| z^HIQSLiw8dGT^vozD+R0%x#s_ zUe%z8+8TGo;v|tH`hvZx^rTn%Sn7GaWSCdn8Js>+j9x}8q38>fc?3xYD|GOFK*m|j z89ivAi?QmlQq3Z^h=+b{9@Exh2X0X7clq2H6v^;Sgz=*7%(#_2RGOKio?-dbUk%}kkR6eQ 
z2#^Dng)Su(A%soh3Aj0DI1U%2Id`~T=a1QGFh)C+aXy4|%+iTlm3&7G!#PG9_P7?D ztZI=YmPQyJ5ON(F{+-668%$j=SAqUpg9=Ourzf%XhDEv3o|AQd6a(LSG#q_fmGC92 zdNDQVx(Kxlace{Ye8hw*ygA8mX*JT-K-yH3f}i!ua}AIKRo&Ue0pMvKkC!fS1!W3L zvkj07m7u~}sshla9tao7pvF6Smg1m88Il-DEQpBZ&!rhp^6f0N|0(3^mHWnJChcM* z3p4UwZ!fvodDT1FHTziQ7rK*_*A3m@D(9Z2M-2pJ%=EZ}puFXeD?^e9Q`swawJd&b-FnOlEn$ z$ojOx8gM!%MW%`zk;{tI|IJ$NMU@RB)W_ z%2pc$JTUu@2q4kF@dX?^D*h! z7P*gc#qACEY+rSCD3G%S2&X;L)KOS?Y&-&E&7V*OAy^e}W($Cg*d7sfAq5pwqc3bN z^zQ&4PU|B+u1u%?v3t+-^#`V#NxtBSa`a*O)kQ#^q2-x2eLn;b4r8!Ty2_8y+Mo5} zmC&Poq$UiC&e>pw8)Ji!5{k>CY5}L^2e=`Jkf&r&#Ht~YG340$sKN;0%@L!G)3hd- zx(1L#XEz_oB*SLe5~;xcsV!z9^>TF=iJF@PyIa*?r>=>2boVmLQ8Rjdxm|%m?u!pU zi(UcGVy)V&PS41clBv(Y)$_|zPtzDOL{FGxWxJxK+m(2}tr?k!WxSe=4c=zMSE<+} zF2{W47i{z*Q+qF6hkVyB%inGN4?b;rt8&7M?2Wa2USQhmVs>ix$_=U0xrTYx3w<-Srf%r{~;z2cMq_8*Yx87%KA4`Se9h<#T z?!}wV-Gm>ynMkmAb~4v@GP4SCu+Kk3OxJPMU~YD*si%*)eG( z_1842$u>>#> zN3=ZI18ia-yGn&r>$7k=jUh%K-0l_~0Bq1Pxl}#^qnT_G7Nk-DeS^=FCUkJ8VP*wW zzc6{*q1`PpTib>((~=-b`YxLKZeIk4gg&hIVMG}MA_OETDXPHmx?Gugj1p%8vF}ZC z$A{cO@NX}xq&uhrF}u2d-Q9+|Hu6x<4|oE9E-Y!t9bk%Ts2P}K7tl}#6z-*VbikPz z{)>=RWvQ1S>Wg~~uqcE2c1Eva2hzP8kSIulNS}yG`C27jUum~-@p`&3Etwg~Olh^{ zhPe=_ZF#8~NwIA;?HM)1-kPprK?%r%?O!S=Pr{3{dh376jYStBL1dtR7!_lnU5qFAF(Z%mJwS`u{++^!Qpka;S3O7 z=<-ntrEpgTh#eWV#5hY%Ut>1+@12Xt)KugcIJal`!gGT~4#7hK?lWHI@iU@Y+v~b= z>*4KA=tk{lBEyVz1Qgn&#T#y8UwXG^5dIwU6F8hrfE7H)Url5)yxiOfK%L0}wexPV zz6SAA&3OEHZQ2&%`2Qrwam(Lp{SKi{dw%ZLwg%n0F3tNpPd1H!&$--{Yy3U-MhyWAicS038hcJrgQZIMI8N3+!rZ%bRLP&qcP-5` zOksC9{E$+w5gqMk(jQU7 zgA_~R#%R`2U0fF{C_{l=LurE#O$o_O-aPM}=F{)f&+0zTLa~PC*KE$nL6TEiBGt#w zIz1I6i4D5O$}nVp9(xJMi+( zFX?n{ec$)qpNdpp-jO#ah<{zH>`xG(7`Rl~?+Mx+3HglE`SsFO<*EN;J;!UU%1&su zQ?jNm*~P7@4!_CktLsiz+}>N-xOy!NR#qF6opQZ}mxMV~8&_QKBs0@4@tZL)n8|Bw zP*{1$LKNU9jTMPs97C z?m;Gs(rc}wPRY&&L(V@XlsVFl648ZUGmU6%ed*GE=GyMmjmzbIoy}X@*<1Vmy?AH} zb93)uR@z^xKY@iMn3!WqR^sP`YDv@3yOgt7O+Y1 zMa27el7J8#q&2L8KSu}b{Ed@8LwzAebn4h6Zu6D5@pzVa4rT5uYF6pO!}SQR)1p&~ZeLafZ-?Dxy@d 
zBmET^)klop{2fiX1F6{?RH0!DiHW{aftDrqYXdOM-RVwY?GPA=J^n>Vv@_Ubbn(_Phd6oGT+mri#Pizns9Q$=Y z^3>2UUUezjMQUuwBiT|qS0mXAm(}$6()F3OkFI#l%7NY3iUz1DxwtqTzEB{y-$MUj z$}CPotJenWciy{YA*Rn~p$EU0&)Ml|!cC+3ey6pu$coSrT>9Z1%WM~$hy|hk zlN0CPYfvChTi~J4h-o8LX?I!=pP@*b;Dxk=*lu%Y(Fv}JM$S@g-YCsY6`DG?i_;FF z)F=B3o$;{EZXtXv6dJ5Ar|Lm!(eWMcZPX=0>wH}8Zz_n45gEL%(b@o-HoXtE*hSYf z3qf5yI-hL|dc7c`!n%lO24#?fxrFIYiiPKBNJI4tbQsMd#g6?1c6B`3PVQLC>xWGs zS#8-WQ99)00YeK&bWks}h*2O*!fx;l8hP^HlL!81z;(p#FK$$du^o-zil?olCb}+u5SRf zp$G!TCemyzLLoY$f7L4^)IcTuNB}u04Y|kbGTQn1)uffTIyN<@nwi`%Zla^j1b2`5 zye+;t=R?DNA~-mqLJpTIhfv$HhW=A)b7OaNy&FK` z79&mS1aK2kp!MQdKiQ^d>oZGdm@oS|qkOC5l%`bTVnl!qhcN5U>#wk z!Mx7(M-|WaM0N8i^^m~XJwt~MME$3!;>t-&v#5(23oq~Xqx+}axN$GeMB)IJdM_Fb zd&*Im`aJ>sOv@G>b4P?|uNpn`){jI{1i2!}k@mL`15HsrvVnQrFW-<(&b;DivwiU< zx8l{_26_-xUJ#A@z6KNs1^ky@oXF=+eh3AK0LbaXLHJKF9inD|57J%@&WuTkgf{62 z?7+U3M+`pZ7c7FmlwuZrJKD^zOS0V@$&ouh37p_Rm(>6%0?%E0zmPenYrn7ao&P_+ z-*Xqf-~F0*4+4a~3|}UgHXX>GQ}x>eI8X7@EDb}K{l1MK?fo=A9{wT@uM8a)1}J%6 z=b31g=xK#(&PvtTQ|4rW6#ziA@ON0Mn`FHm+~#JiKF6?4MuL70ILF1)Rx)ap-!n*q zdl(1*e;SNH3H`v2x|vgWg0NNTi3o^5txs9vqruO;)8{NLJ{}4(PKpssy#oEU4Dz>NrmYG7y%AAY zDb#azXXWMt&6w|!QZ;rG4QTz=n( zfr|Y#6*1wleFm%c`eBm-N0`M_-Jfq;i4;97+0+!Jj1Du3|B}I{pMP5h{Ink z^j%O)JZmOc(6Ew61Ai!CIPAgY%vk7wh{49Y19B4)PFl_CmI)CQV4Q%143ruk!tbE3 zmaL)jL`OyfhOQzmL0^2L>&mC=-=kKUwIS`18f0~0pvEr|2yqe2?vv`xruC;5y1JGM zT_EqEx7s|%JGI@+&-uN*CDIeH8`n2Cn-h=+*zPUNrOU>RhlhJlep|NovUIua!wXXF zKRc3JI6|V>6MQ&J+f*(AWs8zMw`ZB@8Ty$S`WE+yBUhdvvD?iqCFA{5Ufr#TZVM0Y z>oY_lg};QsK@Rs|*QoppOX+sbtE^)6u2kzhN^#RFFb1cy>=VL!CrjU~W3@^c_KhVN zOzL&U%0UzK)sKRH>}*R7_00A4-1SY>>ujg8!=C%|4_VDM-=1BbF}@EsYJikYJ2Yj} zq6Cn7IJn60Pt1$`F@eZi72%ezE#SryhxU)dYJKEGvUZ#HlHyJ?Zkg&{BSwe)LNu6~ zJd1S!jjF4s;ICBmZ|K}1NRtU6g>oo7!x>34F|vW9sus6}S@6RFPR0>ghP1*+ zXAllS%s?g&R3t1Y5|W{J!1s9Z7K0)t!oMaB2&(!TyUwuXvJ@K!Fn&O{11^tc0#2|t zhahCtk562|n{FbcUqrEWhjDn16v8Q~28|Hq>Y%0>n>LQ0c0MuoAncM`bUh1f=*jH7 zrf2fw_PM?1>xW->-M9Jk_vbT9R2?(y>xj_9@2`)Gf%oFb<>IV%VS`L(* 
z`=my=wtH$kAKN`#1&n-t!pz0munmJrf6ss4&&+IhR{^h*q8=pI`kIeeYfP-I!)&qq z$$Uw1qE3E{g^}3M?JN^Ggn9eIZf!8>YY(;Mq_Ci z!ps9ke3f%;)Z*;I=&BP_vB3iye>W{bVD&jGgxskKIl5kA!+aUf_LydFx+jkgaYLT^ zG!wlVm#D$@I*>n%P?xs9wER4^+3Ldm)=hfWeTKf8&6U4i<{~18fl$f^qU2); zt;5#oxbms2= zburOZpd4W<8U4yK4FJ&%#~=|i_0oot;N9q)szpXpg_F6oQ{ACE1KHG4 zm13%)Ojg^qR|T|fHt2}kQUyqo1W5ROetOOK+ngUEN4-p)kh#j-uAR2dg^vQZdmFX| zs;Zru-zBxQN^N6jE_D9jwFJvbAw{jyKJ-75&MCT*b_v6=jfpXFCbn&3l8H63ZQIGj zwr$&)WMbR4ot*yHS?jy#i@kPV?A_H>Z$0%E*~eH?+DcA6Gh}A;Lw#4)>Rw>-K`^vfiwNg$Ph_O{LHqVjcO&Ay5O^?kEL^B(3+hGv z)jU%DD9Bh=o;&Ql$)*Cgf--zskz=ykA~Et8JsN3CA5DI)`8q~E2-VqR7k2+@eFF=- z(gx5)%wBBxsX8D!I3r7MV?aiAkn2+(C+9BcSdFXvZpEc+)sDsTXIUES^`Kjpe=E@b zc3pS)oqh6C!n7||q%zGl7|ak@oNhTO6O_0HH-{+%^eKb#|XD5+ErbYknP~rTM+`cJSs&B zY%#^cc#`_m+-RDF>c0gc2XltEW;2kD$Gj$1^fC#;LB@5A$)C%5AT1LCQ>7sL5d$b= zWD?HpmZIenZp>0RMPYStk*{gjyYlHuTN(j>8>l!ALTIKI)Ze zg4D_~PJ@{lYxY_3`kPo6ALyvNOgfQeo1dMU@AcIcy03Y>{f6MDiUY&JKTA~Yt*S=! zdgSfyBK>st{I>_?eBy0?$)@WOqb7{o^=feb*I=n4rHVm5+sA9`B}IU)|8>fD&Gh4< zreLePsmJ$gd%y8~qoZ7}hwc4nt5E>T`eX0vr|+}KR0;#))n*{ng|SGYjS{%w`KrhJ zzs}!Q%AAEo57Dj758ECg>y$^&iU^TC=9CY+IyWUbS6yCNZB~kiG4@a+oE2x`fQ6LL z`!j^-yfD)?a6Lr_^%z_08LECa4mT%%{zwnwx*;IDu_D84BYvhP@?crMA~thO)@3?c zWZCQdJGsS3QP^I~-%jLdAKzB}jEzzJlAyS^l!t{OGaVh#va1)Hmpr7Y!+T)8_-MXQ zDcgr`xfRD?HJllHK?dsJzSGD-zOxnV@r}n*s>C$S^hiyPrwo1^6|Z=o!`4>2lSWhf z#(xI0`G()R!zd7lOL^wW%Y1^T1o|y3Y z7hr~J=6xzRZi;1VxeA30X;)qm+H~*d)L(utu4KK-%m!T)}@U(t@z9fJQSfP41k*_`0npdkbMgB zl+77;-iI&K3{Ip>XaDtj=y2y`cjrXxv6{f;;^%lLKId-M40A>2tR@!0QZt;Wrk#Q9 z(OikQraos=X`W&#v6GdOMpNpk2sU7~5s2paCxAf39bC0qn}lv-%AeqT6sfO1aE6u* zjd>~{i7s`@2J~Suo*?HS7wiDRvjhBSuQ466 z@QpWpFvKRBOr3x1!TWn#;?v2oHi#>S2W)}H{h8-CPBex+WwaYS)D zHYx+`VH$kWsKK#>jlk;Z{OW+sfnKMlZPwqCgj~Bn$a&|$Bf3ZevI`Vx)DJB#kC>N#_JYu?>&e7;zV07f%E*7PL4Zl0() zW8P|f3EwtsH+nu_&+m?2k%_K)ynWBkJGYNHO|OUzuZRcqqX@DR5n}9LB}#R`t0xFr zn+eyCg9LkuHP>L)Hs$#FXxyH54o}7tUyQfnU^kZw6?<@Bc8zDI1Oz@FtM=AfvKjQ! 
zH?>u0tAe5xq$C=>P+GePkPOckUFv~a!QmGd>mds((B?IGidB3Qmz>THW9NrVhgoFr zkIKlyE1o77+5XpQXBV}}atwzKgWljLcubJ*ip&@~+(l{nVS@Gk8q#LH0ca>@jyx7$ zj6Ppfv-%Ks`-4$NUeXBSE6&rDo=)*Er*b%%l%}Dvp?7ZgB=hrVc`FbyjK#NQK zmEv4hp;xE8=obOc>0K>F~3wQ+~~Bt^Exhap&oH(OOe$k%jDdBM+4@#?uOJ zXy8)76RF<1Eie1YCeRU|K4f}Bt5XJoHUWJFI{0qGO4H3iLYgys%bVN5V5dK5D|*Jk zg~!-}-BzeTKp^wCV=}SAdK&~qu2WT;o11zK^Ix3>*?QQKIJmZuBx~_Kzu)n&DZgry zuKA0L0qiex_tx3BeCSw=SUm6yO;rM&^cWlGJr?%|QXvh@ru}YDT_*N|X^OjaYaiI# zzMmdiDY-yau}Scsmhl>Qy~uZ3;r_he?|-6%392i!RZ(5G8)WnQF7jBd*_AmnyQKn3E`p^yv~)q*nE zLb4#DC?wc}Kt`J&k1Uxmp*2xz%u*rBK>Y?QHG~+id}-ZY*>o;-l=X!qR2Mc(=vQsy z2jwsc{%3d{G5m4I??Ieh-)$o@zZOP@oPd&*9kWIGtu!qd9c!Iqc6}IoNVXKe zG*e<7Y)O*wkbljIkovbZ@I)Mijl7Xwo2V?z>7{cz8f=HD&G5~#?!&-L72UPTIW>X})$>#fj;JJVots(Z$zA1S1GcN<+S8*Pt*`fzKHqom%brh1C~+mn0#HWXuFlVw z6o*#LvnAVDfU4l;quUUm(%c$jx6x&~Fv`q6$jQR$?xt#YQ+t0&xsA=a%IT!i-jdeP zh@7SSL1}6{pm{mX>1me9$3>Z@l{m%l!f+}=oKdwkO6F}B#l|_Drloj(sptNc_qx_{ zSfjTFgsS~_)s=E46N^bAVc{}+bk=qoD=~&+zySucD(hTAZt(S6$_SlZ_;*9I%#X^* z+0AkMtUq!Se@zd;?=Ag@tELyC|J<0%f0g2Oex+;OrfY2X?%j+A*;!1wTFusY`fc&` zW~U)9^M!HohO^j&(BULDz}MGF?JO7CxqR0*mTNEv!}fGRlI}bF#(KebKFNRioM>ef zu(TrKyScYIbF|5_zsa_}$|HMrUee;!zQDGV?Ti}IbZ$<7{d1&544p;UEH%ZlM63XP zc1G%Dx$w8ws;!lBp4PmT4dpqJiS?Dq`ML4sxiKOBApzmh#f52g^$2ZsP-W%N`GskA z&yTnJw!?=E=o(#BB0`lcd_J$2-IEiki_YrO7KWxC0dI$0%>(cx?%6nU!Pzz#ZY(Kd zt$2-O)|eu}m_<=^nK6h7YyS(r`WRgoQ=%^4pjCl>?3q`K65+<5_Q+@8{d@jO1a4j= zRrx<_ab60ao#GW>W&>fJU_{f&xz^%cBKh`i+q`(4w@#@R*(4URBkPc%gjH=T`*nTim&$&_S%}Z^en6WA?fx; zNLn*W$2aw15hr6z*V3TI#g4f$InTdKXkV`xqDK% zSyJjd-hV~)R5phlgzN~;9m%I`l#Sb z2E9hq@9@vYKt(Dmux7KM--UYdf7lm>*{h&d1`RSpLVPjW{1P%6pL4jFzFVow{+w)| zJWMZfZkd?2;ICG(gBLU*k#y4P48=@0I{-0`T|@UX9ZWKnLtAN$L^0B%37JhM2sjK0 z(50h7hdLDs6TLHJ3?Qo1wF~Y4@(Fs z3F)_QV(*f8xuDd?1(^?(MsA=j%A#6uawyM_f}2w}VZq7*-qlTpS#ewwa<~?ZU=10P zI9|kfg7fV%Vf%pTOyWk@(gNB|odmhaj$v8a`-twzu*nj!!?^rA$y2MAZH!L6s4Oy! 
z?=$M*0e+GoJ{S-pkXpEkljN~t&*a1+%wc&eh10YaitQ4DnF+*${#C+tMpX4MJ7abI zKPxY8h_kUmem(bs3fV~=9H78+N!J?5NR+bye&5q5v${X>nQVSdvrWQX!0poCs6hBf z*cEDaC|1Id(PU<@O)f+T)_NL4jV?8An(j%03WuDQI#B*PU*-`x_wPA8f;Z0}+fYOJgzCjY&;KCt9G zDRRG2qsY1|OhzOl2!)QE*lumTztWiHy?zj_c6*iie&xLnjpG4AL3@#3ACrwl&e|Pe zis{Dlq04WX60Ix7CfDVvf~_`7KSste>>RX7Iw;`r1lD5YbW?G2(z0LdTc<1ES12Vk zjG1Tk*f62A(w4lhdUk4Yx3b0RTrTR{sxGIfuB9yHuf0hoH1H2->VDPQIsD?!PkKPO zehr1(`>CJ6R)j8h(s}sMzkrywNi{uz|oZ+$;BcCAK zp;U1u^vd=!TkhtSF2GNos`snXI_Q#MFsF0W%Q?=`F0nidsnpV`+S17z0AJ7V&VfT< zgdB7G8p5UXF~2B(+dwaEO5~`I5hQSr$O0wK#i)%Ob2Cg)gw6t(==k4_6F<-BN3q)s z16~#v1lnA_&U+UfTDF_X+MST}wJAp&Q@Y=}JRI-tD|;Ig+B;I*Uw2%(4~PBgGbm(E2^{ zv(@fB^Di&Ft1W!}Hl?iqygGcM{X*3}f7R~yhDYj}Zl&u|A&;7fpKjxHOn3>Y!Q*X{ zrPU7?r!jCmtfj_67QmXRE^|{`RE4-lPgW@oX(9HCZv4>5*A_(peb_ z8R_yFSrchXn6$?cDc>%gp7ycURp7wudP&b=FQH+bJSpME!mW;k?VTuiwEJmSpd=dl zS*&z;LE8f)*l}S&TaedUJnV$=o=8LPK1BCZvciHZc0>tRhwWV^onYgB|d zM2&ZJso!D`O~o{dnJ5Sx2osCB+wnFH)!SQcYS=oyWN`FtV5Js)90$t94_(MN4nH1| z`d|{VHr2g#VwSUP{!cakXMfPSBp%?swaqAbT&*Emo@0)>?~FmLsPpv;lv z@gJULnNBYgu2XrWP2!`3P-%KC`NPU}|D5sf*$at^n*R4tG9$^Y)c3UNG0{o|{C>*e zVTQ5WdNmq|LW=&6`CG+`Udgvw)}v9`-*!ki_p``CJtaZAo zuAMAtF|-UKgJ1k}{=r&(^^de`@whJ!%(t^(uIe-6XO>BF!o!F(hWG}wVGuf*3D4Cu zgklDt5GQ&5Kz}j61`_Z9%r;B)@UNk3v z*Xxeq0ua#NcaW#Po<}D2dKUCLX*LeWS*Y+u)1?;I1{0_BMfs_Ei}MTV^D2j<2<{fGgU1*0Q9R?) 
zP(=A?YnuzcL_n!6!dy5H*g6;P4g0mq(rbJ62xfRuXk;&Du&$EflE26<_!{A|YU;3P zYQv<`gWFL5t)UUEt2qt?XUqX9=BMEKMF<@C?GWZnqdZtm*JCV+!jvD8izkPH*H}qY z!px$at(Dvi}^_|u*?X0gfq+g-d|~QyAYM7}mg#)Ed)kLO%HsP{e!Vah&0#CE>D~Hqa@3LIOZ|0ScE6DE zKG*OMi@{O1`-k6bPer$n=Y5HQbH++W&%62VS;}UO-l~9ix$j52m2Ho{C66cIZ0^bc zPY58=Sm?5XijkZgp)cKIEwBFMcCEGC=8=&{?qXSu zJP8RaO9j!w6u}JqO>&D_&W1TL_rFOtKRGl9GB0;KKZYofznHaT6<2Fo@TTo-mFR&T z**l41t&aZm&DW83lA{h?&M6um-w z$j^x(z?M)sr!#|Bzr+Ez#8Fd?L3Tu=2P0!za^8SV?>XP3fv=!PK}#*D)VZTycXUHHhlc4)R`d@@C+% zdyNYZhJ|P~T3io;A;Q+r6KC|a!8U6;q>8hO3r<;HZ{Eo)b%sos>0}P!}XwduQp` zyk+_CP-d=ni=QZ^81FD8()Z}*U!nnv{eEULs$`|KDq@<{B)!UPOp(|*V&R>_DwKYO zHZj9}KQDTfe`9}2M*IsQ6KwVLcNT~N>zra_1XFZw7B{H!wJI8H^$*jxvlSNavjS09 zWBPLqg+8p_m~rrB!xWOhs6$=)%qq4qPyrtJgmsj`zKGkxk(17D(Zc>C`V11v@HK$n z!-oq&8Y6Cs{uyYX$Uqg@{PlHbb2Fw}Z^c2Zl8*=Mm~$B`q6YTj@9rXyiHN-35cgjO zF3Z(wh_bVP5fb_*mPfW2mzcU2XR+>yNBPjH_Y|kk{%xGwBuz;4XL@QT#E*);Hg}6D zD<9#vaSmAIh_CzWC+7X{@^iuU>dN=y@px_uvxdRu{G9wa8KX<2a+3e)QCC@u15(yw zBSoX8DSElwL-v3)*S_36psLc zpt%+!O)uadLmgdJ;}xNPK+_C{uRXc`07kJMe74@ND22bT5~3diel~2_A|SwOU3FB) z$UIWb?X0VFbeBh1XmN}=N7P}tUSuG|b-Sq{l(?n2u&upqS60*PpxHGv>VGjh3lTAM zopEsfxWGZ-Gk0dF5!?FFQe$m*0W_ZaD0b_pmA+s)x>6SJGgkq*W>xBv5LhTwN1TNF z2gT29Jv#MbLxmImW1=H~!?q@;8-n0Kf|43qqAXYXob_m84UdRF38+<xd>pZ#lX(qi+3${Q{YGL5y`8j6SXS(^>=1!l88)S2rDj#R6s`59K ztp39>CJ6RXs)0HBEEvUzO2pNL*|?zI8Nc$8s5%>6Gazy-lR;v;5=PJt2b5#LhR_bD zGbK|v-(sMbk9=@N!tM<8SNAN%F$pNPrn5P=i6ZShANahB%2e(EgVHP{1Pzr&w% zb0zcRcQJgoKz~uhWB_(l8wLWgODnjRx!g@NaRc277#>luqhP9*9qWpo$o|_Jv)DIQ zez$1#+ngZWAX^M5qhCQQx2!MhE1iHU-W7I2R1Yx_H3_)0K@a?_<3TJ!O1Xbp-%>y06_o}Yxz?JtTwG*WcstAZtB9s@yXZ0uJoAAA8Kw+ZKF`<3<0AmmX|~}6Tg9RJ5a{tb0cQd8+i0ZoZ%TSl?yM*MH92;@N7Cy)oJit&VBDarKO z5;=~Jp@w1v#f`I{@<|^XxONuV`W>%bk4Vh&Qx5XF7=#3RTPqU;83Tr1iGMH5=lKynVt6d2SxMo$aXg4MZIlQJHDT_&Lf^3e-+A>460ZoYxjgMEoo@ zxY$XqkFa=a5QtN$$l230L~(@tMI8L-(F(yt_y>ZQ4Ni*Eo`0O&&|K~CTN;qjfEK{S zsyJDuspWc9h7JgPO`np~6i81)j_Q_hrO&wvzMr2!1EJ6lr+5N(39q~$Jb zfOe8W)zWg}(>|>tvtW{eJ=kLBxJPW#t=$z@GXb9Y-Q;324~0UwG6 
z_Cd^%cGdpZ*Hggji5NdX`yQF7c6)IL8$F1Ha37Je;iKAF@hIWY9+g4&ehi?U+vP!s z75^ilA1KHl?kSU-*}!6AOX6osSYN}_b4bb2l|)kLtN5KoI$N5?(Q2GDTbvxWx%pgo zLzZ>E6*(Bn0cOLSN@TCy2Qa|yxL z7dJ&u`{i~d{)qKM`q$7hC{#6gliLo3>R+Y2p$5r=BOcd%g7B&8sj*3%>7dCJZL-yd zlGpfU6`X$6;dg(jC248pYubIAn(L=|=Nurh6;LA%jTMYnnO4-RK}OK`+<)_iA|Gnm zhSMoT97@gT{)V8<1shokpltDLc*E{amJKc{SzY7-E3NIuL&x9Yx3}+P;hjz&(R54v z(4BG9VE8mN%4hB0q!~@#wchCisYW+-RfOF^qKeEp=9d&Ap5y@;K&riERcjd)O7%0% zZ7LZX{%Jj~lEX3sD#cHn$PTter?L&u*BbG`}0__h_w( zz)iBjM!&{)3a3h-a2;8TMXl5cV%eNSzP1#zV=9s0r%Z>8I=g@;bPW-pN_8~~r;E}5 za>^MyK15z=7n0@zQiTAGRSX^%?2kBa;zA&sNlvHa$|g{rB4wm1P0taPOT-P!*AV<>Wyo{#(EgXP&$O_g3ZYnQiHTT>Q0Q^7Hz{u9!S zXDGmKR;IZdT%mLwQWsZ2L&w%L{^Pe60rZV4m6ff|&>ui5aDK;%E#iXwI~lRBAR*uq z^zgd1w-u+mG5>_fK;Y^-A8^hn_$^%8pL+8;ziq7Vtgb*8@1|2O19gdIOJDR?){oF; zo7+`!PF9;TPiS&Scza+q0Q~q3-9>(saK^m$TB_68v+UVBnvn)#b6)f^A4ruIg_}%B z@5YDGo_M|u^>1T3V0Y2o!%(z>z?Qdj97hW_FYo2t=0Hv5U8_Bo=NW%lXR^~?wIk4h zRYv0E;+US|_C~N2-1l|MbiD=Zorz3QQ<$ocm1kb!F#NU1Did80 zR$jW~@SV~$>0_XNQC|M5*35*k)CDiQO>M!Dh%HWw$NZw}9F=al(jPAxa@$p%nd*B~ z^yaF3$N=8dB#U0pg!c=#;S;fgyK1#oGJw|&7k6Ua*JBoZ-Iim}ziXA!gc9WgpUvk{35M$1)%bK&4<1$_85#?;IFfF0EcotU{>v{BlJkzt#fSl( ziWlX+jWbS(xE)TDuf#J@(fD^E1#X-&pY>98&x)cTd+WrW<_4~QG5flUVh1Fg}@w`D55oo6SqFbE_ zI#3gh0F|O$N19{~O$C;Y81A3zzT)X&u)LU#-tmp3GkdY9@5Q7s%)-IQ zAE6}*MePni&W`y7nBHl5lVH@{KvL**T~>#84HO}TAnb(HI|~@0z?Or?1H`c1CaRe< zSSLBwl`02syqN^05plr%>4J^_V&oO}LDUT=)i~%=SAulDX$0h2VUGR0*e{_Z zqG*`8>A~jsro&(($z$xtdf&KAeq$^c*XJqHR%&w9*3+%*abvr`8`)tH2#i1<)pvHD zOSH2?(3FwaE6(f!J37X=PX$nOhHB9$g)$10dH8YIkOi8vj5N#PeL3`8_^6Cr6c-yN z2TMFoaRG0Okf@3&z^3D7BEvJIxH0XeH_!eGsO_&%+`ZBZw33l5R}`|j9ue9A)O2s| zz8VzFE7rGV7~m##49YH=)<#c9S~rFICUyI7%wI#OExe?c1^TlWUudf#T#%VFR(15f zKlgr(bg(@Fut%}WQL=o?qP&AOY438s@){61B;+pV9Q*K!Tf=dN8suxrCFYyy%j`*~ zYLgdd&T_oL2@-4w&~4EZY_im%I`z(;8xh0cG&?f2c=({i-JuNU`wTWGbq`^e5p|L+ za5&J!8~MFO;){v15MQL@Tgf91Zh^7X}@*6mYOOQni?{EoidS& zsC9ksn)fHxG5~mGiT(Fsim=HcrY126OzL8=>{)=x;lk?zqg_cYPD>*iQ~m={hRQ#B zvtbJG>}KVh#_84eHa_t8PlJ<2dYIp%s(0i5j*$_NT_(nKV~yZp0UIAt9|MU4d-n+q 
z_XTsURXR-uI?c5tsi}}(lK>9K0-u=%Ihh!PIR1URp_K$mv9mBUQxz}^`Mez;0Jg;8 zMcJ>K>#MV1F9694@NQ7hGs({Mx1`Txad$(QoMD1_Tac$-ZKt;0AQtH`6`bEB?a(?P zz%W2YL)On*a$Tac8V}ZAOh2(s_%lXV5hgW`2K!;B(ws!IfvXVQ2L~GXqcROt9PbA) zuRn2S#4~-izX~e?Oi&&SBoy*@L{SJ)?!tYK#7!(!UPYCKvHv)YODL0rw;+lJuY%D& zSVE0p40XyyjzYq$u*@t4h(iB7JIjW1{U=zmNN*pH<@TPp<9!tU^O?^%AYPoE4FQga zRRlhQ*uN;JXmdvctT@r&k&$2=Aky~#I6=>Q_I(dEqTxk1r-0+_v}f$?pN1P#5zby4)AyF7R4Gd5RVTYO7RGBz-$XVb5}=N@9@OP8fZ-Uxo1i z#iEjiA%@FO410zi%v?Z|iAbXe9-U0KJVuFI7UhZ{3x?TvYBcRfj5q|tW~Pf0j~8}} z&VkggBuSlhh(1u|9ltvNMkebl8~(PBr=Sx#qtTmQ1U>~YumZfNba zU=M9he)!Vo_EYKKW4Fl@@%gjZ``_`wgp_bQm$OIRvDaVks~++8dt=2Xr^AbwEatTq zQE7TeI_#phS0-7R-+DY$KOQgkrgDV5&iV{?>c4xehS^#uY zVM+`bMUFLNTapQqD*tG?UT+UMoA?EXB??klA7&k9t0^ zyvS%7vc!}66TjF}6{+^I{NaLmxk~qj=bCw`5`Py_C&d+(>f~34`uA7b()r~Qqlr`A zA6!JyPM>F$*U!DGf`orS#RcRZwZG<9rEERqEw6SS5#d9Iy-bc&9V1VJ4fsf{-GTc+KY0>jM7*S~MvcZe1SZT^>oJ zwfctX6u;LspL76`WVWuv$x3np9=986gfFC#>{$fc_9#WD?D9D1eP3*oL9bk4>K5S) z!2VX=>jBBCz6%R}crPVbf-(*0s=Y}_6=%&yR}KE(R|Sg>FW(~+0*GiaxjnMIlce0H=Tk$X=5|3iP5*DijRdoZDRL3FZQomtF>^Xe{3mL z^a%S0-PsoI;Vn*f`WZ7Yh`>~7TjDAj+N$j{^ku1mIbr9 zJuDn4ER5J&@9R%Wq&?Mf57%|MyWbi3miV}bREv9=pOY`oW!7l3gNA_iLaVy|-(S`E z&}c;CC}=wo7JwS!6#|{~>#pu_o~LyVm#GF_~4GK)*u-y~bhZs8=e`{z~`58rhk!vp`}AB7;sT1l682qtC$YWT8HoyY1!UsY-Qum$R|VreFo# zUjv7&)3**FWa9m-Xh^Uikhxj>(~&6PdM8kw(n6xtBvVnIvjxMTK*phJVk#)1hSfRy z50w3&1(*r4{yt)3Svo>6tz%sIwd`MOUnP=9$0i{mV3>{E0TraFX;UuD_QFI0mU}2A z)7snj5N8~Gy81362Rk%G@s);qj;mg@qx+#IVA!te!fH?a_0^=2r?jN%R1vP>9 zq#VIuImj0y=iw6|Jzn{2y)8_*Z4!n$;)@4x^uiZ~;r&Cy z?xN&zU?NOt!AYzrJ0P6WJy@=OIBCE_C@InM4s`@?3ne+n? 
zMH|3o&h;gfSZ4%+_j81mi%V=zU8Haa;QksF~jYA~~@U}6uAUF@yD>S2Chh3I>?^~Ynw zt&RkIo%Vh#Pb9rh1ee+p97t|yy$M&?@Vd+txFF2%p^hRedvyBt0a*>fSqbEwoIiu6 zJK7@!w9WLgoOL^)PC#?`7rH(5K40^Xk6CA0F+nDHv$w_z_>NNC8JBfCr@rp*Hd?N1 z0YXc7&+-+}R3{1bjh9+_f5SipCCt?)?4{q`t+v?W(Qp2m`ZqGqhI*O~bJ3O9CiZgZ zQfiNGb(+^qy|SOV^u+ZInKE;|s;deM3pl zoA+78(hwjOqqB_V*`G|O+RiynMl+u$X*=nnp1KqnY9z@>as5Y%I?(qn12RivMOc2v zar#JNHUH*zsgc8?LJNfY9BP%qSFMu|myP;w$w$Mb>3Xq7gmLbC`fdzR94S4|KPh-ghEwOUNpXl{j^q#`7 zV7s}=dQtd*ber9XhPxWpPJRQVkCtj(Hn%xix41z2Ji65d5#?1}y8~JW7;6Xx9I8q1 z?HW8*4qfIW@1mY^A`~W>Wjnrm6o}d;VlNtJ@QfAAaZ(ZRVbY$k>-)2ES`Gsu5%7a% zn^U?95#?n);^AXHA%=Ink~t%GGYOrO|%fzxr7Xyw&{AC?Do{J z!K_7Ccy}Y0d&v`p?jj4GLp-nNMQa4XIAEsWhb0570d_*E?D^cXv3=yUudflf2hF%K za9$5~hl2alR#-2Le!D7&jyH^+YHCQ~)I75{?1FYw`>!n@F0B5wG>4|xDo!4a@5sN# zhLZ?$$kXf)#52V0qKIoU`a@=m@W_y@ae+%kdK_@nZMf3>aiztd;`(Fw zX}?X|VdQSXTssJPc@&5;M^l0*K_Mw77smYccf?yB<2kRrti9){oGT68DkbcKB7a~U z5s9L_X~BC{iTNyowUH}WoRBwTylT8ctU^H;iu7mplM*D%mP%)kk1q3FmW37vv*|6G2T>4z_1 z+QbcV{S(YF*byP(55rCjhMIkh5hooc?*JDCDAvnT{pW?KF8weF9P=?n!puoj?OfrH>$vm6-nFPJ7&~o(6oKb%dKSWP~w; z1O7#YEd?D|iyL?s!$ScK*H9p5!_JI%P;TjblLW>TaMKX*qc-}{Pqp?1B+FiJ@jQ80 zM;|?2mmdd5F~F#t-f?$=hsx%uT*1Z1_*rpgYCV-ra}`B_NV^{$)W9&Gd6odHas*cL0MNnN^zD2A>Jnt4@m{)@(Al~F zZ1!z+v=e=w)o*U@KK2sUSAzdsf+}6<_pS@i-u$hKC9>S?sqt{vHDN5Cpc9w<2Z0p( zhG+>n)lL;{eVOI03uSoQ@syM!Ai&E>3vjpuGq1=|Vcc(>>1#MGG&gFt`3^N}Iz54% zKc!l= zZyyOiuQ|63M~dGmia(B0RJS};M6mAT{#=n>+*e-jm6UT}71`Ncu6hiwdaTCt1FmjY z?+ej)OG}e_Sbojx$mFI+OVN1Yj_?Kpdp;tD^*82310L6py7QX`&yzx>1{^547LGi>y=fcv>nt}o1u(@u~b<50XNJX0ws241;~@J94A3NR~a^#CCwsG~bprukAoD#bT(B4i

=O*}K^RtaqoaT9$w7YpiU#uD|&^h-6$?j=bKq0yKNpUEtUZnR^H!U3Ykcb(mAQ-qbHmC5~>(; z`@TH~W{xga%krot^SLEBOp5}8_%`I8;JAyy;Z9)q%ud*g+rA@nF|>AM@L7OYK85H% zX7AivOMkw&NDyaAZHSfe+tfK8{N9TK$X;G@**D_ZMbFWCY1zG1}g&4tjMfrLq65P>-GGl^}e24<>2v z7z_3wLGKBKT_-{&9e8YgK4OfUh9!Whb56!dN^@Cu+MZdKCZXe+)l`_#*3 z@d=Z^u{I5DdCRuhGi>iTp3bRNrr5e);SQoF7_%gg4IMN{8urw?*qaxPHM{jSU!V2w z9uy^&7h%GZxsa8E1BNw= zy$E8L-Ph+rHpbQAT3m30l6iK2e>kJD+U{ZHY^)6OI7wQ)w&huRpcZ zh+ZP|gg+J~MG9xg_e5||w%}JM{xeD1`|u?4f_hB^dq>@ZQ8 zIPrB~@}azKQpx??w&xiFVYeQ}z4hld|8R0LDNsnWW!V5mQk7Jx?P92UayNM|Y?|R% z9IVmEJDJh%7n?*c$L3;|BvdDo1=C>CPvb^D}ch2hir8NOST!AZ-0z z&>9LkM*=?SZ*N`m@F|AKgjj{P$a@ldf7Urt5#))s6t_iStQed+L=gz%&JZhUm*B_6 z8js>gX0Tq5P@b=0&AyC7SJZt=ePSL9!aJo4K;^D|MhP6=1z#2?ZzVGVTee|< zo7_Te?}ErlJtIQB5{>)}#`u6IKg9Y~PfED~?jm(Ei9P&EtOhnaXJh==VRj>IMm`UW zQn?VEYc&b?NLY@M=Qx5h6gLz(T;h; z9ft3IG(tfSmWCjah;=c3b@(G76n2nWMzrleiP1p7Regma>>#hfUqKsSltkM2A#4O; zu(0}X;Y~cCnfN}!s&)I2QkV4cvMzGsDBW%=mFX**Q#&df2v~;OKQ}9ynhp{Yu#S#= zTUM%5{}G2>5Ug)5q^z&t1rKFRqZo%4y##kwBZ}%r%Bo-RW%kV&4H;!iH^>yN6scUK zsa(ySJO%Rpb;4~|JM_^?^=)MQ8Z~(vu;}7vZm#I&YIbFmD*G`nk2#KkqvLJQ2mTj5 zL9z5l5@ysG9Hk051&dvh((Jy!7jd1rC%l+l2_AM) zo=5CecOdaxai4qb{AjBq-(q$hV)cggOv|}CS$oOHRctJv^tS3;Hsdbm$ehKSq)C)0 zw@j!_`ng|$yV}69A38;?=7ZesL-~9{qwDSH<*N5uO6yBQVXv&iPQhSx@`u-5tmA`m zzvb;Y<0-Ia1F8L%C%MX7;5xP>F~t_L<#Nua3$%;^t2(&5Ec*Oh4iEdU#g)AKJIU@W z;_85@lCzw~cFh%aV)Rvp3K2Fwb){`g$6c8H2hzn0-rv-BzZs`_BmN%=DF_IrG^!mY zM=`WjnQP0P!OMG3M3=yIi+ZdGa(bN#UlL#0gi7hGxDk`}MpQ8pH%e^*QO*PrFP#AaPmj&fc1 zjjGDv7GTjb&TcDI&T+rygf^h19VHG}fSqy>P5U9xw1uETzy12N>=`*~1CIZ8AbljS zh-pzK^&3W(T*M?r_-t7}tFkCXTmtr37;=8Z$4sv9{u^U~cYebE0B=E%z5{pfk34lg z>-N2Z2agM%J%@a`-|wXslm!dp9J!GWY=MI)-a9j&m|H~6DI;bSlHybSgz;|72By)aN_B(RFWY{Id@nQ|!F-M(D*Cs;76u8@=uV+rdz1l2FZj?RVHZY= z^6G7k*#&b-|r%KX8_|Z6OxGN1@#@pz^ZNvy5N$^bAwRr_Q_z@t} zfGKc|O$$oTB_*W=N5=VZBOx!H!UnA}i*ILZVK0{C5}S&XWcb9S`U>J)m|{m5MWNOIYCdvmh+bn~~xk60waSaw5mTFv*C+abmrh@t%wXABM!2 zp5RSSge4a}5r;NhaXSdf_i+d$xD#zAD-gRO!lHlJjk)mBryOiQS2Nla7Glt&mk5oDX>ed zDRzuSLoAYGA&kXj9=^l~ek{=2 
zibls{2Bo6Z4;bmc0|9<{bYr$1yc9ygiKV3>AuY4IUC zw)wj<`CiPZm95qmk5A;ZcE;4yhu1ep_Y7nW4Ci7m(vu5IXO>pZVZ-M++v17&#Y2|) z!?SaTXDr92XDTt61*A2Z&!7r#jjnuHn$y;m^r|M}&LirJYW{?()I581dgh?sSb;6R z4`Ed@XKc1BtE)E`moA#7j}MLJzo_OPI_-*9|3^*)y{<`}o4aPS-CbLIxUurNCXaXa6yJLs{^$kw*&F(cx9qp|5O`JJEbQz_lfjj_49m9N z1!gX2A1gtO<;YlW|8OcM63FiEOL^B2dHZ4T*~=bhuXx;h!mg*VFK_3q_f^X4!-MC0x~jUnF1EK{mdll>Hma^gH&qlM?7*?ehSUitgo^tcopjq3o_9&|UXu)}!^W9{`=XrQJ4#Mmv#;#Loc zA9ni=q&oz09mzuXh~!`Z*c1vsjl|Cg=4B9gsdQFCFi8ji>q`MYY!J(hBJhY#3(hR2 z?>Q(uax(VVsg$ZKIcKUelCr{RT+j@JF`Pn~u2g{+J<5wE1U?xBGeU@Nm=m4|n|-dp zo6YwFX#@eJ6i5?G=7DMpDYZg4p3v$``$x1s!^Q5ipxQAOU{>Hsio}sb9x%;D`!QpE z7%@KdC@-?ejU4$4LEyj>d$NRXVJznmrXzvnK<59#jB;m*+$rHsP*Fj0^d&g>5-=$; z-I2j}<%qrbu>ou`z_IJtDHa*eNc|I2U%T8!Z;cj`tNL z`9~xNa1;D6jTVk1b|;B3ih%DNCbZA72x;+{s|z`@w}x0>sspm%+0A!6JdqFME8+@> zJbW0}m&n7z^OuJwa01afY%4CBB;^p)Kow{_g+#d+*mT zZJ$29nzvoY@&ixI&K)+H4=9wSdT)YAL*d=g#EiT@lGMpThl}!NI zo7LJGTV2O{@{0McLEP4r*4di_{>`=*;R%(do-gGZh+L8TzjDhW(&NorRRz_4VJDmv7Em zPAQb7?cHg=-U!)q#It0d^Pv*~kDn(kEy4}=@4uQLvgZ4{&CN$shO?bLMSVksy#vKP zePxrXQ*}+-AHNXQG{nBG?yNLslqlvihR^QHaNPyZzywc zBzI&iZ%kgG(N!3XhvXAGUeyTCUBz9#6XXm6rs2bA{eiWS9meo+(~=##;3KLu23aS zYO#uQ4RoEWr?N+7=~x_MvRqjl&o~*1qu*}}pXsXnz>&uHTKW;XE``r8Ix9+v&&XM8!x9{%A%qJzJ z`xlnc&Qyutye&0N!-L*pxjs64_H0!gKgNZ^c0_xz0EQEo3oszs8e+Go*MFkxzvkGj zv@1N*IfUoNOALxmqeW*@#95@s?V-F>Ixm&ZNeHDz`w{qFpq9j5cL6jPA`izBy0K%u zh4G$(4NrdAq?ElznAKPr??^CCD?^Rpz(ng;l<#3Q(2xN6bRQ1 zpgJL?7DX69jPMWPcmkr5?0n#l&}Nsd`6__n0)Yk*xG;emfe)2=)JPm9%A1`Kz>);e zqrK^397XI-iE%?f>W5WPgt9>AAC(lqm-w?qo&=@?3Q-45G8v4?0YvaAQ@GC5a91)9 zWUF|J1Cix`+|2;CKPFpr2n+{AdZ4$+ITFa)=Fiv`!gr)6;=;4>f;?ywiz*0+$n)W4 z`_d9!L;2f?QLZGhJ2lRemFP>4_Xdg}!Hb@Nqb0aAl0CVZ0g?H^3A;#%yFo&bQbA1K z7aF%4FDmwo$Z+DNKmcr1ma8P+Cwq5b#x8t(fv+grnVsy+Omd@3T)`3>@6AZ?VJG@? zlLL5?07iljEfz->1I7Zh3S1BkyLGmPSah}V#p>tKuM^@5h&((=0KWzVj3h_1D^^zl z)GK-$!uCQG0XVw}AREAw9TPH1zu&9SYpcI~9{A6n!+-zT|M&MEl+pU1zdFBvtN-@x z^~UC{rIo6=`J*$I19P^6dgD&Dws^`=Zn5oKS~<6MY)i+Gh6hPl5yn>6hqZR`N5(Rh zszMB4kDF&sS*&MgW~

Z*lSd+S>EYjTZp3OE>g}BV*%5_04fL^@2CGJj9?5j^vKX zcg$JOp{Q64>MIP>r8;B2R-da-rOFiG;T;}J$AVk)sVY)U>@0_t5o3)&uvtC?Xy}7Y@Yjy2+tL?mQ>d@GD+2a@d)0gl&4mejF z_P+a2IA^){`BTk*|87Mv`~7>(=KFg({b{A@s6u&AqdTpeI^RFEduS9KZT&-8Egirf zV^qlwg|b9Gf$i4E3&zF^M#l1nr8yYDW_S0ecl4z93}p2TWGPj<494RVlY45L;%+^l zJ%1zY8OW+@iuwHk5CXOJ*}vb9JaQ`V);&)DP`-HRm`pBcXppFs@8j>vFYI#Y%OiVlu0(Tm0fJt*$9*a3s$>bLPXx`yW5N zTwT3yv0NBe>=_){F)?{aZ#V_d=%w4sOHWpopRKJuUs-uDJ9`5?WUceRn#|{|*6Z&# zU!mvMr;oK0ip#g|W^OO_PRqlcsfwv@*soMvw$9yMShxyt7W2Q44xc@JK87CyFZM77 z7H-URMztOP0@$tb^1%U7br@k#phDvau}jawlJFlFq=l?BnMKW ze1ll-7`{0mWdbD6U~i#Fm+2hBaS!2O#viipmbr54Z}i7J5k3UyDDez*@C|hgV>pMh zJn501p)@z2FhK7hmb+bY$?`=15EPDw*bnrjAn+N(Sbv%`kc1I9g3yx?=|+mgQ6q6Q zu`esmhZ76aEGyYJK9`zUO0gTMNC1EYwj(FjPnc-OSPIt}*^>ha4go~aa-b%lP|h|A z*Ex&}PcA>QLkP=>8tw`Otn6)}bP%$V!?%$!utoDZo&;aPjrm5u#mK$$p$>#q_bEY@Z;eCyFLO-doS0 zZFsU{l*ISKm13p*@rR9$?_ayVf9?9~dnbD5&+kp&ztw;LUi0Pai}xSyEG(VT8!MFR zV(fsW9imP&1$yHy!}K15X^(cQOsy+csIvM;1WoO9Ty0Ca%Ca-8}i+f$e!P5Anu^~1^1*v)`Yzx0G zE?u0Rt3)6$smYTmQif$wgVLz(em+X185l`M3m1j55QEcl?No(!3fM^Ou6h8gFvu8{ zrHv~yCN+7+>75qqF^lySR!VzwW#!K5>dkpumCc_(5* zerd*h+ibc&ZTfxQ`e5&gdAVkYTBh1yJuek~=Vxi@5`GdIxj*2Junm+ccJ~bAwsmFp4nSw_yN2jH z519|2MK!e+ysgcB^epaOW2Qp6$7Z|o{=*$Wwoeby47ss++qQ5;qdO*(AJphhe);nJ z@4uS<`m^r)xBAbYUw`}ri_1CdMccx!KL8dm7I6hz7^~Blqq180WO7%3cvCy=T~p}G znt*qWl-4fp;Ao;$o;E5=YwHew@``xt4}5hkuf03NWUd6P{`6vf{qfw~HI?SLeB$Vo z{;b)2b$R*T>dK4N9x?`>@SVV?PQc;w*Y zXUWITkgwj1?&>;d)cvZzxujB^zxjJcbc!3D?~3+BK`d8aAEm4s)+`iBYJelE)}ssXVF z_ENkpb0yu=jH@{?0=DOIQ5-=3K({Zo(f1F# zJUhlZ!PQj=Tmdo4gCfGwL_SQ3A3e^OBk`xl`3loQ3wH|+9!m^ix&>1KKk+mN0te8N zA@HDvyHdkl>5%}lC>kP!i5Z#cum}rdJCQlg1P(whfIrm%Eyz#}C@uJ4MQ=)(PWckiW3L6^X8WQddZ+ULPJdl^5@q4R& zE12UE%JbOzwZWQU?HG&3*B{UdB3SgJ_a(S^2iw)oULg)K$)T677fnt){jkyglK zC4T$*4x#JEk8eJGdhzAUb9DQgn?E)*n)$Y#`L2--Z7HY?u!&jM1bWW5by08Y1$E8g zjcwf8<}eT&)InY}raB0F*GR|GNMbsAB$(8*aAd5cZz!+5JGHSjsi7sOyRS^8Ik~iS z+dOl=xjg}^xu?~)h&p?ddIxhR)%z9}uC1)xoS(mBnm%qY9x_hvRO<>xWRl*Y2(Tly zk!l)48d~X1?a-q-BEt&)GD@apa7;FK!KlACZM;9wcdoVNKxg~0;o-`W 
zkwblhyT=s=EOVDIh`YDE3>pd3^hve0LNS>)BuyV2P3jp)=^acP7|tFV&BMxbi-tyv zMxw8uuI6{FJqO4U)dcK?*2yniSQ*w?;7R6QV- z?e7`L9U9G7s&?u0hYhA<)8cMXmdcJ=1Acjb*M_8U!S z%`;WznUf~-QRDPZ?bJ?#5i*CHTK46YF<6n%7XYsUw)@YrIXafh0arE*W*Of9nqz+! z-?QAeU=Z;J2#slCe||~`Fb3NRQQ1^cHbIyj%8UyPI1nonLEYb;L3TMhp6O1F^rs8(ga~i=(-0?A#s~f25S0&&;pj1e7+UxSFcW;k<4?Z@?9u=XZTDO zMn@1}jt!wXg|M;23APg}K0ufe8o51Gm>r5RmYwd)$?#_;`!W-K83{g=SR4$ru^^_R zC*WAAK9L1L5lD8!Zc9=goKO*D@6X-gA;@+T7rG_w3`{Nyj4g1F#LBto@!Lqzj^r3e z3}7+-0sB(SWblQ-o-5vy68qC-5Gru7kFr}B8}0&%0F)A~y7m(R6;vEOUZ`S$J2moL>Y-h6!e z;p6?4wTs5-!`=OHPhOFpSL17&Ni~gx>iV!(??OXg(y=#bl_?-QwmfzYL z-PxNepV(!YJNN#>osG>qHro~R%t?c3KS%}=M zum>jd*(v>5gW>Xm?cs;_)nC6feE-^Fv%b)4?@lVOj*K4Z=-l<>rKF{OyTMqAsD`Db z+Y1ZVjHc6SEo8Y2j6ecaS6@PRe{%nD#?WY_ZE?N%cXM=AglNe8zIZJbM_ixN6N2z45%kc&WRu?8(cR>N-(ltE8bNzNI6vyDuBd zAJ{i#IBcFhIx~A}N`FqPJ7=}t-dKP0=eM@MzIXng|Lytj->u)iy*AHW92wi)-j#<< z!k5qhj7babuC8C6u^f;ovfFz=venhksc(gf+ZS*BpH=(ae`e2ty!*uW#arUzSNQwS zd|ua*`-jq}42L)0--BP9&n;iSw0-_mv$64PZ55aSG{J7J*L~Qm|Mb57(}&jgo3&Q! 
zZJE3jd+4N~D#p3_lS?b-W-X^>ik-t!DB+dKOI4Z!h{MnJsYQ$9f7Remt=U zlkZIBJCb=0#PDrYfeSa%gCFlJkoZN$d9uZBBreRZlyDci$eS1C$rQSW$9PA?c+vT; zLu>qcX+buDAeH# z((xjTrRx2AU<=nbexI|Ro1HtRGn7nfa}=s{%=H=F*~f0{W;C?Y-Zj#y>zJ)w5&grG z;jt8m2bbr8U|{yb;^MXSjT?*0zZ$3akID0}sN3lN;W)W6N3GpuG##HdpP#o~o1ecq zKYwe^dTD0%tln@?soD(}sqSYLns;eE|tf42Yiz4hCd#?8&=vz9Ak z@`ID=gR_=XwuOsJ%hy2RHhos3JE&Cc93IQ=8Hn%fjl~pbi31~95ZpRmpiu4@SL_-a z--Xe$J1_^_KArxU(R4};*5kb+W5ol*`2)lG!_r*ocPRk*+Zj+qq5yH`9AH`Dbw_?vzFf$7jG{u zJ=oZI{NdvxYT^st_AHG*_d*6TN|FR~mu7xr?i=5*a4!PS8(C~ZIWYBw@q9wKKSZZ&ToB6x4nUTBAkzh)kL8YMy5bpb z!EAU1!Q4o33Szl3qWy#^VbPi7*epgwatJFvfFQuxZ4Mx#as`S2m{=DuJJ4M4_DL1K zR3H@x3@YX?12;i94zv8=etcKBP$n*r>f#gT;1%ZJ8NAJ(;=&Z-DO|rms*5)PcmQ}6 z(_I5-uBfcmm*RxJzTG#(aP=cO26G(3Qv#zhLmBZt(0&-@#fhB;fkSVAY3(1{^* zj}UwD<9wr1@v&(^v020@i7$)qM&dY9xHuZ$gUEpbAhrM^Wdt$a9O2J(SQx{@mk2|h zXVA7_t}`RXkDG+&qyol*3n0bAt|E}QQ6+A)L|1O6uc$C2erIU=sq*7FX;gF8OIU;HEpXBoMktjNXRi0SH0?V=*$8o#F#0(Tf@nGA;B+TTCnfDGx#k zg1L@(_O>97V=xa&B%w!A=uU{l)`$Xl8^W`nU@lM!A>l4C*{n@a zC{=lwJ~IwE>qp0T{$s%Y~FhR^QU)+=fqU>7xjiCx+xHV%~{XS&tK9TDizAz!_wT|L8wLO z9hCG9C7?t>=!THx$|p*(q(?Z|vK!yHf^+ z^GC)C`UVU82J;4ncaDwkoyH_;x~UUA{Y7nEiGw4VL!+5}gDL$(Nqs{}!()jkjj(Sh zqOqM^T_4ogPHt=`4~`14xR`{FUjDP{z`IYp?>_NchKYrmyL#`%CUitCGV zRM1R|?sRA?0mx?ekGuaf6A93j<@$4u#imU{gg3bUV*+@ILBPsp!r>)?J~o8w{)1<5 zMS7~OgoG{ELje6JT>v)TA5<{vK*U~pfh?yvdQ<=#idGi62{nr$l*&30-NCUSyFcH5vw5 zcmH3!gR!JmY=X7FgB?5x5_&lzk0^;>MlLBO3uLrNi$&)HVM}5=li1Gma94h;cT#3p zY;pio2x7eX(XJdJ$PZ8+8iDPKr???$s6X9-5b4HB^5dih z!2LZ1M@w|0#Df=rmgGW9a^Ym*#Kpmh`tHP9o|vJZjnW}xMHt_66m5u zp#-dCCuWipBOU-2)40OjKaB52h;#!cRsu0HoMb;NY8yuqyM_r|5Wu1#HYD5y51@+q z4q%Ag7R-T|g3t(809c_XLFkD_;xGY_w|I`DeMcs6Aw;?ngr1=hE&#vq>*OB9afVr! 
zj|=1DhyqCVCJQ`BLQf#Q!$IMeT^RKGO{qcu+x+|$+rqE&3#ZMqhp}ux2*2tZitisv zhBRjLo+(3xMz>3;Djk#O^z@; z))n6~kUTP$KR&S&t3oL2AIk3RN$nrX9+lW%xR^m`SPc?wn9m_j0zCl1S^`bPxAvZygd?1Va9sm_$j(|QLbZ|gZ& znqQJsRxqjFH$8K5b@e7Du)mLRsv4{{z^<}S|8-db3=iD1@h zt5mA;dIu94S_HL?{JJJ#XHSZByl`AmJ~FyTF>!o$_Qu-U{ngcnb92|UxC0 zWQyIU=~Hu7&?NK?<+XGqwRelV`k=*Ncr0ab6f(?zbVC#=l)EuoahXc9OQ%0#G@YJM z9%M;lfc^Tp_uJ=z z&mX!zy>ACfcCC8ec74`ze#UajVm)QGodLpp;n$_5>o(h^iOD^^gWKD?lN(#bP3_Qr z-q8yUXu$9cL3W}{k)hV^P^ykUc#vyqR%<+{jRVa;Pv76Us_xoY){jgg2h)j*1ohDE4X?7-gw02oJZk zxc@RcY=tj@_=k(-4DkJ_{2&@96pRKD-UO_=3(s`lx^g0USP-W2MJ3?C!BSmNx@;)h zoxpLXggfzL91^zsq!!}ib9^IWaCDIyL*zvjdWLbGgSZYPOk+Ta_M(Y=fRPBcOA7)K zz=k<_2Z3ci0CPi7c{ql^BRtjz#SEfB6jFRKwE$XXF)&FmKhBrTarF#D#w8G;u?22X z65rV50CAF6gxHA|?o8!jUxAAYp}V66pDzU}Y-q9G>|{S!cqMz$lU=C^&a@;KR)$A- zj%Rc!K5ZW*{U9NEU#Mgc2(3xB^U{-~~xCiN3UWPf|2&!2lQ|-To;j2D)cZlMvb!8|}z1kpb}0f#<9h;-XRV;G@he1vJC;!IL>dRU~yAN3_|FA9GAEKj33 zH#2(?)+L5Rm@H@euq>u$5JE&7T3Icf+`gg2vGH8B_Q3qY>80f>^EPl%>5Ye{4ExoZ zJu-Reuryb#g*;M~cE`wA-oS9SR0hPOR8}BU6i=%6X{RdW6OfTo(-8akCHL-Q+N+xI zmX74MuC%_vys_~z?NlYK=9h1vEzJ7H^~L40#_9bsMG=i23Kc4Qhs&gPyGm1L zFdi~ZA2*qSdbL`w&(7YOnYjTrvFXE;nmnmIqq{%8u`L2i>}S?Di-x7S7*BXuZ#-|c z-iBgU{aKVMHaJo;A}v#E51XcsPpbE|btPBVMmDqvy87aV$5OE2Ib%qgJ}{C#GL|V* zPq_Urok zZ>y{KXJ)UC%a6(AM>N_~D$VhUNr3AqW4Xx;d95>+N|U)#H+4v<+SA>a*3uEz*c#K? 
z1&!q6$}EFvH^gyU4nV#uCeIic5jC~5upcxFD#9^3{YUN=rKz3O)z6vKW*MeSwfX|N z5|Yrm`r_KV#mya@wr+O+aI{jDH)SZtByeY@&1coxVOP&5gH9OScynZy;lx#d>zmdU0X#^4j{HrR5t^8F=XacnZb4_07>3 zp&i%M&TVMnb@xXPj6l4Pd?IIDQTD1PBeyg#I?|{ z*RcoR{Uh~`$n_>+-y4ElQn(K_(w`XN4@5D>3~Y_Z@J+IRuDXJdCXEH-B~RoUpNY#X z^vx{vjm^eIL_1LVE;PP7f$bX1a0p^-3+6eHBfU^>0@NdtoP%gCu#KXCM%O18lDIqq zppMF)=uDt^ppq{F-I2(0+G_g(tt=+%3M4p^=q{-ACxGOP0{VPI9Z78G=#-$WJYq(E zXkxB^q{M~Fhjkc{>q23=gwk+m$?i*W2<3TDV!YT10o?QeMzRMz$sOy9@et(uii!df z_JpP%B)|~6FF1CmZ%l~;FVlsc>c~rX64VI4Na z;%M=nEDT`ji8ySCb;dC8C&nTPK`_rLknMm`1WuUA%LT*O9{>x}EbzBm;Eb?z3sL{| zpXe)-#O|#4fPIyzr+>{qekQ*B057YEkeKOy_yn`L`G9`vy3ur2r-z%kLX|lpOX?d& z$^n$T6kONL>FANj4b-ib)yuZUa~A7Kz45@7#$Kt~g?hWx+PzxcA>Gs= zgYhJ`#5vU5o_*!_uya>JPG9n^y6*S%6|cKLb9Af-)2|+zwNx!G-oT_pzpboQ&06#=7beQ9#o4)oDsAz&G6|{Zb;f+Pu0W^XsW%)k7*CpK&LE|(Y5J00e^I45qSo$JYl|`J zBcpdvQrpDET2ZKtt+AcGX$I3-i{<*9^)i-Ea&|&lIW~T9cx3P3@E-ZZ5smi1xT3th zJM~?IxV}Zy)fb1&U0LH3+vO8EQdur4I75~oRJ4vu8X|NPbYKmQp9W6PfdUp{qx*sNb(ez3H3 z3vC$Z=PxZRUS3|lxwiiM((Qs;-$+*UWA06krOy zL`(=>)ZUeUrYbhCgvyHwBC`UBTyLPuxb6_M%>A(!=E0o~q~qAx8g5}h#>V|f0mTMR z4qWtLTO`4=VglfzEr#@uAeOry)d`itV3H~)lt7DRD*lAoEx*IRM7ij%3**>@ub`wt zu1pMO!H*g45B9Df5!_ob-2liDUSz&6Dcpm|_XJm`ljSi$||@8W%TqSzt~hKx)5}; zVQeQN%PEB7j<|vVEE<Dz=0%kro`G~2LSOz?qo5JDe>c`2QZVoNzu;4 zD5Ny-Kvb=rhUK~<@c?vTu(ySTV`DJJ5@4{2`*}nj2xEyqqGh)Z`hojNviJ5YXw%r}P!r&AmdUDOPF9XXlPXt)A_W!L;8veF#IX<1^R-Xny|E;^K|v z<(t#y^KG5^*MH~jKHyQd-}TT5kL!2Ht)0p8i6XFEO&_10skF>pTw1znv7XWBpfpgX z$QqKy^$habx*1!!(ETF=xJ}77=qErFHiw_6($q%JL1yLs;@&xmuTt68_Qj zJ0y)Cm1Rz%)Nljh6`HVIi;#nIeBVIe|Az;IV>&j8_MhM&m5L!E0l#0hhp3h zsx#A3-4r}})Y>Dusly88?!KYCo`Ia!&XmV5xvy%X`iBaXs(mP<8>{!K&`#|#PVY2M z@6;Kf%}@y@+}$Sgp@pRj=Go&|lGTn$b%{!|Lp!w-OKm-3G@ZgS&8n>PRnz9m>DdG3 zStyjjB(F&*BvzqNM^Lx{W_w{Ql)hjEHkOpIM@di38E4Mb^-&#R z5?{OxfAWg@`d#?FCt*)sQ961gL()tv_5JW*|JmF3%3{-rq;Mb1>WB;CID~}5#Db9q z9w8hEUIzHW?&bikT%ck9RZs{Y=Q&eEICernL}DmA7MO=1mOHYqBAOHcmg?X~b41X( z6^USvRiwFWVeFPJcMEL8I9~tM=pv{`b|Ip`ha~g`236oqiS(hO(k|>REh>N&54nY` 
zXgp2iLlgTl<9wqezHw>Z$vNIx1wI-1p79w@Q3;Mv4TB{NVCyP(64!~ub_j`ZB1Xc6 zoj?JwWk=$ff&c>DD~RHWFcx)KyZXRE7z+df=Jv%VSR9G!MWFfw5xvmI1Hzm_$sSai zABpM}Om?Jl+!E68nML%xQfhjUUtFd;J8~O2d|N2T3F{em*_wER=xzjoD?JvRT&!eA zZiZV_sh?yQK4}j=b$?jG?!frnz6m>V@w+_}cl*SY0HShJq0s^*dhoKn?byBOeh4YJt zY>OxK#tO_%brPwbAjf<5yv=rHVd0u(?$elMR2>~x?8SK3eVBgiv~?a@cdb@{antlUjqdp9*zUHj{XM{4hmRVYi1(@=bJ*@@@0|llSkR zo6VP`vi*Gny9S2J+PZRIR7c)>LjU~%?Z!Rgh3kF~p3ysdGe*V=#uXJB-3g=qUSH4U zlD$Gkln<6T2Yf6zOe5T}QP2Z~H#|__V1`p%Fxw5=DFdH?qCOGU0qF7E2z+;HlqXw) zr;7ap8LnGPDDN-_Z=wSV^4fCXB7Oj=wh-jv|6v*X7shU_+qM>A_D-1yZ(@WW7VQEU z3yob-z6fCH*euPIK;9Qyf@jBpQcIW?D%nm-E23oP`KIT4XXJaP=XP06EXq? zu`uFBh;a;|6CvD=vEIRs!DJUgxF>Ye2m^>rZ#>c6Bf#F@hcFgvn1XPAzYwP&l4}?> zkj@UGG6Dk$xIhBVFVxjP%#BRT5wir*!EIV{B|5C z3R-1}+-+E}n6nq&5lc+Pl2zC+f6}6V;ifu>@_k~;eB*cE#Q7faJ8wmwdY#R*aZ4^)cl>JE&L zRZUEsGn;Q=q~Z#aKnAV8 z04q#P8yv}=)aVFgvZ7jMZo?)g;R@Va_C=*de#|4@lmciK9CWo_*~ zwiB=a`>!Uif%e<%q`X6H|?tUOp*ezv|^y}nYrzS6j{ z*0{M|x4H3lbG>$brRl@_hWDGbE6Y!9wmasTt9rwQX>*lr;rjCO-PM&xYpYMN=CbDt zwnrM>i4kco}sZwY@@!>DTxuWb^(e9LR= zN}5!cK@QAxsbVs(qc^&Hdk+AJoC+s&3j+JzWVazm2KgcdFGPAcvdDq+}2g_=y~+byR52f zlrxw8Z{Fj+sE&U5Hs*CrN?T|Cxcs=y`gFni`tj3>qR0;c>7VEkn{*Gp|ehd(;q2jB1irnr3k4Oz+aLY2^ca6ud;o~DqJ5dMKHPYJVMeGVkGQ>r zw!MUqS%Ob53d}AH*j^Nzw}S{Uo9-W#;206@#1L(xh<>3&I?^LOVZDXP5(sQBGTT3p z?CBNk;O6&>lLvH9qvk%SOvOf7?;tF)h91D=g_4;;K|~*)U}rD9QxMUICn86uaAVT= zF=_m$L^hq}i)w2msrdYztb#InLS86O&UcP0agpqBO)PN{7dk|4x03^ih6R94a|I`t$Pv?A0gsB* zR-u?+fQq>osBt)UiVre1V6j>r=sJ&88@Qke7AqzO=Lm2GYzI8YF^KDgFcw)7&?t-e z)&GB_{U}wmit!1+0jWF%v{ecT z!qN$Ks#=#lWhykzl%iTjs0^FgsW;k>Mpp)52*&bBb=kz^ZtWE07A~!vvo0Lf84E^b zY0VulYdv`d85Y>MpFT{PF4YO9ZX|=YM8D( zmY-T?@7rv5!Kc6a>ceLJm(MMqKD2)N(7L(t)-w0o#N-KVV~8!vvHn(_p=ipmbIJ(m zG4hGh{^6wFAu+1fs%Z>)SrhQA+V5Gl-;1}QuWN&A8i{S)>>;UGrbvT+ZEdl3Y9|bt zrlSVq$(h+IED)f|U_7MO?ovzwy*xA))71~WYhybZy>07ewD&|{mj!PBh+s%69#crv zx@^6vXv$cGv9*%+?&yx5*p41aO9zO`hNZwNW6@!S((z3BM7~UskEvL*WD3y5^$hZw z+MybwcPIjTKqL%}reU@_AZKj0E6Xc));IoG-?+W8d24yat%Wu98TTK>UA-Cg;8CVZd1G<@je7FY>8kvw 
zR3cem-z5PvAauv(69^>4FcwD$cPH>&s3IIC21`>3{|R(q5jbp#>>S2*gU^ic_NTf) zwg85)TQPS&WCwqmGZGlsr`aFw1I*I<|H8!D$5+<>V(dl!_aF6$SUzEtH}!w1_J(Pe z7UPFfVSy!xh3G#{LZB!GpO{0*ET*IvhGdrDx0m4acZBBb2-;rco0;bqpY0x%=*Sjr zV+ozMFxD&B(L2=9C)B~0;Or5w&D9ss)zR($aBu~Hb@hdVC|Ouhg*b&#a73DSDA_wO z%-s|3=o93|UrRK6>P z=Rsk*5^3&!p-w)*jv)+ZT7)|@)>n`c5VbuZsU#q2H$Jx5J*LPxzR)9~#4WbOIl90( zx(EPPm<{L()dtDGgePy~COI);T!}*9PJ=j5QV;}HXl_{Zmt&aFl^l(uCt$0tWFKn0 zhkZ*W!eAB?5(IM{@GR#*wjE&6!FFCQAv|P8K+-H{G|(c30RUX&PKtCVi#$nDII_rN z>kvimTciQD8bkkyBSzs!0=K9n&(jw~?QMrN+VeBBm*?g#%~|2hTF&bsHD$j-Su`fk z9vn^V?vHKjifZkQ>=}?4r}t>}`F%q%$hC@W2ru6H*EEK-c1DfLil@yNKYn=o?Mw5Y z-`fB4SKH@LZ*@~=Mn?BbWjn_eu>Uen@7Ejmk53ej%JO>$vj&EMoG_}P^WqUNbQ!_I+ zmRJ7x^ywvN=KpH?&)+S7erx*cd)r^%+rECO-`sdUH+M;`EuWa&p;VQqG^INIZj}Z) zvLXC%9dXHe)$yTRdmAotZKK=YaiKllq6D4-pCw+emFajP_nuM=$U&-2*V(_6~&u z%#KA$*cZQGfc6Wf^B*2K1L`;VPW%#Ll_wr%tFtvc0@U5#&RVIRNWcuefl8?B2BzYHoqK+e%aMxszDzi6+SB>Ed>S}f&oVQ zsGqW}dRf!F;yu0NFw9St@9lqsyn|~T7lVR{B2jbVSMxzTx1j|*pe)^NR4OvOgo>+P zcsw5Ib9^qBTO=TX$Ho>ng%`V3l5Uzh$=qINAEB9H6p8~X<$E1hWXPIfR~+V76cWYw zVZ}Ji$t}#yA_(*XM2U(t7g$I~G0)#wW!+h$`U#;H!aIM@s?3EA&`Y%2&kctI`WK5N zlQ#cOS&%|;Jwe*n=7Y@1;5X;lWnKU_@I3xwckPc0X4owjkJ6%Z2@d|}>!s;neNk?F zQFJiM>;_LZ=S#YgeOV;=v`Fb~OX_`ArG*&~F}sE0tMm!OQR9VFg9kh2rnNT4ifd!O z&Ku|Ml8Zyo4gxk;(C`Ir{sHB8bYXvh1fzCWk4Ll%U0mxaKLu zN@2amm`dUDM1Y3}%6>LT0iiE9rHhZ>v%273)nyznlCeF9XcUQJ$mQi?=<&0Ay8TTW z5qN$sCjs#0ZTCus4tt4_Bg)cg{rUFYn|C_S;>1NqfBo_h-g0d1fVOL~K2PTsM~ZKd zB{t7+f~*QGC~9za-|m|DsT=qE=?2LN%~O!wJiF>y-tvVc@}4f|jdSZQ5kMSr6MiMV zE>?CY@OwBO`_#SLF#X=396OJ)aV@?&;9W|Kzk2og*nE3Z*=d>KKaITqiZZe6_Offo z<5!=O(_ol()WEtB>_qqXVS2mmeqIP)cfYyU?`hkbZAC=DlQT-cQ*^(!P%GZe^Z#8~ z(k7DjMDNnE57$x1YNdqB-V>tT+e}yPps8SKC~O--+g)U?qu{T-L$c`k@_2(wz|94d zkz9UW)A-AhqQ|Hj8Bmp}ys{hSnI7wQgILocyicA8O{;dt@9R)HI}i7UhMe0**kkB$ zpR_%c1=T{V!$0FjPyb|}YA?xJ7d%yeNNeX|Do3J$qPqigwn-*Ul9{3Rhc-KULeKhK zbE=-u_lqhu_4iJ9C|gWusCh#Ju8K+vm4QJ{Mpx*7&B-zG)HHuq)>LlR#GnEeR^CC- zYsT4S#+gPfRbG6|=FMg6+`5<2mY?-sK4+s`2CS_u=kzl|uxL_VcjMKS4qT97uM5Ra 
zQf8Fv!LAr;eTi;>I;UOf5w@<>`jxnGg2QA}bt&pvR^2a6y>PM4hjmR4BO3a8_v$Ht zSzLQ-CoDo{NOf$5 zYnn3+6$6)8$hn$@WzG9EmThO13uwFPXEtx^*!VX3Q+ISeRq;Pg5;RSXWeO*jxD$!N zK=ercgEox{KA$$0BJncz`Fsvj^EW)zRM;`{lws`%O4K?0XCZe9j7tF~WCAoWTh`DL z$l~U|()){x159~VD7{fE!+vk74Rz`4TrSdU&9%ksdfQ1cH*|ARK7x~tTZUQvg*jtm z`1moKqo+ePb_s&6PnS3NTwGkAbU{Zsr`_z*OQ@yi5jxndV79AUEygzYixMRl_sV;# zs{_+D&T{koh_R7;!jBC$yN0!!9PMubit0x+*3ze^>l+*HW)M|~Cniw?h_N3YL5o(` zZn-4Fvvplssc{V`N_-lu_a>ARI`jB=D*xF^o*dVwVOQ?dg7o@%Q2lhS-@OH&UyMxK z=iTAHf14gojO^Lxd3XCMgAPyMVVhL~<}5)YAiwX)e!ikSp7i*KS6^=6si=O7$?>)Y z)eRrNs=M;RjmU$=sFA&^5-}t}kg2)|r-U*V6KK*gw@hj-z&9BVm;HLKQo`@u=f zK3*OX39hfXsqNREC>A~*)#lf%Ln_^+R&ElB_?$i7lZML?EiGjNpF^(smBm>Va8xGZ zv7*QmbQvm?#?d0a9NX&hNwvv*YOV3oFB@T)%haV&rxPshHc7`Vi-__;9 z+8Vu(b|Yj~rWbHCnUmT3(xvS7#n=hDaRxBlXD0dbGW^&%b#BdAZ-yp(NnuSi!T?>^ ze@b6MXhHFr^rBiwV`}xqDq70BT1bZ`sJZ$C4%YT~u7iU|iU=}F@Nrj66pXWDzJ3pD zciltGFhc!p!f3%|V94lOkPcTwK! zQXssH|FF)bA;ATWPA)v7FmMV|02U8N&&UTcEX=W<^YUNaC>&Kq;KzX9#LOguO;u+j zVGu&!3W}A`vt9#Ez}L7I^gi55QRA{8G6=n46}e4c@AgqxZmTUIzxK(~C=&Y=qVvh^ zb6-c_uc?|+qj(a7bCZ^q$K^r(wcwze6qbU=`1g=)baiQNP8w;q?!G~L!I7DXTOL+vqW!jw9|IOp1elkmQ70rnlJ-+#h zNDs4B5q8{n>9ezeoKw3=YS1Iu^%9b;a;l^8bu~rTy$&2gFr=_4r@?d|mA3?LY$;ReT(54BqUNe7_2Ojl7HjV_%?}RwH$gPDVG~ zdk-cRyEXdZN#-GcV1&8jz1zp{J5*D=aLh^KN=1A4ihhykBn39mMv6QGvf+7L69g+l3AWHdPIZ!f;-`Y!a zTVIv~2hi`~?woXBj`~`@MfMF~WTW-)mkscvYOZloHYfqFtJQ>h<>%8m7U?@1+jr6J z4~C9fmsjD_8aJz(-hW?q+nyS1;Lbl?j~%`Sr{+9#N|i-Tvw(Y(6z%Lqxx2=j#ndFx z_8UnBTKsn}JKR0<1l0Et3WthIjg41?1vq`slq0C@W#xAAa{X92p=aA0(_0$Tp+5%K zz_R_AdG*)?p`~G*8`d5kn6R-Qu&|@T-ks~6<9^*dIhu+TRTd#f?wdl?ofV5evDdw@ z7rrKy2@GS{x5mcG*ZZe@REoTRP8L^BBLwj3DAeWbSEJ*(%N1yC#D{2nx^2(yn_R~n zLda(dYS7wTHhf!GKK&;CAQq)3GT@iJ?K42ap6DNq0>K)#v!GA(fn%&#zMV4Nj|!|jyn%?*BQ%eiZHoih*eo} zD;phO&=!_a9mg)dGcO-*SkM$Wj3d)>YCq2rQdIE8Wg#D zE6AxgBn*SlBo^@_3l=jIa(QvoMG86J;{sH2ifkPkZ`Gqvp7Ahp_{&aHBp^OC8cKPX zEWlNGL=E1w--3J{loF@4`VH04=)9b;Y2a=;c+1rEWJCMfy_~qQX%qE!=D$=T0rw2l zw%FZ6MBHPH+9ta4&}4zc3L`@(VU$Rkn(T3B{+zmKT->(WYc6t>PqO5{8KD2<5s`Az 
zS}tj8;U+zd{J)6Kmcik3N$Q*U#moOd5SxbT32cDn?K?#&V(@dh_%bAnR^xxIa+V(~*b zy&^`fB0mc61Pvy!4}HQE*a){naoXG`(e|&et3Zd#Pfc(2fzJc``q9&-l4Ed8wGBId z2R@(S-Cfu%{1>_LN*{n8jH}xMF<+5>o2~2Kud_|pd0>+}Q$KdJbc;=E?Ry_ZstX9X zQqw@*s%3Y*TiR5O0pzI<j<*$8gHfdkQ7Xdc>Zj)P{`^=92qhJdWHu<+?cHROm{_6sMBM`5&DU%VMIKjDe6tlD z%~8dEK8gOmMYXNm=>t!q<7`%1Dd>eTBL7vqPG9r$`-TGosBIZzcs#ikyO_3lc$V9kMs{>+c5!L23xX%IHvIN% zl{NBXH2=%iA@fDiv+HuNPdo!c2aQbbUCSQR^sv{SLxeQkbgg(%r1 z{VD`S3*}|X%d{k8qf}<_*Q~b@o1GF!5Z1Fena=_|W^s5BI#TXzb-EpJr%4>RT3{No zb@LSKwxr69lb20XS4~x0k5^x?k0gcY$%a?3&ze4{n`M?dD}7dmn4l77A!R)2zD;6q zR@UxY?7~eQ&D;r4`3!)kMQ8Z==qgz{BkV zRY&mB(EJ<>6Z(@Zhn9*fGGVmRDC=+-T}f018`=!}IfxLgdQJ zl$12*2V{euHf}yIIIh6$Eyco>TL6mU;x4Y{kcje-h4!YA3;qBwv;ZTwBvbYsC!P{l+@5}&-?v9t;3(l2e ztZOEEf!y3SK|ZH~u$F^o(H7$qS62Pj#YITLUtc4KJwG^K7cX}`e#Ac?Gv)r@U;$qzwRiuo@%{-t2B0seC&)?hHtL; z!==z7Jf_QrkHsR{008cCX*fi7xFa{uiRNFHKESNNimQE~y>#bfY9p7U0GtO@CB#(& zG@@w>JD8g69TY0@sA2w79jG{FfnO!c;C3rcsZryz&t;a{uD3P0+{`-L5FNlaiRvmW zG4Yhq_8##o|Lac5duSK-bTzX4Mmf#yBcasxHE?1UZ;)VsIZJ0PA}%)uoaq^=nIE4K zvvIOonK}H7JRq!?!X&1jkh5*A1s6FT&bK;zVgLpLOP$_ng6zJ|E(Bh<4y#-kJe>^h zb=qK`GqfiMAMNp#LVx`|THK8Kad;uugJTK`bpFoKaCrY@z#A3JrCuYDQxS_oBderq zcO{e!d&_AUp2+=s!Y#A{ZE57_E6o4-&Y97ln%K{bC zRC2NAjlbS*snucA?6z^)yZ98>j3nn8v1UySm=R!`f`e;AVb~>VhfM}&G|HR~)EU*+ zNmyCx?frX&zgkzYS0ob@9Ym@FE%}d38n2bq> zV%*o)owOocllx^Nonz4-SzlvSU-ChPrA0^wD`7BIrm)<#?QittWcSh%ec7(*EZ|iR zy42eGwCP#i9Lyfl#oKghVt;8t;%ifyNFXZqcpuKU5(pyJs7-0K4>ovvohecW>*)BR z#x2zNZhq1H3yq1%$;s(xXw2s3bBd*s>ik$GEgdaSy@~%S;1>VyVdTbg=3F$Y^&DTSFkf>iI$0#w>o8W-(|{ zAf2snJt^aW(T}-=@}#IlNS1Pwkp*3Te!=OkqaDxwN7*DwRX6b$3T5}?gHNiQnZ|2m zma(`TOZIyH{dw2VXCjqqp(k-D=iVi zSXuCH1KizqL6e5d&x-s-!1~>lN&c zB!(MwGpl>>li|rv?gwusvkKKOs9`k?^@XkchK zqdL_35bh;S4L!I^D(=-4S%rZIJ}l&K$gB6Y8cLhlvrf6m;V`of0U91pt3$NAl_C5LTGfo}k_ zocZe3J1Jqla*d<(bx>* z*r9+cX$NHRR~#xzln6c^xwsqv?&!HD7d<%Gos`1A-ohxZi+Kc?03pN=r zp!Xr&F=qhv%Yk%{38MoWi_*Y3_bd1#Owyl_h@XSc%f{r=PM{r(!+ePQ~;=Ln4Th}U=NI5p^e&ezqqBV%5P_4pf5mhI}>|EfzJ)4ZyC 
zyP_m_NJP)^HWl6IAoc$0o}c5Qc{bMD@+R?I+TXku>-nN=rHiKVEHdVg*EK&kUT=5h z^AoMcciPX%v6|@x>mcs53GXk=V{L38zuW`cg#VP#gTAyNZK(WwTNN?rP$=D8a(!MH zZw9Q!E-#%^-qXc0qk??rdE5OG$5k$Xdyv<6wf#a=bJw?=TkUsE?0?{sB_#Ks29V;Z;tu!+8g-~KL^g2o!eTzU!2yL3yQX+5 zU9g(P@$NgRHS4V|nC>h#C=EDd%!YqW28u4R27W^6UUA=Dq_gUr`&QK#CW$I2uJ~N8 zgy#NT5y+cdQHY)yWvb00JphrznN@}wBa-n#7EYy3c3}>XeKlKSohig@=xcX9jb_jW z9fTiK!-``2dffQ>Ff+Vmw|5tXq>enqsgc=nr45W5%5pp|RdZt|SakY*=i3{wS`xsw zr=FN?YpQ9WK;yISL?GZkKtx}IIJVKZnQRX59XPQ%_nXJBq?lc9@jq&tRW+4{ljEz~ zT`A|^I;$Y(i5yfm=F6&r7S=Rn`+kS}33egU04%*fp~# zO43}eT0gIlTSX@>jN5EJi3j85rod9`1V@9P=&oB}?V1BkOAnJmo0N#fj~2-cJ_q-! zu^V;%a}aHnfPKQqu|)YskvdsN?QE+H$2hMi!*Hg%Jm~8K%-oEpqw=H^IN69dabjUC z-GbsT>G9rn9Rla~@o^cxhux<2$;;}w$fBGs>8NJKjHD&tOknMR9??}C)!O|0-*bx-O&zCctrW*TL(LinLv2~BHI?#V3&DZojvy=SuUGx%f&(Pza z((SFo^83rmY5!c>gq2Jc#lqy>u(GWD{p`J0cL{@n^gz7bJ+NJt?D{#RA&oB`GpTiaV`|SZ;-^TJ7sZcR3w_+FLCg= z-_6e7FE@QVg+A}g&HTPI+8@LkY{FG@tCVk)fyv#`s3U>N*xr2QE;6!Xf=(+snh7fn zM4oeca`IuNmY9K|tG@}4j*baCEmvNC8;I${hhda~bcceOJ(!u_CzO)Ee0_)z2zCgQ zlK`S#fxm~pkM-My_e;;u>DPnK#k@#y#gm!yLk5uc%h5Z5g1$+0L}Su-<#G*trlm^J z@h&fUpI_1$+ulok_7igL*K4ljUfM|7Hx@C*^8!3h>l)IUa8gM;SXtHB?VKI%7C|Ql z7^09@-2p5*T@WBHosig?UlvLW&atBM3Q~B*cn*w@YNVf+i#<>|=FR-N?>=XONA!Y$ z7~Tck?Z0yji{nwY;%dp+PuUp}ePIv@)g=A}9Dn>cP$BHq-!bLi!;2vNpQQ$1Os)@ehXX0gmOvo0;L%+CT3=bDX0 zjd0y8iI!zjjtjp}75~@N)#lR_wYFFk6s*>*)FQ(#3_W^72{j8IvuPcAJ(mP;JXfba z`|;UzLNo(get$4cWaC!EyV<7a#b&3Ky1Ugw!hFn*V3)i0JXNm}IoI~$s{mPm%-sK> zA`2UN3^s~8h;quy{~FSK`~S*qeZ^>^rmonRs5xJMy7BV+bp%(o2#n#`p)W5-56YEr zXaoE70V+ay0|!3Wq02?nE_7^H1%o+3U_bD!0_6CVG>G-pkDe-KNqYuM3R8cjz5~%{ z{7ey`QP`uO86z5xF0tV}3+y5*oeZ{88V^bwP%qZz6kcy@?mbfTUGUZQ<(*f#HGlDw?d@G4Nh)X ziT{H@dD&!32_r@A>dCP0eN%_q8uuOLrSE>&Ma~sd_&eP;<8qH)jg~&WUOH2H&KMC< z_r`*EamUH}?QC#pTOm_k$WG6w-_QE<$_>B2FZ9`ETD&dsHjVlYyPwa_>s#>Y)*m_d znWgo1j2e=fZWi@rG&=iOG)`!^!Caq2@8Cg=#Y=;oQ6RinQD%l&kE^zM$^j$9HOJ{- zJND#cOhzbr#qxfRKfRVvWD-(Qg8iTr=N+Ik%`t`lL&c6W!qu6ksUm8+$c*vMF? 
zQgU+GqjOSmOephoD8$EYXJ?M5r~7a*d)OMD{!zFY%H59c^3@0rvy+qG&N2W|=oNATkwsu>8H$B-%gQ!PQ+`@xLSm$mIobwJ55>Zbl?LOJZo<_Dn5pSrWew^j#JJ0 zVXv^+u625OIkTX8HLyd_-DH?GkpEoU1ElW0#;%4)Y!Mpeg@4Hhf_z(3MMNmA1WJoM zLx?RwrO$)OW`h-cZ_tbmW|#tv72@D1`d2szb}A=A#-&0OeNU|Q51R4iTJdUwMt{Yk zN?33KaMTV$8y>N!Lwd$s;sklIADK6$++x0yGkg$T-r&~f zjulYm;cfZ$9JGH6JewkB!mz=5dowk+l~u4+Rj@Txk^76kumR|~eDULxe`sj9vkV;e zK2tm-%0eNv@R3h|gA*(B!YsLMeX*qkZHTs?9 zRe~9NLr2I!Q#B&sY4QD;@SE1SCMg=evjY1Ss}<}*~5N|OdXN7_HGP8abG>G@ANbhcnCg;*u8%me+`O2*-;ch|%|#9b7kLA@cej@-#YXYI zlJ4K~WbuQLY-n1##?E~IVJwb3gs=dyKe%!?x+AU=Ocevn%)%(>18eu#X+52V4Z*4A zX~w#l@uE#1q1Ml=zO%}Izp=}yrZ;lhPMr>#4aA4GhfBy=hiX}e{8HW{{`g9*xq)Ch z5h}Us;t;h9e*Jp+()nIdm;&C)qrL+g&ivJmaioepC66r!1XHpnad2L%D?i96aM((h zIPI#B!c$dSF;bMQU+ut@9N6PibWTHqI$B(xvPXRD7aBMdZ4UZxPs@RHLfJP-b3GIf z#(9Y27JtG>2M|!{xy`rVWp3{d0T15FSl>3mqD1t^G7N1IZbF zv8SUN{tc*ea5!b{@iVtmlCo1$G?JDzl14~t(IMGfpk8B2y(~w!Gu41lRsE1QVIkD( zz!=wSa^1-7j#%@Z72ulR%rO!(vI9IV_Zc1$m38^H$iaHQ2<;RB00^O%UgAT0;F7-N zE$eR(s9QJ^HNwA`KOi$nBQaV*mU;g0JqDnPYf9dcYqkpsl4~mm52GEFg17gTzSmq; z*BxTUVUJgfzrp^T9K)}I+l$s?gQ`HJBbK~ZPtE5ai@dKkwp_327DE;! zTawkN&!k_Wd)CYfA8|BS>u`zNTDebQ=EqD)(e@>G5)oi~Wl8(Q7)g&L z6UIELzWl2Gz-Rr2P}AE{zg$`tGrjlD&ITaq{TCS-A_{;rjjBfkzl;nlK&Q5N2g;HB z4rTcHc-$<3vVub(%SUw4C~Chq}vVh-uVYk1jZjThX+tl%v53a!vp$-82Smx zjK>d!Q{-F^b3SvvF@&w; zC*@PRiDroLr#<;|Kg?K9p2VIn#wHUBPKrdCB*XRNVol3Y;8 zRaL#sv0DW_jva0yMNM6OBu^smBCSdI{G829E(d|>4KQZh1^)4dNNR{f^Ju04!7<_1 z<>zefct%6?1l0Cnu~Vu#4p7Wj=WONkNWdoKvitC00duLHTd}irs3^@yb>Vu}z*A+p zaRJYkVj{&K3ebvw0aruirs~V@CfKWj9%gZVzy2G~3`lLpC?`u)nV4jB@K@Va2g)y! 
zhNVpavGdG{5O!7$5|rE8%j~U@6yHuq%Yg;t2gzx5``18t!%IKpa3d|Oc&HNq2nY5W z9o}&*jn^|wci!(w&!=qz4K`_$1UL zjG5ho{CQ>pCTt@w9~08H8W+VTu?`e>N~r4NVH(~AQa}RV^M42cn-L90V=J z2*KjgJi-X;KRwW2)1U!0j0L?ovm&mfD7^g8%i1 z+|YPMpsNVsGhD0@Y$YtQ!VVxHoWHzr3hXyVmj`;+Rj@UX4P`Kzi?qEh*Iq4>zM2R` z5&8G0z!q#o-Bm>0HX#3HvZ_d5&uJ^__ksj8JpxgP3U9u+l%Xc^GOmRG%Lc<$IX;}9aml9iJp*ml%}wdLwli@qx!-2)|M`2MZy=Y`Lw|dOmb!0jQTOmdBPUd`0XlyWfabxlmKVR@9g&+xtyh z(IvxeCvN(93Fhrq%&UMyVTUAHGA$)ecbM>!Q%ssH_+ zs7%k+hAK@B{KjtV{OhDHaCSyl1U&=$l81V2f+H{kn4+m?g8E*D&x^9Up|#`EK%uhw zlRS=G$QV4J$KO-Ii`>{G>MmOe2cr+%^H?Hfk5a0tZeO`;lg43yLo|+qKMhJx=qJFD z{hM3;)r{2~2zVT7?hRV(W!>GEk41&Hji0Ry9ULf7&JSQN^>K$XzER2_sl}|fk4Bx( z(%OZT(fl8_lynmP}PGjWR|w${#dI`WBo%jTzOzAT?+SFIFZ(`r#q_NLZ; zEa>%o0#arb7<}K%e=PN>KuKEncb|&!0nmn%OV5N^)y<@>IZBA{J-gxFRQe*;z!$5B zdix6bB%Nxjo-69sJyHn<+!L?i)_u`nGh)6rt>xLM5sEd-WMGE~qEAenIyk27MvGAz z@+8S>FnrA?>xjJKVrb)JWCb;Hn$34JjZJiV-p0)4F`Nx!(9P6JPAe(n5;^GRtXJJ8)wN{tR&t7#BTs& z0ds93ttR=26d3>fDmxe$h~7X!!xCkT7&50DJb2v`Oc<-3Mrxunwar?9vLR}=h_LVZ zB-t!#?g3&x_(H_^>`3^2rOfjZP|H#G6z0aDT^DE`1W7(b%N2{3X-AxGb(L#)nv;Js zDS}=_Y)HSRgZ*zpA1eRBphu@A9~aGbh1P}{zZ0N1AX^fMJFLm@TL+OUoiOQ@jqg;*cEO(YVQJ_%Vh@ z#sdgovC1{=*!I2bG;V3jNe*kEB4vs7ytS}0{MW&~hC;WWy$PFEJ7Q)=nNp{MhlkWs zA^-eLcyqbVq(%WFR)8ae+WGpQQ!hg(Z%Q2+k)*5IgF}on8{?LG`O9u>*4F`Rohw+qBX64z5ll+JO5W>!$oU8DW2RYl4h@>x*W~XES=SE zZ2aeqJayqtbJv!!-ew3XU1Xr5q?rdY%XP51oHmrz!lk43lP>sgG8jA!P%p#Wo4%s1Z|| z#n`0DxpjL!(PpB~cf*D;%It9jxCK11Si6UTUzJ$3+&wZA?c&_{?TH0UiN1r3`60z# z-zdG}YjT!kkq`-4fRNDZ&5I1b5ViEawA9Ib8v}^yYNcb=($lJIXm|T7`DtUu#|e~S zmx@3NiT>-P#ML_!+CWbFwu1?|Jlj~Sh*N1CqTWDT(v$WL_70OkMu;WC!R37ePE((s z4j#VXnd6^Rk`W3$}wvv&cfEBkno>B4-B z_Z`mq{<@#`*8VU$7gY8S;OuyTFDjN)W~P-So4CE~*I9Y;IePlEx0eMToYw9w59|m% zhXmYNCyDr4Of#M(aCsgvDYXmS^-6Ic7UL*Kz;G;uqp)0rlbGruRj) zPUtOHO}8-Hm&C;n7c|;t>YXBvjs7TPaa2?{(=$Cj9m?%o2HOQ*sTr<@FQkGmBr4GU z82?Q%H=E-rWk7N9fJB=Wkddw~1{wvuV?1&`YdK6@f?>()Fby;nV;aXA6u;wac#@yb zOc;V2`{x?Z5#!o%b>5;1i6e)dt)t#S-F87Ha3HnzitLJe 
z{bR|d4$$&(u%(mGv%o=E8sU;f4o4;&KmeYceh~Y;Aw}{Nhal^+^54*#&l*rwbGNdWenB`uW83+VGOse8ba#}cW zwIQEKw@UIYBu|`(WqU=xgB?LgR^ku=8r19Y#Zwahl zdL{wZfb&C!;bopxwYvs4kr9t%B5BbTj|L(o(G3f5Z+5;<#M(_5I_k|t1BeD|OH)}> zIj~YU9d>tbM9!R%jgj(o74`Pge=a)uMw&*JQkb0CR56}vyD2#{t&UlkM8?v^)~z0y zRK?*}o7g8q4kVg3zqBnShE%w+e^-y=cloa5uUVan{Or!fUFRr^i-@XNu*YkuAA!EA zALyt`)Yd#C(-8T>OG@;ibdT&ruLKpn^roz=3Za$?ymirA3a5+k0-zd2iK6f zxPFc^@cNkf8{^vohV$_je0f3DyBc4Kd2q0j^^fKfPSB7d;ReLm#uM>0t->RYpoSL; zG&@UA^;l~-aMG%PTAM7|K9wpW^KU}UHf&0KuorPk#&CghumYUvVX8Q-{5Vq&uw!ir+=xK+(yw zY4&XDvM*3m{Q+fIK|loFueAke>2|cnEj6b5Pf#Nrs$AQvhD*Z$TAOtU#2N6Rh`h5>L?i-w#lIWz-N3s5&%QMQRU(h1aw8D(HnjH|08WW@wl_~(O4)`ZkK zhO!d&PktyMBjw!$1vU5{W$#WBcuv!TJ9Z++C|=WhsasJYxo`+4DU$O>9w@+e|Zmy1j)c)EH&JX1%wsbQgLN>S_6b^&Jl-3su5&+y>@BTx+C-pr!ZbA`dys8AhE zrCl78pdr2Vn9vnbS>3-8j4Tp%7tZu-17Wz_p=i6PIkCdnKd62X{}5LO3@AvDlnG4AmoA>_k=81e&& z%_dvE$5DO38ae8tNf7sJEv_@;7N+K0qkths*s25e=1r0M%JM2p)ysp<;U zP>u%gU*4SSSMQ}&sijSktx0j@QPWT9?90#M*Lju&vzB_xH3dckdbCUtsJ168d#6c_ zp`@)IZC;N8_2$e0(cTi#(Y>4Vh1+uUJjVDIVyun^@=2kTO=TCvn%^s&!B2iKXbAzJ3HOZ;x66|IE zkC~~Qp*}#zERO%(3B^pUw|Ro*-+vn3&COKLNS6dzd|`C(JF zQRDr1cq*Bh$%#j_%u1|o!(oP-khW0zVfa!MFdMF6CY%gl zw>^jxHG#PYQVUTSvC#$LQwXs8nDdQbX{?g4*GJc(26a;0-;-VCy&fx-RM?YaRTj4TIlM)GX>97dq2hSILh!?R`yI+c|Q#->~FoV zM!$@}^*H_JfPzwk(8`O=)*zvXH6_A*_-B4EH18Xu!!Jox%oIz8adN$_1a@f4sEL{F zn*X}>YCeH(PN3J*uDvG8`C@lW^M=~m8|uIIe(DKLUJ@k`{j|eL)&ka;zz~-ey$hHJ z%>g(Vsn1BqueVg<>-RZz_=Z|ds^(vFj}$gsoqofb(1P{gK>ZR$SPWPgCbebae-lJU z1UxnN-E}AM2eg})MZvh>D>zBs7>PuXKJ*BUd&6-?VMxMZBpfhvMWHxck~nQ*=L_T! zIA)-Fq6jRL^CAcqq_g5u#%DZrkCuI>o^^pL5k!jqGDl;90htB#cm&s}52~7|NfE)ljx)V)aOUPFHS33U%*hhMXYo!ADp? 
z2h_1!CS&e!6%2ll5U-^B7EqX-^8==NT3xrii4*gqK{fQ__}(sGLqWytKvQ^4!`_Aqp( z0h?t+{=@V2jCZ!_g(!;8k@0;cS$M{PqPa=`=B8(E*$=|#-p$XqN(kc@3M8jAdDhWf z>$KV&7L6SeY7{OC^%g*FD`jymXK|sW!QVmf&r#IS@SD2YpPk;m#5p=(t_TaiiHYAC zTJivTn>*U$ACa?z5{fxn%*=?UX9BI!%+;AD8-z8tJ^xVup{{+N0wVczC1f1PEk!*4 zsm+a7=G?hKO@Jt#|0kn_%g{4OLr2`&7n2rp!hO63xqNXDOX+u^c6Zw8Ww437K_6ea z9|lA=;Me>hZECT(A4ix*akWylJTVppWyU4}2aBv;AINbEL0SlKWafE{^dir%}LbK#YtYbi&}5-KV1!CI*SOPZ7=k%RHqX4f)eGLzDYk%!1sjt|5-D{EeVh; zPF_6@!2zvo@bLbTmn0_Q4ga5cc#w~e$)2A~aPon?0b-4|RvI7FNt?lfox`48W3p(7cb5YH8lwVB%lSO zx98_bo{#L!1u!vyb&i|s)Nz~C%!<=LCDV|;cL4FXxu#{`a+{ChD10r|a|7i)wef;I zIzz`$#CDFg{3mqXR|g@^=F&Zd(BRIx;UaO&wL-X&G$>uS@@x6?>zz5?^ku=u27e2G z?-3*b1rn!cN&w^x-S3fdAj*gi^$;R#p~~TxZ5k6v$VXdiOpjX{w{m+1OgMg`9u4%> z-9|fIlBlIj?Zg7oQgrN@Y^iI#r<_h;OYyT%#(U!~KQS(0lHbKe0iFG%^U<4NRJk3K#wB&Znq3f zu?^it4otTR*h=(>C(r^k$=;i6Jv1rqn<0z~f&_J}>3Vq2W#Ob}fx6Gko%AfI0I()@ zdYCfB>5KZv&t+ha0M-h&U8Xi#6q9TZWW>kXfzpgf)*(`~OvnZV z3vIyX^(ESm_?rKVqow~ZXLrKsa{;AgJ2^Gmcy&8@{5^I2Js45N;^Pv@nP$oPX32S( z9GVW=+Kn7^s=dpqJn~B%p66IU$u@cV z$_UL=mK(`nMdj8w60n-*$rnK_i=s8Yp>bN+PIr~5VUO*W8`s>Gz`Pque$p=Ftr;(*=9b;Ru0y$jR=!Ng`u^mj<=+&$U zz%ya*z|O(p6K|&Ot-de%`ne8s1yfVc>gu;<7dVzzJMcyB^-WB<5;DFp+|@O>u{mU@ zqs@cJN{m`%)f9o)yG0cN(P!$t>P}#OhK4V9b{!WrB~{i0mvNx?t$=M;Tu!O1b;9#M zyV8x{5R3vaZL0;2tGkk({CyTHAG?Orbq6E%h7&Y*{xCa(%jU*?-*GPb00 zjZ?zZ2c|*zq5wfdVp&D1&>nRAEnJ%?a4&4v-2!+=Sk zhY_8^2u|Tfv|#v~_QAULL3(Q^03K;9_~(uxMl_a@OO%Okl4VdTIdnHQJk35}yM=p% z0WAQeuy!7stz9;g-L_C%wQcBnW;87m`z@waZ6aBVNY*m4)*x7HF|yJ$f*eI{Lo)~i z*ClSkbF9AUCIj=$#@1RyYi(mIEs`}f;JA1Z0z%0?0hW$*V@rxA$yx&vZmqS5Hd@BE zTS#PGlAW$G3bfH7{!80<&D8LJ53tx|LA26{Pp0J;?5pD)5(*yV`52ic~$V!m-Giu9d13ayZW2cnakd%&a-y^;(O>=P!;Fw zVE>bW!P~8>ll7u>B=m}?;s(^z`B(FNs(C)OeD7MwCJmOiMzwdwLv3_NY@?JVYG&3q z1vW^6P|Hb7t2#m68q+L~MdhS{Vo79A@3Ggfp=lJkdoH$j{Hko*qf+nd?K?3wb$jJ) zHcou~^s(~ehw|0c;-%#m6O%Vv+IE*$F)C`jo1|easwjy(Oekj5@O>(4J<6+H5w5zI zRsytgYW5JFnRBMXqg5k=?68NfQf@zX$Sn+RRh<|ay}vM@`(d>l zt2e)WX;@h*>*;!U<4(Yfm(fj4ze?o?CDNT@$#y|QJRg>HpwdZhY)pbr>p=*7U 
zH{_3XK&1A-(9zMcYx4^k@80Eo`z-vp%29V*$}eF&&#^2nr*MU?&GH!BBO`B(YLc-k zYaQ}SEngLxzQ`w{Q5LEeI9ytNqYt`UNl|z1@8|--|LExT!J&&X#jaX@bQL$Ww9>z@ z)TMy!Qc>emS?j|S2DPcTsXDe+^L$?wnc^V#=Q)JjVsl)(P+jL#!*{L~xZsj;AQ_5G zbBfKf3jmFAq5@y!E>niKb!=_#*wL!mA#d3(YuO=cO043Blvgq8>Vul)(M4sh_nw;E zdrExv$^_HAm`vtyX^2vG;BaZ^xK;C5HFet`Jq-_yGxQGB@bK4&-$BfL8Q0lyt+)4D zclYI?!F$uM^X6yD=4W_QuWLKoORn5Zjo9H1&RKsV23Q9s$)0Is=WjsqGe)&d297~Q z*Dy1OKx12P6xjq#i-;vyXI!n_^>O?x*~^&fYYHv{f82p+;uv5`XPLRMY+Xam90N=# zzC>FuL$a3<*&E1Sik~rjT|Yeinj?TkgDY|wV3?&M;jTTC=nz1p1!1}X1<8h*xrLj% zhMGHvm^cTUxP+NHg^<>42$9D0XhXc=!b2=(Qyqc~F>-={Hb@T-v{-DgM#+Z29Qy-c zgY+C2`UqzAX+fYma1A$QCXypkTq1WnMD3(U?sRmIuy72}rul8M_k@C4DjiZ{EF83n zc3TY0H8ygzbO=AP4%S><8_x}X1SWu-z*5u1Mi+%HYWoJ7u_CC>t|oRgh{7gWX%VgA z5vW=#JtK376W{O!|Nn#5pN_8aW>;UUdk=RB`PXHV`;x{hGU?f-#?w;CC7JYMi{g^1 z^|yA_jqa`+J-wIIohJkhiR_BNr!Q#_o&m<5zG8Xcxapq5lyjHEGP4e}DQ}F7+!+|W zplsVOYDz>v5nRn>aStKvYR*snfx)3OC~fJWRGuo4rmEDtpw4Oh%FN8e)%QghW>;2T zPEFrecOB*lBC-o;cQQ<#W}DX42cizaWL3vjSxbUI6jIA)a=3n#HNKTKjLI4iwU$>g z%Q!yS1@tFb5Sdj}=2lYSj%fsh#;$nWE#$sp((Uk9>)_zW#%}ZyJmFh zH{;8M_SuXG|LV^*F#e(vH-;K8)C(h1hFIu`mN;23gvEP+g@ec0k}CG zyfZafG(Q6v`*~G`thK-o{QcMRKYuU$&%YP``D^_5uif39&)Mv~1w~t%n)h{d{nFiY zu)Fsc+yjeQS$RuBqm;#O@WX)y8ySysw1w;)*f}(EY-IG}_{6n|iMtC6*^`q`xcnUt zpHpvV5Oa&oxI%YzS5i;^9(C9MN7{RMrJ3$)!)p?ILoC=86?^X$dyOX1sL^ONCe}oa z#@>5xSg>Km0tzUCV#VHjH+{;UIcI<8J^#eF?&mfnGkd@1oPFN4?!_WRftl#V@40?g z@ny1IsSNA>VXeFC{Vs*>0c_teZe-km&a|Hfv+k4SmGEWmtCiI^TU%Lsd+AHdag)5!6a*05t?UJ-YgcHATGslocelWqhM>ZXmLJwXy|2k_lu6s z*VVNV$?v@JzUoF!3Sg;Z{e9=9VGRIK-5SiXK!$3C(sRP-;bk?gq(NR22+D(QSro8+ zL6}7*TgVCgxWfV=+crviP8b8cnwE_M&KikFFh`=fEr53GM{z7~YUNP`zyx6py7>TN z+ugAQkc@>@U%JjRFo_GM#N4gJTqX6LC3RdS5ag5ubgKhe%TWRjv7p!03N#2{ZA1al z>IBpUI(R7v#o%k~MKtY1G;P5&t8Q}#wm5qs9al+1FFE^1>TXYU+=C6>0`=_u)itdo zG)(Vln+oHM@2eXKfGk-pF!81;Xe%;g+njfQq`9rM-rkm5PeG z7ywu$5Y`HpVq&s){SIKah5F|i3;*-OFY=S3nnZ8}zL%6f%c`84Etp|v%&=40tYpq? 
z=E7X=%5uR9w_trecXcgwZvN#6)wjLNva(*cyjH8A9GjA@6#7Ee*+=Mkj3$ZnYJM(f zdnapSD+Z}R^_~Da*t=}fp-~HXhe~&(OaLP)BKrY;F2pKpIWlfSrWnGU=jmDe7|pbU zq+Q>lSW+WZR3+8gsZX2q1q$0mkQzG84w{{dSYCOtwjR5^lYDqse0^Pya0T@nn@QvJ z2hBwN{BlfszQnsciM(>D&R!GRl*`;AXhKA63jveUUX%&^FvYIB&#b-MtgXw0Ks2Z$ z=oD2d7FVg(6SUjAEPMK`p@6e*$f|b`bYznTOnV1GDl8)P1Xr5$Der!=Q+AR3^LXKy zWRa{QrQ8zL#B@|(gg{_~z|$AP&tt`&zd}XEpwn`&Ekyh%)t@~ZwZx5ET#BJiJZWxo zPRrD-X|Sb?Kbu>Ko1J^bnt9Hiead77j??|AG@l8^17r*4OP_kcWCbE8c$u5Dy`6u4 zLcBUBfB7(tB+%|a0`@=teuX3=9{l}}^{cbd?ym5xJd3J&TlQ?!`exk5X3W~gtM$#u zwTl|Bp%#$e#v)zD|~AwZfpC^@=7#P4g7R|F>H1YmT3YGna;HD8PKn8!XV$S&@-S3 zpBC=dkQj4Q@|f$uuuoHq*XvYUS07~^E70qzV8FZwtP+tCruUCuo|uGrZwEh%?> z8foMapz7$QWbUd6)TGFzx27$UxMGdMS|P{sj*9srfDn%2Na0ALnDS{O>htDE5&yI|ZFpokIV$JyNh z?A_lYv;Y(m0Nwx(#IbPabtfBe76tn(JPJB$A;9u5R@D*|V*!lCO9IK3qZG~o1vy*Z zS8CV_!J${vRzMTMSYsb~hafflQvkEhA(~cRY8u8uSi}38#sV6K!s>>?SZxV)97;hA zNT>cKUX%xSNIb%Qw1S|T2Fk)g%fu23BgFrbf5?ln`Cq)sKORxEfT5}C>r`LrSk3~c zWN|)!VJ>%ZF`c`dyTUEl+9=!JDqi7caaU7jI5A`64?23Rn%j&BMB~O*lgc`icR7Xu zp-N#daD{2Tw0BeyNpdZ!0=}flg03+P5RKMypaQx15~=>2-SLU z)TDO^WR+0q=5(etooPKxG4ATaHMDA!)yWoB$+UOhVeBjjQV5#ifRLz_)!4O-H@kaj zdwY2&C*@~nl?R8}Cy{Asx#ppC8Q?OjmL#hDM9 zV0aD;+atD3TuqZkL#u96n_hdj1!=%xU>M&!=+I9F;VS)P-W@x{ujDp5?lMmIpiO#B zGJQz{4jK9KPhSc}B?x5|DOEM-r{<_W4i^ZB5DbqMdYLHtEJh^snb?a2R9=~8LyPgi zuq%V%}rRX>loGcJ9?QJ9?5CI8F~3r~A{V9>6on^apx_ z>BrOTpk;2-=4KAa7P}n%`P1z8A@=VVNI>Et&=P#0t*a=ybEpdSzb#ps@ zYbSnv^96FExvSwwHvtGRpJKTpR~D{|TwkQ)EO2!_czp{Lcx~^+?jOG0*-hKtNt;^; zZ|N}0Ekz@{c&XMdrRfX-^Ny_)E0l{FL$veWs_P8m;dl0joPT7tR_<*|Jc8U<(OVo+TEP{h6VlP7IlB(Lh3A z^(P>O(>3&YA%BI&T8aaE!Z#rSTzK7uaXHB1@FH4HV(?beL5#P+x6QhNiTb?0w!-eIJZ} zn1TP_F;-aeo|GcLsSPUTwMkvWBj!}<3O9dur*wCx^yr{|f46#jt88s8YnhwGo_$WA z3>c<(501F?lbyPHZHV2@4b7hAmENTl0fd(4J>3!0tZ44a%cYekaA^gWC@Mibzg)Ys$E~f?IkQkB zEmtWeTOl!B`c;xhcr3_uj7b)GlYz=AlBsDjA`Li9O+Q&&j+tAConMSzT8=}!NU2NQ z*Ne+Bvvbdzi>qdkK5{DM*4AsA7S4);3hYvY-MzIBLaNc*tE1 zW^=r0Q!dEIB9Of`wyBaS=8Ih4&7E*4qdGpxK07ZwJ5z_YyYgDyi~`k+ 
z0@V?!@6K-0`9<#eMZxxN;tVHfbljn(Q@*kh)z+;zGH$?_wnVDI?5Ag4*>fJ#vv}Cc zP}d?2Rke^SfJ1Npuoh|1aD?i#u@!Z8UU+>|c6C*8dRBIHl(T>E7O1tkp%l6;(owEA zJ?pT@^`S8>`-crkgNA)WR@F@^dF4_Sb=c-M)AmmL{9?1mVQ5EBAyZpEJv#}WA4=O6 zW#S+b@Jy?%Eo6VcEJ75(^j9q_MW6w*Q3ADGz(jz7Fbn5WgqW3t_E@Y1 znrETqS-Rjm)UlU=!YvKNOUsMw2HP&jyHG@c$#yb0yojcwFgW&NJZ2SZ{hcMa!&pl& zrt$z5XD6mOf>@lLn31=fov#KyP}9y=L)TVTR!dM? zLjYQ+l-Ng#*PnM>H&$v-}>I6kV`+9+9H&qapVc-my>IQ`);#k-Sa zOCTCo*6S9O>7`{GM#o!5$J=G*1=Q9*7#@yTT1KWs;3@DzhCLtJqz{5rkC?1r*345T zD|Ct(#F%yh^7?SuOQorT%nuv4qA2hS@sUvPqUw_tR`?*5*L>eR@Xr9 z?e1RI)>hW$CYXp|hu8joCU-TitIw>oMn0nem0gUkB`C1xJok^lXtaNr#9e*0v<#&0 z6AZtBVf+Zyi9&OMsi(9lKn^4BfZ&a-=Nnt$+q*HK9hKz|XK%`c4~6DOneZE@`!l8< zApw2COjammBA~oh?{x;|b((TUo_a$ISc^>1pSE?n*EE{v6zQboC?=*$MkNTpNDvK= z6^=>(NSmA`o?Wck)Mh#~;>VefU0h1wuBNQ5Wo>L^t*xhnPBiXo&V1C=^m7Defu?JM z@c>~Dye64}OWgRKoqP~Qeo6lFVFt{xzwQ3#|9AX<{(bM?f8V^hq>hffE~|8{sCFR@ z_#+Oo$eq1}le4sg!&IcgGI9H^O&}3TGK|^Wj@;Y{U)v0ZwCW_w8CDaHj$0s(X4l2# z5D3dFt1kveoS-o(uUs;xM6#1)#AN%ft_LH=H;;x^t>P-VT7q(OJFaKIgg)i5v=Y3r z^=fxN>F6lu{G#aUy7>4cZ+#PEXQ+|k|$DFa7nFsy|?%E^Z{9W~QF^hl4W%OW{D(aP=?EJ zV$J&P9VGBVERp!i>g$^_m_aeS;5kM!g%uc!E1)rx$##ZBP*sEK+ida7LWRN#&5By{ zxHqakfx;dSg&jOajqSy?Z2-v$SM3H?Vonb*c}4E4+|rZ7_L1TAgcNrx7eyVU9*eiZ z$Jt9lpVu9M8#K-a1&$>OF<(gIEKpbzF=bB zIfOC!Y>6AQxs|%Mk-=cZjZ&Y^%)VmHL=KSM+PZ9-+pU1Eph`2NQ0`5pLhy4rGaGR| zQwcX;m56AA>RR6k#?vX5FO`l*CRiT^6J)(kFaVu6&5mTvykyM)sV#jfWNiGw(5QQR zw{b(OMs<^1JwY9$!3{^De0@Fh} z>*vo?>#JpTjjl=QN@>|z4K0@aLvBoV@cv=a=~?#4NzTzx*1FDXf_MWkJmRNw|5gawxUPJoy+SK%IeSsm0)xv z_UJR1?#nBH-5saP3tog{Pe7#_)6c*$z(WhrwFp&1+$0is< z0mjPyh_QXcIL5RMBBl+VUwSmf@@a0@eVYwpe2OZxYMYF5inI#LO^Pe5QZn@1 zAES*NMRaV$VT!eZ70TREJS1Fif|_}D+%q#>mQxUn_g2=o0Yf0fB{;Qe*rEWqA#lo5 zoN7BtX**w;)jzz{pP@N#}Jer7gqh7&c-4yDli0LFG%5{SBW1Z+dA29kFTOsS*e zaBd8s(VsD=U#_mDZEY3o@0Fh%*PR?Sot}Vzse`?$gT1Pgx(BvbO3X?hk9yR%udK7B*B&26TQtp-hP7VTYDNW>Ptt<$-IU|UvgS>NO~NPaRq z12~oyZq~*|!Nx`bmz&07#dY`kPL~Rc5qcah7!oD?G+a0!Lhxy%SZEa5H%J`s zE#&?{DmX$ZJKvr*p1iqMb9vEvan^ozR)4TpwX;>RyPLnZp3a_qIzsjB9dsHRbw>KS7y 
zh&15P+-3w>p!ROvu?Yv(%p(fTr>)Djtj4&oLc6F^t+F0`;CA+@Hxm_s@v2j8glfc| z11tfP?T$#Sy;s&BZ)^uItvqJ3J^RRJZC!d*jk2JVqC&c&L8g`fYQo54)|1S~+|}rV z!@B|+P@#BRd2?I2vK~A*s)KmSIEfxHPorG_>gS^n*U5PLf66 zklhH?vv<%VJ=f49K*q{R*vJxPXpJ_sMw?hm;@#ydYl7Ce%}ew3MP<)C9;t(rDhG_7 z0|sI&&JKMywSFrj_|ZJY)BKyctJ`>E4IQvQ&aOOYg@1%$FMrxz*~$Tcb(GS!mq(r) zVGNwXu1XiLsE-E{EGQ2Elm)_902P@HEc?dXeHPe#A73F)`~5h4ODaGP*j3q!z>O8c z5(p!;2moMJ%mwb|Sdclue<$U#L6`(faSbFPN6kzeriWp41kkDiC`AzZiqR1;byc$S z##y^+D(Z=gO5f+nrJy5KO#`K&jRKo5Eip9>l&rFVFzWvQVvZG&x`)-qSmAXn>~Zp{ z!Xnc5ek26`vAywsari}kl2Z}$d}N%O>DSr{qF~y)Y(OFt(V)G{vc1cuvDKupRkyiK zk3cjBaa)}r&v1SbX}IgMff!p{rI}lbNy$-+OIGs>R8rSPddC#_49rEsBDKn@t?QbN ziz*dMYZQA1Oei#W+N38!6ok$%Ca$c$SzJos%*C;1pFzoecb|0!$)p8@i0XBaOnL`x zhbbOo6A#Df0W`+*DQ5KiB9J^CA61_m*PR{{0HPuhC`X6&0B24c&re$}FIvye>Q7FJ zw|8S1OrN$c;IV3K(=4x307F-;dPz05q*}SA3D?6-AF z*$KtHF{M)8oE#C}-+%^(o2#Dl6XNAL;pC+1@Gx(N6FofULmF`GC%Zz7<;=(K@0XvS z5Z~VneECHEynGhf~%{F ztE-BWld99xGEf}66TiA1KD*!plO)L$Js3U!l~IWH9)R28^5gAYz$#E^`1%&rJN}b^ zZtc|W9kgOFJ*Qay-F@~c+318cv4RSf!4bf~)2DowxSE!7r`)ZSO_w97J)~f~IHu28OMg+BHjRFnQ%tB{fndH5epG64%x1FgYE( zzn^}6QwGoZMb7k$XEPClgngmgx|PTj6UZzb9w*K%KB7!G!CftxVu?r#Ooypv<8+Un ze)q&weZLTlnVp20E!xNuWnwL9g_nL2XG0y!nPY#)SO{xi!gQ3v*-HXW()LgKsJjbu zJ21o|2q|sirfB7bGj&zs*<(Qoqv;(b3&9q;XrV+F%(1p|cjp^oYdd1#R19OXbn%LM z4odpYi2e!>9@n%*Y7?zc08|~njjwip9%7NnmiLC?#0!tA6`z_lKh{PNYYl|~VpwYt zP(5QIq--XjVlJp^2_95O7+Q66aaA*MRZ}rl6LDoD33(kc5b>(SFR28oUU&_*O4=f( zu1b!++Qzo(QYr#>QvYwmq=+P+3Kp%QhhVIZpejy8R#`w)n(tplKVivx3aTPz*2=cd zYKEqYD7pLJl>@@xDFV;kGq$=>t*TL_x=F6ONuj1my|LA>pX@v}9WXTFQcut;t5vS3Q>|%It8UcDEL4e4 z*6@5NrGn!}E8Le==EE5XhD0dTHQRUhIXAWGRMe@pcIx&GS(3*b$0mH4tf1wUxYf1P zWp2v+!rK{6%mh7rZ2Td4%xiGOt#8PoZ^*WvY(pM%9h>l?Pd#R`LZ?|TW;ux)8(A0U z&9~R2%k!SA^Zu*z!Rw3e^V5#wL&C{X^W}LPQZ3qccG`4$T6=hyy|5Hb8gKwpO@nH3 z7P#WeEtM;-QYx-esjOFTYSRUk+=FHasA3Uky;@6$Vlxr=xfs!GhyJt`xkpS5+ zn6A?^p2HN^?tc4vf@w*WZf2o!QigPVs&sC#en+?O@=_KUIJX-1cL)c&%|{0+o z)g|HPy5sV^Wq-eTWi^R2A2B)YIXZ4NFrwStuS@K~wREUgHz_o?>kf}OZtcXKo#$O$ 
zm0ey|09x`aXLmn&aXFl`;6KH-ADu897}4gPJ~9r@u@sug6w7&OB>>ELJI}}It}Pu} z4Xv1Fq8y<^sk~0HxLScgH0kcMuWrIbfNf|oV@w4AZ`$gM?VW_ZgV&2oVKbcI zNv1!Q?$bilFRGHsE=K2;N)=X07goq4dO7pP7RS;mv!rzS*Xg1K<%+}}z1|_>85}jD(ClaCym$7a4vyn@52EIle21wvbuDsbb--Eh zHb?w*x>$0Scvg{YW2FP6Nb9^_p z!}g9|&Mo>4jam|WRPVxP`qlc#xRG(==^0-p>jjDQEH%?BHpwj_+WJYDsm~(~S8uu4 z*ABJyF>y)G_Fh;cI~jd@S$zjNuz>>WBmh(T4sxLN6?st0RvKrE!P#Q8poPjo3Ni*3 zUMf~zYB(Di5V~asuoP=91*`-|2@X?{0LCBC8r{+qc3moqHbZUrelCc$@B5D zv;N=31Mt9?lTy6z?5&iZZPV53-AQt4?XYSgS~Rzr))9a}w4oJbMuD<>q)N4{MvXu; zXzehqZNe2*D&>_+6;w!<)L^O_u%$JcDH-biPtYdT;XD{57P`$h&`+l-Ned_T9 z!)VKH_yqOqvUrw`LEiS#_%!jdOf$)Bu4wm?| zN&iV^=rlWgX7&}(f^HSxT!De@_G;k$HTnHD`Sxl6;ObfX`DqJmNW3^}KRIeTJ8eEb zF6FKy_71w#G-(u7Dr6L(^2+1~hV2+kf7WaOYxdDJ+kcwvPoHub7%_m>+Ri>z*gwwm z%))pskV`FKhds*+#TAvVA(1GbC;WjS!jHm4{DRRw0n+}#vVjq@kHZDs{DmzX`0d?= z{eoqyYkk&NYHu$`fBic1$8QUN`+50qzs!I6ME`J0zPxBXJS^MT$l|WN<*vMe60iBi z#Kpz*)s@_>&4P`!;)TW38BY8(JA7*TF@4IF&cu(7TeNrU)-|{DLSuQ>RrC<>vXZ$H<*}2mAE(9mv1Cfb5Z5B>YXHuVTvQ6 zL#wDtrlL-zZ^&ke`*`OdVt&bMglgPO#6VdWfv8IC(V^0~~PsFE+zD^fUFObSCkj^WWMTT3Iu0Fk<0nN%rR6zwgEmz`gws>`uGT4;O z_-yQitZY0Pov?&yijYs}9>9_*dUU4y4Ce)f@`^}|?(0qI?@MoQkEv^jOh_>gjna&Y zwR#kyjrUZxagw!i#u(a3Lt<9XUdF&)*3=bWk=z4bGZ!y`5K;tscv%3mcsY3I+hmK;cfs)F zSXu~up>7S7x|%kUcVqCMb6|K6^31l%#{BT8nu@4fh-sK1a0(azP*$skB#>)ZNGa%w zV{jrcf)%Ezh|2u)G!+r)d#VQFh8D_3j!J6!C}H$pr$-^wJsDL&4P9i8)fR%btp65c zq16jcsv=ULhY_bOYi+OW=z-NY!>Azr+IJbV|CchyfAt88-YLo?xxrQ8y1RRgxNLB6U{x~NJ9SszQmpTNvQ$?RfKn*uMy2t=dC zR(y4>PjgFbe_sxbT27zHU^3%pIL~J|0kd-tXj9IEBR0@WH8ko@pA1;$CatVwEG_0N z%xBHcyyMKKFLRUDHr{M(rEG4;Ew8{3?{KJtotb?#GaI+G{C0b*?B=Tb%O@%lwLya= ztswXXLD$=B(#=)RL-T=Ktn) z%=}^y5bjPG!76YFvml3%q|Afao&iwoNTIpREkqt3=AE2WTwOIH#BcT9Ug6qG(dK&T z-cH5wLG$4r;rO8Y^l%3TkPUkVssj%X ziw_R+wzjg?H!`@~w8h2r8Fng*^_nsHVuBXIV1_bTf%GY$nqx3s2SzON%B3<3C5x)0 zN@^s_>dO- zKNZHR?;gBb+lU+;cc>$%rR9nvMR60P_nuiSk)>UQRVeuSyomjOCV}>_Z#&L7&4}vmR9{3 z(>9%bcR}mW;6NlP4~^RQ4SE-qS%tn(Pkd|E-u`Tg@ot4%z@E3LS);9p$!B(KOl z`JHogyxoHU4ZOR&qmSytP}AozPBDq#!G|IGHeOgmCwYWP09z_;JQ~KU!ast|m%9=e 
zH7yaJf`udxT7jp)Qd|vjE+AH1Ff^)Liz9wm6x?L#;$big<_5Obm2FNc0iV0M3C?j)>zL}!B_8rFlMWhNMT(rd1^##?m`PH;RL5!FT z-#-djf9w#4a0g5WQhcJ)_r)=M$Pg=Mi&w=N2`gavQSzW}1|`QY@y#>)FR2K4Vp?Hw z0YF3`w}N8#5dAHfV>5Fd%c`8?-Y5r#Nd<+#3-&s}*5Bcm6V!jTfISF>}k z=H?R@=Cd}}%iiDg|NMpW^%DTt&mU+XZprU&z$l9(rgmRn^nfCO!&bQb+TO_+8gZ>@ zQiEh{3(;V5`qB38>+|z`*jQ*ef-e>;eGGzb>Gz`5$vbV2}m+_*ZDC(*NYSK<>~0nC3%5UTHg>D@?073&F}Ms z|3R>@+XGQU3qA!bzm^f7wX2Y8fXu@XwC@u!kH`Gpf&6J1dd#WU*O$G&er5gs>oP2y z{rU6c`)l&`MbG7V$Mt3B)g|}@K@=3l`+Fsu8^xJDiWHOsjHNT}r`R?Wx(P1>7AC_WNm=%6 zj>p3CVvj-_`5Fo{cy? zEBNxc4QY`mIyosuD!fyVPLh#Hq8XUE1XvG#+%V!r$CS`~xq@jYt#qiz&&b|UK z;$+kFl?y90+q%pr7=A4s#wFEit(}JLT_!b6*wPw##QG;wUM~s7T5$FD%|)vW7#;?= z`k+lYlLk#Q3nf#s09TNd0d678LX0h_kVH0j=<<5W+GcqW+dg30-mTlzhVAaxfq9Sp z!&*d=GK5)}Ax0XotgJJPPLy}`=MRd&t%vtQkqw7!^^Jj?Q$Bo;kOk;!D4=X8pk^eZq%RJ9zJ?OYx&jJ1;viR0M^a8*1g!$3 ztvsyzFR)8c95@0rG^LHqRQ1gibxh>{0-ijKRntR=0&TEf=V2`TN&)e^sg=hJe1B=A zj#eDQC#Nc4o}{78ei{yC)*aCxN+-QR6qX zKt;uvH8wqIY;noR(G7no^XvsS?u}_eyk1f)E~iAVyjC~A90Nzz^n8iTLU6(LI$fOi z+nQU7si;#K9&_f*N3mxTDPswpozIHPJgRD(NCRFJn)|@870kCs0+cjsnzR~Pb-Vi< zXXjq6ujj0A3zioP=4Nv^v*|N4$t)Jog3ivp00$rn>FDhh?ZXZA=5lm!ATc4u-1ULDox1>> zW8EJL8(HzGX!9xJz@E#>S;Ffv$|F$NCy3uaR3I@`sc$e~d$aWO$I(B2WBvQzmj3vS z_36XJ`Qsq=4Q&`@=KsnV?U~I z(a9)4z0JP+d@QfSw00N`k9o0Y!id=mJckc30KnR)l6mB%aV z!AS9U?ApdF?#fHXR8UKYZF;`a>kQ1>ENpdy(%QC+NCv$Z?h#{r;8#nSpXZ6K1vkNeE=h~ zK)~#{#W(}cWP6T|+f~#lzRLqwVM!SPU?Ik47fWOpOB7T{z;A3OV!HY?#%QMGF|*cA z?WQ(tM=x$*#DFKU7#P*=9#9AI3uOJSKBM$}rDrcCo<^ZFa&+4|yy=XnN#=9bOc-ZA za&_$ucO@1~2%JRDY$}7EJTmfzPD@`|s$5?!Uf~wava<#U66zWvqZ08Je#&~TKmdS% zo)l=pa+lF{lL8p~2y5Y^jB}8H%2MDFvqh|7)~O>lnjMHEsl<` zypAwNOGH{z0IkL^tI7`(1;I=x{#Vb`KRrSya22nF6*95JTH$pJO_hEGPac}dDDh)) zV(R*W5M~vy0Aqi|a9$b=yz}E9A;xNGOW<7<@UE&_2I8{Fn-s^~m%sp31DLlJ_+Z4A z1cndL3;%;R&-(%}yNcZtLEo2A7O-}d@qeP7n1b)@e9mOXv1VQkjXWlHd$x5v>hBK& z%`!cYh+Q5HEmjqExZF|49}v5|@{8== zr0Tp((8$U+XzR4^BU^Nk^jkYYEH;6t3i>-5u;sN{O>GvenOB>ed26cxW1(pmfmzUQ zKf`%GH61iM?magCU}`#Oh7&Wt@Oon-_h7&B;;j9{E&1yw8UnC1ILCg+*o*GV^KNiK 
zjhxfd+U=cpBOsI6YGTrn%>hl*h`%g+YWg{o^@24My}S}LzZfya@*10P=pQ!aEzk!? zK~!mbw|Xr>p}GlNR+GnEr)Pq9cC#*!1MdGeuih0lYWAZ?M3V2;qP*31=TMV{5SIQqtg8*XOfx;&<~C z@(%@n8=Hiwtg{%Oh}hq&`SgMO`>&MWexZH-IQ0Hz0OXN==m)XSpZbxRw)^tDRd`uyTLD{P3{&__$h&3+JcQ22u?eSfhC65B;oQO#&V0!HV)*LX>$Uatwe@twER(mg`i?V~FwJ^3PKh7r zi^(hWa0*e@_xOf^$W98(md+RxZw2=-UHd?+o~ty7t8zf&98f&k7BIKSrM)^VuR(5s zLd;g;M}UR%Dw3cj%d^4i;=g4TI!Zxw1+B8kG^^(<3kd<9KLS#_fFJm#kA*1*uyIU5 zPe@KjNEQcrW~HzKD22NeEXcbGN&ZhIrXWCD8kDCPm}BkS4YZ6^dBf!QF%?K;`Bb&| zHS|Ov#wuZj#ANuOh87O1f5BKD@WQG@X+=IgV{yEzs+FSxR!dX@iGSf8lp=`W2EpCR z0+R9|6jA6~*4BRkvv4?vZ&gU*9#%)#%@3Pb=+oE~IZjPk;pQXt&~K*Mk&MabQ_NSB zQ?JG-p@YMZ28MmwJM9TX349+-6Zw zrWYP9`Q(|{>r~DB5)(qZWe3Ty1sMh!T2+ywQPZSRQms};FdL^mUEj#u+$dh*f~Xlt z8Z0cP%*}&KUFt;Oz%XzeEG)$z92B3O)?8n;e*W0|>(3*<{ydIg>?Gu4KfWJ*e=~4( zNd)J->*~A@maTxz*GbjNYSQ4aTSt%i1jB7@GkkOV)z((>%4)&{1K6+X2@WkC4sBg_ zog@&n)j=}s?$;-enNn!L)-*6;-aBa8(xKDPs?pkEGC~wxV{~0z5RKiWNn_i#?KHOS zq-ks$jcq55ZL_g$+xBMkcMRD>UeW;CrqFGzIY#s`PQ zaN5z)i}_J7H1tZl=ij;W*e3dnb7E(}`h7CZ)7ew|?LlT3IN7%d8$c{4BQW!LvGCxv z4~y5=7ll$kUaY4|shfjf&V3GoLy>qL?edq3G=?ly7rV1%qts>6%q zot{7}tp^EVZHd(q9l&u@#`crJSk#>pY-Cy50FQ)2`$nhHW*4gOQllLmw9JF?(yk_T zeC9{^(Fzx38Hqba(ZeYF$@u^nF?wd$6MSl?KV}NVx-k|`j7A(K$MjG=`mZZL#}zLOdA`70Oo=zhfvH@NBzxsir<(q zXNH1=@`wD}$S3%|pu)~PPFnUw1X2&C$H)mcxKn3)mFo~D_gJbMjT`@*GkkxV+gN{; zbu8Sm-!3R^Uv56=II%>(_`Ad#h(g<{l&~VYjPrLR+BLPgu zkZ#AXBtz1p@TJ?&3(hYKjln9vx0j&FhNt>xeIshG*C5G!;(ONB=g=A)5^G%*7da9{ zcM%P`(4R#?RyI=K;ztc~gKsuKoRJ^>Fh)P*Lp7dfiItvI8iR1x(U=|H+uHOd>*`x% za;@!tvAa64rpx&<^@s0Nb%@dG+O?EdA06t;$wDhy`hfROW|5Y22l$Blt0;xI*+fX% zMEpB*x|!!~%`^gtj%VrOZ>wf^hwPE>4imrz$Oe6nV>(_skuG98l?<Rpm5=+w}uiZJ!+aR~tQK1^}^Sp2-`|582Lnm~6ed_UnF)V?0i z{%!fBz3*}Sj-9bbQL2lL>TDz3+;q~>kFR3zAms3LpK0q%7%|{KJFcbpUP+D_$^^0e>dCRh9GXPuWyfZIJzZK92nS>8|Qg3s+RKHJ$Jzf*?+q)dV0?|I&VUk8WdVX zc}-{f=V2hed^eN$OO)hH&P6u*03o6PI&Gw)SMFGgh30BuL&K%k0Cn8;1!2sL37};CIfGMxCGv_Y^{)ZXe0xXW zczyTBVg-TabbhoSU0FO;so+rxxKC~_%6Y3+dm@y?1E`qbj0g&?b8N3_bBA1Bp*UhJ 
z)J6LA%=BiZOj^%evCeBqg*MN)b^6b-GFi{ptdinD&O4!c$1q0YS;Hu^Kpi;$l7Lod zk%H%5WDZQKMR_Py5^s^1Q=E^w+w*sk1zGO_gwA~gzWsYXlLuat`%Ke^n>z8!y5vo2 zxxCX~hMZ?0rX+S;8lo^b+HT-Io!Qg~iI&>1S|B8k4K~iC^BLvAJ zIlC`Iv?ZwcpviR2MSZq|Vxinl2%2@b_Tf4#ApIH?sQr}o!5={({p2Xc_%VYgG7y4& zQ^CLNFlk8b^b%1~w3FPfRtFnR+lvj;S^=oY6j?E!?No){s zLI6ZNydP2$NMtn7&a&S2tjrFP#+M>dXm9MJ!n1VcqsAs(ug?DDWJ_|*<}Ax;j-4u)y=vIX9<1**FBruIofDglMQrGn2i zGeNB3v)B*ZVCZp6)V*D3V2b6z764^yRk(rFg$F4Ha_YBHaPbKclTSM5RSR>tfmjgI5OyT2FPpPQbCxW&QiR~ z)h1H|@x%D853dM+cn`i!cWV2HW&(N{Ao`7h^tzDgOBDz( zUGijK`<>J#y->~h!5a2tUz9CjU9`Pc1QIIX)|PlZrhgX-%eHaaU> zUk143R!5LQwW|1iQ#-f`h#v+@Ojkq2%3mN02!dcl$-;^b$_`MiyC#$(!&G6S6%hW+ z!iUe_BPBcg!no)Gyq#+Z3D}{|r1G+6(9PObj5VH@5_41iZm0LKTJdy_Kq!W{9XKy3(*%5?rHf8?fQJh}z}i2YYJx z5-tYQA57|W0fkJO4TXuS`BZ9CmeE=rCt{|U>tz1Yc?VC%9mwB}d*cZRfGnMs(gb53x0He<6h+$SPR1WTA0$YmY-&90LGC?7+Vmg_FQ4WJ;|C=d=$4-F?c`w4+IAC zH9M#E_V%gP`r-1(eVH^j?tWO~DJaI&&4RthkN)C9p}kEazCsI;Zo7XFBR`wSW4H(_ z(JyN*TMO1W=m3vy)WsVj@hRE$X;rN)vECkCxh+|>EzQM$$i}BdCWj>!pMo=w;Sn3g zYEC0z0)-@`N}pew(%N{mq6q^bw<|kQMb&LhDxu0+G7k`R4z`;j zhX=qqoS|Tlj{*;Ql?A^g%~6cNa1XNT(qcc|mvV z-aZYQT=k#Y8cN0X!7VBA`k^E~!+QDO^KAy43^Ohh*zHKelR^Jh)z#LJ&Cpkczpjmk z^ZGvIt7EsZ5#hV$75+s^{E$$VT$+^-P|u+@B`PlWNB`#|zni)wMN2AfK&^x87uPl7 z@q7GUAR$7aN$~VmPBQcxN}U(CI7(dgPZ&B-aB#!9ag|FYCdaa{<+y9&p~Zhn(pVG* zXN9PE#=6P`y#^iH+GA_$eu1Wwr>hx*Maxo$bAS@D&mn7{_C#H(FAla3*>hke{kXU0pdX`z=}_({;zC#y(S_>eS zX`d=?pQ=sRhwwSj*0I92!GVds(ACo=wKGal_bgGDXr3bEvT$a*`>>_4iFtjy%KUn6 zPV#+QTi^24e{Sx!_jybEJX0S{kq0-pa#ZT<)~~78ZfRC;>elvkSoZN)?`qy?XjL0J zFmp(8WM8~PLMpGKZ}WX&z6AT51e;FnNo-s*O4R-|E! z(b%b}%`I}&aj-M1sVSp_=DY+j!D_6xzPMc~Z4PGn!%;Ur?1SVBXE=J)=!drWV8LTPX?aP_4^Nc1g1Gt4`K1Otl! z8HEh%`e)yY#QKcLzXQfxSNB7J>?O`UI>I?+1%(cRrpY)Ux-)71<-i|S*C=z1}K zppnB4R|W~$iD;n)=#rOGW9Ir?Tv+7gZXX}F=p0F~e)`oW%>9~U*>r>#iX59#SJSVw zBelEo)2Q$7KcARL#FBFpSP5RXU6?jf(f|z_E@Hz@4EGz09MbQK{yH0=qaYl6iXlTZ0Xu|J}LywR188~(<<4$+C!ayL! 
zv|7uvH;FX47`Z&{JtFS9KJBqL?=5lcsx2887lRAgN(6qkd#yC+IQg_Xx#Jbr$|fxr zY_J$i^O!Jm8ikJ!MXQ87Y=^lEtRyZMs4tYR3(e~5Ag-_b)VAle_{P>Xf0=y|zHC!k zdOlsOdfcvh96q`{E-tn&G>T5X*Vn>M0d<&$IxD9YC#RJtHY@qJ({BhF3B$A@REr`7 z?ER=EReDe9;$9AIt?Ic^StSj5?G4$HQQ0`0-1M^^znEXE>W1(g$n_k^FY8t(DZ!Dj zLJinbO0?8s5)pV!opqo1X#A9c-k>f8R_;b0I1|XW zL9h3~_BLhZHwT>}XWhaFUDJoG+-xB&XohbAEm;T7S&ZAZoS+>G0Bxvk1SB8RT@H_P zo!d`+-X$^QBM>AG6=djE3-W7~r4;NMy$A%c0(+mTB}l=K1TA6bN7C+IZMy3%_>REN zGTs{1NWTV=;E)walo-u5tkh>X&9XcC4yli%qm~B zsF#s}+MA$&*2IPa2OltTdu(XuaS6&~9h5Ylj;Bh*$kGKBJBXeMr;f8%nldH}$cG>Z zhIQjTY<522s6VQ=-*J7yfc=9SBbDraqs0rTUWsIBsg&DVw6X;uMAax@pI@OvZh%q; zynpe-Vv$`djAnHXLph zv}-dC!S`L9XPJQ{2Eu&bI;0DtmllXKpvVC+{k<1sq$5LW@2qDFhh~?HYQ2+c2bV4f zz>7yYa@T_k2s$H&(*<7WLu(5-=c%n%WEAQx;2L9)KS;g%J(R_TgDAyjmB29)g0Q;uElK?Zke5_BrpTyLgOn zaTMzWTs2zAwshj{sx7XnL3?~YIX%1=8aq|lyJG>yXNG!+H$GCa>vL^K+B88H{6?n!DmDr~87uY8&;$P^{5>Pv&| zZ+73EbC7ID-Qy3U_J#eb`m+MVE;_>=CXOE_I4O%ETdRlN0Z0TuFM-A5d2^N#mpfpH z7bIU*Ax0c-2%?1)y$NEtZ2vlA&z;LQS?3?J!x=N;p{8Yvn-hW<4)t{jYiYDH7_a)R z3=p4m2IkIq8Y}~b9r*Wk6Yfzge(p^%HZv>MoQyWDHRs)pZ@6044AqfUcB8+3(b#P* z`8lrrc3yFES9Nq(_4u9I*n%sRb9;fudW8dqM?j)-CKs=$oZ+C|Yy{vRR9^F!6xuHF znT~MZ!1@p9_==)3O}=IN@h6`WC5?hzLq5!-GDGd(D>J}OPS_HN?}wwG`GP~DQ$ zZ7$(HL_z~<9>0)9RTRN|8n9HkS?=iDRYbUJ#P1edQd@L2->on#`eQ56gQOX=h?^D0 zL3&m|zY##_?}7EA^KxkfaT)hi0?}%cxkFKNSP@@GmgMdfuga)9k0$x*@?GRFjVL3G zngJ}CqdZu@Z)kJ8Z)d}i;$H!NdTK{`&@rS|Fi6_zUqV&emurJxh4kBa0L2K?)X=&k zehyq#M8*`7ZKwSNJNz!dk@lU?j}hKZ$eT`hD#F_{7P|ho1G?}>p�}$hwMBL|Z%G zRdKKRnU}(v0Rhf=9a(K@B{>C!clj&ON@712m4U8p z+OE&pN#(U}4NBL)Q?Y*U;$}VF+S#u8ns?eCI$Vp4 zJe4O1@Akrb-Q!pVM6yQ$UtQpak6-U^tN$k{z=uF%FVeB^(+Ac7vnPVxC++R>zU5$y z;*>$(?8~=nv#)%@=>lKxh!wUMoQMck&QI_+9U@UnI|S_ICH6Wj%KFAeV`4J)gA4b9 zWyysTx5MO{!29CIy5;D2=9TuDo~BeTY=dcij`xN2Rdo(hx7YgD!(F33Kfd>ilOJx) z;Mc9h<*k*QY=OOSF)AutKt5-A5 z1IEo2_aKj(B4K-E`7KIESMPzlJL&OQ@#D^1H<#k(VOPGIW3xlY`@`YnVXTOw+RVp` z|893>cTFAVN{_+UjpO58bLnNG4%psZL?z?G#P7*`B7zQX4(xxk%v~Mm4p#2U%%{fW z04sqC9^v}A!=~Xyxt>YA$6WgD+e!#h&T 
z8=hG5IjWuR{lS&uVSVe+qrn6RR|G+mbW9~jLae*?LA|#}VxI{5O*Gm^R{I z&740LzBl!$7AMjNR_xKrG^OJ~&{_WcnOyAu`iI0^W`Rjs%*V17a)>0rCqv-6&TPw5Nq`)-H^Y#mf!L z?Ij0<=@G|!0fHM!F@vhybn59>FW0K;wTkEtsv^+z*pF^tKD-)S`G-tc$q7oD87BFK zV*!(rtLm5h-G>33p+|#^_WGA4&?=UWQv{oS!;u!D1FJE$Oe_m&;7UNG)po+7tc4@~ z0ZK&iuAS)xxrzWW@W331MxuwD@IlWkVymWvUSu9oU5a?VE7ok^c^65i!E`E)9`D8# z+d~MW`yO43n#e2rQ&T2PTH^0nE-OOSF=oO2xTuqCuSgq_^b)G+MUbh7B;&fMtj%;= zF{NWjO9lfIw^V2De8b;~;>P~A#<`Ay;hOTL)FtzIJ_AnPUtIOu-1XZL`&nRxx?0X+ zqbXSZdyknO4^hO@YD}#S5tC>2n;Vhsu5n+_QV(YwmDTzWUTy-LzHE|4e=VBZ&(8r(f@|7F1FA9Q9@$rD){D zuVC*!5`MBDdIH1s3<)WMg1nEL=OZ~qDTn{^<)x&NA;;zO-2=X3Y{{{O&)GCY#UJK=}>OgMQPIWS=$5fv*qMP%e(9vU0dqaO2&)G3jTdpD!K8Z8mI zCK+SA#Z|PKTfVo3q|Q%Xb}nt!cF{J!{;f)yzj0Mo@dvl1I{1y{^5#~HFX!Tmkp$fT z|K=yN!;7;)B)cbp4!+lu2lfW-4j)*_^D%$&&2;Vh0{sI-+K*RQs9kzv>E zkBI%x5^zNaKUUl6Tpo!D_$GayIbFX9ot!ru?H3H)Crn*>?JdTwfaNvnVlWUe#zI@6 zATyB=V#P~)X}J%U)>$V+R4&QgoTxIOIL-i0HrOa@Hlb5L#QsArOHVCM$*m*Rm-Klt zKfY!#2@VjdT4(sj;G=bVEj+(NPiD1qwZH9X^Yj8F*_4Y{*~QMcaQPfGzOOtmD5O5h zKG!R^-_eiPioJ_+|2~C|vpmCNat`^t)$Y&TR}xAxDdf3wA3q>L76|lyU+V5}wPig4 zcgR=@aECPOv?psX*^EY{+<(J>1Jz2_@ZrKN9W)O!L6dqw%BZ$=ySjW6o-q7^X&@qr zXP|V#%wWgPU=EaFt<){k$zFVJ7@RGeTA8?Sw&U6Qnt!^)L-Lrt+!))~{HCq(n3-zm z@^JEqFk5m+D_-KXgMzi&U4#&2B{#v;292A*;ykWn(z6{-RDK~k??Pi#y1e$b;E0{YW9TE!Pp%JuMafixI3gu z!I$d&N0jkCCM=VX>wu5jt{zihs@2HYKPvGSFviDfUi7-%Zo~7UM|#GK{Ii6MOjLyG zGU~t5aGMy!bOgrXTX|^tG$u7hFs{K3Rp zgp1W|;;;G?ZTu9SqNPneX2yN1JC@b=SgLU6j1s6c{-YW;b{E zSnxdUOZNKPU;d9Pj|T{K)-d(mUc4XgJD5VNf10Zge2(k%3ey$us%PYmlskjI?=*fwePwZAmy|>BG|a%|Y$al>w$|QTVm@QeQPAuPF8I7(wl-UKcHLsB8pw@8ORlfr zFT>C?SR6T23`_F%X#|NDFImY|Sti8Ct~-=MHXs*2IjTMe{1G&&BQ(miv?T!7#SArm zkHyam6$KnB@*H)igL&kXJn4oiv>r!l%D2?xt?6SVA$hGiGCKMPA|gqQ9PzJB>6R}- z9K%*zNu90EJt05uFXgNHqs))~>mB=cshY=K&m+Cvz|fa}m{lh)^@{|)HXfh2w22$7 z8O@D90vqIqwz_ly-c@@WGSG}iiKE0#3i>kzG6sL(T<2~Y|2srmjf8{Ks;hCYz5S}) zbGr*z^;ZD~6?5-(Tt>lcSlCM^_zIwgt&PvGYGU;Au4bDD=i;YGvH2nli-D)ZIi-scw;{@AtoVTk3=|EyakANaP1IZh`c5mZT9P=mq%?uKSW&!pU 
z3;FqqaLPO+OaqoPF?y>E_GxL}iM!*FmVV2m22RtXVS&^_3s?bC5>kbwX;NjZSkE3a z6Cx^}>Vx6a_^phIYx?8Uh(9-j*guC#CC6q(zN!|(O+ZkTgv2We_8)`u zzmb8&filDpQK3*ubDDmOCstL0U4c}a5j{%}(_k_Q?EhAtJ9$}ZX^1=xg)i_ zx-c+QLo!jvxPEL~@Oaul!>(H2Gt^Zi(pjuh&xh-oYK$^gZX&_vgmHQE+vWyRR<)voQQ zfltQg(?QPo?dF$*xydX2s@`(vFlS${&KMOm#eAtNfR7Ef__dXtST&YoXXq`gQcxg%%1!XdrE(Oh3Q?z$^q-ikG)#H*;O(WNQC|^ z*jg-{T)Jv(g|)gD0I^GIzj=xrfx{1rnZTF(^VN6O;qy7?Bw2Quf&U*sV}2WD%F2MouD$X>>z0shFrj5kF%^^VLaF>PZO$28%&Y z;g{6!#_Y-1X6f)&S>U6Gmr(S(tlZy<-$IYpp^>O3r{-Lr3LcS+@%gAdA3Hu*<{qB} z9)x8shLK3B^=|3urPUYvpe52CDhbN~9;%yDJQLFeAKMRRft0q6)UK}VhAw0tHk_U_ z<3(0%U^ydkM$3ENyq-sJ&M&iG$G~S9OyG@9;B7yt>!q1U(<>OKx8wqrpTn+KJzx7C z+cV)?pK%m~4ef~K8Bs;fdpodyCizhiSSiZ~!JB`!TW!Uj>{s0FN^h@z|FnAyykSgV$XNI05)XmOjiZor8c7x(YUHU2?w`b2FbIi>=U zqb65e5?h=GmjqpvgHU*cLsci73ebkqn@Tb{r>?#d4p{_~1zvJy#q$WOL^xc{L!dH1oPI1dS5>CL*9yJfnhWV8^uxT(;!MwXBjtxlo&x zS0ItEJVVq4ernh9&mWPzZyQ(-&1jWhQqsszv^^SS+LR1V!Ha)?juaRiXVl;E*@JP_kfgl_P2M8~HzY zKkwecbG8Dr$Xj;rZ$oPda^yeXeO|^&vh)RVZWS+|BEQ~0ufIOM;NaAJJnwV1a^7Fw z?yB|i-#^bFC6!;??^gEm{xoK`L8V-C9!yJ0F;-) zCe(}FNlT$Q1ExOYe9(J4cFM(oV^Z&E~s2PAW zx7s&TfU9C(Wv$Sp%;cav>G%Cy-2xq6jfWXwGr(T>7OixHkdLh$2JS!;6Q?X@jbp6) zaS+dZEQJ;EazHJsbJ44)Oqx6H<@HK!Ba@R_i-lc(jMFxH#zOr!|1hO|o(<;-sdaK9 zG-s>A=dSDtXKL%^_5>tvu7qGK$IHz#D0Nu8IwzTyp?=Kj*!~2>gcwC^cUElFDzW`d z7DG)3T}`L>C@WSCQiI{Lt?sg|>wt;bFiaH0zmeZ1f^iN$l~`kj8TcY%xC(lj8-=sW zW{I)IMussWX5T3tp;i~vQNl)Q^RJs#e^Nh3nwG!>b}ukX!PG2FC;{1VE}`4P+h!+R zE_L^*`%V7AjTROQ073GP3`WQj z%)&%an;J@=Bp}o3~LWv#Cx-uC`9SUu>Y-O`fpMG)G6 z`OSky&@h}Aq_4=y7`-MkFYQHqt;5VTd+(2ZrhwKY19jo;lO#e=3p7crX&>@b7tnuV z{QfZ`b?T%CRgyG@CT!K;E!9mHYDArp^Xp*5bTcc6gq8XgSbL^gh`$v`_A8ulvf&{U z!W`!tXf!l2QS+*mw^JMrS+4cJbz-=B$RVC#VAQ3r;oiwa!Hqq%dt?j(;P(O4^VK;VJ})gdC;f*dNN@VSD@Xbz zUp_Zm0{CA?PoZBQdn>*^zPG?zLitQuy|8@%p7g%6_M@>F-QW_ ze=pn)m7B+onE)q9$S*=#uaF-bNb{FmeLfDSb<|kdC1b4@8k)+po$HM!>FJmA*S*GNNBeYzigtSzVu$gT6+Y3N0t;r$+=AFAIC7BLEn)pJPeBC0=ehbHBlMT3=d z{dC!Kw(WN`-Kg~xY|j;D?gU9u42%33TjKyRW&sGPGr2u^%*Wr^E!EjW$>ZzS@EAdH4lk|%zs>oHl+;g_@Ti3_*}Ow^MR=|VH0L@rXE;mu 
z-8?3gXnss>b44C?Cf{TehY%%ke zb@WecpvG_E)!K@6mlvon9UtG+)tb_iK3h}m$c7hGBphchgusMH6oE@^$2E=W_!okY zif67V1YXhB2(h6kK9vN)3PB!r(Z)NJzp~eu=0^mc2@!=FE>uHe%{4lGF|v)=XV4q^ zeNnoe+*=(A7X-{&Bfduqf_Ml_u}=uS&PehXsIsb%HwFZutLh5@7+BDv?`m5i>Wdk8 zFX0JK#z(mb$gCDEq6H)>+V(~O3xlBg{wK#!pEgq;nyE7^i!~XJI$K#XXLOMN6(m*# z2qVK7WQv5Y7t%F{A^IkC8WfxbmML<42&R;>q}m-^noDpYxp2Nw600bsN>n_$v{Ztx zigS<3*t$^PtkC?T*!T#}(RpCKBU+P-IE9(T3V{SfnIa50vQ~zVPDZK=;Py6#-Hg5x&EB%loA%pdh~RdaZdm9lWn(+1&N=a4c8<$vi=$u}&k(NvPAu zgKB&g9m1|cfF6Pt^AF6FFrl?c6SVk%!vU@M2v>dz#m7zYZEo!*$5-q1{ce&#f2vpe z>jkyrDu?0o{o^GEm=dvf7AVlY8GMuyQ(~uAB_~IUnymXRBK{g4Iua8rIQO%?SgO4y zQ%gmNX6IO9fa<0QT(ppkCQyGsg?n@s6bD)81lZe|51N{HAv9)Z8JihvJ~-N-)@Pu5 zbI}iU>It(4c)T-Jps5vJR>m2cBulxKGTr7q9`%E1*Y~u-Cd_T+Y3DP_6OW)a}85idcKX^!JpG->zk|h z47i!;>q}b|*B7+cHN~ZNrBTj-W8jyJ%Mk;~p9z|bDLM*Cg6KLp6Oc~gVAQn4CCdIB zO*Sh5(dBE`$j=Q>*eY9Qk(3BgYt!|{{-y(%DCFe|qr~x`rnlTr>Lq*{hVC$>q`6lo zJ@{quEuLtu|H+Ut=lBsVM|_B$C3q)BQdB4Ow+)v3Al_Qr^mOey6Uxh*rZTP^m3eI>yo}IRa1;ba7zrf3 z5w=VG`voI;k6{-h{5z~$*UP9s2rVcAn0}#H7!8Qt{whHqgF+zkCERCNAVQG-F{K|T z0!Z@T+_Ui%wrvW*-H0AqX|0DfZ`1j&c}xGB?doId79@1^{bwGdzS*tcO1C^JeFo^? 
zGs#{UnEJY@Mup4Lg{fJIBH##&dqHF(zS6+FhNM5?Bj1`Ea6^hotVdK>YKh`hg9#ms z^3o#&0macyGU(R5%x*PN4;rIi!$1FD^xr*1(8w*rkQahsRaiCeA_CB*HK8ki1q&a%gxtnvim?r7YS)`{HA7hak?nN~^io8L(&;-Hn3?Ywz_bAUu$k|K4#_ZhV{EJTUyrYTGLkM zaQ^*m_#>ruV?4s?W@M`~Eys^J_1*AFf$2%GA`oM>7rv*$6_cD_q@gffVM7#XI8jw4 zkJ2Skmi9C{TQO8UueT4synKvLT{Q`OdA$!;Hv{F%$Jg85&ezAwbGI-5+tgLe*WH`< zK~w#)aqT9A~+gUoYjUiEfwo@73p%e<#1|p zwR|h??NKc$UCAW!GC7PAtMKFW-1QBzvNO66@eK_1_a%-xdZ6a$NBKWZ`<$8QP zzC|vvhFD4|8j7eu3~v5l6;->QgA+&h*Gk=V-A<^DkD<;$c9I`ecQT*l)&Tx)pQ~#` zR#Q1vQz+flq1M`_NLBrln~xwxpI&=O7%#^`L(3URw>8$Tb+&F-mmf8^ZdaCXx0Y}9 z^&ZkQ?N3kH%I#(lNHYCA)hd1wWg z|3V3OyJjIp3$r%`9HM(4r356a^>MpT-cXJ9?$p6rnhDYJwfUD;plpxSMOd^U577xO zQizW7p#Dgr%~Tg3p(3I%+3hzH>-!N@pe!~EZ^{x01Jq5+bGL(nWcKY|<~eY7f_Zit zo>r3%_Q6bS<}(ZC%bT{G?1P+~Mmby3-Cu-XI|cC)F_GE4bRN$w+n?amgzgS>pVypT zw*9`pW^An%?M?as{bfbDe0O8Q<}@%@sML@0=y2oX5%QqnTElXS5&28O-p==C->I<* zfB4WvSuf{cH{xliX=_Jb;A}^+F=PnzP*w$EBR#aM6M7bc1K-u8J znV!YU>Lio_3i8tcDxr7<1^G}aU`V}2-#~uafpGQ(H6;p!)?tj)f-Ba~7bI3glMX7k z%Lz&dz$TXz^`?Rg_e6#GLqQhFY$#{TC_7eNEg)UHh%UZN>V1EpcO~e7E)!>0jq!ht zMSynnKMm{6!d;^Z%0lw0{f17j1K@-75Yf(ZQ92U)7B^2#nwFmykP?LB!a{yiy4@>& zMXHL-R+v2omPUfsMMh|rn9klzjywUrElLi>a)xPAiorhaf2c}^sJoYm`Bk&B3J3ny z0`0jI0v9}7=fI44k{_FU%A56$6+l~5qcHukOtl*an|}^r?-Jnlv(CZ?wz6`}#hvYaXkz{C zDe`JvrrQNOkhOF59x+|cUxlU{=#zV5ya>EcKYwnN8oX`Wsi5~@?{rz&Y((TP`_ioA{`Tp!^ZEJ*OtJ?E+tYiVG z1G>HE9N*+lMy*h*W76U!q9jof|DZ)nLhw@4>vB?QBTzNwEwKJAKKwz9QrMqQb@|C` z#=g5Mj!6Peg`@nNwj!tkvn2>(p^Q87Qz+)fvbt8PIvcbk8L>PB9cbIhDF%z{L_}nC zr;`2flIY6YT@f4oV=JD;M}D!9E(u2f9G?*oqLgf|rJ&mvb62NqWu*zBLWzHL7ShQT zs&p3wD{U9Eb+0wH?$=gfmp38JuLzyY8Jw&s+?@G-n{6W5cp!2+Kef3!@i?q)tnzg5 zw5?}ot#h%Vue|*4bG5^C>%Wue2NO{*;Ez=}aIIYTq|$VgXw(wU+}WdVr_i9Fg-_J3 zLEq_@svEF(>tu!wbRlSm%q3=m__B0M= z>@;g;Fi_{aY~(cSVLbNqjD7Zu{dS$&==)Dg^9din8323BtN@aJ?Z&3*6#@C(D=y2e z3JUW+v%`Gd?Lu8-V!KCW@geR}N@lamdn~)&R#s>3gzZ(5^V12}+ojK38+uX%H zMa64Iod=e&;pO5MBl_E)SK;nt2;^n*0}B)tU8(fwmNNEa;X#E)_R=~8y;&+g^Vj5b>Vne=Kb*@gZvP88V zljTKCRRzn)zqtsRB>4$Nngh&5R*h!YwZ5d!4>K 
z;T1i1%?-IS=vCI8z8rV&SXYacvm2c)FJfPRHJ4_jhW(MP>8EmYh3P1TqfUaG9Knx0 z7VwR2f*05Zt&a08Lewj=W`2S0Zsu*bF;i+$mWakH%hQ(+VyJ*PSJf%D`O^U2h0QhR zpVvf3cf%twS42bE!WY81D88SZcaO42|5n@n32;Vbr#l|qMnzH|)JKr9z!y2l+^h@+ z724DSE8Wb&Iz`PLFF1=Bw+}r$ED7FtjN5&b5ig88gxTeg`Lc<#xH7f7o|Q(Dold*F zLaRAntvg>$pk9!SJ}onrI!7}%Q6G+RaYTE40Lps-e{Vb7P;Bp3#sZIzoG&nh58R^$ z4F>DL&mXnUqFQO~CtVfse#+k6*iB6lU$+ifH}EG`^K+eb)uPq36ExIAPd{99-fo9A zHehq|UT!XqSX>yeHZf9{o!8&W-bT`uS*P{rGoi3@qE1hr$7UCtol^WdqqW+hG%{i^ z8vV}Egv`-^%F)z^t(AYiK5b~DqZViMBSmjZnqi;P3~g+loko5_leThYf?kV=ie6m# zPW-o}tNGN~zSj6dec{|$Pgi5p4okg(K)nGE-w`YOiE1B{K}%%0Rh|i2Zw!Xo+3hH5Id|m zLzkga>Hdm161W*@v z5Eld?`1tjRF`6>vo0VB@D}&N>c!xCBz1C+l3*XG_sZ3lVR1CQqE3~@vRP8M0e_4#T z+5NL|8ufCTwQ`yT_!@QfOErzk7%$d&Yz|4(r>3!6;n)*+A%Ao}Zn5#IF~uB_sM_jV zs%Z4q!eN-u_1>(x{RTmG$%VGEY%byb;)Xy#Awi5QL57qn#TIM_x8VYl3!8dB@NXYSmkT=jtKD`))8HAO0R1||}rk;!&C=^qfx z_Ugx>zka(eOZ8RK{7s1&yFooDAp^R6dk2dgrJ5fJ;{~||ke}N6TG6B+Me>afEVn7G zVMi?U57?L>s6)0q0%?EiimTV!d7&vSUj5)UMHRw}{H|^o%O{6HW|`M3vajHWEHnNO z3?r*lThCT)AkQobu;B`HxA#f=Y1wGxP16a`k2+QfiL%arP@kXET%IzVAd_C;P$Od! 
zVWG>0ODV}t%^aw(sH?|cZT|InZ2-0OuJnCxIXOvaisYGqerr{(7h0#!n5Fj63)ga~ zRnp<6qR&FbkA#nmNBD#OM~0%vq->xlw!c6MX#07vH-=kxc3|7PuzYS)n%K^LyAPpd zI!wlm3lV*KknNtZUF=mHtz0XL!CxS@8}NQ?*n6q3Dv9AC;In9Vt-jsYI$Sl|->;yU zrK{RnD4AI(={+lPzAxoyuslkp46PZw*bp)eFXo4%Dij={_HP&|e>BivU=U^{6^GgH z@yj8XCX#@|atDusJSfx&25MLwjyZl}OV|KHY|_izw~@UefU)WMB>gMfT&Eqs)fY3U z0+K0&0^v+cYU^6|Z>tEFmNxReGDCUZKyf6LDC|oxY^a1(XgzFi_AGz&EsrO9B0ONd@{UqiXE%~`)`78&&K_`q%xeV-{=nia$(DD?!Kz~A6icKq_RwB`?s7M z0eQk7@hUJQaiywX4oMr(>)4Lq!k$7L965A$9vo z8(Ug>{UM^jUR#UC#PVb4FI$XkF}M2;<|PCV2A3mFeiKuTh#T~e{``XR}tA*ql;pk#BxB61Z)ypi*d;Fn*-$dI7^ z?N)S7Fd~8mTr$!$DgECaB~8(HlpXvt$m$iqD8DY0YG5%i+uRx^@jD~13u++9fT*DB zN`;337ErtGfInV{d)-4eMhYgiC_B@Gw%_j;D`LR-9Zf;gSsPZ;^duUBUy@j3V#yi# z9I|6Rsc?_<>re5haF6IE$th@HH!u7bDxN)|V9$~xF%D!|#EH0?f3~&6uMa@7ZGrbkZ3>knyJQspgj5$mwlwOjUQn)oP^$k{fBVgC<5Nb){_RaOG*px0*I_`#yk}A?UFax6 z>jOw2f)H9$MU^nM*?FM|@MR;yhA2!ai%tIqi%S8$h>gx#{=B|(P6y*PfGeCk$MX1T zIDMPGH(NHh8?iAS@NwVqx4lyVnJ8JWqi*linYYWATyd^V(tOa=XddKnlrBXG$< z9k`dlPbT|4*j|B+%8Mu{UVTjS-zO*#0<0WiN(R*n^JUp-)#ry13w_ZnO(Fy2yr#5b z`O;XlbtD>`)EXS9@Z23hebV7}(BZ~3|DPHBM*|Z|)F8F@qB^1uM(cBjn~7DdlvjqRpt$^jy81+1-Iy7_;?#~9`>X1Tsh8)XpKa!k z*Be9|!xoI<19kB3RSfSn-tLdSiE%j0&+Fd^`fYvLQs3Sl{zq**z|D(U2Lv4Exon@fz-cNR<57H>7R zEH1O_4J@n5tYJhpIvv2zD?^*D#)IT0cI6~?;pb24L|1NrxP?I*(V0jEKC#$S?n8rj zwKIG#i+N4VtwHxn!v#n9EnX1~jhRvW45r8^yWmNF&Y6fD2Pr~{YN2Jh)P#_V(F7ME zH^2~88be)f^a^D*NJDxjD!WHCW#sPtJLr|@XBV02B&`7e2{$Pf;brGW6K?mb3F&a- zlqF*jG?9puw(yCq4K6yGQduHqpBP?JI#$}aSp`99%0tM@OkOna!NMjPBneZX7YDSMU=A9eYrvAb zQGEzM86*=Kdadh|gWH}ZKePG8@0h7n3`mgm3tEw{I9CvP^hiImNa z>fw`#5Swh_qg92TW4RBZwAfbG&3-U@Gh(w5beBg=*yxmSQb&z$CxGTJ9miHKjAWp^ z+A59d((i8W(dZ={W@g`fdMz?PV0^mqk`(i0#Qo>xDveT6RtTj(PV6{vRq!6%m(axr zv!nTLmsP6Xn!@#RqICu?POo&8L43V=iB;C1Nxd*8h6@hXc-8y|A4=#I0Ymy?oRNTS=L1^VJZ9lG7_xSzq+Oq{UXqX@ zK2A%Tx>l98Vs)0fxY)!FeED%TN;Fwi)^NCgEhuRMBQ$;u&hDcA+ST&)tO=py;8Un< z84MnKs;_c~`=!Ft1ZEgs2st^USQKhHzcG}fi=ek<^n%z)Q8EE35mfZ_?zREs1mMEb zW2x+x=&K&{Hm5%XexFr<$>6yNu$Q;H#ykCaZEe@en>Cxef6b4pYw4Nu1!Vn)YGv5U 
z@4e{a-0p2GP~15~&n-(&k%*21&9Y>o5(ZCWW|PhS89~p{#;@QQy4yR}>(6+2{Cn=U zaO$$`<+AMKvWz=cZhNzNgV%+J=Y@+KvD<_Hc$E9@BPEs_W0l0R_IF|`flH*94g~`kpRz>L>xU0XvBQcYW{GT9)IwzhDU9k2cAnxc$<)}cheFX!u7 z8|C_|%PS$^f#~IRhm1^u@nm)9e(0zm2qjsf&c{5&#J(9!j>2(&()~qQIOV)USvEpK zg=3s7sF7@-&;(St2-M-QclI!mI5gC2q{=6GRKwLFUEVZX*&pyI&Q%6NzF5PYpTAy> zm`rSN6UP)_qPJtUnnmL376KxbydaYWX%*{__h}Lnzi=z1mIO)U(G4uQ=L`d@JLH3P zHb3(ohWa_A2`=*=_~do=5=gNf!#~2G|0(+d8rJ6K;2`3+Z&V$>GBRFB5Ka5$!}F3AV7%~x zm)L$Qk-vw?&*!YW)ugAxX`sihuf;B=V^=rW+-cj|GsngE08z>K)r-fg;2N_EOkRPT z8j2xvD)|TQnyyx({+@}$4R@U*cdY|gy*tg$meWNh>{X(J4QqAMSIteUeErl z4CB>St28y`D;>6M-e)iW&z_L75Yy2(y|60CN>Wu*7SbZbJnuWDTluLm&hRc1*d6*)kKdHLMyPS={1e`uh#%)tY%NNW7Ci?Bbi5-2ln}I5kK; zTmhhqkcKCf6-BcStU+a)7sG`Eq5Gi*iZo?y3_=r32|5avunak;-SPZZ1#tg@!71<9m!Om=Ej}D@)kH7~bQw$P*1v6zGGhq!swV)imi4CdWQnrAB zXNlvH1Gqd$ zT4T~w=t*qOpXcy&YiqzM5PBAk(zYc)BhA9{8SYc$s6L)Z zRSAk6(~=%-i7sP-i3(3E#V(J_`o!=RG0=WTA>fnr=Ukej21&A?7r9?--8K!dzj| z;3mt-&1sI)k(6Pf9p148vhQWIr!-Z?9niu4u!p$cl=U zco|en@fhglupUw7S%Llo3o}R$MYUF;sf-#DrdwR?!c0Qc*IO$*EVe^o$PM;g;z93H z8v?$qRivkFlap(bur#EeM(XmLo%uhatBX}X^_#7!@XqjLsH)ivwao18DPt8s(lxZi zhATyWvhXGc(BM%iA<-X8t+cCXu7%|Bga2V?TEASWewB;| z@JN%++*NOLbTd}XqGCy3^PLx4S%hj59wE+RktUI*Y&DG zAc*OIzqG3HK>bCHqH3bxlg^CgBzYC9YcT(NJ0kKAgZ&T490^MlW?)AS)`;hjx97^j z9YG$p={y>lZ&UUY|4 zf#VbXpl*<~2#eo>FY;)Cb{cDt5FQKHCCm2Y{irx5+#t}MbB5|p`&}qREsuAkIzeru zC{+-FlPsX|JqQg6V~Eq>?I&{Z+e)B{3TQcW1Ty2drk!f#{|G&@6KZ&#kLGY#ZqK9}Ef z{jRO)78p#kcJf{btz4_I znXj{6c63d_3I;{DL4&isd-Av(4E;Lz z`PBML5Nw$T57nx!zpAeswz_)9*4BZbwLL(r)jU$M2GrxhsLi#tY9$BNAJ;w`0GJTa z=xx?*qw?EN84{Sp3NjDjS<0(*SN zd>>m(JOvxO_JUOvf%X2md7Od*FJ1m90Z$xP=gX$1V=flT?Uqu#J(8HK6bg)1jJ0@* zj8^}}QvVlA6E`=Hse#DpL8r+fhoupiZf7%V;;>-hKLrr3&(6uL{*AlZuK8QfAXRQU z?`*&B@`BvLN7ljgL)AF~Ms?x0*I|adar}HHjP)OF9=JfC*BmXCDb50kpZU$gr{Y7K zs2w^-2T{cQ9iNg!o0dbLl|-ATNujMGUujdY+P+9@v*H?!vE+i$EQ>x>6-QHbBSV8f zw1hbG2jfLg`jcUQ6Q8RCxdk2J4d@T1{W0o%XlixQJhuoH;CRq)#9bOK-K}lc3NQEv6 zjqGO7Fn}&C_;Mt*b<=5kXtmv-Tw_6s2a>Blr)SPudrsQgHKhfeB!yi0S*-<@y#(b9 
zkMDD*KE*aebsp$gp>lmA>`#My(SS@H7Y+c}EbJBUYu~wYzdc{Du~Mb3AHq`GcbO__itbm`bEs64W-kS7?+WGXTbG>tCjHInt<34o z>`C3MSAN>8R{mTsLd&?13m=n*8jJmmKtzdc1xmwT^30dI=Qv@<07^bACj*R9xKySY z@vn|(uL&bxU4 zUoG9^H4|Vl76gL*iVgVVS?^eQtOZ#H&heTqf3#iVv0UA9UR*EOA1Uj0sbz0sWbuyo zL+#b;Z`I^w&<{kT$akGO^?}8SiAm=~;E?34!$hq^ z=-GiW2q32G%F)-BVQN&ErBjia%SuqdLW`S%ZxH1z(1U=~Bs20b>D81?I5{l%2TY)p zWGLYN7q=8n*&%}`+ye|AAIaawuyV0Hv)eeDDJe+8$M_@0_##HJNzpuRc6>6$ zlcKauoNcs=~b zma=e@t7wa>08ei6MREL(M;)5-Uuk*esJQZva2U?8$PaLd&&IUg!xbIk)mvfIc)sJH zXML>FYubd*e$24xl4_q0W&oo~_9wYNC(8^{xkHF36I!_geI*i)Rew;m!Tg&zAkrSB zlkT@3Xae6}JUOC46*l^B2=#*r-VGdj&@)zi)EEq~6oE+)f@9K<_ygDBC0VyYTz(Q# zOM^`Ynksbi6%M8ZmGaM|=1=cz;5Z}%*^JP)ZXFRJ^FR1q7_jU1segUVtN*-@Um72w z23|0`gj~RZB}(&q#udNVTcOO@PA9Pb}mEIvP!LZj9kxpRpcu zjLsGG`(`LMt~O!CowMVEMZn+PY|zbZ(aSF!pp}WzAO+=@bnTTmj(a#Qch-H_0IhOB zrjH*KF^e!umsVZxNJljmRXYZ|Mbrh4Kp~64!0@+8bCyr`w@Pk~Nb4jIngOG$JwN8M zQLbeW-5*|SfOEUgaB~~5KG6y+R*#Ic?~B60JFYV9>BYcKkg(o|Hbzg0w*NCb6j%+fDhJ)7<%ZE z2PCHV2XK^fw|oT{Ly-O66ztih(X@-ECF@s4Cr1M0QFA{%KG=d{Xigg+vr43RcEi+a@s}bP5{}Luz_bleT_^s=6?3 z&1RXcQjM*>3>$+wR4B*;j^&eGX#Q7W}&|z;&&;W2B-}J8;$B7m)Vn zXzDjs!Fu)QU;o+Kt&cUw5|Si};>H=W4EmG(D2_%6938N!(A?|>-vk)m#01rfw3IsB zWU<;hcRekmyK4$!csj^(%Lk)vZljLg6F--oulr#{Y!DAREDc0T3-$=giojNqm6l@F zOMlYnZcw&KRta1v*_O`A<%04t{5x; z_zo@61_q+_*2^^YD^e4rWv2j1%c^xoqIKr8bOo1OJ&QcGC6*7vqX=wBb97o=Z%=zL zA&AvImBl4}(VhIV3mI8Bkg;^I+Jz_CX`{UAh|YGO&Jw4>>Y%j7Iv!eHH_}sfMg}#- zUOObDhD-`v$9C?lIvuvQ5L$dq9h10)NE(VkpuYpW7i@ZeZUi~yFH&=Qu92C2O5o%m zb$1N~lyS3>&0IT9MWI=RpY&@!HoRBmq-Q-kgf0#Sx-hJ~5>)#hwDG=sZ?ky?Dh$)> z&!fw|eKk!87_i?(F24uAL;5L%6!h;iA9=7rkN-ky9z5O0`}xWTj>7cA8OR7Lsu}!; z(1&0rrqYc6>Jk0=#(IhwLTQn4w3G%!a`Pz=Lr0&;M!Y_13Aoxbw*f=Ixi`qSXO`^g z)~WwJP*M>LwT5Vu)xz5adXJf66EQ~J^}f#+T+;l6c&R%bLxT`a1BhYhPEUvG^G|Z~ z(}OgY#tOcU9+tXepes2Y$#Ztzf4cQRs%G16vGd(g2#0h*irj+?1a|=+TPLsq{a^@B zPfz&UQmM*2ht56A-Mxny?_BH2-M6Qioa9a!St)C6?KPg&Moi?;H>ANe4!#6?#vM$d zZ;)vX&cmwg2Eh5~lHvjiw>JoC6poF@+G^S!aA5t%uEDfaPiq9 z@!M`1koHaZ*-g6HP5$2#gkTlaD}}d;uZHIS 
zSvOsLufjd{fl^0_9a;P`KmWr5GkZSGz?G9jzPo$+gTP!)aV$kso68&S0vr6PAs4T& zI3lvOxS*0CJ&|1rs^GujndGsZY*sprmft~aEVoUaH$R=wy*y9?ywRNfvAq8!ydI=} z{_!2V+xz-^XSnq5!}joKEN#H}zlK7nLyZ)yG`_n%bJEI&6UDjGct!HJH^{Se=O@`b zdpBBONezK1n;ebC0vL{Y+U$9XoSk*5f(!)%RgNGB&f2yOFW)IQfNTCH?CHhf;ey7?zW-Pi`?Dr2U|;*GY|n$(AJQlR zVty72v_V1rYAhBA-Jq%VKCIAVoK?XERoxBAgj`Z1+Q$dwM`j3SA)(Ym+J}W%HB{ILer@XsV_dYUe)jHA9QqpE@eU%@}FWk`j>g)lrGc z2Wct?NlJ&o=abK>mEXz3f8kA0G^lGQAR*PIB*~W-RBW&z6uiyZA&(>dSi}MowCo)y zrdCA$iC9FIjwc2cz;QQVLi?EowZ(Fz(sz!j!gao0dh7mrs|gDM!UXikX63p$?FH zC}2h}li^eRMZ{+aHVAs&n_OD)bmSBN8R-!0wBhMKoSmgqS1*o@u_)ob=-vONK06h7 zcxQ9D>-RLBf_DxX`qG}M6PT$BxCug8s?^S&!0DR6p=}7{Hs|JZU}m%6Z$EGA#c>`U z51b(8^_8~8YkYzqc!~iuQVhw>3Z|+^q-Y$bs;j1|n90o=qNWz^^jQb6BD=SI1`gQ1 zl!UU9v6YDRiR6q6$h9@FvC{VysLG!fRJ2%Z>oBh;aq@|Z3Yu`SQ0z_f<+#|0!MVhv zBK4+vJTS1+g^{fuAj-}rfF(vhD~CBRpTnx4-TaDT$%48r&#^9#8LpK}3e7_7Dk%K7?~jBr-3#-iWiKO!WfSY1JtVJBB-zsXb$f*B-IBS|t{dA?j!ZPg?x z?eKKvi<8AeNW5bzdE944flI=tUVSe?-10xzgT}UqF~TndO)~W@2k)H(_TAvJ;0s8S zDg2df>dkqIZ5gbHP!A932xr{;XSt8}#=l1q2J`+O>t3FkZhlE^%~-wNr(NBbolU1b zJ$S#nuzK5pt+W{pKs2X^zUu~#m|HS>dve-alUEfClW~jE^XRe)Xz~&(G6GL_NzU^5 zhLS;|E+N*q8V!A5%fq4Jno)W})j1x8#TBW|e(ZwFaK#2JsP77%RanPc{g57_S8p-vf%?a;8+>fbfRjMP9?rXqMLnm~K_={lSW?C%u# z3_Q#6YlLovP$SWT@OKJnq0?)KmQWrYz~8bCb)9^fYgm1QxDaEYc?bpDY5H&S%-_lA zM9Idc&QLSZ(Mj8<3cQ#K?5J@xW))ncLGrrTf@-+;BGgNx?ia^EacpYW=lWTS!a+!b zKrj;3@kaQA`9qo<5YyHVr%f;|0zz$j0SG*9^A-@*)D>YS)d)Ninyc|kr+-%=_qYLi z8WE}n+3?jvAu9)oYbT=@u2wkCTpvbw**#mEJ)0M=&j!u2gg}|Fbl^Od2Z|b3_qw!L zzbZl5;#uwfdijim>-LIWW;qoh0W%OJ>yRiTUu-Pd&R)AzQ6?RuQiqdCbafur_9Qe` zZe)uaSA`o1T($n-3=xu<>FFb$p37gplQ#q(?_?JL=6{-Q`3C^zRHII2B9?A|`-zC9 z$*7UdteeZKnZ2mBnD)PuwyukwhGY2`)$NyArG)63XxMn_>LAy)>$us^1o+QZcg?j{gyH8wM#*+%Md_*>v@6qe3+t@#7sp3IY|5C{oac>--)RW zKK2wQYCtHK-4*gVo*I`cWH@^p^#GUx^jEMPy9hF4ZZpo&XWkL`+}=XpUVnsit|w{! zOHdV#a&StC&7@6sInAKWyklJX)z1uJTbJl?QfY(QGb@sewUXYW@m2? 
z6|3o#>kEufG3#?Oi7e16k+3M26{rE^Nk9K7U`JeIoj5DCE*ZCMRfiry%X$m$YWro# zpJ9*E5A&xUQBEQ(d@o=JWKE4OV|`|=Lzd9L{?)vl>mp+OAodnm&eoWIZS3R5Ct4^h z&4a+<0PC-B=kue8-v1b%-|nvNZmH5WkN*rE$JGMMC--Y}?|1%m^!ynP8B%NTb;J>RoP82`N6|9U;RFCA$G3rR5d9=@kVbvA;OZ<2@pg>0MH!;>yF{ z(Y8xXVz4nOy1uS0G?tluPF7(-Sz*gg);1WFCJ6EezAv+PO(xOOSXe(Dgfz*Z5jz2SLAmS}?C=~avNz^yRC5Zb($R~e z+lpHX(eG~`$#-*0kx|ivF|bjX+hXn!b@<410RAZLqTDZrhmDB(h;pMLIeP>y|z6e z4n0EmQp8-y%Fy_w3rnL~gs22`90>J0xyFA(gIba8$VQDmdM=-QkDu5%4D~q-Q1@D* zgzlmo{IqqEUL$TSg+;3cjj55_;a3Q;*zPYEZ5}oY=T-J@U~a0OFBcxK=l_4cB)UXD z><;J!5NFIwExfCBM(cpj(=ClXdd9g1U3{*eN6 zoq(|z35S?z&)1bV+&L#8K=^NKd_#I~SAIhqVOKY9kXh9GEd9Iig^}Op|B3IVqP%_R zSjmHUxWvb21CAy$rY7WaY{%+L5)(8|(Gx{tX39lc1$9$2A-Zb*t}Q*#npQP_V%=56 z=>>`GOmy5n#UVu_<6-YfG=9c791(;0>M)~w3)DiLaZ<Ey~sI6W$EdJ$N2qMSZGKyl&EeLX}@2)a<%>2 zD116yK}SCZglh+05m144fXAn?*6rb!0X0l>&(LGC>+p;H(~3OM>gzS?YCmb|zunxn zD^8FlO?Ns~Sz+wQjzCCGpew_Qo6~HFC&}Mz>UIsJuLTYSi-;cc@nih_R{L`zdzAkZ z;d^dR_TPtVl=Hkyt&{s>60@h+m!#9s@l&jyfJ~~GXWznu^y1f>c)#_1WuEZ65lo95;y=;C8MNX0-bFV07O|azU{Mar^JRlB=&QH$SPUvytPE7Cfe3OGr2uk_e9|A%0#N z{^L@Dd@>swvUzz(fIKKlezX%hQk(Y)o98~^x-+T`pGPBSHSZAgAgfY)R2qjz85VaL zZmSBnovo=Zw>>hbIk1hOFT9v)Kv-(WKxxMy0g%vNkkwp}ePfzx-)bP2**{GQjchiq zxS?`B4B3z>N5G9@OPPa?>VR2^4|Zf2Xh&w!F=NhaT1sQauXrQ{Pazpyo700>nmJ0^ zIjP2WlAcmc`s6N(c{uIL{UU>EDbDvDQ}Kx4f?Wu&^TDTmlZ(_ zTRKx;#Y!M`5!D8ffk3sP8qR$Q??UDNS(uOMVp~5!E-f^0Qr~XBqbKqY8uJ2fvRDkA zE6gc!34zN_d2!f~3{!+QxN&^pbkN3i>cZsjgg)qRlb4MXL~bVuARjUC!G=&iP~$z& z;yj?nu0PKN{RcgqfEgH?(89Mn3AIrtu!EGaT(Y}dw<)n^Q+(z2hUWpq3mWh5D0{cu zLpY2+De!UMPsmc zytjKAGCSY3+#GYC>K@T(&CZp2PB_fr8M)dD$Kf9Nj5HN5Z!0&4H$w*!e|XS%w_!eh z;k`V8(*SPo5OGTo7&jF&#W%`a&;$+5-Q|0st80yy{ZMTSOIy7#RgEG`d+-!v95Y22 zSt#4pWB}U&rDTS>)CgF0eV$qopeeW$G(0g>M`9+6M-j)}A9597SY?wWpvK8hEW$<& zB+|_rL6yWext<}$C63dNa?ZQqTIpcCbS&CXvxrmiB~EdIJ>yZBJ))-_o%@GNslwiW z*xiLYI6&9hdokAgFw}A}(rwMq>%_|?C3w zCgE)DX3ec=+&sA0DD@Slv29Mn!xTp{G}1r?vVT*c2f!PT?`&Yv;PYhP-y*_ZMn~v1?Q`SOL7vBC*dH`EloKRjaXXpZ@)!gUfg#Y8q0^sDgks`%YgijQCB 
za3f}S^X_mHNS^^D>oD^lKKaJ_F@D}XUivY1a|Nr@!6WZ2+QK-l8-hIIWb~Rm{Dv&_ ziUq%w?cEklEa!gyV{-BobaKU`KHdcVwHi{KumPD~#!=g?^0$d6@s|OepKDN}p{wlY zl`aEkyA<7b`>kNWk3WAgE}VW|1OycE3vl%MY6b)(`nzO#`=r6%<^76(QcxV=<_2B4 z$HEFrPY=u1jUp}n3lsv0uw1s52K=%ud5Bq7amwvRl$(zmJ~HGb+NH?^-$Tp%VCW`E z2re--A}Vat{$wwD5RnRlF#Z@MT#-!7tJ*Na`o@vfHM+i-$08#j2Gv_pHr&z~pHrD$ z2PyRLKSAk0q=~8KXmnv3UQx7|s!zy3{~MO`LL9&(l$VTZFPL0}?srBHT1(iTJTEAX zy1rM=f?217H##5%(QQ(EE5%m#xg|KkXa)t#VbnM~nw=B-Neuz*Vh8I-{VvanxqxW)tm`$K{+U?b%xkifiNih(}ZqJ(5vS4UlATi z;L6cpk`fmw10<;e5aQs3;-az9QGwyqqy1S+Ba*|rA)IV79o-rPB^zrp9P{`3e}?mf zMQqD$z~vj`6ojrJ!ln9PiCXO42y8Hj$$srkLT(E{i#?oS9?{ygPK+#OiXTF%jbu^G zqnAXTb*#F|=Ai>P|t@9ffnHHRz0VzDh?xOK-{`wf5b@9?l&My60mc^z~j?EqYGl1JWgK_QDkRE*;_8#7hbM(mT9gqTx7{a zj=22?h0BF(>XrhDpe zI+U%gw8#PTVDG@U(@Xy*V#QeFM3Oe-W9m!jFNfS+VtHHw%`FoBB|5z|nrLKhJKuJ_ zHIQCQC37o1cOx~#Uz?Vg(;jc&oNcHywzWOM!vsBE#b~7ykX76x9b_y&9io6-{xG0S z1+xBDkcx*n7~oaP;`p6U3%n`+{Rijc8&Z^u{UvkKzp|IU^VmWgex2F2ZC<$^~uWO^z8{?3SB5_3GO7 z32Bt5gmY+l{dn0>Y7gHkQL}`LqiJ^oC{n{ByZil|z)NrEDQIRk*w%rHmpz&*dQfkR zaY2w2!oKS=;?NjjMlNlK_ z30f!59S>07EBg&&`ESD@R9$tI1`ja|B+^eUl<#Cu>YlD zPcRGl6=sNCJr%VwB(wl>4yA+Nw0SM$yb?AaJ#gv#hpZ7_3l;Y`59g@@_r5X{hB_Oj z8u!5(+lBip+T!d&xms%F<#Xt$4ULrpoD|A`Xy2l(M;B27;4-@t)wO{`$yzy3a?H0( z+NL%~H=sNe)dl1T3}igx>shC21XQ|cw|ZE{zbwb@P3kQF4H!K!U+DS@kMOKjjHj=! zmj_JM0zUOTe;PV@;CT1n8yIjBWU2*uWc@Oz{H-xxnW8_Ot~qI_Lpr|>^$PbDu0C4s zewuEd`21b~G<-lliK~?wb$;CNK1CrtN=WSWOla!K?QBkl6c3534XwR-zsU^+Y@JtL z_M>pVc7;Wa3hk83SElgeGcB@0PL9$K93lq}0wstpxt)dXra=TpOq z@&{Aj?D)bxe0rlN8e8MRGAH!$`~uz$-lQzUXzE&}2FG}sT1@%L!)Is++{LuWL@Zch zjY#jb$lqX=L?A^SI9JEj3XM8XKUhysSOYEh^@%$W%M;q`3(oHY%EQO8hW^c+k_8Q! 
z;9VhDzA9}})~PDyA#|-8zq=LSV0rr47gL0x?6N7625a?qDzdUjlTzrh(~(va_=DH| zhZ61?j^E#(ba;#rYWrky&L@Du&wsYpH^t4XsH261pZ}->NM8+Nu5Vu$>YHK}lwe~( zr3YJQZ~-k6Fz1sT3u*xI{KpjVkLh%u%Egk=$(7g5^GCz{4gXxO|5+*f)g)BqGQON2 z6ej|*tzG)fkA+;rrreyy8-f=5qRdo{3-nZ4th5PCbt#Tk>skjdmR|?W;@1RX*94(= zKg4Dky#YpX*%|C;*?=ENkj8HS72`5Xz&OT5M0ykXS(_NO)0hc~v@1t6Lgk}6Aw!pzo)z)o5UDjAOYbtX3X9UiI0adoAs zOwBEArKJn3mL?9!0&%!^=mZ4J$ZENZ{?qhY*){P^J|TlGb+BNXMM5Nvo{V^>`n1lu z5p+bYy1@1V#6GHu-j|f1Mkv9Z=0TW6IBsfsL+X`8tKqr zbQ<~4LE1ukC+^-(KHG4g*xab`-JtRKQQpUh+&~CngrP9b1{xaV8!AL7YQ(7SfaE6B zE$m#e_Z_!49JBK~vND_U3D`67)p2r3ToWoeUa8oX9O&?->hh#yZDQbAzyHScvu_r0 zFIJH+mOz^+X;Kg@7vkmy*VT}}zHZCTg4NPh*}Ui-~N?n=%Q`lzO(zHv)7cV&4!JwUi5ZwsY5PP1u9W``e=disKExq z;TgU7bsRCuZh2a}${F;>*HaJ}8o2>nB}>L&=Nv!3q<~?mtqu8&9a(;^X+D8jV|>e- zA0^kfls^c`9UaCP=)R_>yAv=hCwn^t70> zw0p{AZ;~XhMHlfZyp<~46!=LoAY`kZbNR!cWN*tYrQhxzf8oUGhZf%@%VgTcQBV^6`#UUAYc zDGGrQ#!NcYe1=P`feRFbL!|G)j*uZp5fO+UzxS7!pqfI<$=h4X)WqW4Ovy+E6zLg^ z%j*SP?3Ek5#mh_0n{(ysYxT=><%?_e!nev7#0#XjL<&p!va>i#Ot~^F1WHSJfSjhW z%Kslyvc_srmTsHr;#4w#rZCj6R9dr~pS$1_K4s=(E-}g4)T|RmNHGLiVIUFWOo<_k$6KY z!|FVR-U5+|4yl$3xz17vz?1v)*MH=P;OF0W|HHBN3rEA#Ucao5N9*ga_JMlC--d?S z26=ddKWqiNH3a(9dV5#0lJ$gDT14@-ukFP2p70vjwej_2< zY+0+C=uX=DME>+Bw0O@7J?&C=?b6ntp)gDpVz6arRmYepLr0BBe@P_ju#?~=*2Iw) z5HnC1knN1Y!3HGch6wFQnAVW(1{_hYkw^NG^lZDNvc8UD{mq4ugIYnB+(9;xLyeDX zs#=lJynd=|wEu6U- zT4LX0Dm9XqKxJtiTw^?l0wL`{tPx_XUXZLkUaL4-r#R-#siw&e)z@dD@LJrn_B zQ&i$}Q^HNNUR1Q&_+P@(at&|Au#RW0eghsMCt+p-E}<(Vyl$BzQv*~Iv=j%5}z@hQSW5b?VrbIogmmtWi z@+z3>wm32gqFxVSrS+d+fqo+#G@4v&M3blDHmJr*td%af^ZFoM_)Gj5!i1j^kc@q= z&TeUjHzL*Ea?{#t)7^6kwX`3ycHJ`fU-Gvdv$dSVg6;hUdHlrWbVp?7Gb@l!gG5@j zFFie2a(ZuxHG!3G%eiztDsdE#B*QQ}RaX~1J3AowXF~h?Li=z4C4m57eOhAWSf1*C zg}VWVFIt$Q0Yt6?7he@WQ|<1@f|X7B94*E;W9s!KD=+`l*K6wEwfGCcpZWhD?El8! 
z$@cxs4zeHrVaRKkmo4CD^!I6iWA1K`xA*7O-_O!NJ6$?Zq(A5$Yev`?rhk9sXsi{t zdmL)66<=l96(ySo7mJ4!ldCo@XX)JF>e44SH;g;PSV@x?7H`${Zp7+Uq2-s|V#Ty0 z0qiJ@We*f0E|92Hb5V0Bvkj)gRQCJeKKOce{9(jyvsgc&q_(P7B6RD_S_2OfM=WR< zB$K|u)__MXjb-8-zo3!OeC^0_7&&Ye6AW96g!JFJOasD^FET*A(C}$R2dkuQZO;+e z=vXc;hp3tk55{Pl3kL&JC6yNQ$T%VdlqC|`@Rq~mmVs%DVt-7dFhEsSS*I?Gv`Oyg z_JZkf=}RZ{zLF@KgcBluu|SOeE0q0~g~L=ePF^{XkOS)mF?#VncK<+k`!H|qEQP@b zW}=Y(DB`5V7^K4H;Gn=02uS|`6!HSW1|D0nMk`YI#Bj{)F!KBnjIK)Vr2*!iAm!FP zVIXOa)CHUXIGTI1mw1Skb}sUCR-JW{V?ib96Jv6r@>?a&zp&DTxZ3sNqZEL_*VPq0 zm(K$l#mX0j@|=iYnHy_Hk7t2UnfY;sl(STDZFz0`?{`>BiP zPok1lQ$f#`&+`E8J&u!omWy+qyIm&4mHIJmUXmQWSVRP+krFQ{DpsaD2Nzn+jSmos zVpAJ(QyNinX@E&A?)a+qgtKM^vL+4&$!cDhxvG?vf`+U(u8GN!HKH_q5gt2=B`V)B zDbj0Y0k^E53AahJtyPedmY`6x?zNBk*4@H+cf5V%v3h3p&)IGI+OTTxr@;P4hxXu? z24o)kFph z?QFHHOEiq6RW7%HTKX;hoon^JP+|F0^t4HwB+|sB5(RBhnnD!V(()H&8lDvqlow@d zmjcbN+LPDft05P(Jr;YX5y(0^b^6Lw`YMfpJpTBYy6%%;{mQRt-BDx`gwRq#SvFhu z#S~2;@$uyEa{I6{7|@}6h@pE=F{t-2d#=~}4X&6lVRPlxE=BVno^L!OC7nPM8}1qg z4#;^!hBN<+RK?<1GJE8h$|QK%L^xCHL~bI+!xm$PPMj52WIQ@W9P$Qo;qm@4N~k`< zgv{Za>-N=n-FrLL;|Jk=ev3Kj`AYpfiwOhbr6nVW%gf}?bTD6#2EV38VrV@(+&{oK zAEr#uC zmom7ztvbeVw+c}%-A(hVUvLFKy$}4*FTY6io_eyHdUX3cVtP>!PU6@Rrm{+WNmT$p z<=6)!CP*VlQ8Ce{Z78CT5nZkB&Q$pe%Yl`_C}8GW*s)xrafoJ6CEcB-DkfRQ$h#vG zmwC{7z0m_FNW-FOpg!Cf?SeJYWOy0FERZ$+NM9)O{aYrI{=xlouNAreV4@$mfOHsK z1zG7YlyCVJjIp{muV754SfD@9sJ-K~p4kGOGN~Jj7>%N~l3e5ie@wU98D3OGvIF*uD|5Gs9G{-QZa^^)?kMR5};Q4oiXP$z;PdP0d9?lJ{A zQa-<rL`U5F)>DNH05Y9av;3eR4zxKk{Yv7j)9yfnVl|+8?P{y zGFjC^p(64&yvazB6s!O!wFO4xka_)v8aJAzL%O+C)X==+;DKN7dfe7(2!kPE>RfUZ zb(kWv%ywY_g3JSOK)pdJyoJC_R1gJ{Opju#DeE){IL9|e5Og#Vv^3GLu7lg`29U1+ z)e-+)V^aj+mt@e>5y_B{sFnG_j@OQg5ZZ=nRxK`wo}M!v7&E zGlRDR5J`4d+!)zT14Mx9=<4wI!j#O)$nrG7*7nCT)p_Uh+sq5WmRJt{v1`i%J-tj# z&D6CF>c5|{l|zUGnoqy)R#!Kam<9aRP^X-K^KCyvD6in-iqO@(KEkhlkK~}C39hOZ zo1ukpab7dsA2Y+yx59z{GooX5=!Kcm4-o6BEgPC139g+qbBcIuO1OEe3D~HSqgbvQ zq{%SyTU>nYriw&2R|JI-sE(+_ub93u7vP+N+X*3P&;|I){N2=qR)_kOaN$Nq$GQGK 
zuIfd_7HaONPg6Lkj*N$2yn^!V!j+^u&ro-D{$4A_tV@U_d*;_)W&!Tw)JyhV;k$$FzP*$T^T*{}VHgB#YaB+m6 z6FBCpR-h~106Q~)EdYQo3Gi>!w##LKI{GUGtZ41+odj7x$eNeC4t@z zWIy{Ci&TMjFL}Z8k!z3>3Xbzjv$1^3$QCNKWw^kp7ZA+(Ztlz~S){DcX0B016IH#y zU&+g40R2%7peSFI%YZ^LwY?WHgC1t+;i~NAGBcb&#I@Q6g1+XDt@V(u_8}kdqR;30 z>x8*>!$(vsWpQ;V2Yje}N=28cD(O4qk5G}X&SX-7s$AKoJD!h|A8 zPN?GJ-)U>Wg%BwK`q0|idkEC&fY8*G3^Eq>rP#%}$oYBjIe{>6(0ADbY;AxEwSo6= zQy14qQCZaz6`634@J)}4RxZC3S8p*_SGzhpGjIC`DRc@RZkNDAp%_w!7{m5I0~l@e zhjF-xX-_Dkj5RtbtLFJTQ6;D1hKS+2iDD_MF!nZZK7SAZpP zBe8p+GP`HAcP90;;PtNlb)qz% zLciVfdKS{$kx`sfRC$OIbE-Ll&^|w@&9KMua!9wgPXmDIjg2M{H-m|$?QcH?4&Ma_ z&$S0vMfWdt`}+*2_OM9Z2w(0_a583vjBHh0{#4i9{>kp=s@A9cH6VIIyAx!kVW(!L z!J;xCu+9TwcI>ZGzwa-x5j)jAq-Cnxm%&S)j28h}{uOZ3A~uE4r*7qgF)VnU!u*RG z>#moIkql0L(2u?ES8WoBg8)w=c|CZqusEf&fkbH@;c&MIrvw;m1(YAQlp**vbffC1 zk^gb{Un4e76ec64on-d?Ad(VxkO|Nv0}XGs=kL;>pwXMqle93&vJbI?q*Z>Q&=!x<7JVBx5k_w{2>Tg+r&dHFnkXXxe{f2@#JtVYam>S6!pS3m zg{^jhKiDV092ISoJGRU+p=d=Gm(JN&ui2JP{;(;K@Iuzp1n*aOh67`El!l0yc$9K` zbYvM?HzU`0RHAxJ#lugpt&IQ_aR=%!gvo-g74L*Gq@@0&)M6r$BL;vG^5u%E1sd~( zi>m^_GN89{4OGI@)^@PGnwtdRhm-yLNw6$)Xm24hfW;`lk2gip0+Av&1fZ~D z>s3*6C#dRcb!}vohH8zLW{sjoQR1}k%`Nq?FGxDd`FSdkg_O}+@X?xRYP%!}eeDAJ zEMnSRcAzTROJ)2#2q-P&ra(;;Fko*$jA6RGs^IJX?en(!^|AIhX6sF#5&kUB<@4QW zdyRc{BII^&{yCzewIbs=;qLl`)6=rbm`}w|bcE;$jZor`CiT>7JD^*{!K4mK-<%6- z|0bd<5l3bbG6~!Tmn=eS`gVW0$l@QGn(xR{YFV{(eBP9y1M+|YpgFj6|1jNryyd4L zy3EkCE-#aXhGtE2y@iZH_mXF2nr?1lba_Go@3V2RfbseH^WTuj|C z5Ee?b0`YX?@pTUWr4Ht*PugMzJ-RoQ2O`Dbc$B}$TnY|pRw|3j&?z@9G)<3cYs-9j zb@#j1H>370VjVy{@_V1Zp2ZA(y|=x5eKvdnzlde$DRM?n?08p~=|D`=`>Fc$CHVGT z@$Oag4v_OBIXoaBN}zlE#9r00R#Y*ua(6eY#SN39wvwWzQDPP*-YxKNMPzPD=5hcW za(H`vLv;W^Pj$6Z@$r(7RM##5MrOkNLXnEEM~n`b#s-tR=CaPI4c??Lv$Sd&jT9XS z0v&%PpuWdLej9cU^uvtM3x>8!fB}r)%Br9*6<&SXHNm`JE8%J@;fVS)a|5C5N6O9n zDoyA?$e~C^4x%7B2ja2nV2a_-I%z9lAo_8w*3b?}>w_wwdP6^M{btBOV*%kGI}RWz z3JZvZ<75Uq(ROnpTmy>~`D92Q{MN=o*iOnasJ<_eYON{?=Zz8<51*b&}OIr zS>37HlvK^mXw*}8z|CrH>36o-NJnF7>6l6-6b>A;pDQV+HThBgaJFtUQ_1@ 
zs589DAIilOE+)$ozlGjdd(OdWHMdc-v00>`7%Ve`nyILA$=%IZo~u^1RxmdBVLr8{Br#vY+%gLB@LZGX6Z}zsNq#@A$xXE(#;mY3S}wGhN6ETXR}bMb)^GLHd_6YZy|oYtqhk4m!VdZK``mwg zb^)(6*XU_wXH&<>C8#ZV4VTa?v+2~>7F63B{YC_P{Sz9B0+(3;ou>;}lV?x0$A*~lso#+zrpJy5w^bfGiW0GMglI%a`0*xB2j zo)RZ2-lwWsAL*p@r-~#OS*V*_QAbLw%`mmw-)wHfRY*`xw7noQyPmy;Ai#*90l#E6 zQ7@r_W)%nPk;iB#z^|&O>jd}`(llyQwdyrATvt}^R`{XU`R}**Z-sERK>!bB0bR`; zK0d6jZaQCQBmOyNOQVOf@=8;4@K8}|#p#WBMQ-Y+lS*PEm!xip%*r!+JnML00;==} z6Ir}dP`;9|eiy>rKHcVeH7~0bFex}2^xGOOTe=S%N3Xg0;M!WlJ33|Cx@Nq-x{nXp zJUynXeE)g5PaVCIf1T9b)bI}rSmBrtufM;=r1ASicKaOrxavK9dVkzGO}&l8hdi}i z5k&a^ct%at{BMr?q^YOUw^s4l9D{bwW>FFHj>ImgG9K>Tp0sa{~ySuF} z>aXiOz-^{+D<#TKKq~I=oHc07TENHGvu*jy_|irkf64jR}-U zElezjOl${jt!Q<=vF=Vv&-co2K>N(Qylj5GX0UP`?rEFisgNh4(_tZvmgwzAX;pH1 zaiT!sL<^fFb|96<)~DB&hm8Q`Aq@$zDMTJF!Rs4;uceHtUfZza;kD~&JcZO!dO{>- zZw3Q6mmVKU5M%P=zIwO))_DJxqC487ar9WTauhGG`FtsS*GU#_EA$UiLF#>SJ` z7={cJp(hh&fglQg9wg2J8OAv5&R*%p3Y-XOVP`<3U4@mx6f-R3XTV9D8U+-ZJ_(9{ z%Y1zXe_m>Zwy(yQ6A@_q1@0STUEKl!e z`TxR^t~ub?)<14C)^-w@ZCToFow)P5wA>w+o{txfK8Y@FxlB#_@*C8e$|VZ(1!6Tx zRM&{%L-cH%tD>Te-ty@^M(8J81?wB=;4y(;)sFx6_K6w zQ^@b*xol^`?DcBv4-Wu(a zv`imq>XV91sS6FKMh4ZjkUM|Ct#A}qj%xbg zgV3LP3VO%di@xDI6e$!LtN~k#pR(vWa5`+_!T~Fp_Yir#De$N0fG8{L2b@#l#ZgIms*lzg4Fx$0KRwk&^^Q^*?jszR-m} zg^q5bkUYkglQzGqtRNE6$CDrKWsD#K8v~ z_G_D^y4qngC5BE?g-TAs$lSbG^FK{tOwh+w*ewArC0e?-YMW=E>Qr?ll(j9C?F9sF z)$0s>TcH25&uE{*{*A;sGRf^TDK;35{ds2KufPrOzjUiBQl++uwB=~yqfld;BIvi? 
zLc<#$#UjeAo^tq^7#UcKs5CT)n#%`?K$4_+4IxtS>ZS=3-vJBf84K$X1DA!&6$!e^ z_8#-gJ2A(0zJN9teGx1_gCQgD)he&Pq6T7kr;{W^Lxf1#A~ulyiJBlF6(MnURyIE$ z*9Bhv5m*gySmtWm4O!XF8aemZ`TpyynkbDErKnLURF+Iy99LlzZ+0!a!foB+FWl&o z($t2;idYEupA(f#VrWv$FBfRe*X;%xkcY8arm~!`I$sEe5ky^x^hQxiQ&u{nx-?;e zeoHjZ$Pg1Dvo}t6SYaKT|5PS7vvPSUqNA6>!?VD{&)(U|vPFXRcB%8y;c8<4b$QC= zi?Y;HwI|0c%q>JkASWkU}+KNqY@#cNMrvj_;Qis{+4t1;_}=K@ zT489JpIgBlrHQ;vAmbdP5GJuqe3X%-@*`#WI7PMWAjPkvlej!vidc3&IA@!1uwU-5 ztFwVLGvFh_fS8a5aN?Ta`;oLwl&iz+PPu^$Yu@(PVLI!6jQf2^nDh-V`G$ep=|EK; zg>$zN zSQ5E~jXnmItyIs1$wB=Y*OXd*(Au2fXvsnG%)&+z6c9g&hVy-N8DcVtuM-3UP#|jGu50FSbFsGL zkQg-3a?t)k8zt|<65S_#ed!DTBSmMJ-h8F4q6`Fo$+}cJvYjljoG!5@U|Wz+T@1TSwJ*X4#Z$RE*2xQ!%K?>|8DnOUXsDDf2prWl;EZs8 zX6qGa>jju!3W@Gd>s}8he~qUfJtqon#kyOC;8!X68n6F$;nnwK_cEci_gJd%{a4&9T#>03Cxel4pNxP)&WM1!4JBWe5@Qg z^fbaB0NiR!QCd7DYz9eY3U!hWV>B-`mNLLNoUjyo4ALR=#J#g<5#_*TyDPU|7bwRD z#Ot@8S&S{GJDZe{E&hRaV5LEBBkLMtTpuQ1r>IITHf?9DE|#$}0Vp4F5i~j7GIN7A zH&tXkX(}~yp4@f7O_6uA&BYhk(W(ffte%9%c{s-RbSLvM9IwqEF>>rTG~zaNWH>r# zxOu91d1rZgCwO}#cXkjocj0t4?KQMrIE_N{a7J)?X^rB|KbKJ>9$YRhab+L~f4AHEwY( z!@{gAbZuP>ovr;I&lF#$3~< zNy`mL<>H?Kpja2D%*W@8_-PC}nl8X?^Ki>?brRwTJynXahqjRII<=7c~5yrjJZlv@S zL?6#sLy18~3`U1t;tnL!B{#mg0wR?+Ef5Bc+4c-R%;eSRA}BBXP46n^;VaeX{Y+Y(yGtD$AT4sh6K$BZrs>Wa&UJJ5g6knvrBN*X_R07sFI-o(|2u=Yu`JPTttXn4xVvCG3^+EsVp>5k_FFtY$u z8848dF_ki~twJ|9PE!D1!;s19rS3D32eY1Ubw%jLo~d_>mD5~R3RMON1Z~96E&pEn zEbJ)20)GAd!tLEm4;Kv|S21rlkTHKNoV-0Hz5ydTUT}o0jcMMlZWY!?@o<`&G~SYo zoKn4@Mzh%3B)HWJG*3gI;MO0WdC+dc5{ex_?_(qG>_y~ zU!;c4Lw*)hMjl%pHWPRwDkw1-Ep8S~ZuGuO+W53u=x`Svwq`JdXkvk~5we$CsPgLvVv=AMkMhcCLM)I$%hK$IT0OO>nN zktq3mpYMK9iZszgK2OE8W5ft}(fZiAAjV3x?7QNEAz^^iC)j1nA9387;zXZnL#G89 z{?E+#)@wCMX*ZZCN;FshXhX&thphny!d}UzIUA`qge@GgH^q5Vj2m7oT`C+(!QHQ^ zfW*0FNYm*Tix8*U_nnZr-(n?=fbk=ECe&@>QPmwJW>vZ~*zaqL0WJemKeN@3C0*`2 zBn&MOqtI6UrJ<+jRJS4#vL^*USM z12|n66i*KM9&gza9e`yP!jO}_MJIXrmz(lcu8#~VUyU+^1_n~a=U00>hF$R+zt1%N zpQ6Y_4UPVG243YxK{_F$aTB^Uc&CbYA-04;Y2JrKIuE++ 
z34&Yht1Gka6Hc=c2i)!s+@l9?@6D^_qP~-oHQTC-lGBU}E9}uCRwz;v49;LeGHk5F z<0UwLpZ4JWC-LwY!?c^!OBJ1Fk2rQ-97ZPmC3fewrVB5JBocaXSiM~(8K;QSBuDkQ zf{m4j_drK^j%OaX$;{Mnm9qKSRuHGBLcT1i^HZ24m23l%SRT3`dI($8qXEB?Bsr_v zqsggqwWCQnvfsYdE=7x)c_Hj$1WC&!f4pQkzQyEi_3*8}&b_=i!FgnPbUDkoDiujw z;(b3qHE?$JWNYS1R9+jkEOjS?s=fZ_O}RYPW>K6s3y~OkF+%PEh3ZA00aYvW@w}dF%L)a5Q13PKTQVIlo@D| z(k~Kau0p1`i$NpA85Xz*e}Me-$I7wHPP9oQu1Av?1v&!ha=YfLsJ#`T^)t2@^Q{{b^z7 zzCYdo|0GsX?@&p4IB8+J$Ik;SknKI6uSeNGyUk}^woji1c2UU@>VqLuREa2m&WU0R z>tLQ#o!`{mlbBgh=GgbS`G!2Yk(g`3-PdRQGUep|aXrC%%1(U~uxq7~?HUEwfG!2= zg}k#=Vd6xMf?wjP-O;8JUe6_rrG?uUs5r$97BRcv zpK9iEO>$&RKSdXEK@=*Ci=@_%3)g4Izf`r)x)0f*B^i$%D`;@LJR&)dm0Y^Xe{m&c zL;6|At+YZ2J}G#*{Dcjjl56Jz!_jNF3SN5&HeCN*=X)=D7YsLr*`I$T2!4;>LqEAs z4ICFxQ9<;HsVBpJ{W_1^wKVqKgc!#5!C)aE)*hL_$|DQV@caf!^b?`>t54nY8xJ=^ z!ygUM=sIpp#`g=l$e}n4{8ZWR%gz^~npR>3f!yK|tByn=$(#E|$vHBa48;E8 zJD03?V3JU}Oi%KvE6OCXbE=KviJ<%~!%*X*)SRIfBM~sZ9hNuk>W*v5m-u^y5EHz+5Z z==}A&wQi{31;NOi^!x=(?+wf_e|gD?W%YhG>^Xe3p;H;^367kFiv|0ap7iVH`YuT@ z)5v)SP~~lV|At@dh*xzaQvc|{cppdlrHstx!h~Bm+cw7QWA5o~S&SQj2GS$A{eh=R zl!42ksU&4(^-zk-k&Bo)i|LMXHc?jZ=Xes6XVrr&p{&1#Xpj?qRGPAl-oLU1jH6A>l*555ZAb` zjMYNw?}AciyJmYm7**xYta+sobUf$J;+0fI+3uImPn?ge7qgahG=N2Pd*Ry$IWmJSMwQA-2*1x6u zCuYnx(&d_M6wyACb&{Y~0AjVkKzRuoF|E#J^R6mxy>B$#)Y_>()vNzIwJ&MzUpqK0 zkbmGIFo4eQ;Fa?DyE>m!SX+Fc6`qmW)z0*iZxW-urF3^cE4nw1F|Q*vs)TU?R;*7PV}Y*@$x3heb8__rjYJ*D%plUjd+ob1hr3!Unys>XDWH%ZQm zOYc9wC7wTprj}IifHo2%qkgZ7YS=J3`eo1{S%O*4;s7rB^@hlEiuM!91X44U>_sn% zOczz1kR|jW<0ol?Fl1slfzE`L+#=;VJ|9eW`VLJg>Y>VeSXpSat?8?a<2HJOlC3D6 zS(I16RQ~RiQcxl836Vpc%#zHSIsPnd?=gdS4DHnDWoukQ#F$F-T~^2PgH=}Qw%4m3 z(gnrW=A{V3+lv$Uf^1jSKQ~(MIvQ>H64@!y2c8?7GafBfPZkk~GX_~=`Lc+<;qSiv z?!`Bp{JsB}b^nGe{zWTa3e~GRq@e@o&Vmu6o(I}os06!4^|U2*ecW|gvU{piDyt8G zUAE52YVO~DlIKR`E)}=LO5C|u#h^6Dx!4V;oZ-tM7?DJxoWF>{V;{>--aQVf4>@w4 z3D$~;uKHea+OSbG zjWU!5A@};+?~{j&+n&#J0jgr(=A^IBB4L7wt+g)ksK=*Aqy0Q}U5qFhQRjze7M2`f zSC6QmXhB;jUP5(&J?eOxL8Q=_Z^$&ev$!k&9#lt7O$IbBot;S~AF&o+i6rmnd-3*v 
zR5ZgZeln3#Ji{`4=vn#%6D7CvxTwt5%(l2=wDqV`l!j$_jXH#l!=P*w>+(-X*fQJ2 z=QDAu6t*1?2oUh!^^)0V6^5D%C$Z-n6sY!%+xjnMp}h4uKpH{Y*ITZrdTg7#HY{J> z%TD%8=Z8gK?t&N6O6n5kk+O5=x7?nr#@1Ye8~8^Ivjj#*dow?NWcE-s=__Qky`xM* z;^3g4ovBPr#P7@=S7%G{cIlGrrTF?_6ll%7-Iv|mjf~joxt1SRtgW`*tT;c}Q*%ws zCAn;O2zgwylO++EUU!%z!^;}Fg45u0`%Qn^<{677;*o@(rflPmI<+I29HDqYyDngZ zu-U6E@BGik7_zbvgpq@f)oh)&)UmUiCG*0s_{aPi*2LD>$=T7wz~;XrJ3~uYMrJ|= z!v79=s7>1FL4Z49jW`2Nq+SpKiml-=!32a|=;eh)3F(zh+?)yNC2S1LOa%YeqtO3) zl+b~t7jbhIQ*t(NHu>*TF=oR5x~9kn`~ZCtv~wfWVgO!XVq#2pL16wD114k2EBX?MO2|{N_7nA=!M%2RE*~F2MUep@+7GV=3J7W`A zdTA3|GiP%`Mh*r(KEnTXcib|&;&>7I8NLU*=IrW#pmO_8!EmJngXhi_iCDJv1jQ4_ z*Von!&PHT;(c6(l<69Su2omGA9~1SfE*O!k?5(WO%Lx9Un2=WEF(;dNS?JZ@%F^D| zHe_ns#4xNF^A{>X2#|^?sFiWPq%Hm_;pY)yj*|cTpJ@C1FYv0Vd2(}d+ZTk^^4Xpj zKB1`OG%F|MA^UQjm43F*d7LkZoh2ceiaHhW!A?E(&y$!RT_8-xzoGvh2AB2!uTcZW z2AEm@Gr52fg#ABe%YS2~&d5RtwEr)MmWi43|30)Tab2=PjKI+H2b&ePKZbc{or`C! zK|aEc{|(UG)DyfB`AaqM5=}PZt7d5kt*WYPu_si#|F>)e?#GPlzZ=*VR2$bQxh-Qj z66_KLS?BJqS?-GyNk5lOEIo50r5Ppv?|e!L**E6a$~%j%SOpDj&Pv8uA+6<4`pk@l z1vsuZ3^6A0hlOJ*)Z}6v!cmREXJU#UvhrG zHR_u}(*4p%*RR&VD`DOpqFJBWZHiR4T``*)J*$Vj|4yL##xMCJaO>vD>bnG9RB-b9 z&Sv|7=%i>Q{6EYJw*U88@jo&OORr{O3`hnWuqcR|SeThR|IeY2owc2#lD&bE2_V-J zLPCNDPA0~Lj79X_J1E9nY0Hrt(V8A zW%*ojXk0+jn&Ip z(C+AJzEnkmzi4uz`)@*X8vy4E?nHcS zt?k9zmNo*?KEFSDoo$}nbguT(oPs~_T>o@j%FTPEn2vBrDWUY`2lddP)-O8xs}e1Z z24e?{6B$(OTSvuLsNATsfHegaC^(|Wn*=Aevs)usLPsebRq@vGU%19gP;A zj+I(-R~PQ@H)%ca4v_3d(KR<`bTCn@oa|Qf8rC#Av8AUp$~cX`!JVE)BOBU!oJ@5T zzDM>x$NII_wo{|CV~?XshQprP6=heAciNwvxApsrJ&CqwoD^)F2g4AVsrF_kT_-Cm zc54qRgtoylssXhTRd|37s@m`GlC>FYRBe}{;}%60DL5uEB`Xf2z+pUhYW(qtx%s;J zLfsjFS?p_7v?HnCFc{ktm^Ga)qVBU;xVE;G@zfie@?4nfFJH=z)>z`m*qBP3U}LXt zZrMD>tL*e9A(-+XJ_e zY9Imnil5M@Qyk$gr3k7-ZcJI^l@?05KUs>NiQc5ILaja@_KE77l=(s66BSRrKNSvf z=e4=s=#i7>BdA93j;Z@#ENBPCa*)6{lz>BWH^toFz8Ut~J+`e3OBAf`R#xABb4%Fu z^fCd2^gj-+dVD|cW&m!b4;KI-G(m5NfUrD#G_uvX@%4J~5E2tYuV+nlFj)ZpaJeI@ 
z;Bruev)Em@J&`j0hbzOB+~epZaKg#xd`N;Wf#{HSd~gDo2a?k&)Na_2ausOiXkJg=>3{rfb#bzdzvXG)bvl%r452CweA~cfl+s-1zRw zDe;^f)TC@!BFIKhRCK8>J4aDxdVkq|oh@VfemtDSY;C3u!lT-NmXe4fo9 z^82{o?xhrbJ@9Y&zTF(AT+x5M-hFKOdOrhi`hLAU-i<`@`}p2ntW^wVKDV#l)@?%F z$5pS4`4_D)(j|2Bi0v_*2wr9?j;oGyI5JAn9hM#lHfiT7*f&XL%TlV2aacmCm0(n! zk#Z%87wp5hIngXl7CPEVHu0COX2@SgQUCs#*nv9!^UI$g`GPMsVSic2_11ozFYV=W zW14;o;p_S1LeJOd{pKv9Ca1;sMr9oS5@8uEUx9X}Y?vk4m>&le<)pT}84lO3LONAOY0@S{ZGgyZa~RreHMA3K z=;?laNEt|(N#XPP_xvt#wVmVJ`Ih7B`?2@ry48LD_4={>)-q7TZ|D2<_})TL z)8p;^`g*WB_kO%^U0|Vk#p7*mYiV!sJ8>rMuDDF7P=!QkA{Z7%j_4rlIildU3;AU;)Y{d;C8nimX7k^k4KMN)K3ypb zW_2b_Rix}xc(O!Fx0n0l!`&zSR!)ys^ZhI2(Fe}ZTT2DdyDra$+mpTNrz?7&w~G&g z9^dzm7PYM&ypNZow=G}Kr?ncfnHUUUCY_!wo9Xr7e!S`B_uwQSFOcKoZoljZhv z1DN8S|JHu|^X7TI{c$z#IkdKHMW1u~n;_=%_TzXY##OJI$M-|=b>eA&|6%drbOrhD z1oZ9!ZmcAxfDn4yP*w?Fq`Qu^L0iZ$^+S(nM?82!l2VRXUlL-H?o+hq_{PuJ zOun9%V)ZyH%jU|uv@(Bzjr9I8Yi(U0?;J*tC&B0Ab$5!oP10S6s>`vJ{m$6+a4%#; zOzu8bkB{K}b|PaYC͗iS^Wt24gKCy!%Y&+Ezb;A2gt>M&1m_WEvQO0VZE;{%Z9 z%^rmwkGjX3JtEgMz8S1TrW4}LOyyQWbXwT92F1R3)uClMYDl@N?J7%Q&7@Cg%|*%k z1SwjL<*^0F>{xF4L)x%bxIpXAN@-ElGLN?OM&_gS_YJ3rP?eNjx$N-NOw08(be_#j zpBL@hi;10`&rV5qE2FNx`H(0AeqCR$p5`8ztt}0&8|j;mwFdsr=jV5G1)`3+JGa*j ztBnk`RaZMzpSK&yiW9G!#JjtaF8UnAZg0^wNh zx)=@&d|O*{HNG|dGWY#iSx`_RBdPk!!bJJx1Ue~Sh|Eq`?_^UQZ(}7>xx$uFL&bB= zsUzk@>#V>t=41_L&vR|QVwJAw-CuY;S8-i+Y)zLB)#s!BX=h`6`1E>hIo(b#`~Bwa z?FyLeZaq&sAD4F|V=43@Ied8ILlY761z^*a%B_X*b!sevQ2{OG$+ndg%Pve+imla3 z)^?TzHCD|_%4;~QRjf&&+%<`gjgobm{_eU?8_3&a>n<5vPCv#UT9~th}29dQ*<4&(?&VM?aTLxX;Roou^-XDrnri+xu zQCw3(S?%5yI{~7Llt|hdnp_ns59Ud{o-s!`_|S-tB65faS6f#$)lnB$OnjYN8jG+Z zot$C%0+U)u4wxqSCPhIN$#7~5idBlGvG$25*?bFW0!Mx zCFAsZ+1;KU4~GxslAFZ}yxMwrn!25YC@k>sCNi?gjg2C+=0#O_3Xv!MYV`YZh(T)^ za5xqBW@6zjcFj5Gcl_S#^QBxl_@S=&)GHAQuP+^g@)9qaN#-K7I`iXdto15U(j=JH z$7p9B314@*&BaNTmZX-DdSpkX`?zgx8ok-b6Qy7d`Rd7ev)Mb);!#gR#<%qii+w%< z(KQS~~bJ=CbK{MK?f z_+xEfgrX%s^yX!5I9%150xL9E&r;Gas)9#woN|0Om8d;cowGXD&a-F__hj-yxqs?U zeTX%e9e@#;>5?0l7iwoocIIPHlUX-q*2KXnGj&UkDvGCqJw{45QlQg~l1)%?iVbG% 
zLHBEQRC0UpLgZ(P{H_(3OrqLjX%}pIJzb@eD!`2m=UNQM$GD?0YPU~!u(a~NS&7po zwzxQrI=PH0nU&O-SpJPqFF_XQxrU2SVVkMi*<9ai*i1LAhkBb{a)N1UMpu_?cg^XV zf?0FAP++b4Fb{sSgE~9wFFm3=eFpi5aL z9v9Y8OnzosLN6(jWqsHc+?GMjm9FmPYfWS3a|sscb0TWwb+LGz-JQ4;;_zLxN%8S` zBcn+!2SLrHfuum>j9LlIHbjwa+@y(xU)E0b5zf{3*E^%cHv~x2#=C#op8na{x)TpOcTo8;_ zR+H&k(H!GY&{qu3me5sNhLGip%|xU1e7H^Qtz*DlZgeJe->Em&wNDy0PfIT|rP$I; z>BWVOw6qLzJ;aA{Q=`@cinrjTf}}g3pwKOi=+W*~9C&0k)aC!%{99@9@KB==s=5Fy zB>BsThSl1G$PO4(^EbCs>A8W&`0C5u*|>=lGKZl>_nRUKMv*Yr%aYtuiYSgCjDCqz8@D&udH6thK%{(h2pI!o56QqJ*H2?GWhz0U%mgbS9Soh>#=6B^GT zMh71xrWIqPX10ZzpY$$o2rq0jW@O5q;)K(c+Y1x+soRCb-==0J)Me?AnOQ0oM8p@s zC5(!M?c;7qsU_Ubn2{%&Wq0c~broppTAm-5g-s!dQ(t4_@b~TM;^#=0z_TnZtJtfP z=t8*qqHytKGb}9+W1VT}N768e{)1Bze9V*{MRCG(pO`-%qAc?(i?vTesaB;_UlebI z?bhW#Q)yH$KpG!&UtRCLB>;!Sxh zWp8aJ;vwzlZrr;wucp*Dn&hQjmf(xEN}CLoVo>{W&e<*h2jl-n z+dBZs)@}QqW!tuG+qP}nw(VWEZSJyd+qQS@s@?T{=f2nX+}GU^_ng}i87r4E;h)rv_Te=~(!$Kn;tE8Nx} zmQ~u)>x-}Gxz}{>-<;Tnx&?zN1s6V4D84r=Jv^m1syI0URZ!%rpvc#ZMNv-XrxcH* zm58I0C&-hDpNx=ZI}o0TgcggB7AJ>uWl>EuWjDOQ-MrWV;uWJTD3~u^4FQ-tO7&$U zI-U$~i6VZd5Wn`y%bm@0ae+y={(ip1KI^LhW2)8ce?>iyXV&TSpN3K1NmreX}x47f%{INdl_E*#wk{MW=UCYj*h`s}6`Jzk*^#MLh$yxYjf@R$Oile*VdVYMGak2mxu{T?W@dDL@68XB zt2wjythaj7JzqZ`-xuk8Yxx^!LRC@_w3F60bRF)+ipf#Wq*#Q*v+@V0=l4%)5d~~e zhFG@-sIiVjnMROc@9$4Ih^BZka$JaVS9V7*u5(SdGPNa4r;y$YZLoMPvIB2%2U>X= zFV9@B@$Sy?hiMoF#mab2(;}r@Rn7TJI|fUNW_#Snx|?=S-_D^%qFA%Q`Bttz#02)#X$gs@OLPbP^c@JxNCkXa9j^0Ox30j8r zp9iE6?T#13WsHCS8L}vBT)h6?oOoY;1Gv>^F}5rg-YU1Xry-#ILw!+rcBi0k1TCB! 
zKHqlxTY7KEE1cdio{$FDmJw=zM(^W7l+7lZn?2I?ON+zk>Uz66N4GlBg4&#A2+MbC zXLN~nAt^l;EvoCKHT=~*3_dRgj~8=$!-9LQsM}}c*PWeg)j5Ks+}v6FqB;)bx>{g{ z8lNOVH*^DlO-!gb%o;V?iEPV#T+6*Y$RTp<-S{`U7^NuQAuzn9F}nS>_^#au9etkR z)X~<@)0Gv>xrLj}HKW~!v5Jmu?m8R|4=jflrt&KM-K8+h3m+=lqqvbHs@ zxi<24cQg35^m(?*dO3`uNX^4XeP?yUwtmK!q^*miZ6Gz){9Ytz2a(p{86ro#NCtV9 zt%s9neT;0nmj?tT&qB?KYFAQcl)c2kr0rHXb@@sf{A3gQl$CMit*(aRkb(AFy4+bg zURpXy)&{GpcD%rYImaj5u_Rp5xv#8sx3OsM@1PO}q$UcZ~;9)r(A>++bf7D>)$J>XxFtU99*}dHBFGb-p!t z>t?INe$}DZYmu!J2ss%5TIlZ?C%6?!@Q0e>M-7t?ekUIURz`d?;qEngTfaEUdM>+R z&%M2+=TSb?aK~k4OLQt7;XEawC|ly;#do-lTRaodxiv7kb}ehSiLQ#xt;rE2pAEv+ z-nwmmQ`2|kbcXbL$9kEN{aA1gh@uj4?s}a9s%RK)E#Go<8i^Ns#mjKX%kGz-27`$W zRat2_B&PK_%r~do4-13x@{)6NN0*adog10p?phcnf7tK_!gC4P@<#Bvb`x-FkhO?Y z_2`jRAR}QIj7k87pQ?wOsE3qkgOqB6opB&BA*cuWo)KR5C9*v&ZEMR&!m-2ItJYR3 z?l=`&58X=Fy3RANGay2asY^e?C3iCtHB6v^axp>&0lQR9=ppwI!5|ALbfA z4lgSoW|2a;`A&NE>b8YTAqh~{wtPkI>e+h*HkK`xcICOP=C-Y90SY0P@Z`%e|BS<2 zCtz?L`kphiJxSS5mevtiIKy4bfs@=8xQJFkFHP~hzFcTC0r@TfM#7?2R-{MQB2Lx< zj)-HDjRP7n%@{x3B>E*>i#w=%{B45DH3Ov z`{yY#f@$Of`*+QO5GY3I_wCI6h8=hHJlIN+J-7ppHo{JO1s!(@Fx zP_+Dqh;odS+=nUJP@Nvbu}Ls7v-L9E`!aGMdhG`qdY~264LxfB_FiXeTQ_X1IjvJv z_!x5h{*3u&DTgS;U2iPaXU(ckxteyE?cLb;QX%1qgbkMY%|KaERHIW$7B*}gT$^kh zfI4n?iaryNAq=ff4DLf48=e4&8UegBNZ2ssl8wQYR6u%kG^@UXe2nlhq6b-O- zjqp`N2y&Z=e{dzpbG?-XAU`mMNT85SOi-_E&;ueSU%MV#DLo(0ZMs*GNA+XA?;Y8M z4>*k`DdTyPZ*Oie*XqD?_>VW$Fuy&pf?H8>aP?Q`PbH?njE#M-ZaT`{3xkN^%R`E; z<${%~X`<#cfrxDs8y)Z_K0u)f*MkD>URWU=(jeYUnF|Fc4wb9sl-1pviDF)|PjA~7=r z9%j^_$j}P{!Z#k5U?x8hRNBEInq)J-hh5dR7szVb$jR}1(>=_ zHy;@g0#j9NqaYP0>c>e!x0sS{afNGxBf+z?hO@SX=XC*8_`)?4Y;lHYi6a4+AeX+| zj=i#ml9(E%RBz*pZHHo;zp@9)uDwLoK%8{+~E+zx_W7!33D8wA)T zKhV285FT+)y74ut8AcvVoYbTA*h4v~fRZzg^^{qdc)OvWz=w&5VwsO+lJOu!StZw5 zq{r5PC?XR|a@_5KaIN?_AKz8E((2B(MwL>tJR8ZuT6`=gEdW`R&C!yNUAufW>F`mpmoXn1`5H`B#K8K1K>Y!4EfOz8{PoB zf`Jd~_Qcrw$ky=6?E1v+4pm?O2m&(dadj=B-ZF`_|Mor!X%ST5oFAB+7nt0ei-T$r zK!y=!3MO)rMC4?oENx_d5;b6Ku;zd)Mdsr}MOimQNx6uM9wL-)Gsj24--!H7!qvJK 
zsuK~%c`ilkT#{CFAXh-#h9a(Ls)Tuu#o=Zw6^uyKh3hix7~L6^Xn;LI9(rb*|{i$ICVjjStu_ zzQF;OP6!ELQcp>>w8XkFz&bF%3Ur19CV~V2=V#^+6WJ1tNriBPY8(p1VRmY{w^tKR zKBQHFf*UR`R&_~Sj|jHr0KB?qd`=4Z%s%$x!q2 zB&$_?UPo>)G#WFlRm?!!xV+oZgQd2VY2&jgA@jYc7Kw8(9N#gE4a7g0V7y`Wh!j+slC% z7}oY00qi~qX!nrLBY{XW+Lv1>1Hd%8K!X|)5#}*A@QZZtVcN>G+LvZ~h+uFb#%M=^ zsW%+m3W81O0qZd|>p@gt2O3)+O-ixw|KO|LPZPK1rX-!5ADpeeR_s(z@x0VCIOeqB z7>Bztv9dEYu=Jk7gNBfIg`0GWpmvv<8@xnL^uoi<1vtH@tFCG0S-)JZ8_Yz@aL}XD z;Bl{I;Mo9&Ue!L9g;LLF%jSHyFS*?|tgV$K!KmR(&g}Jdhkr#Y3c6kRKrrFU$TxUGO`<*0 z5dZQ7wO%j4uM#1jVV|XOf3^UhwAI9=dhV7ON+Zi(U zLvh~4NRX$2LAS=$50RvDIlRAYYqi72n zi3yK^Gf3M=B)UM-Lt*Ddm(1r&#(;^EZikuzLTJi)fxVxie8Ty3{Q>!!HEEQDBQ(aK zd?#O;kT+E@D&Qu@NV((d+g2XQhoH?&u*JZD946oN?I^!W1irt{_*Lu$z)<(1I5@<3 zdc=6@umN4eyOa0*rZCe>9IZD5ORqQnng`G?PO zrmiaU2(rl~&?lNyuB0k{2+9~1sJVej&LK!ew_b8*W4E_#E<>sGVdD>CJ6`1#=p)zU z`js`X1yi#f4{F|LJaJW3YuNb zx`K8EW#7{3)bb{PS@c~?r(9OcO!cQfvw!_AF-ln^2_}^4_{s`vY^xB(3`S9xG%6>O z=J?l6aM56(xp$U6>vNf1-iKC8MdR+erS z{x24Omw4IMiq0!VoqbT@451v^IX;DvqoR_G{iFq!kSa)~zA5670eaA@JTSsUaxG1UsA{c$4_^bSI$AKl=F!bOozo`aL< zzSLFbnp-S^99ewmJZ#64C#K9Ng`1r^mdbulnmo2qeMbqLJ{JhIdy09^>in6E=yWM2 z5hWQ^uC~U=9rZAq9=o)p*n!YY@%cgHly~VY62{CuU!}KGTaaj7DRTU$XOJOWSp@ZH zxr8sO%b@IaIW6vt{oz`BgVRCfXPLGY&n7A!Rc(!Xi#Aa|UH3l3Zb+MLIW3avMOt zgl`qf7|@aN9T0%@v@<<*oH};?3_d4K#h^Yte8Ee5(MzKPtVdu13uSg5!syzQ+4hMm z`WdSWagvL|z=g4g3=TaLuwMZXFF`;B5pc%^=gez&3n^wGpz)J7+Sl9na07@rY2>bW z7F4phgh!AsI*UrR_&!9^MlVw&t)Wwml_{4dEqfmls?>}qO(POH<}5cbA>*B-q|?dv zbK*rVN9%`pk@0C7kt_8n8(VE9>#~d3@RJQ6Ar_!JbDJZuMpuSM7;(baolUVQ)KI23D3I;^$Qc6ku7`y|Wr=u7qH+Kx6-GcG zuK1qJmGAU*_jKY{T3OYA>ID+n51uxZ;e!#z6AI(xeJ9gMiOhi`M6@(w)l8oP4&62< zzsS_~PBr?ZY!jynp!vv*caX^H05(a)DEWvIat@!1DWY7)@H|}@JN;)+vBb3V*)zoHRZ( zCzV7OLdG%S(dc8?P}x|KTl->AA{OD}Z}4!ol&LN?s-x*VBE;)k4 zq8sWTn_Zs3i0={DgH#!oH`sv;Zf(ID7iWQx^8-t22+44cWLpEeFf)T-1zN@2FqqRP z9YetT@W4q(H>an{%FMb_^T^6hxwh=xakCzdqLVs;L~?4)d_Sq8vAa9{#0qA#j1t2v z1Z}tFJ_iY{tGdI%KW}$w@UP&fXhRpxgH%|h2qpFJ zhTb2rLXJ02L9=JY)anWI{0AHCL0IPPw2eZdN!5gz{@!)hM=S+5T!z+{fRxkFs1U8A 
zfZ0C%GhYhi`UptyqToWh!ufUu3O^hIl=Ca3A?7gcZ*VQGZ@Ib6QS&x}_OUtrvT7Fw8~*_* znu?=WE9QNC{8$nES-XEvz(#Kx5tgk@RIvp6!41sn+Ov)cY8jk8}dh8 zvT@^9cZ359U9uZ)7`H@Ib)croEIC;)I-B@~Bp7Wo0 zT!Ip5Qq_9BW;B*v6Oe3}pbJ1z(H5N@>2u|JeAwi(q%aE;f2=n+M_0giIXDfIj_7m> zrIII@jHL`qxz^gX92mCQ&k}?&qBl=PDqGLSF9L^I_!&|{&@)Yu!` z)i5Ng+3hRTHxaI_BOF;+0P?%xyde**AV4tJ*E2PXW@HFI|0dujM%Nd>Sf}b8@^w7d zk=GY`=iH3T&9BHvFDeS(<%{iZ#agPKMxJp=eFY>=-9+1HEy=gOI~f*Cdn7!C$x zUGKSAo}?ob)Hgm^Elc?Wucx{*Nh^C}&si`lZKQEx_8$dC0Z_aS7 z5Ai?;lJavIS2U1dq#GYd25os`_b`m9(TO1@=FpsR;uxC;F}1reHM{WI9{KEUjg4-t zgNCwpen#Kok8IoCw|ikb-wH;Exu-G8r)R>NIX1lyG>%(3S)q*~+h=COR{n&rwQxz- zjlnGiU0LE<)oJAST_I~*YJ6iU_D7M-ikYITx4wkg*sQz7meDpVTQpz5wky!a-#<+& zEJcUt9)$D$a9_%#O6}~dVP*n$|UXuL{=uO#f>{kHbzaYjymzHt0uPy zIYityy<-@q4U~vD`aA z^N_U!zBWXkJ~h6oIg3+5H1ZhaiD;?noX4q!TKI@zqrj&>h+o~f_4x3;zJ3NzS74Nq z!p4&7|3p1Pg?^1`c!_HoNqCwhh_gDx1RhKxEN4I-6(9zgL6Br{B*NfAgi$mcndujM z@e6a9VoW$>jK~?U>0&&5g1DBt2O)M!M_XUmCr(S9FD<;~f5OCoG=F5WzPJW10=&-2 zfS#&8ahp1~>PmYt*iboo;(1?Som&>`a{eLs+;+|7T@+@SGF{VoonyTX94I4~DdpVL z9i5b5nd*zA<3DL?LuKRIEmcZ^F6A_|pwm$(RMDBPbdaMK_Ef0lJv?wP>2@aAGwhkYJ`WbOZ&dx(HYy@(wi9C8JW6K{w;m^51kIvU)q=d(CPd`?egCSWcv%~ zn~(XgH%3O5zi9tArHHBXw=~DZ(%IP6+4&pt8^iXyc%PP$k%5tkm5upZ#PPpK82@b& zW;W*k#lg(MO7|BAJKeu%D|2&GCv!tPdpCL;8$(+|I%5Zi|MVEn-=ZcLQ*(PKOH*fh zM_R*ww&`Dzg@4bcj2z7WXmqCUr(j}b|I4)h+ou0Q%NrZom>Su++gsZkTl^Q;|0LA< z7af3|?HiJv>D$xZDmvNE#$WypWfv9d7yeekd2efM2U zGt2L`|8`~Pe`ftZG*SO;)^ED+R_0{-tFbv*|Jt(u-JAZk&i?=4!2b;Vf5>(I+rZy_ z_;1Jj2Qnk)-$(z$%=DJd&aS5a#n$Zqeh1_HN1yr!Ix{2ZUvB$9Yi)W5R~wuE0?zSo zgEKSz9oYWC&BDm=|AU+S=Q#JzF7xkaGnViE_*X3Z4}OmCK=D6rcf-Ha;XhlR`QOiT zEF6DDxc?w$W&gXI{*Swill@=n*Z;!)?`KBVzmqd7%eQut_3wbl%Etb$g_ZyJOv3y> zkoMUAG1g)En~~{T=gIng=C2I?U&j%&?941IjI94@{J{L5650Qk9``$&vi_4U@Tyxg zo^;%i>{o6tQ7&lAqPsIV1mM+4&<8_bs)&GBU7`d`B?rRk`PWDEEwesNqnyd{xR!>l zFGcf3v3Ll**~y97No#vL*B|${LHs|18T=)qhuh(0fBdBAh6YZ5QHPwEd_LX`Ds$ZV z|M^uW{4IW=m+yU7=K6Z@&%28~a(F_E>-}@O=+FB(x#-WI!yiv+a%xQ@{^0ui*M~pf z=LbLTw_03%4QqON^!;bnk2eB}LBaI9;rZkLbiNj> 
zQ~JaVDClw;B^(a^*3_|QJOTE9!7EuJ4!i0+(wL{v)k+}_JL*)?k9U7-e9hcI6#kxG zZkaeDdbWza#U;AKm`h7#0&*+YB2rX_-XI#~bqwtolbZY{!6Pj=& zq7^J)ImEuJiiq`<*vpZ0)JNo2=?Rc_S@ImS$%*ed4SMqqd3U65R5$Jny0=0+=89y0 zda8C15jmVX$Chwt?#}gONQ+kUE0VbQ!w{B3Xe;sXJ@B3E;p<1)T;rplNo$B&cD$&J ze1YnzvDGz;nZ<#?!txZHZFl(EG?kEbZCRh^)pOX2E^^LTh|JlJByA5SKp375ET98( zJ{)hSPO(ODNUqLQZbaUguy!5a5pt&RD7!9=U`U+3kyMt=+)`!)Pf^{4lVowJY~=Fu zku?*@W~ehnbbTm(H1e(>hcUE4`pHju*J~6*b z!>s$Ch_lD_pDqVnYlubQjUR6s=3&=JMfTyE0w4>*jnuJ@23q|r0!YY=Iq<-p>5=C> z0WpFTeJnkCYZ0mGBOK7yiV^2#_tDb>`fpx}LN0qH5jj_#`$t^t=w4(R*%$BpJY40%X+WRctN&(yV>!Df0km*sda%~g3 zGr9_*TtP4{G{cAUn~K4wdDPfMYX{AdZ}>`RB5xmUu0vffVsV|h@LbJXN=Fa*Bwdm5 z4(oL{n`?D{t54X~{n&}?Ei^C9c)yU!+L6Yt%gm*b8y64$R!j^7GhdVz0B2cDcT=J?jPusoQT{1l7Mo9_* z+zXJh#Yuv))|~g_r#0b{pM+%X(h0PpjxJ@#-sPUqB}u}Wl$0(UWv;@YQ8fP!uPP1w zh5l_G6+nXT>~C!!7usJaGU^9e)TmFE;He3^56k4typY?Zlm!{45;Af_=$y0~M$Ui? zZ25^OI+P9C2vSn{Sk^{Usu)pS76=QBsxM9Ps;74yN-fw>H3FI2% z(VDCZ5k+2)x18xx>bz-*4CY3XiZpGd3Cz5K8p!rFr|81zya5?VA7q{mpqFdE(6Qip z?H3=PNe$buuV~51F|DxNPoOP?8u|YI{IXme`m$J@x$#lX$QULY<=vKT_14VlqXl?T zTg%_jHVbyKr-T084a=VP;+E6y(^Jxq{`KkVwPF1nQ_!8LD*gZiVVi*D-PDWZKmK;V zS~gcY5U^-JhQ0iLLYi36s%KpigEZ7cxpLze6>?={Tc|T|yrl$(;;ob_(629ap_dxA zB~0r7+OniZp{+V*mGa@7AR{}#eGZB5zUqV=wY&YSMk&^*;uW`nM!YhpTpS6Xl87A# zGqZAh!0Za=sHBqb00^aLg8kH0N}i6QPXP2j5$~~5fNcdFn)4_TGw;k5+5}O_W?-gi zx}K(OtAJe(WIND^1gBTcQaKa&9Iwp%N}z&>L;xCDi0G#Ij3B@w2nUdo+&T<(Pv=QQ zY=y1pQY=tx3UMi^8rF;wCgMuif#*f@8+$=A8dm^cm&Fc2QgD|b6SZx58AY2dIY>u^ zCw_$0Q$n>uV!X`Mh*+$yFB>2*`$RaXykSplUAFiKyjK2*CF`(^AGb7HSlfB1S}Mi-rK`%njM-5iPLaim6Fhlc7S59g1jIgY`YO zm3!%yF+otpr({_wj*qq5;tIkjr145pn2TuNsbvMrt4pf+jsaB9!ML`HJB$bI z(`dp)R|Hruyp<|X6W9T8x7GPp_6SapX*H&@;|-S+72T{R+vB~wn*}&M%S~6>C$Dz3 zf&EODRLtBqMYHl0Ogy#0@Z~#0Rv!&m{QubfO;;WEN33G+T@oHH#Xub6)aR}q;K{8W zBkBzIs>4zn49iY~pC@uMi8RD7!2c}O)wTObCKcqRk*p-Vs59Fuh04OFV?@lhz8$a0 z9$_UqIU5V2upLJ^*aRbrn)esdWTLKoY!^Su4}uCM?uq098tx3R3;1G3sR-?3S*0jYD8C@ybQ-~L~E}W-I9p^dptRJP*JJOGMPN}!u zq6j7g8OwNZ>V(K-0f?hQW2y)DH{+)8n*_xqw^2(9&OACatWQvDUZ;Vf6Io9rf*z#s 
z)afEvvf1>6rFhPqosS(TXZT+hwL?f*D!=)5c;AhQw|p8I?K*cc{9l{P8WDR=f8KZh zXq*4Mu@oLK@E)8p(B992sRh(61ZrY#*OS;R-+9~a-QmSl(%4<|L(7HrwPgwJdAV9i zdF8s=IEi9MsTV}Ef%%CNHl6Pwpj{~1-$fpr5nCH8Y=D~P6nz@|2U$@@i__H}a#CrN zs9*L+md^D9jitTUZr0vU){!_(3%^&F4ISv&vRDhZ)2(K--ZEG_w@(vL@-K$_Gm6z*PuO@s2o7EK8pbEaYN&Qgu$*djG z8$kWwOn{Ke$#dFO88AJP7mXI$5Tyr2oPa>|j*>2knil-w)Fh{s@7Zds_qr1V&9Xoegy*M_mu1YAUM_&GPH5dSJaJRG)qKqxn2ZJ~NLMp$oAFj(CuY0=+~ z>J%LmW^WiJI*cY%`XE~iCcXm_)6SHZgRV0)9;AS0(4S@NlMJqMWHHlZFB2{>{%k>) zmNay^<~J29cDaUZRzWZ>9;_HuH33axiwG6KtPR*b|NE<=Q>DLr{L8zVQHr;cn$uJ9 z>~%-^g@7thKbyXZ@!3;p8PD z9uNN%;-jWRT-$)fLpzT5?&W@ZI`H235z6_N1;U=&eIg&Y!8^s@xZ!f1)gQBeF5j;2 z3}}1?CPOZ~t;)(Y<@_SuJ9=@$wYLIp)|c0a zv~#?rqe;%QzQ9X44DUBz^yKEP>^o%DnNyC;zpm3H&*D!f`L%1l3Vsc~+~@T4;@p_~ z&b0FQc$N9zK7q$|a=pAh{`Tdac7vVDu8Qs+Fls8zXngH4-poBKUZn}YM4H_9dX%ET zXZ`x5pKRmWoBi{<*?R7B>g!3XefEZ*r#m>@8?n>pPl)HV|NNI954iA4_Wd>cqX#qf zr5-w#D*EF2KH_1zDa98xZN<6jh=y{xcpw45T^)XB_B~t<{)NWc@TF(Zmye_S)`|(b zRgG%<)U!Ai?-Dz{*kk%L^FAQ-*mE_!t zPF3RW&=G~b+Yid#xfpcnqcyD&>iK7dS=iK-4rS%S>B7=d*#2XXaJS79-BLF}NH{i@ zy!eZDf6x2tVj&fjNUDApbfe-lUFYI@aDkV6qQVJ`1CU0A^S3N@hl~fYFctnRgC_af zutHc=Iq?+7M(PDME@G~y%bSpzW|c4nf@Pt3f#7zmH?6z4yL|I}X~)>Dp-YN`pGH^{ zu~VQAJQ1z1TAcMu?PtqZx)&Qi1$T8T0rS)~NZ<8}+EU1N+uv5@LVh;ha@!PX7t-nw-m`f`*DM zf6{>+6LM||MKr0Z0?yAJkOnyTGrGZVNlkbsuWkkJB&!7c zM&*vG!UPYME`bY9L+j2ZB5G&;94fmm2|SiI`2;`Mc*F+N(z(XuxGwp=r%PDXb1>E? 
z?oue1&)bU^U}e2>2dx;%m=rl&60oVd*2^^*)S?I`K~=p3>>1rzlt>Q%+dB8gh;C#B ztM0?wqNVafm;O*q!C>L=n3o2#fFeG!L5#_S>S@Bv_!@9kwe~c7>4>;iEN~dmB)iGJ zxKvGw^5?O9i`e+YhL8F${9X@T=Dbb&Irwg>mfLnQ8EZ7J2J@5F(k}ISb=H;6Re7d$ z1qJL)0-4|(|aCf0buuKyzLO^yzSKQK%ABVQ)BOAdqeMI zPk*3Wn((yhU+KBc8vo=4Pddq5%e~D0)SJ~f*5iB~3L6cZ58D)Y0lO1pV>=ca5_rbc zMGlQJ*0H{%_h`a?vQJ_#DJte5U8 zqOoS(dU=aL?scHYSDD29^RwR?pIuW$8~&GAbFRk+g+p9GII#kPM-9N!Y7}-AhyTNb zivE~5j{g3Ds&^3n2x7VZIW7zhJpzTpENTCse&T@Mz*f{XAR1Gi1FaVp&0r(7(yvSm z`n?g?VI9C5)^Nmd&&#e!Cc}5sUPu!)kNodi<<&H zA`g8N$(=p2YY#WC=>pV_&+6y_-gw63IUfL0RBpL5kN!VX^r)3|9(uss0ix8_1=cN6 zIRT^9IwI9D^+BW66q_f6s6XQWm3VoKlO91gCwfQNc2=; z0gO1y?rgY27yY2_(T;?F!v11tRcYvhHHBdw%6WUVu>-<1GVFer_i@x~)|e zLilY^NjQ$8CzEXK%>;w6JJUM zfz2Ysh~-^9YYV}uG91~$o{u<&Ef$SsqD6}VkZ%^98S=fF8fE`=;rO_Ws&diNr_bCH zML`~UI+p3X1zHl9>6baRTu!H8bG>8^RR{)xnY`+uNv8m@3zBh%05Z!D6UQICF4+!Z zPH-w=LrKDLl`NQ(pd0ym%gM(;HAEF!ADA0nay=SR(}qu?tO{?v_S=DN_ z6E>>?&W`e9roG?Rbuu=+Ce#ju&eWA~gQ_KqsP$JDgt_&2bYC@BH>4x=PL@GiP>M;S219*IIH<9`*W<|n+-6V%W@L+jO>Pqn zMiz6%Fb$rY*2{q#m_RfomH^$0C(yf5GZ=d`AvOx#im%X%(L)x+cl@(3&%q+6${L+4 z9dU)AP0|ftj%;3SFk|cI`kFG+1V6Uat|4}{?R7+{gXM}p`9TCdF`jY{h8^bKQelmk zmaKyOCm(MVq6ATDU$4`?ol7JNqONCjt>GlVWBk<-twoixTTj z{!?0*e^s53Ga$*y(X$jIcst<0&A#v+ZLoW{8@*McaR>zYFt^;+d9HwruzBRx=Ib6_OTJatGVd2Pdwsk_Ej}6Ue8#<1Y z$S??)0$d<+)W8ONlE4uX;Q0x%KSjy3x#Oip@MVE=7~wb0&r`X<;V1qP0*#^#20NzS*NZ0D zE}ZV>XnS11*=16jsF5Gc0~?`o+<-xpfS4jYNf9XM(Oi^HF?xjoz~<`S$ol6HO8ZmE zV?91}M=E$bi0g}Ylu86{RIF_(#O?+`$ZlY-k-X%V5eat%CK~-D{kNI(sI;wP8cQca z(YT7+hKg^QP@+su(Ey-hG?z^I|0n+NB5l)J zlJAm47_vn}_m(VJAjJ>NwPe^z13v5)cA$J(0m8KiLYII2lPv_!!k%kh6!EYbHkf@0t^ilgCrn46qvS#96Z7Ro7(8gmzNzui%qiBI$0_bpV|tPX`jI@=Dgcr zwUSEhm6{?=3V+iw`J`->RDl5X@N zQx#?!@yIHaD`lEOu+C!5T%ul-H)xMRxUO4dTUr&(XwSeATF90(n@m&1O1Yp$cZu0< z@((%{Y2`+hgo{1%YVP7Uqw2P_RIzA*L_I?YYR@X-(vnZJX<-a7U)VK&Pb9sv0e&5( zTe96;)r{?yiZ;74+iFW?yQwlVTTNZ1%sTL-`nNaFk83)H+zI(_nt%TM%P;N)}k^ux+cL={-qPy3=C5k{yQ11Z=QW 
zhou4~JIuFL1xvPA%4uGDDhj_pU;TI>Vs>e={?rbaFWP)uDr%Z*VTN8EJbRV){nKqWdO2G_g^)7-_3^&P!76HB_AN$gbOKY`uIAaoEpY}s& zPaHhYiAIx)!02f$8IM-7y2k0rNPLAD9pH27jxLPqyN+9$3$Ex=7I?M&Z^E}e>z>(y zIOC=fo}pj8-6#{cb8|kCxZx&_HA2bzCGPOuLcA*$7U{4=pTt5FcwuNm*#sae^tZ|~ zl-A#~r6z@QP2|~n5gw+dh}k$6Y?oi%u#jU;>v_V$MU$&&@zd?DsyylmuuW#J-h3?h z>U4mgS1?RrK-_4in~XdKWjl_WUmFDhW?)je>8vBiI4SI!L$NfWsSDAhs$aP6M;y(z zur??x;5aY@VFnyf@q_J3aRu+b!z_b>AqCUe-=Bl$CiWIuhH2PS0Ci|D2B8Ii~$q3u-x!{3KW%-Q)M9ne@{VT*T@3c09zNw>T_}6|FlEv(~ z?}%fcw%HoH5KiLJZNm@kISp;yQE%9x8dy!ESEqRhT=+;wG#wb?ITsOggN40B+$Q<* zabW9<3-TahJwSlrDtTncSk&A=*twsbbZBm*{K4o@vBkfl4M`8#gVM>%*A>uIWYpJI ztExEZ%yLbpr`PqPN6&7T#2{cAAz)w8T=A2y5t_Plt8Cu{iAn5iGa_)T@SywZdJKWT* zBV&G`pr=DiI@qO^YUU~JHxQ1OXkfbL1b|?ml(Z*NHQkbmPVsQP@B7qvo@9ywmCkD( zA`X${SkHy!q%Vgwm917%!>;?jB4tihYdYuHc0LHL8CE)fz-7ob>Z0BzF|;(}B0EK! zqparQ)~V;#80vMTm(;Gph^V=PIa%)^hpfWD(yd654aiZLl3lEFh5Y22ah0G??Km*( zw#b|cVgp*6tbj&cwKh=GBc~0|URz0M0m@WG&{r|+Yc(qr2@!CuBn5T5uccd}ETlu+ zkTN?l&5oFIFU241lO?Stx`)?~(M#BbNqbvhKrd#q9s7-*h{IPE76xNDrWbKX{Dl5) z6zS3yrNKFsWf9Fuw1~4XH(Z%4R@hm*p8|>;B9=S{@7z>izJ;AnTauD)+}O4vgsD$l z&dFSbb!AeyzAV(5(?C*8SJb*n-R?MYP(TYmP1o;+Ww!RoJN?i%1_3S4`t}-Fl@wyNG39K!ji>8e@XDQ zI^Snr-$AaYavQiRy(&f+UwGurY@cv!G|iO>*-i74fLHuMZnFC?+wq;{|O3 zBvR@B0+>K&zcjP%#w{|zBWl30PWgo0cIFlqxYy!-P^j1P)(%5w+~;?x0E)1G3NTeMAoi zYYnX)Wd!Pd;OLv2RNei_cHeDfUE|WLD={q-TdE$$7?X!d++4 znZ(zzWp=|n-S(bYhu~>*?c;K9!+oso?W$+){gr24D+KsdklGIBM1;hu`|C%IRtU&b z2uNE9D9Ps(iM-ZZ=7I0CuRNcVs~y*TU}?!q3Iq4KN2c0`{wGaQAHAmEM`U<{`5z)} zgxVbge&DKT7-tKQ*~m|ja|mY9=qpnd+`h6xs5FMO<_JIvfv-xQ>L1xy@6y|GQg7^#A6V3buLEE8(^GXHzM=2z3 zm`HWSOX?PO;B7vqc80Q47ktpxi4Q3s<4x*3D+~$JWu_jx6C^>vCW~e`(0T2#mNA@8 z4A&3Bvtus+mmC>>2+^z+QQZ=XEKn&}VAJU}Bj59eAKcK8IKM>$YHV_!bMtlZUBwDa zH$G$}SK8%uZ*~qn-F7-SZ+Bm0N(U?O-6c zHqHsgYl|6d?5()5*79Z?%zbdU0z;82@XNq1|EQ6>yknOO)72oqEPn8Yy&yCRa$U!1 zosa@8u-Fm=J4tYfK^EbVd2Nf<%!{E^QCk4!e6)oGo(SEpL~v_XG4iF!k)SojPP?Ws z>>mzKsXO1HatR9#z*`BfnW5k`LR&}~@r~;08TDfr)F6>&=C?yWUqQtIMtm0Wa2OLN 
z^My-APA-Kcqtp~qg5%;j!-`trzaz|mz<|jNcNb)Z6LNPogD0N~k23*2LoLH(jvSqb z1nPZiZExPA@G6W1xCx=H9u_isHsNFyq%F_lLd!^LBk)TMVB5G*Xu>7)&K){h7FT;L zTBZtCmx#E%EDom=+&Mf_CRe@87`78ZiadVct80ySP39pCsd@vk`pB-bSi)&EWOr|j z8MD9jtq|z#-#qvOUSU0k%Seh zq_&(4(rTw2I`k8I(Ha^8h&CbA4s$5#SZr#kJ;9p;cWH*MD$|l@kkO*R+9R^Z1VgB3 zif}49QFQGAX4@7G?N^pKLCVCvN6?lcq94s>LSyi)b+he|*m7@7{x!y7<$%oV#r+`? ziTj2+M{9)G;Gj^L4slN<3My{UBW6t*)jYCP+)l5>MHKL%&l0|#P=JEyD-up_Xi-ti z2{&W|uA0Fk_@?^p!T=7#BnrfdDI9yT!-Z?(6oosG-biOYu^&3F!!r2cq7v-q2CL^L zC&*U4X1&i^bjM5U2-m|+pG?Qop-XK_#%+0!9_uKVrZgYmuUG)*y*J1W=<2t35hO}4 z>dLgX8%_rwGpgFHT^V$*27PgEWVEiGZa!N);ne@)9%d$s%a|E2el*vn9Az-B2?X#m z@;4g27O<2N)_el%W|q8=&+p$Z3O5UsrjJb7L`lvS;g6^d|=f(}ay=P^Gf6D#Uyz^QJkRddf(}+nxQerr>} z?rl@b9@mo8oS>n-wGKn0YsDvO23QQqFUXmlF(7>2K@$~ruT6yQ9eLihciI2%h zg22ORoGMEzh&Y@;u~REZIGHH#Zmj_6oV{9?Pg+3>p3cBqD=6p}{uo;;KqA46nkOf% z07(%`h*m2=D&+DP1a_TUCs+_MI>7?m)(H%RjZUyoancC}%8X86 zkZW{;MFvJEShQkv0z*|NonX-4NhcU|dQu7&MW2*{LHj4AK(~n13D#CR=>%ib4V_?Y z%jY@)P+Q|!Cje^8NhJVg>vN3&h?R8K2uduK^EDa)FgtK!Gy;U=gm~5nP=XKl9a|%a z#H@d&5d@;PHLDSLqShi5BBk`u(sP9XUl9CDA!zIwaYsub5O~pyER|a!5Kb0y?^Xzy zDm0#TTZBDEP^?;D zzT0}iX^5>DnCWcIz)XCr2B$?X-N3+OD+e2JTRAWwHp;=m#Ys6BINQ9~Vs|PBlLm}# zut>$|28O6ky1^j7lWs7`^rRasf@AP%(+^L5YlbwNsG7Uu65x$q?b`M%{N@> z#E8Og9T>yqPR~_k9q&PQAF>zwpBmxAfXoq_c`nL(JD~+Nb zfEnltKY%c#ow~%j!~zUqzmeg()5576cvm#qs`qdOrfPa&>}gf_p4M`je5DuuvsDVN zaartj!2^P0CyX}SlJF9!o*|qB6^MgHMFPTdZx&_ggjIaEUWzQ?f!WQjP~WYvwAvao z5;R$J(5=};oPC~N5Nj1NBmZped$qB^`Bo2_4VG9S@+Qi#1A5R2vS&rO1!brX2t(9? 
zCq80@txyZ=O{7I2Eo(~Zshq6UPTbpA6%tI2pcdgv9y#H*O>)Jq>qXvyK#E?&Od>l z9t+Q^*&8Z+qEZPDyE$^h4Gu$9-`Oc5ICg}v(p!?%gBTIMB3Qj^3Aw;|FTWyKJ#I^s zJH{2k${4kXAT?1J>Jh-oP+Fn8LEHhXaD*)Cz8F^kD+1+u3aKSFulQBZ)aek9kZ54W zui~iR!KvdXgM)jXj;iX2Ii6)xj)i^34Ca={!r`)r==aL7W`xX-pkL2C*gz?looB%@ zVI+SVi)J`pEG>Fw(_4!MuqvkmkAO^df*mj@AG=A{9O&)o@s-bfH~HMOxX{2-8Z~d~ z8IL42^#Pt$fd=(u)*6az(84+_TZE!SjJDJQJdB&n8Hw`QrloqA93G{PAcL=>K4sKI z;tW~*hgFwG`J`mZCzVlYiP5a1I8f1&Oww4n48?(1R*cn~@Sc_^()0#e=LxN&mQm4q zP;g_O1&_)g;An6Q=jiDet$xe19FHLrG?U2H`YHR4Ih(fWT&?91S0njPuE*|hGAg)4%%OQByIhY? z)c3VZ9+~80@wzNaZgd!*>Bo?QoyA^IS*4v*jeR^(6iaEJa~`zeGTxXFKLo_-ZYTNf zV@arwkZb6vu?(JM9~r=&T4{mjBA_J9|IuCYh&OS||w9kaHI)E7akzwWgm0JYJ# zY%i{IaT>+C*M+~=iuALhhx}NP4xru=dqsOHdqr!;dqH+-<8eKIf^Lpezhg8D(>bqb zr#8>Ts1yFE6aJ_Z{)?{@{#QmZG7q1d3D%)?AuU-= zFqbvgaz}0(EIA^15M=>WpK7$;BM!6$m6j_k{Y>1}0LC&ZktK~N?>!5%hIj@HXEL6M zG(FP}(OQtq+69QU;GT!kN;3mQ6w$?+Sgq?-_9(R20; zgh!|oeBE$m-z&3XG3Fh1H&s zvO)BOjueaO3CL0`swdR_u-Xh}N>5lH z^qi|j`XK!}k`7Ls&uvmH%IAj47UOI0@%xEzvrRWLPaArodvE@CPbZ$R7{x;NiJ?qf zZ|0*G!^M*pqq5w~u(Y)U7W-4ZXEMdfC@u3;hFa|g9K5%wmc3ar`82|sE$`MK_lzCC z|Ilog1KqpXpnG?=@qHTD;Cs!{pnI$(8ElVLC$-ZJTyOYnRBu2oOk0p|L~rbDJZ~&+ zG+Tt*=kuH0kL`PoXvodZY;yX&0>kJRf)}4>3W$MU!{*n4X}J6VGaRuOmw)#(&tcYm zF?c#osN}BxW;VifZ6gekMPaR(a1>?cf2$z#TPqVBfBV#J)bY2cI(!e)OiG{)85Mgf zVn{)?wZa(&0mZ3$SmF`kQXB3HBp$eqlpdIcrPl6B>m?tlAf6XNgB9L!S?U2tJLE>x zNLF4{94S}i&1iilg`#5+EuS%th)Cacik5VLBo zX-Yw{*)R>cT#i(D=0x1en1u^qULr1l7Ko&x=B-Z_l2Tz-s&T=bN1_Nogp|)4J2E^H zHx75pxc6KQg2Mn2tfnJ@&kOFR=8)}32yXQh*3H}y1B#@ehgkzWbOWlan$>m>8X%1r zxPUGN!Ey$Eow37kY}}Bh15u!sV{p=8wG-GQH{LqkL17UcxXs=Vw3zff zX=ZnCpc;+o{ysyt^(EvRCy5}HUXv9O$9tSvRBVQ#+A|K4S|MzBJ7 zN9;VaFe8%|{dJLQDGzDPhb)!)fe*Dt-blw*V`)?mq%k8r>?VkY8sae%x2~j^%t1&A=YE(FK z)br}$79qWBsk;d&q(lo6qFy5qsN{!mRY3`eiGZp}{4&c4wzRONi5q(g8Qxz8vFKQ8 z7WMJjQ~><~$v2{<7)VCa7(lgYD>8&7ntlc4 z9GF>hoIH>UkOhEFHj<7HPaP}}*$YLzn2*)l-3ev*jWv z_J=K5y)?a8$E5~Mt*Am0V`w80U4T;w`T@g+(UWN|mLruP+3yH){oxWq1G+ZA&QN@g zgfp_6Ce3@^OBI&W(5vogyMTWYJmFmn5~`LjEXW{~dE_ELdmJ@VMChIrS^cn$iVr9b 
z0qnWVYAR~;LOOlnwo2l?Rydfq#W=uH>|vO^a;l{ghbbdTXfc%?v#Yh?RM+HoMZb8) zDX}}iD%C4&QWI=QAM?s#glf-tC9ZNlW|r8_;FdX`Xg@2vR0E*dC`I|DnhV#qD#|d; z7A$(7M2;zEj>*11Vwp^y8AnNr#R&4?nfV#l$hVj#U-^h@A_orHWaQ{ zs9Itd=)5w~WLcP(OI6oLHzIJhYu~?FSE)XO!WUhn9MxBd<(UwQN$c{~eC(+w(>GLoy;WT0tmnf#YABmN@+x6$N6V`$_MTT>u1b6Ud7r8F z@m%}vX2WO?XrZ}gX2Cuepoke5Tj*omu%1TT7%;Qjs50xImNFK4 zdSSxBbhRO7hPBY$6SRovyR7xZATO3@G3F13k=dfnqmQYBjJnQyymJ+3Q@44Sj5Z~n zcWvot6YrYw_8Vrn-Pa!NJg|Mke)jQ#l7aQ<4Liv-bF-Xyv*`B;D2A*z0Owlcti&~j ziKqTgA1~mZhBBxyA6l%P`Cjx@Oa$ywoMpps~@-xJ;(b%o+Tg$p_`&(vk zJNCY&u+v<&b-SQtgtv<~=6VaLW!bkNT~`1W{N}R4qH5h%ScIDke4PU4*pTHLcsi=Vv@^;4G?VMMfk#-#<;sWoWPpQ>Cj6fH5pvko;8#~FCc zqa7anS^GAfgHEr?N7Hyjm4`mP=lR)LYe zeyMgj-srn7NmzaL)jyQaF>cqnv_3Ulons&KR&SND7=H|)c#SHkUZLP==*}9N2+a+D zMroNv0T~A=iD6_u_NIwh zGZMF1LG3t5>la09c)*L9F2+#!bMbW2&%q}(?UY`6;P)|C*li%?l%Q!%z&N5X4MK#u z1PDJ^gO%ybfWd^Yt$GZ%>@+HJ#8HC7tzsOb?n(jtr|2}QujDr2TxRi=L}z)TMEb71 zLX;AFzRl7rIZrgBl8IJc31LzM6CsVlE38>T(g=CfT|phpL1l0b*0L+`Z5b5FTrH#O zN+F?=G75cv7e#m_dbk3|S#o9Xj`;)ifX23(&ZScapnvHqoY8L8&1kc1M$3)PrQasKKok=L$aHu4W^bCzksQ5u&5W4v-)m2r`MkSCw)LQD@@VHX#OiMcE zX*jMc_O+SN$I23PS{1bjVu=~Slw*%!&a|@L@>-Mbsy0iWjG`Y+Eu{3o9c4iLgqOPa zPoR(A@JPZ9>+T}D5jQ*3=`b~G!dqV-Z=HLXeZQN6UbHbXqDw%Ml!y|rdlHdV!&PmRt!dpRlRbHuI4 zbHq8jlhQxep=F5h@2ul8;?Czm!R(4@9~4y~sP(A)u196KtEoq|`LL}=QQYD zZ|hO*!@$;~3iiAx_C#49ntD|GM6&g$f~gH<>rsW9^{C>kN5vr_ma*K8LNm?UL^NFG zplbBwttoJG);3sT_dyEwS|sNUUajU>cUK$I^-qeF>W7mKH% zVyHk&*V@uW)Q@^a-A5Bo-!+_m+*L519*l9u)AKk=tjyk5JUx#kxIJj3=?ti+pDNP` z^c7F9k?{M_>N91tgy=DoyZqNX`a z0cy1HoSlp~3~M17+T_?FE|!ixv$%VR`xU=Vf-tO=TGe<@CXSmAzsoZ122+|zuN;Uh zKVL8fnpWGc(v)Z7%Wq^95i3#lv&-g?ikyCwdwA3`%p>7uWl3Y&ySx)gBi|-6Nf)82Vhpf0pk3Cx0KF;z#Zz+3qH zhDNA%?U_^HTTxFb1M{#w$-OqG=u7>qn_jQiO2~P;Z>sjM@z^g5zdu{}J@i*|#Z~ov zweaU*%3Lb#w1^u+zdA~fTeWHDm%dEx$=dmSTZVb)Lx$XS=iuurV zw+7jRn<;HOzfN_}-!5vcl-aZAbyKM4TTt`nz6LdY?t9SF1olO!neDy_H4EEUp(iNp zyHEpkD1{~gk!H?y?o7DB5{FFC$K z*~gG++FA9;y7nKreIzl6vVZlRFLVk zG~9N{v*9bE-5}|kiB=xz<+PyMVCCu0RxTQ%ae|RBh%KIAiAv)r9%p?EnQlL&&`$W7 
zwWcf@M@6c%6Wp)2mj9XQZ8@r$akpQgnc(t%GYeU6a%RcKqs{!=uoUn{g;KglVQa>DR1i5&CKEYfBg?t*v9A zxi*<`CEUBw;750?L89H#U(=0HSaiMi~9I8zD;9<$Tm=ybXEq7TkU6&%~fB{fT&-)>v?}q=MjkHpUjBXK~GnN_n`dPe_O^ z>VbxKsUb0h0+;4dYv?o&)jA=HA8re*0gcinuvD>hUQ}b2u>Ky@xD*^}NHwif6zORI zpIs!DXIqY19h-IX7@x}TYZL~cC8%uKvyvRb_9^`(ul`Y@1wMV}wTI_VRS6qkKv-Ms z{EYQq*j3kbLKr|Rgko6r>ek8N`_37{ts!;4%>$cNJHU<^Ki)mjkgVQcMO>;O zsH-)^8ga;H!FYhx6~Y@Nj_%i}1%?}8OzfT^%8*D1^e0(sDILWjg*V)0-357?TI$Kd z2uW-T#m$h?a~Df!(B3 znZtr%`j+e}T$NaX*^nq|%H+hj=S|BfFp1l=dZzoz*cWC`*n6Z#XbP>=y_L{}PS2YT zC17gSdIM{R)~NQ;YW8xCW0&(A-TUvK%$q210}41ym&H#i*rVet1Q9gtX->0*Nd;y> zB}^(Xi!YH>FrhaARU)Z?(4P=3ENX@8QUm?MEDDuzod#1SfeKKLVxzP$vJjEF8~G*a zudI|a$OJWV_{}1wl4sY3>PpsWWF^~4p(S(5h?L#5TzG*Sk|lcx7XU-oeKWU6DHCL7 zGnJyiwfhcjJtv~xY|+BBD#)5-_Awe^A;4zlUs?us{Cxjp7{h+mu#nxUVK;UHhBfWE z8YZ?^Wf-1?fMJdcScZ#ks7{6(&KMZXxt*|O*!V)K;rJH)=5*MUGS87{Not-s^-O;I zc-oX|&ap+(=E$?uzfZ@#QS2$Y_mn+z?FO_b^xmxVa333bo%I?e=UV6$DIsuCFHJ>- z=WUb}>O)d^ND2>0;UOtJB!!2h@Q@UKWs<_bJ-I-`43l)aFs%w7HC`G?!nKiv8Hi-w z#kEno3-@sH`6lTU8XZ3INTB%X9rjW2lI@08KngMOKz9aT?&6PG?+IjbyX!~u5)3B?LuryZqk!(1z=xb+$ z=mcj>ExMlN40reuy4G9Y2ttgeOJJ27kE3m)n_p_vqKWq(QOWyVqm<;ydb@zd%XLW& zp|47H{}Bpmm4bTjt#h0cQmAd?-iK%3zMp@?nL^)uZ@#+Sy1xDEQylq-`$*jT;VCku zFp^TvwphBWS~o5S*8*-mV@a;33ZKcGUFeAwJtJz~!|U_txw;|&Rbw-(lNo+@8R4aH zU5*I`>7L5b!FUS<1DeZ`FtekT+mRO-z<0&=sL3@XQ$?9rS1L09_NpfUt=zGqjCQI= zm*NsGM~@g8H-&ADIP8{C;Tm+`#*NH=4#P2z64MVV7$fxCj0j%;v438aTu%a8yhE4Dh=~8hPZ6F38d7PuR?t*vlD534g{#mFAJ|ROM z!q7Gb;Z)A;(2j}d(Ztn*%|J6TZ#5K1_4(Xi>u$(ir?EQ{Y|=q=&l7nbYGk^)y&@!! 
z@R5FFX7LFY@t`>z7oc#FfD3oAAWgUpZ_L?d!JmS_;~R`_VTxBBtem_tnv~op(#3%{o$Hb`?g@<5W35Y3Jn&k1f+ke>^S$-}p>As8N- zUxnub+$E63BMX>KrF%oqN}t6SrZKJz>gVQY#i5q$QCU?M2@sJPq=X7(3_;krBi^B* zOnBBbw5VB~8+75;Ao@{3YXOA>3I2%OmuT~mNyd?X#78ep#eP1{Y;D2AVXm_m+~D=V zzA1}m_;?(#M@~YOW=^bXE2ev(vUsY<*o^xtEUFw3vVEz5L#KBiMgB~PU{7|29%JA{ zz~KtFXM!^yB`S#ZJpi7uDM69Z*h&-~#S>|`{p;nxX~{`u)b~zOqc&G3(qf6`!(3d2 ze_cKek#Qs0;BLDolM{k2;7PeBouN18G*&!T$D8a#1S~LQc$(5TW1k|4SiPtxARlpxsU z#uq2qC0xNXEA1ps=+|&5%qJ>Nj16Tt&Z2jy>^ZaO``Q)>s&cKskl8>9+CbB<=T=K2 zCss3C5|p6pG!Lud_%NN|-d$R(b~wqzbk^bK#3T{Zk@b1HE|PFs==lX3}(nj7kkF$YGFHD=~dSZ!P=@P00`f_)CMTY6zxgcAf`mp@wz@ zcduD<5^f@_i4)F;Ay(E~K&-f->Pla1T8Iq&SH`L%v-I94uuR~%F&he2iCF0-X0{&8 z5=cUNn9W%O_bi?E%~nRG?Y-I8hm$ST(-4D!#61@lRg52;1-<}6!#!<>3% z7n$>M?Kd8vb3=|}b*8VoPh*Hf0HCzvb0wYqr!yg=ZtQZQ?1wj+=-Hp(fphs5N zKQy8NVs8vB<)Z`Va4p5q@coYQ=%vEfykk1xdL7wQVV=mD>^$b+qK7G9?(2fW% zq=4c8JtJHq0aLJjWHtqte4$MiN%LAo#)b~jnf4GMKV}$$yShtFS!eoJ3DTLAJRdjY zQb>pZ++B2J1_1q>2I~S6Y01nASMOMDg^Dy#CI)5cgjRgF-r=E&G!nb7;5~GAMvbgtar(KMvL>8aGYp=| z65;8+RuVicUf}9x&B4bAdlyY_8$7{Bam)<8!5Dl(Mr9r!3*yu$Dis@*WfeKWr;07l z6H4(Hd=lnSKdCJP$%_c=TZ>vuZ_)v54weIWrDZ2a# z-F|I1tos51o0u3UzZ8D zZ!cp;W~GtRBaZYQWuT#rX^v-H0@_AyRFr7r^D=Uy{;ZS$Z)LWRJgQu_*MCD8{U!Zx zf&o;M=XY&T(HL*$VAba*P~DyH5XOioNEFRqxGhVt7t!xC1-OFJnKcFH6Vu7z>LNgZ zKwAh12Lks-0T~M4iMiW@8aXmAQP8HRnDTjC(xWXy{DM(KJOUc^Y$rtZpujAvj%;H_G2!JyHDNLBul7(AAO%I*5fp9^>e?P>Q zfs2B$(d=xmTU6%7+f;p8IJ+tdj4Q}sC{1rXl&D_cF=dX$1 z-A9y(Z}aZ zC>oNV7~a?dFH#I|ywU?fwMjz(a6zCSx}g-Xj;g3xGzHs2YcgqSP#<`QXFmT;rLZpa z2yK1SGi?hg=CKrqoV|(MgFE83~zfhl9E=KtO zKRgLb$!IJ%YsmB*wA@fYMq&h?mws+wN@71s&;)ujjqaEcx)v=}BTUs3HP>?$j~wQQ zAjhKSWk5i_mcK#ZJ*wZ#{u(Of2@C28-mfz6=gVkpS&xffM>x8s8_oe=&OQfHOZe0EG83ys5A3$s2bKu z-?WAbJ+EL!h161UD;1(TY0*g~b~`mmd6 zN9KEO=LJS6xvUsqU;)9d1dhJJ2GO)>cK!`uNW1N0WI#?u0XmB}mB^nVc+kos1b(Yc zi(cM8nQtXahR5b%#*i#X>>llOBnwg^ju7}cbIG_?rla0kB2r5`NX7;jk{j;6)L(1C zO>)EKRX+@X;ZOuqy+MWK2A(%CJCP>2dl&ME@>krl4S4`31&ju@;SV}lZGgifC-do| zJ2S*0(eq>`HSbw(vfr!Q8^pq4@1+f~z3Db&_x`jv*gJV+Yj63D)sMKvJ#p5bsr&R- 
z=sq|nMLwbOe2WLubbCYJDi;rWxccIc_ddAns{4DV+vC;XmtW+2zqnq<6|AnN`z>*P zQ)5p4l~mgED;4t_{&yP4|*YuL=HYx|bmN z2B>{J_^J;s8R<2@{OMjU-s|17`Ywt3`fRycdAob%m$T}Z{OS@Pr({*n4utT!0|9#b z8^spi+udutqPc6dUoQU3$r_Ibj|ab=2iIHeiUD13wYw$4ABAOnlau0d<(C5N)ygj= z+c(VWZt0+1;e6oZ_W2cGd49$IJ)B?HxVogdOY*%K{DpPgE#lp;^Iwl~Hw8T&JRbaT z3eDw#@GXwQZ@_)NBtHJ3+~*Qj*JhzloL~1K>VB2BbB-7S7J|AMrZUMa2R`YH4CHMo9R=I84za;awB z&zUaKel_^rHyb`&ZQSouI(R(zz8+kQSeHb7$=>hYAD27odb*dQ$rbCp?&=3&0GI2y zgw>a{`rj`wT~7CY@#lV1$K7YtL%w^+cb^m{V!c?$%R*UxrBGJW^rQbSkC4ZM$Aia% zw?DYe9Lj4#dV(LZHj78Rz@6_tu&yj%Z_teW)vT@`Tpfzx^1!;1yS@E5Ht`IV6&NTL1-D~ta@d^N6PV=twryPb5ecf#Ft-F|;B ztgdaE-vN($JosK7T=AK3NmqBTVy0H&}A+j#|Hp8x7^>FUA1Apv~ku6zP6b?)^88U(TTU ztT9$Ja|0#F(2G!4zKY^Gp2{hBYrfa z?d!oA%f;Dy`QT;k|22L;;Lh!niGr@BgLs&9d8Pke z75^2?7MD86rQC3DU9mTq_OGt@?tgH78E`fD^@GRt!Q;UbSj-@rHJa)EC!Lfjkvb}6%5o(3-%Z0@I$yUi~@$aa3w zKu}&2B~WvAsnb9(#iN&n+E-@0L^=Mo%h2}AbdI`b z@b5CW&?BPxJA3em9scOSBX;-;0Pzt!{QAKkvBR(5QNM!N;qppnXNmFK#}0p=63A^h zzS8wfQh!{eJsvzB{GbnRGl$oBI_!(I#Up;b7iquZG;%+RvEW4;(JQ@lq{tVKF#i_D zqwf^$zk1Q_Yienaxc4vd;2#=c{)UwGkI?haKUIHOrn!gR;~pSB1g2LDOy9!+c~e@e zuXGX?`BXljM?n1diT2m;&qtX42J?c^bJC5e_`U!BLl^4zGz?+Adm) zNBnv%Qg4e6tFQDoZaIC=xek6|W{bzI&EvuM{@{`8;Er`XvI>0u0{@X&1%7zx;8%QK z^)1AY7e%aBh9b54sdQ4863CBJy!(=o-ftBS>nkQwgxNWS!`xIa|$YF5_|u8yMXL}nLX#C-o4momeJ zaQDI(moD&E_y`x!y&fZ6N&r_HOSm-NUE%!2lkAl_yL*VAGO}C|<1fv~@&@O$FXwjj zhPbPHq5Bq)T%z{J0OG?SbY*W7e*Spyc<_ThxXm2yOKiJ%#KUC&v8ewKHQ8U8>t6Wc((-pXt4l5S z(mCfc)84%|49||Ze|h?sr~iJ6F*uNAh`zfd<%nVY2wjrP`1SYyeERo)I{xLTcZ85f{Z9TMt z9=&V51mV}>e5u05QbWOu;$o~Jvn}Ragl#!isYfc^^_)`}AJ-_Uv9;#xGv92bw$2gRhA#PaAVLyb6-j7Q#IfzD z634H1Pe7CYevjWS)!^HFujj!hO<%n@FL8XeM(xGrC?VSU_2NW=T}7`h_*~n0y}i1e zS}GPYy*eo+1q;CrVkq?t>Mkyo(vrbR7gtjP)hx_)ajkX#48^^;9zzG zK;WDY;y*UOY{JdIOk1)+t789s(xzeiRT+ zt-bN{9;-;rMwqJF zy?g5Tg1_HCb(ce}RrK$!os6cwy2m%bGCKMH+qs(MxNR7G->1+MZ~%}XiDT?cdhaXj zHGThU2LvfW>qXW|rkQr8_F=Vx@WT%uK}eK6bf#sRxD8FkpGZ#*fVpmx+DJgwlQa+2 zR-YFj?P)_uFl`~}H#@lB5GHPQ3Kp_wWy?Z4dqS8Q6CW0$kKDc)o_N3sGyemRPEgBm zF3cH`( 
zmhQ6#KCiITeB`}_Q=H4*u8q69LkRBf?(Xgc2=49-?(XjH?m-eH1P|`P1`qCVCTp!- zXYcRb_5K0ptD(o*CEz#mzEWHUeS3vz|JGQ9)4aY_gd0n z&z7A`@_^0Ll=1TQSFJPnrgqen&nt#HaTY$84ow8(m3e(FNk4-1imoVDtVf!XVxRCK z@+tUtzNMH~9F3%yyU9T+M;A@#WIiidd$v0#EF$7*q#vNZU>&YcPT2T zv|#cudk5HT#WpRi+v6fcLmDr8n?op<{w6SdgI?DgsTKDsXQ zk^iKecXs;1Tcy5oR=Ll*i*Dclk;(?UC18O$cG>mryHC5Q_=V&&yvk?io!FEo@u?5T z#T^_9RwRh~-Mst<-Klyg33o-JnE?FUBP-JLp@IYVWO9d(V{z>l+_>KZ!eYF++Y+Yu zZ&`>g0i+PIJH)X7`G6fa=u~shFDwJ=Wh7$PQtR9Qj&%B5t|8G}Wa9 z*qpjOhFTi#i`C3rCC@NTD2|4nb^DwXB6v%cNBdn!9>aLhk z!%cXML2qmnGV*yfyzfLR@Hxw(yju?SQ*HT6b=uh8()MSr=$PWn}XqRIR z_usx~%?d980?LaEN)X1;=BOFiOc}isW{{!#i=V$eH5HSV86fg=y&$7P>ZOJ57ASXJ z30=4yo@@xuIpm6r5H>622-R(13UmitXTBikk|{_2Pd_>K|KT#Fqx|WQV+_dr&Ha;` zx03}Kvy`KY{U7rATv2;6$vRaW(^B3kSUw2y{V;z*gvo0 z|GdhAyjCQ=+@#grK>lWbJ}S*l_QzO74S`u2WXA&f0lojrK#o~V%+c#_vpP05R&FwO z7Op==Ez&NI?oR*Ln(nV(|8l0&a4~gobux9aa4_>mV3s9wb8)u-xxX?0Z(c2|Y`j83 zWUg*57N+(HUb&iatSG}QaFU)%gV4tWv2nbj2L}kMDJZuyH*ad#MDkU5Z}Sns5+*_n{x60)dI8y%erFdU2vgSFr}vk%`wvBf@NB9%3}9er`iTGzr^ zBA~IX=)lx9mG1hq^<`ulrqoiJ@4V8$3XWc28x}W~N&3H3$G>@K{cri;;9~m^%b7F7 z@%VKqjJ|7nFRnt3b3`&cC);5nq*!F|{GO3T6BBQ;$9Dj@F6dIu6lVI6JVWy)aiPi| z)NJ=UMp9A;_G<4HX?1qSQM2#irEAm1IgWAYay=x%FKTqrEHAf@wk50QUyOy@BS^Qt za5M;K-%V^O-YYd$E2_3_1vuuMNevLHH z&+ES3`;~cj)$`qU^Zn&cr+u-y;i1ZjGKIy|`$Y1JtCbx3OfRXp)o`*Lb-InC`~rn^ zy;MIMru@~`YM35757*}>!hW3UrzhsLwmHNM4ZAP##@wPbxbDw~mqm(Afw`d_?Sur3 zu_=QdsC77VUh_8gp-FloT9Z5~4tK(Tiggo|REHk2VR z(kE0N+S;oKREnb3xc*Nu=mVOg$H06b~_uSt;CLY0QUH%kkYo zA*H<8R-o|(X*|_T!Q|uZxibb^XuVoRaB7=eQ~=pj+A#?Mdhcz77BS(B;GLMyN?M?e z%{>bOz8QhL{5?z5@?^pr*e7rRswTrn1UwFXbErr^{#H-j6)1i!cRh&D&=WkH4D3?#8{gxt(_is{^;Lm5{mJVArig_ESf%i|{&HAtGUv>Yi4ayZ_q{_b%S5jz zSGmtAPe1^1DMHzdu~12GI|QLljr^{9p`+l9=QvHfx~)oaytSa8%dynuBG69wwc~O~ z_pI--4)nIi|D#3gb&v~u?}y12$j0s)kFiJuWs<%fwBML*TJQnK;C+`kedv{aH9vhm zjt%gMgtDV6hDR>)<`#STC)$RsT>^S@2Y>x&g-lDDQ44Y0_Nb*vcdA>&lBXLe3S*4e zMIY}&6o$q{sZ4XJY*3kKgOykGWg%G&VQYoWt1LbROfmYsA0;gmPSnCsrU_&s6Jom{ zu?!5jn1BxUKLVu{`|&5?a%yCNE2r9cPU<278AgV*GQO03qZ>;@v+A6byaF#ueq 
zQ^AFy5hM7Btj*y^KGp=-RXxpKW-moAJVc+LLNuhUqj*?$(5m5kAtZ7vw8=URB~{S$ zw1j#z@;~e7^qY9%xE0?O63>87Wh2-Bv}=s-(}KZUre&cg5}36W9EZEptC;iFY|#9$ z(V^?fm3mCs(9qeEWjN_+P6(l7Y;M*v{0$6Jb3?b1b}S#yqjQ^2+Hw1MsLqroWqgx_ zDTHCi=|W6VwV`{N-AG++7!aHSo&cMLIY1&*$}+Z2>ajO#KHxZTEcOXRFrXsZe~Ij{ z6+T{zb;jwkGvQ=93&}dWktvNBEYi!Cm7B^$Gz`LlJ@rF#P<>zw!OxEX-RNy@{ZZY- zC>$Kd1L22!L^9kyD=Rx3e0RH~$fB=y*`SKtN)78zjg9u%4XbsO2N;dbTduq#eR;f~ z++~s#OWrA=EZ)HDOAVJgJ#Z#b`wW6A=yX?WA zs0sI03#-~;(}5-`<@5}uWckcA2#^PHJy%3A;=Xr>Bo=&MLOt+Z_2-Ou$x?LpS#RmS zRk{h7j)3=>HbHFJ=`Ehafcn6w3hAXt zEWcfX_}7dKOx|biTgJsciy}d82n6{<+hh84q=XOxEq@=awiM5d_QJkk366v1BAM^n z0@4(e%p13{z?zfCVKz61_zvP4k_X&LOP$^lmlm<0hEL{1oEep^4}rv?&j7xZYy2Ao z%nQ0Y9$+3g%-vsAn|X!a#tjD_|CR|xZ~0MV1a}p2+7IE5NzJ8`SbpvhI2R;ozdjd& za*xvtI78&~)?+pzJk&32pes+t>qM9#-qOTH*~}r2fSdWqZj&!U?OVgyQ3ynNfoVs8 zTQ#u^;Ik;K>>;>`Ea)v$&ED4TvBghf3*C;Ul3B2|2F`)U^BY9a)L|1!t*L$Of#vE# zTK-|m9x+M8mIXigC|hZ0`ANDm3_{amr< zmBKA5v9^^UkSY4uH$UgL%J)>0Q{KZ3*x8Y+w57M~ydW_o1d)9I!-zb+v7kkt$U~ZcA(9E?H zY(fwGCYb=Xq1{=*HHe5hineV1XAC%EpfYS&TLz3KK^sz4_PqE#DQ!ttm)@RoJq4*ru9K(`W^8&3~{m z8$?YnRoe{uFhW`IOG#m>w?p^Pw)qHdTH`$g4}G3Z5B zos@|j1`gC5 zQu2#Ih=D)95k+-IKTSpk}fUleycmRUdPbSeCmfh8lhfdM0c_mM~}8y{j6p_H=Ht!g?YP2A7Ms9`#M6O)F4UN^GM zYkoNRgE8s>#Dg(sHS=W=uLih2PgDHHwIa{v$Oz^QQqN;0b(-4m!>(jhkoO2q2w^Vt zLFzQA31rgB7~Od#)YKMEH+#9n+HMVY$-lM{kQWUW2VKhUzw;5-z$)7$oUVZNg(-BF z(IfLXr?(J-CcVGX)T`}*hM_>I2+gf78w#^nlFUVTawK>Eo);)qdx)I;@z+pQfD9EP zRyP0-)4&Z8Q+M|V9;=f&SpGvq4bFsTW=p=(puoFJy*DFiskg^8v`IbE9aFB;&A^Dy zA=DDmKv3JN{#P^aiMEPn+b3|naYDRn@0wbQOed@pUOY>0>FS$fA>9487bl(7A)&sF zjqK|><~VH^T9)yze>XTtR-+>Zie)4EdWZZdnmCWkXji6x60g!6!IwN8lDMrHR3+Ye zP6akca5IEnmr6_Iz%c+bbeyb@(iM>FhDp3QNvmc|fWJM7IV81IdUKK`jwq(Sgp~A4 z5ez0D!Y*a50k|<^P3s3uRr&<9d?_-9HKcF8kU3;?-q~Xud&Wp-&Jc(=MuENIC1WE) z{jweI@j`2}qsa|!lF9E4)!(02%8Fb! 
zT}N<1QwKc@XzCE-83}%-pvY{@C%9nFXdGb$T0x;_^^&oXj26yk+0k9*Yxk8UDF|ir zmvxS7p~3*ewy^Vt*SR5~=X^FQZxs&+1~VGxDT#j36mddsMVgn_7c*4Rc;aT-uB}rC zIg<65ib^n_8oH3{T(Nm|t$}muzSnkX5%>_7nwN+EJ#WK)0^X8QWYT7RKg9i!I>1Io z4G!+Jb1z+#8omaLeUU0wWLXk&N5V#oOOa7}NJ~~BX(kp5paxqjRUwa-(|77GAla4@ z-h+n5*JYR;2VH#wy*Kn>xRI_Y1e#3&8{U%Ky^8gg<+`f-r}&ZL)S#1%=wF$qCx*DB zIuw;*xn)V4X3q2}v_uQd!NOKYoI@22c9^zVRw4L8vYz)D%^*!i$7C^S1DXm0&kMz* zL}CaTkLLif++rQy`1gxgfi<8iYd2~C(DZzx+E0{**KuB-Jzsg3_3wtQ0=kO-2X1aC z)HSsF;}5?|pv?j8C{cZ+ za7{=TYIUBr1{?!V5n-NS?v*jqbEI|nAYAH4FuwjMlu{G5x zr|=hPk^8rnC$@Bw)~Fr5xp~S*z8y7_LoaAsK6RcIVT;3ub8wINm?bj)QRM>4QzGRw zf5Stb^nCHCDD}_(8Yu-L*x=wj;w!GQ@^(1|f)#dvrIuR8>G^cBE{|0hc2XMGfEvEh zTRYaLzHXYwO^62kCv-%nlwB-5W>l}PsmtgS0oMYFZ@#R+&39xrLk2YS8*|J(Z+A%3 zM;B{AoKfwo2zL5SOq()eLC1GF6ro47(B$#2Nuc38KyLUfMQHz~x5W&nvfCT8lVYm# z0;eX6?bE+v&Q_m)=9Q{+gABG{6o_>{$#BSeZ1VzO1eJ;V4s!eNMsU#0N=FmM*~Zz9 zadv$;^_=4|4&^yy2G9R2!$FTh%lQDo+(7@g)v>qH2GX_RgbU;BB~r!b{fmG!f0ZUp zHLs<*O0TYBH(Mn_j^mjsB?@9gJnm((?HNoxiUvQor^|uKUU2ZI3J9?HVV2jQ&P`LP z=&JY^GlY!|D((?OiY!X2?1iWW2x7EVttgNt@hQ6;tV-;~1tfZ1!%$wEk3}We<;({h z@;Adx-9nNdC%fcxVHiDy0o>bOok3t4FfXZ+;cD;=x zCFWIOpBi$sqRsrYC@h6Y0r&i+Y$ZSjc8HxD^;_LZ01KkCFpNUkUM`r8?xY&FIK#_Zo2`z*0l4M4A9FXR@8AMJ(imP@VFHiZTw`NfR zIQw}f+M*jlNN_^%`@J4!mjP~8389APXWO9)R0jQZF(uYCJj{#B|Aj35 z52x_|w?Q8}>wkhid6WWZw7_dc84L#1KkGT*xgn*W%n7?6hW1}nH94FkK)HV zpNA|xcAy=W?E5M38Uo#Ld}dIu?U^b6ONIQK_xb;d`gmAb{yi&a zh7Hnl9t z^P8xz6(}*95t2oU1aC*?)IGSL?gGWbb9RM`i^kab`pEDYy9Wwli@uz(^FW6 zT#UX&&ED(fK`{cgr?#NM@irMdiXrCL1_VIyepMPI{puo~>rdh+o zcD+xTl}@*C1yygO^^2`d*wqM)^J@W}MJ?9&^Rj=oFmoH+Ebu(xQab0??-tC}iU6xt z79|+|7rfAWId?rm)6&Jy53EsppYC@G+pt%&3ZISBa8`NOtj|I-X2W)*>D`Re7e(N%X{uuR<<%Jv<`3-4|2NpDnbvxI2PINGKT|`1XWv7Oe0OF_R)1(pC zyRXwyIu#L~@zA%zeCTIQWoCI#q$Mg^Pi7?}PxKbKSc^#4uRrJBq#0{Hofqj}6SS=C zM{#2Xm)}n%qJ4ltO%2baAuJ;m*Nrz{B#X>6MvF+K8{c{1?aR)m%=}+iD~lk8OL<;@ zMF~@59$qS7kOhJwTUTjr~4k?02-o!nqGw}?w9M78Q@EUV8Tj1{4=;= zUr?{sB_Ye!F`M>RLyoJxa-&{Bo7?x(r{snRdkLO{84f^z+ST{C$pVl3j{=f_$N8TVQp~xh 
z#R-LEr%xKP}ZI|QzT1PeSp~*UwvP(A-9wBtL5W;KW3W3+zo14h)ebT+! zE1laPov=|x$=5U4X>djm4YJ#|2O#5_Y;Wf0dMLrMfo~g9Y%Gb~Ps|uTz+o1Z<{ zq@3g0Wp5+nuIwaqJf9H}W1GTVRfHo(u%mM|?%+aK(5#S`UM#UPCP{Ld;p_IujAK+p zwGEY)%f1MDCsd)!0GUAcZtQdT&Jn*Fe-$E{vhUBX1m4t7u4iVsDvq z_|KZ^>Qp4<;fD-Fij8Z6Ys$jg+5{txAXc@8U2*DXg3o^(&tA@@``k4T{0^H`MHn;fE#aUeWeB_!zFNV z7crc55tqX3`X9O+9dZ^0-}SjVK)Uh@5up~vs5@&1hsT_(cb2ueIy#x8#DXJ-^l&Z= zZ*0e|#AIZSb!{0{m7UBFz8#A!g!H{`XGk1gm5||zg``LIXI5S}5A`GS(fKO^&>3)& z{sr@^H?qbJXRR;#^4M{7`7Y^acGIaB=@_(2GmNy&oIn1q5S3!#T>W88Sq&)7Z9VYU zz?%n!VVw(%s&u#(AGNdVMD{9BFfRlU;7_I5`&`PGBoP*4id;n0ZvR18e2~;XLs?j* zaikxFoQ4%y?zrom&eKU=tKqx`WOfi>z~iOn4$sqtrv%*7sa zxj!PC$vPty`wK$-_|4BmnkrNW-q(@O5B@442ApO}eLJ6v_T5myb&=%y2cflaH}7i% zts?Gd@{)`rv9i_RQBeChxY$Rl}UF=6IEvJQ!UUyt)*<1JCM~gOz@0<)Z;mVDFVHr?K$%GuNbzsW= zzKd&Y>pr}*FVY&*R2HgiR)Oi0PlUz$yd0ia8C2HTiQHD!%(d%q5HrY1gOy;_v1Zq; zT1;&M@Sx``)^L8@M!@ILfS#CD2M+XoT!?8J@c>_{7MnycDg*LYhKqy^eaSm6-$yxK z3v9ziwySzy{gqb4N&O5YMd4M`osGJII=(^TxuKO4u3Y=^W(^#%>r~)yQ^{kv?ER)e zXU8qt6an}{LjDw(U4cxob$o1i`#l$?er=--hXJ#G`|e25H#S0!-G2OF5SO+yEkH%4?ELxt(N>lN)jz$ zKUIH=qR;_&La*xrq?KM`d!VAWvG7*I^oo=;SUz&ZA{FX3S60yXmCovUt1C7yhFTfsg>Ee7Mtd+1777f02B26QkTnigXYFMY(LdOE z81@YrHTc!v!j%y_Y436Xj6z-q=Jc+oys*qzP*!QXV+f{~3iFx(Kh|*>NjGLZEnlRO zNrSqlx|1(W;k)hnv8JhYVn(!$&^W4NmBAcLe_QUqT4IJkip0co_IAyf^yyOL7sB(m zrc+F+@)CzFlN}a8Lc3Ax!0ZCqG6Lxw9!J7I9Qx02xyzFT6+v8Y{m^d;Tip}zVV*G7 zge(NC%`8VX`I|@>eOeo@H~Tx02b@aZz&)~t#=4rUVq&XQb6Z${usWR29I${e?+4y8 zR^#`!LEGGV8=#!=j*9-Uye?0@%eS7Hc>P@^1p$&=n}aUeTZshzdM6Yd(mLzRs{y7= zTS~s&9W`;#+^TCou~w{BS3X~17q!5U)*xctEoG_8@Er`&_s?{su;fJr@AGRFA&DRs zEyYmE>oIf`_XvkXZ~{Wmo8vV&^YgD!=#W%9p7m=yD*bByb`8RE7=zzuj~KFX9a=iP z>p}h&2K!rj)q74x8*cf+i-SJRAtW^}xTAFx1VT4X+X9ULv=mB&h%Qb)a`<>tqw8yb zv&^5zbCBG&d!oih6LRF|!+Plsd6C8}@2ea2@y55|dNqpqp4Gf8Q(%x=V4a8B&nBid zaW3%Z5;}%HkVn@xVPH04W=XxyW&BEXQyOhGnmrQ*^nRZbj%3F46)2+_E=pxSvs|Rm zJFljGMfPR?LGMpLq|M4%c^qkx(#)ZEG+Ebji#S4fYx9_`5IobQ^fu5GOqvX*hi?jb zq~yqe0{M)0Dj(*f?B$A~rImbNzwC{Qi_&=*(&szPZ*&JSqf9uDD?-FbZaYA?X#2EN 
z)>96lvxD_)hJICIF8~y^>!u9c+JFFUUnx@=s5u)0*yvocXCzOsk$a1v#&T>aSL?IP z?VDHAkyq!S9j$(9`S75Y5=cTK5~Z)Ap{Oj9&H z+g<|OsvBdsqh6N~&9vxXE|V^@%z^-YpxA*dT6Bs6&~e~KP>xCBe=@5uZkR%95$5N` zSRf2ZsMY3Q=#>_Wf3<}8Q4BupGLs?zf32GxU>Qvje39NT(?Fn`9r zqAQ4w>VeLWFPy2VjCo`4(#mU<^!!KDXY6{uMq+dDsk%r(3!~Y2u$kTCn~Rd7_T#~; zP(gDUCP%q68lOJIp(j&)PF;$(O(v|u&>og^&Z#%kaog3ol@0iH8V2zLw>N}3gWWd9(oEC=~X!}ON;`{|7@#* z2EAg33k8gXqq~)cSA+eKs4}_|8u#Z~8Enrm(?(c7T4ZRQNGq#7nbZ_ z&hLLRwZ!jDL9Hi1RzI7|xJT}kq)h?%r>-@lkj@P!9!`>*jz7YL((lr}FRGc!a#XB; zv=i=VDcB)MB&}8Jl6OltWYi}z!g(^!Hz}H4f;_c(d}hS%>}K$!C8bI4N##z=r7@Kh zo6=LL;)pOn%5{$XXIRV>KUcswjb<>82{BAE7G7Jymc?5Qx2ERa9vqwZi}G8b zrfuc;Zf-XS3d77DL#*+4KYA%$$1z}gQXZ`Y{1fw%l_&D0eB~mxk#EY-$R?+@9&XBD zL~;BE@A%}8i9J4ILY>?~I!o;7Mvv-qvt6fka}se`Lnc4C*0pW4P5$%kdgpNXFyyos zr=ER0Cci-et^WCHZD;ACTNt$fRf^^;ocZ+yr1OB~eYyNk`~VQ%tX`(2?LFxtH@Yf5 zFJBY=*%Xs2!`IEy&@VbLTew8-y+49_SZa z$V1>B5on09u9NNApf%ymDwQRalks zdPdC`?w`Z{h9W}6MGN{K)X;2Y#h5L0cWYhf$Dy{e);1I-T*PKlw^=+i%N@yo$%7f^ z#3f0+aW<4k3SOcf9RM>^BY#fM)5?|9L%e_h33T-sb!3UgQ*&Eg5BRkAq)=yaQCt)j zR2~2$N5_KiMEw>t0tw+{G?)cv3_U9T0jyaWVgp4x_e`NcXpPbSsnhC8B$ihApoNXe zGRkx308w^#sIb!CX<+0((=$H?zIh5ZLVbW31(c>HrOc5=W+cFVY+X-Pgba1;- zqxa==$sD4Ih&S06sVA{C*3s0M7i-^(R}F3%7GK14yTdP{k3V6bML%!5zcGG&wBjXI zSHk7X%Obgf)Guew_%HRr_J8Z+eT0LK^MATp89M1;O@ zjqph37n@&WBy+>^-ccF!Hq>e=ZfVlPOSEk1e8=p*s8Gb1Uurh#V6B#(7ihIN`+t;r z=cd>Qc$)LGk5yT9f4F2WD~22XBIIwa;rp=FK)Sk1x+TQzD4cnBy){_`LX>|5i2;8L z61g%!H&1EFG#J{J3cuc-fdq*cAVK1SV=_|H^9SP|r9}aIktL|WS*W^P5k?imvuaHt zlDB8^u;%BZCDbzzqYQI=UkP}BT`M4czpPI?LlqY2^|jl4o2b6qc+V~f2oPAPYr}BX z=t`E&Z0vm|_a@X!3ihazQC$_E05Qsrxy%v+KPS-MGC}rTLlLHd;-_E;c3z6OuL#koRM*po~_uK*&{96kFh^j-Gupeeu<*Q zEoxa=2A>C@kcXc`DtX}>0s|Dt$DJNT&_V}W?R(ga4Smn};yaeRwt=eb3`_v+?~o8(sOsH2gj+j9VU0^;8?8cA1>0tVemSQw+GGxo*pE4fe`zDgnz_Xt1{AG3q=HP=F=LaciY;qz!0TQdH z0lq@Crdo1Miq6A5?=Zf~Azuv$je7EAhdzr!BE67Kl;+ys(I6e3zs+EE*y^ZRo;`wF zJE|y67E1a&c0WAeq%$Ow{7hco%P9)aZpJE0R 
zhf;xUbiq>_2)^@ENxCJ=r7v(Xns&$S7O5(}Y8>DpKqT^Ql`&=gT#-v>5yio%|zhskK79xjUbZ4Pl6^j$GDk4><44vL-r|Kpcv!4M)={Vj6klP`F?cP9MFKs#6I)$c^edeLQ97uTlg?fs?46T#F;8Qd#U zpH}o!vSVqF2qvT(Ha2`N^nX%B1209C6WNz+f|)cRa_>N1KiGGjV<5{iVw$3EBUGMF z00Rg)Jz1&fKt3G!iC*vS#UJJe2{Vnw)3IH#t+pH#@Oj$;%u5g6n(GxRKEM`Gk8iR+ zpa7bR82xFPq4mXLdx~@2E{)UU8UcuJ6x_Lte^TUa>WpEF#3i7}*-apU%)TCLQp& zyI$JnIy=>`rfj#Obvqlusbh+p(=uw2ocQP4$1TE|B-iNS+pQXcHgsC)b%-AsYbd1p z*P6wA04cjZp3oiEvva88QBh6U>lK~c#_0@SW5x)i{JwhIu8#lo zB4q7L^ok9CStetQ{@{>1GE=Fy<;O1IbjJCZHY>izR?c!_XHlgYGEP>11gU zZzkQf$9)rFA8mm33eE4TgDt90;Or~!I;ux0uh&_YHDr@T!d+U<+CBSS%99M8ZXUYK zjt=o@S#y!>XNOk_b)v3p?lX_5x{wRk{&sF3LBaoHUR?c1Q}o7Fb)Hsc8GeX6$C_4V zPA~m1^DwJZ#{dFC%fZCh0`@@b5CbwArwdctOdK}Vqo9RPjZ-3|?-#sfVzA)>;Ph6V zN%n>Ycp`K5MpfWvA5_}OWCx9HE3kjcLJmSpdKL^+95x&STo>Oo`Cle~0`AxhV7oMb z6X`Nu6=aM&5U}UWKr0tzpc(}YK$yh_s6^VWwr`YJd4oWIqXg@f_0poP-Br7(^?p5{AlW6ca; z{nh4OQD&+%#N` zs;s@!zJBt&x_Swv`%B!oh(f9QZnpKKkOIeGG6KmX=8>w|0a~9R-SXENH2K9=x?-H0SMAB>+0Nk8pQc$n7^=Buw~v}NPF z@)cEjeuKXw$|&4oD>l~2);*m6T1oM;s86quyczqTnqIa#hsQ~On8I>uJZ<5!gb*@|K786c|uJSmDgjT5D%YuB6%RZe@}8+gH}7_*aP060B$3ZViq?>8t&J6F@37;F*e z>6LAI$8++K>9q19*JSojf3aZeGZli zU^n8#zUWJpleuk?y)GPc_Bz!Vyp%?CYx_FU=O~S&eH|#6A4kGsaE#6fog%@@(|&CW zhE{&B9caG%Sy?Hv?QoTth8hu!*#_x6L>Q|u~RF~#2u$OobsBRuYeR5t2lJJQNB?{Gu z3N--Rk4W#dQE2y6kyeU_Di_u&n~KH+`6u%)VbOVnaxNBJB9*E{2(9f~3kTuoFrz zsbooL1P*SAd^%BhRQ7ldp__jGRyQ|8@^oT$K4jl)vVK#)nK{T(MCG8FE|-V&3{h>d zdn)gbp~NThv;N+1^0=Y!uP#RW3`6NWe`M+0)`}0#Wjby@qQe& z*U}%ihy&dCx2tj#lZ;G%5V0tX6fVVUpVzYJvOzO@8mOy*pPDax=9A$b+@8`>ja!lz z>dE(qO0uARI;C?3|p6x67wy4aN zVBO_{iJAcgbaDD|SNP5~3+H(jwZ0MqUXUPKnf?4zQTt5k)3*0S&hmgr)DI(>G^Q6U zrb`jw_X<*sd207QSiJ4$1>H@RY;>OLPV3EqaAruPbwbC&2E7C1=Y3>zb>0CDGdD4C z#}DHCd9*Ub2x>mO6xo)&Oh$~n$m(b?csj7l2h|n`Ls+Fj+#1)`EYRN0p#M>3?e2;- zmsfhM!bizc0rl^=J~ExGBuqq;4Ma}J*LD&?F9%W%OId;@eWCtvaiQwIgQO$g=noe4 zGA02U-R<_C3mj&2-7COSnT>BF0Pfp1#lY5^Pi8l-3;o)IO7e~ZBY5d8^U>bP(2ih6CYV3v7gR+g!GdAMSEDqH|J(rc?K>kc?}b 
zFkO1}a2LTjX^D({?gBp(Ag$ALd)L*ZXTeysGqRc0YmV=bfV&mJDspsn!(f^7dq8R_;g8)%MQQv>K^b|Ze0rwcHcz_6q)L;wv;@{u z)FE_Ev?(jS$Ok`jS;KCNOGkP5Ur)I(`LIhQ(el5l-LCa=V{nh2rUTSEkXlpQ-=5y!9V3uo}e7X<8Qz*FA{6*^F!L*U<%`6$PT^= za^}p8vARV2S`NmOB{;yuANnMyZmtqV66@1D`II~StqcqX#4k$D#JX9v_SMi%2wIkI z&AFQ_{T-?ZPiHt0#Yh+|OpY(mvNVMuTrB64S4%+k>~!@@;j*R*Jr88CvKVi}hE89> zkJEWknR+)TF^^g1-MQ{gH>@k`1yr-F7t+MCO`-pSSUCPKV)_3C^7{X%@5OqrXy2uHI14BxtU<*b|6eNP-_Aq-ugr^si~T=Ud28_?=HhV2{Lfkuls!Wp~bMsiyj9xrXo7n?eyK+f__c= z(^a5<>D@%ffp19|`PzhMAT4(2*89mshg93fd%)}R`P;)~oA;|~hw-~ZH``_<7tF%j zL*HY`<4-@w_?Q$eDH-YZg*oKFSJ1<=iaTb5TYAaw!0WaXnFDLcC#ud8s>Ei9Hj&%5 z8m+FG*L%aT=J%tegtPMm@vw&Xm45H*r;O3SUR#Uj>uJ;rlANV?wsL=vvCu1Xg7}7n zT&V26oK%s2z`dl~NC&x3b6uopcD+S~Fo|B(t%E^s2lZ|;2j`jbvr?~`+^XT)9kHsC zEv3@CzC0VkR@`n!$<-=c6=z!@%9m_Bk1zbDnof-9AbA;gk8wyOo0@!SSh_^_`vQ&V zT!%3M`NuIE<(`7JMzzuOF$C(6+aBY(#_VooucEx`Vd~z($|q%sqA{qBXI=zrzf%}- z8hNd=Cx)=3QvEqSJj@Li3^^-XyL)8&Fz%@fpHH$@M+WkIaAl*Pbufh7CRv)7`d=Nh zpO|SdMup#2ZrCwG)JI8Yz9XLb`>AC`4?WJOCmr7e=ih|1Mmpm?iWiI{IEMJJiDdUS z+E{%9U2y=7-itK)v4J+)iBC~aey1;f)OD49YWxNDm}4RB4e_-Bw(hBgH?^ZwCb6{t zMkR&PA~Y_XDz3RS7f!Xyup=BrM@_YIrXv7Yb>^0mI-eril=oT6F??tkY*h)mk7}MI zd=-QDLr6@7Zd2Z!EU`z}DEw()oT{2{rj0ySsI&hZeKmGRZMy+%veJJ#+NrihoKVF>3TiTsTW7%{QV~$7G9n^qDp4j$A|JtR>Ln;sf2CIj z({*Zs8<3(ih6LN9X~!dhosMeND(@{_0yac>NS$ga=+I}mz@ZQYBPPQr6`Gn5@@up^ zp=_|f99D*2HIkklgb$E-JkT<6sHFEyjROwoo+IUgu)k@~2B+=vk$A}0@mGC$e}NiN zg@|=kMOXk#LxS!Q&Ur0Y3JcdlH6G@JnS|Y{94te&$Nec+>Stj4pN8iY zsQwbLT`~l1@=?N}Gi1Mz))=2HJ_1b{tTRksGwFtw1NtPF=s^R;gb5+g9^vKIhz8$1 z)1*c6-qiF%QM^?3`1C7>kv?Uu~^rMcaD}yu8>|V(_lv+|< zr(ts@ge9*k(l_YVMS;d*qkC}=jpr+6PSY8#a#UT^rH#bu=U{_4mrZi!4*1r4BkP#* zJlbXJp?AZd{Do#DpT$SChCA-QDY%7=XgWy;6AlCL5n;J-JL#Q>U*?DGv-;CWBEDz! 
z`;KIz@ftzc;?xLW+LkxA=iZUyZ9FSV6)mfeSHcvM#4TT8tiHg9<@$M~T-gG=A;~UPw=>s4DaW<*s1S3|6&- zFheToFd{65^lTF{hH!B-@lLw9QPAXmc)x|$M>@CKU`sfrCL8BH3Vyxxz-j#{HuK*X zkKst<7(#`UJl+PgiJD}c-efvM4EFG8Wo%4ov)E}(hfu6ej~}kF%``L*|mT#!D*>`bSe(U#CNY~4<0~yLDG-L zs5!Z=L|QB>1hkEY2~wnS)caL_SOV1|A@&dbh}8c>-CKagk!|~VXB7##|@Gp-ktY$%vu4ccJPz&v?}U*!g*Z)`Li{u;r);l2h709Jn^qM%uRY zNx-3RpBD!s!qsd>BDD0=h3|%3t4+2Vu3=62Lf;VN5EUe3bOc=L$>e-2;xN7JkOZZL zx>Bn^O|7u=!pMcZF(ST4&T;IMPzc)ijTSVJZLl!H2ob<4W21<5TZZCf7AQTIvxQ$^ z*r<~05QeL5y+|C!Zo}#2AiKy;l9p8H#-s-(mit+yNB1hFmX7x>AS+nZh!dW~$@yW& zMC)O&Pc6@?YSb2l(Tj!4T~Jd&1?Nt5ygw^Mh?TF|()TrYe;zm>aoC6HD@$v#Jlqct zOhzJfhGyJXc74mKgpB&aREjmxAXGq=|FMAPGFA_Asc{~;2pgy@EoLB@0t;R7$R?LN zSP?r#og`~A3r#>eC|!;#VM}^lTa}N42D_**jR^AooX{yG5$%M_yB=Z$!gxxc$26{x z=ut+{lFB$Y%jIMF8{fC$*>3ctTvQ}pLUt38YwIx(`zn4J&KyVOgyy5@nIQuh`YVLm z5J!tN*6{|=Y$hZ|pn)3qcW7F!NU$#xpCY*Q@n}CN9&z?{wZ^=aIZLamFdT4BJhI9D zQWvKucHhH^=%kpdEMgVJQ%iQn%^)a&%?@JTU#k~PL&_<31+867)Hedrsq~PfY3?X|9TnNii6Y& zeCA1N52lzMp;%8DwMyZnn3Y{Z&IXcp>7?WswV(ZET3OWJRhVPw6UgEHN*-T9N#ICc z?nNBLdBHpR_EwM1s*f0z0gD*TNMclA;CD(#;ah-E-WuxCq`IUr9&MkTGWYj9MI3<#7KJT4Uyf-fZ>Pir2fu)Q=R)l~zidsPSoWw- zj9)1UY|nk~cDLa`iPZCL1qJsxBE-YaGD&9-GLvmgRNo!>t6=x8dhIL( zL}+n`w@QZzxZ3nhz^Gq<8xOWelHK5y2?Z!WkGuKFlWY87Amfd87&P`;(DkRDciBy< zM=2X2&;n~9%V_fLksBL_^mwj#Um(Ge{5t$3Zl-&=jvq^)qxL!~)U_!*=zsHWlTM1mzPec1I^u&hJW zf>~}Fp4QS=?L)!(Sg3w~{p;gSqL0_=TA3%S@+A)jMfylX5!gTMC8c=NS**80s{r&(KxqP+Y?y}gQ2@7q>m(oaDc`TD-5KakH;=qb+ZlIio0 zFJV~{D2|PX$t8_0!CZraKs6fP+A=5NDibH;pddgmp*x0#q;b(jpSHl*>HsC-5UzB_ z<6+ZdRA9R7_03Cp{|nk9Ft1tAf|9{>{UfD+?3h^YN6Dsw2#!up`~}LcWT6yWCrN$u;%dh}n*DtMhV% zWJ_=|GYxaw1|E7C0t*8IMV8$?C0OCku}GM>AMGsLoVqTVo~Xy4_{GGw;;5qaiiqSG zcTv{Dx=(^sbiWiqXMq%VdLTOj?;Lxx^rTL% zN>r%Yz0Kne0&)$}OOi(f-5__A>-(!m>)=E)7&{e~zB1o0T(W?lelj_Ud zC~VKf8dP`yV_Ue-i;{O0^~fElkT6c}e$@s#YC+-&&)=Jg%G*U?zvQ3`8av2F18ve5 zaT;=GOMu6j!or9^>7$>UVm>ozH7S_C3YbB!)kUqn=(9{W&xf%UkvBLD@0&}kahpIN~j(0l?jVKL9pSRnSM9v`*7f-k}2 z{Fhj7OjpIg#egx`W_$_(;Q)!oey%mHyR;7_uxGB_t{-MUE?(`J)|1s%OgXkQtSVlX 
z+0Dx>CKx+Z4y0h$roV_lEy`(s-`0*4Zx7kShFF7RNftAa&;V}1loZ7$U$dS_#Zok4 z48SG1J8wm-*&n_duA@l^#SY*N7i!=OOdv05Eja@2r33BK?9+;P1X^fcvd*~(t<X*~6?OSj$xUZ1zptI|W5ILL{fc>3|4ska z*=&;Cr=yp4Yq4eD=cYbxLS^#=v{BbNa=gBDrKT~7ELRxwci;4%_d_j3 z%e4$hvz6%7Jw4M>DS^SCREZ7-V~PAKzRO8WTN zu>MnUPx(j%M$u1u(}D5Jl(@u_6T(Ov9Ewu3oO0PaUJx7Jf}@Tr10|u zXGQjf84VEvG52``KEt({Syzlt$r~ zh$KS+VU8r%X@d9(85e1d*dLctq~YlpOjF1>f+_gh2{1bXvHjS6{hbE}FT8)Q`qieY zpo!lB=qI>pvM6)sEPcy~ywq!@a9@V)bOSYAop$Us9kfD3ue9V_;jYny8`b{&X9o*O zI{Xjbn%7;<(mxQTTP5NDrfkFhzg0*3Tj)9_b~g6^cIZ0xKOw%g#Bc*O<@<5l zWMs*jrV_gRXA1FsI= zO8qQ9G6z-F$|iwM+sTDPOHy@#+l@el874LSelFN!ozbyZ`puDq%vUQU1lB+fDF^sN zW78Tp#VBENLYFeO655UT0wkr%tNl-heSvck&U%oG)q)61uYL43TuoODYF zTF9yt7d!g9+^Jdj&*cHFWcyZJEm~{WA){B26A=a_f1@LRm?Hf@ULEHkdezXsKvQxs z5pgj6caLojhCeBRi;7e!9y{F9EoC(VQt-5xtsBc)UDS(|HJ=+`!Bsxw3UPn_MR59+3PZqr|(kdX4pa4ROF$n9F zjo0%9E5@unyC${$tKtqnda|+@u`;3o>6>3}!Kf(RC zsu(1w5iI5_(~qCMoUXF(;jmjIL?QA?V(5lR>KTnOGAYSFp7AVnD5hhmV>4A^PhAX% zn5pu7f5_#!G_{7|@DiOEMTZv}zv^Q(^($!IpVC_$D^ZIOAW z-2wx>lNsMxEVV^T6Ls;*q0>#q6YIbwA)>07)G>FaQq`biXlQsIzZcCJR&2@-b7fr5 zsV98WC_!l<(VR4VUrL_*RxcFbowHfqc@c_zEV1C?WwK(>`Ml`>E(AiIdv#p-p#(0r zI}lUILWlQqM9=8N7sZ`fCcnhmgHObwoI~v`XAcR>IE!xc3KOa){o&xXvq&I$f&$_p zi#jr$bLz0E-`*2xrmr=__vixy?M^Uh_K}fIcq#HxSJwpLKv@i|s74EhX6J=99Yc6b z*!?OTXxMW8dkU?TneBudr|04u-tY5>H%g~76=v?PpP%W>xEoJwyml8j8dH{fcc$Ul zchpvS5}MvMavhQHPKUAYuj+U?4a~2ybL%+iSFLy%Thq1V4(;q(!P%_5mn$xjEnTlw znc@Xstzpk2ySX2GlkPNa#1q~icFGy2*B7<9NZ7Z#Nf>-sh4@!f!OHOO0f4-Lv$Kh# zEe!py2R{u66AZnKfwQ9p0B4q&j*;p0W)B0=FGtLD9PF$_bj<9pptDNm2F4b)X0JX$ z)Wz1w*}~5D^;pEt#=ye%6{D6B@T1?p`Pniv0Or=#$j;d0=eK}XUw z!SM6%k7oh?4QNrA;TObP2Eea{8Gk?iKU@jW2U()m`)L2~PS<(G$^{_vn%Ei>X}x0S z{sho{CBx3n@s;RT)-VvUvH4Sj5ED!pKBXOz3AI1ZeE_ z0e;rMJjwt6A89ZW0bajK;%k4obY&+-4q+~}_YepO0&fcSrEz+apv z6C)!F5hFV@fW7}kSZ86SKd9IrQJ$quY|Wg_iC8&)zYF`@NGB3j`q!b6(@%1ggk^v3 zos4W@UbXr+LWmqqU`(lC{>GlX;!^)+|>nq!TVZm?1;xBjm|8`g~zS=AcTj!sx13oBQSOaX)Z=e2vlKne3)s>u0 z?0@zOhF;_6F@VeejhNl6%tV0q&+h!op#CCifQ5Kv=&z#wRVr+MNLA>?0B4#x{eIfN 
zONW8z7wIq%{nw@Qhr9IuF^&G0z+XH12h?&-qSvq#&|!cFu(Nf3{Z^EP=vA};Omk5H za5>=pFNXZBwEl~_|6hbOXJBIaeGIs#TqlD0b$S__3 zr!C~n1=XJ-nmBXL*v)2*_pI-|e2dwk^O4imoYtQBY_Iyupz!f9HO7?4H)!R~$2wlm98b)NZne{{G@(V`zUj zYMwz)N9R3HPB`uW#r+MR0sjT4=}vJ1%_?7x6c7qf0|XF+Jg7W~e0m!y(9&=N*Gy=H zgBefTbx=>;+}#XYn_X|&X`d6{C@_%4=F+Io3G1$3*~M|@{$!HUgL5iOxg%Y`oTCL* z7n$AKrEs?8J18w!VcsJ23Me>Hv=hCiuobsygRk2ar7r3&Ao5@WX&5e1cq~801if=m zUMM`3Qv&e=>hTB<`e6`c#b($emF36Yy<^05=_;MUopTx@sjI)|IJQ8TxHQ_B32X9# z9}n|4vDNj%eH8dGm`+TkQLc%@xzjXNfI9f)&6W>Y;DfQg3xmR;nR-DxTM&HV-j`uI z9|M={R>g4c*p6|-!DpoXF1k-Kj^xI@Gdb}~4jGH*$H=FX?v)6% zg`>M-xMQ%?RK~rD#H+qTNkDX`(kw5x&{LCMC=V;xSDT^tdmbjGtuLJi$HfOfT7{PC z9#UF!-vi|_=y?B)C;P*a0{^qN^3_8ADJ)b1WIQUBO2|EhZ|{}o=ma^_D_=ihPV5B07;C6_EP|CV|x8rYisl4$}$>EDI`)9)j| zi;>90-N>BC$;Hs;uO@-*m6(4T+zMLTn;ZO$Yk$_o0AZ;j5$ms!5MZSL8sf73CG-1N z81-A+OfPF^`?^reZ)f}^JNe5Qe~q+%C+7E9>$mp)61o2m&LRhJ_9t^+SGNJM;wLwL z9y1ZK{hAK^+ZY8HlhYP&UAS3K%{}6_2fC>pDYKJ8GH>&iPlt$3j*6xq12T5Dl0o9`9XIX+jt1v(FEEPow7<#~lh{D|2 z+1`nZp5DyD+1$mD&dAP&-q^s_(8QJUkL3uqzoZNQoJbCapLDS^0cd4nX8TDi2M1sW zpi;!d39$T;v4xY7i<1+eR0JTy#>Cc{mXVPGP-A+1Q21ZU{ugKbk11hdW_@KB%PXH4 z7?>G}nAl&*AYx?Yq3k$6@E90Lf@)sxhkNLp*i@q@avSazl6sBLa<{!x8FMjAB)4|Bh{3?$>)4}?W z72^-4|CcE39}~jzlMpsW05vS^>_qGw|9yJ!yJ|bpb8@n=@;li%I`i6D8~;hD?0<19 z|GWtPcs=9894t~u5nL0_F0T1t*cn`@_ENc*v_&k3la>hLt{XQP=14~-*&rzg2BsI zK05j1`Axupw5eQGUZr*l_NT#MhWU;yvXc&d25REzyVcmF<|jJ%_nJN1`^(#Un38@d z$H&lLNEd&2c)I!byu2ixCf%G}MkTGjmm7+DaXRPgEuR7z8hhXNw0W^R)JE5LBHm6( zJt(9xo0s|Gdw=+z<|R$;nHy=CbpkSP7qHG|02ovwDbZIlfAj0eQ_%N6&M&SJlU83& zw-MjpJ-_$8Y-`oIxai~eeLiLfTm-N>!bKN9-MBc!k36*R+aYdA{9ABoPPu6sP8h=) zj*n4Nqad8&9*=}cgn+x&axeEI+j9KfB4rUNPMsU#h=K2`T%Xo^4%R+Ro5;2c&Ki8V zRVzmfUetg3p-%>FRnetS1aA_9E+h8Rly;wbZHNC>igh!VDw(c;t0x_p-hltb9r;3n zgZ^|zYG+<0uOW^oNF7IjrC1`HLa zWU`?pFDtfQ2(m&OoT876*}0n77{oHzKwmn%u}+JjD?BkFQ4(x3+Oeqy87=%+E;P2TkFz)4qpsF8OJHg4_=NL#^>8g=I2VuiIRK-tT=))NP zVi6Rg0Ou5-Kgf|O{H}ToliEq0Y?uThIJ*3X)ma#O=$|MX&lqX%&vU4Q0<&4 z!A`L6+n=3}ixY4LiSP0360J?-qk+&>`SN3hivl9J%s41a9zmU&n+ne^Z}#A3Oz=K9 
zRSM1IUA|)^9oL+lpD|(NNOHhd8;hm_CO9*eR&Q|Ah3-sNmQ4LN`H6CSxLE-UCjv_C z##xo2;lmtLWyHY!Fys!?BS~q%M33J*H?5AUYP3ZHol3oHY0mh<{IiHOZR?T=nj(f6 z48q3n#$H+D_;yCmez(+Y)OUR{TNJ4Rh9;2C>KQYMcvMkt606Y}JrMGXu;EZ3$t4=}QH?DU+i~6Gx>-xW|{H zyB*PP{#eI8m}wJ?)+lVA(R5*7^Y-6xKYY`v6+4}-(Z6!k(SmTRx0>E#Z+dWXTSRkq zYL!dMd}G_h=cx6=1K7q6En#cT4ZqUWr=ewJ*9|wad=bX=TJ;vX2o4a}sjttj?X(l^Ubs6)nJ>HycVoirbrfA- z6D0*B)o*y$TBXb173Wibpv^{6h|=;~pIFT7dbtxV21Ze_@)vRGux6+_JBuIgjMJlO0s>e##LQV~tz4+agI=J0qkJQ(oa>9wx((i-2fVd$dhXra$X8%39Rt(y zU?o*1cJ+7J_R7j5GwUgk4sn(}pOANMuDnBmKswCb8HFMSbKpQd8Qvh^y}ilLvA-*| z36e(G(;K5X_ZOq!--rG9* zWU6Sl{PCimd$a2#!W{V)%0^Ap2UB&a=8mqIw{Pd_(8cLtx>e(o+n480tHt)GRyJ>E z*-DS~PWdCzPkm-xeM8c}*adi{Zb#Hdo!Zd%9X^BbS%3L@Tr|5HUQ=?2;N7O>FmSBr z=fW)8_3$v~Qyjx)rM0GW!wNHM@jVl{Fa0D_uY%-bbf@>AzG%B!oA%H~MJ%fzM*swB zB~atDI;n~?o`ZW>uL^FZ#ApW6IsV2?uoX~noMy0FZQe`-GFDEQwv~-fZ`~pkf5X~W zRH($1aqT5q8ZASHg6b)-4_~IW9p>Cn8sn9F%41ouI2>&?0O-tN{icO3loB73yNt+2 zi;1KhJ{9M%sVHAHNC|}vXxaItbYwNac<3J#_EU8t2$KpqCo@uSZwOEXktBo5begw< zJC{f2H)V=g1~GA#e+h|SW6Us$RNFHt*qfdjd#Ggf}qj&g#dU;~*?H zPuhiEgI#-Eop4YeSp~=k$tl#p74F2h2s;UR3ukBBo5AU&9I2o-32}K+Hu{+A^X+3v zDPdH?r~bfB^)$Vhnl6Jee7y2@_~^ywj%@7{EE>Y^U*vck2p zF>rjH5PPI^dOYFATd>iHh7vD zXuy5&6hDs<6HpE_k{<8OPAb3UNgZMAlMbB=)(ai)p*4{i@(f=gPcf zB&hBY(hv(e3K5_(pQy|WB|F`fDQ?*ccEW?GMqAx{nb)t#uSb(~&bV;fhjy-6o5Ai7 zjo#~5(;N!bm%H*Q5VT4R7Z_dP6K0KLE=19m#0Trvpc`{(5fs$|tJ6dd2mK)*<4fLZ z2x(sa^&*AUcbJyt;>PY~*E-HN0@vPtfjkb1IK^bLl@D~9My87>Z#CXHDB`jJ_TKSS z@Kb=jC)cO}uS2Yf%wlMKBKyG7B{@Pi3{E+#-3G(D%k&iLTZHKY=yGXZhmG>sz>ixj z7@7uF8>C;EL1!4X0Q=J%tB`kdi>59(N;Yz1m{c)rS?j!1F|5c~y(Hnqp?e0VmHjCc z2v%1|L1r?>?LCv2pCw6-f>|xT$=DMiN*JShe93-GrrcAlwCEkU3k_9xvhLa{qAU1D zW^!H(&jYmpk`GmEd0Z|Gfp*>D;&wn)~T|SOmnWf;$BNigpP;${#f>g zAx}Gt=Ct>uK~;`0%S-{}Afqp2q}75%XWS`jBTH6*6B?TCqb#0?rMyg#;rkC6?C4C^ zXDL7>bRRQHsSIs8*0eA*k!WKIYZ!r)z(a{GNtLVpIScZTW12BxSn_it24T5&>kFOD zYvwGoc8AqVvOp54X2f4ezt~{6PF&)B$|F&5Rkv`U*XIJ^9RtfiROUbsXa!{{q9?z! 
zDQszD0%O0v1Bt5rL{D!o$`3(t79jAUb?m5UZ;F(rpdOP$K4UyCpCl8+B}3F86d9y3eEQPlG^#zN+Ys1?{E8y0@#ZvK2uVD@5WWN*7%YqQ6Ag7P)Ef!! zlynLL0*`8Wt=$AaC`Ht*;73VIu#r%7mtB|uA;Y!Au0uExdFgM4L!$?N=)rqOOcTH{ z+}c5AA@A56(W@$~F#8?C=$h#|RO4zoJWt z3=n&Ey=eT1c8ZY@9;phx@B}SZID6!sxqEyF-gmU?06huPr(-+ps0yAh9E>1UG2?A* z#SO^2KR3UtVHqZKplZs?F{TUM4e>NT#{YP?JkL4k+<>a-@69WhHD%R;d|vV6fO#@q ztn2IgguPc+Q7`ZX`iwshHavmhsi+~r!3g~Na4}#r#XhXvzH}*zKRNY%ERj1CbgZ%R zvh^WYMJD~ux@#%?2fi_#ETSoo&)7Sc19Blkm)o-2mMQ2NjkjGn#h3@;vs_js zh-S~`?kwI}@MT(%eEY>sF+iyeahJMAt=ykawsM7KeA8ww&GlZ@zVT}7sbcBtIx;v<_044j2;b=X@o>d3HY?fV z{y-j^gDwJNT#=XL3+b;I<&b+ql>VQCgxo^sM->oNH!FEs<;2 z(c{4Yw*jaq<3%%EQ#}m|s?URvT}PB&{05#Ui40x`A5O@H?B>!R!C%q~I%tFv(@i-6KvXXqL*#V>NKAl)f zCFaxBAg0zZ#xIQjct&vF~^yGs5 zSxmqyK$Vbq|Bix^*T0YDmM;+ zJJoV6w0a2KQ-i4_Z~$4xlEw})$ujn~<-2|3f_v{R`wmB&RyteSSVFpLkO$R%)m2bb zFgMH67l_^PF#ep4rL#|)au3wdyJbD;a#PF4l3jM}UVdDU^1Z3tk5;D`0sEV&pQ3`r zx-b{c3SGW9@e5VphO!Tzd7QEXRtVXbkm?C3*fQ;G)aLafzrtwkzqbdz(Fr1HRgpLK#NFsUR}=kx?rqbJDal4i>yv92eftVAR#t_B)c&0A*P<{r~@-Ra%6&wMgn@G3*QZQq<5Po9Et zo-GE(mA!L^(k-Uo$+Rc4M&F|N0ny$@q^blK1@6iG^ zWwR-Sp>A#>w-+8i^)K!&)tOwXtuXb7hoWTMIt;<_>)IRZXRA+^4zD1}w{$#nXN`)Ztzb zWM0tm%sH#HmC9i*+uNNywVtq)*xo5`TllBmay(8CX|=B0_ndBzsoACd01j`EUzKyU zyZgScEHXxadqs~_46wf`dRmVkrwJ-nUUe~ut=Sg8w4e#csAWVvt?WM43{XoEG{MnI zlMT%n^Y#URY}}!L#r4IygoRcbeUG$n)ZN4W^e}kOyf&!^Q&0TlvuQ?mpAtZe$OQ#h z(2oa)0AA0$>kyF6IMxyG0oHZ7Cwxk0mzLSOn&mRhR8BPs+S$*(C#&TC0kJrhE$5Bh zUyZY4yFNop`k5{51^lMTM`-8Mp1mqLPv4U>j4^?b^w%YcC@ptBm9%-*Jdc?x@5a5O z+1{Nvxgw;tXo@<_%~sohvgc>yH+O4x1!+-2dGLAsNRDHEH&1p8GVhrmnQ%m6o`^8T*=9=2CC8#+Wrr+g5RTmpu4&oqf{2 z@3Qf}V}hlGRcG2m%kf9wT;8pg-1|vyU(g-fRrr=H@x6@9CC;{`M||h8mX7=79c6FX z7nI2*^z_jWtMa1u51o5UKxt)aypQQMT66*F{G1sZ4sBVs9KM-}+RZBWt|PRNwDIv( zFEG~_H7yN#u~vm&NuX$^-L9jpkhTu94kakifB@Frg+obxJSSq%6S(G717 zW;yKis0i6vl{S|b^-@{~!v$M+xm`}5;S1!OC%5Gz>2nUR`WMsr&)SdpWHVN$tgU=w zDw%NY@zg<2c2&#eCLB8FOi`tyn2OR}rI$IFw~NMUwdi%mG5n%k0Ef@Bdt{XM#F@dq zdO=xP6?1`mO}+c+c)#rXFfz|GaOD2Zb?_K`CSUqpyxxm@U|=3~=jbXt@7Tg5|8xk` 
z$cHh8Hlv$^;FewG(p)#H)4P$(sI$W$C(U#@e(wmvPiY5h`%=^FP+iaNz7Ge+YH72= zZy)k!eU(2Bg~;BG?jnu7SH!(;Vz+O&sa8b#QKElW$g+q5cUt8$ zNWasf&}`pFdtkBs+00$;w3ts+TjvDJo>hUrVL*x~J?!O5iw)b1hb=zI6O;3D2Cl$(3e6JOC6;f`*cfam+4MMxmB&oqbp zfI_5KwbUO9kc!zO9zCj^4yY?m2D(cs_Dv~wjZ~l?(X}BPTTUf z`(7H5$TZ$m`quF}ffUu>Ws>!U7QP`P&CMqCi7zj4k%d2B>AhjbNHBE(SVPRsQES0s5U<9yfx z`lUj86c%hyI%8-wqG({5FB&a%9!1rRJzgC`Jz@nP;1lr=Q!1f2ecLULHRECuv~d{5 zH`3H$%-j1X0)mK%DPY;LVAn334eVpFlmbDp=^_S13c9=ZNVkIqETkdeO_lL2Om`tH zKzj={0xWDkC6k(+KY|=bZSQ|ay4~FkSENdoIXEkIRLpje)=@@LQqW#y9&L4={vZv* zL!(4%f@Y0cx|dHzP5LPWcSAFl0d31)L=%-tXx%?vQ^`biDrP4!F585b+X@av@?2(p zR4`CEKVCHa(`;~PmQal#6`T^8*(MRMXed?8h2Us};<_Xg)Heqq!$K1z?M+hc`VGPE zPf7~$hJj4U-?WQVapHSPv!*~8ER#aZnjqM8;AId}7GC0SJiS~AlD5aS()*m#dei!> z`g{a?JR*h+)>%!A`LfmH(5r(tMBxTUH%*AJ1%+HZ5v$3>6GCy#*yxbR%^vz`vOgGZ zx2wK+A2}KbRPXPvn4}>4(dR6XC+3bSt@i6y%(snsKLsHPRFreU#O20vl~Z1EUJEMQ zAkkwYr?5`JsQ!SP6|ZrQ%C+xImgE|-WK(Pz8Fa$1{-^=IdMzqEd4op}zLOSsEn|cT|>IW5e6;u9z3P@kNQYNOhY(sdZqth{Tnj zF$VJQS8_b0s~938SfU;WQfnowtzD>^aNhOGlpyFjtF({ov*9oMoK0@$4e`@a#*N2f zeW59Unzdg$1?~zx>iCRUq?WCOhL#>XSbvPL_|7S+G z+tBFYkC>Mx5+k)7233+J;ba&k{nX;jbcxX|PG4A?lRsBN(>(go2@f`mqf}WkokbeJ z#u?l=4+MnP2xijPx>CT_3YOEE;HzV5zm3beQyOLP%b?4lRk=Ge`~v2RH6A8s(={cB z`6a%<|JH?%8*A#DA(0Wdm4@F^Kc!RGf=mPpkslVgzk0RYa=V79w2oiOL6 zLL<@h{@CL&mBwNIE0f?1=q1?gyM>?8<1yR6sr29n=2+%xhJiLzqC803&iGI=0;yJO zbGG9(!X*P0BXO})Co5lUHd^u(b>RdpDa{5c4LC&B@#q!0y<_Jkn})dZ=phQHBU4d! 
zYTBkIr2?;xNi8oa=jW>10GH!mU$CLHA)WTCSx=vamA~qb$SLL3w1R^3LkLWEl3Vu2 z->D)M{g}VGKodl&hFxI~n1Ll)2KplnEOq0EYD=Dq&O}f~S!6m0)l@Bw$g8BAK&f;n z5qO8oxW;1iE|-}L6JPY3jOJ}artZO|Q}+w)+m8vFOh{5#h%;O7>U(syarXrV59o)hDDd{7Vj~LXAlDtc!z3MxRqN&Dsl}kAS04c@p*9rqDc{iZYR!Ob{@aX4wyy7 zO%gViTUdV;MBokZ#pp?HaePn}d1q%U-Yp-}!vIid<;UCJ6oKyaQu0hu$Q5ERW;VYj zdn<#XGYkVm(*OqsL@*@v$Vn#h!gtQvL{^nB;G!a=Z%2T{;x(2e%u2a!)M=2LMwTV| z6;aJlxsX!PwPOV#YRe7tH~RTPj%jXuAGv!EO)c<;Ec98Cl^0B1reNL{BlAZcQnO|u z(~;mL#1;+$uUm$eT(3xM7Wceqh9lh?hGtw8w>D!>>@7&AO_O?Ky1HYtw8?tddR`Bz z&BZ1$x3U0kN@$H);;^6tRR@mH*+|S3b{%;^@siH*HN*K$Bu-77*Hp`BvVs^ls2)Y7 z7~gyNqN#}BA_FKV2j+Oto7cNr<*BJz5bx@wG~RDEdOxl@NMD?0#&9gB{5a0SbkK=? zEGMgYpFD1}AvM?9Te-EpoI@(uVd$Hpq-2Ur)BKIbPx%>h%@SoKxL*o{81zzSV-no) z0^UEQ3)<&vfjI0I=}u&)*^ufyI~@=ubvX{5gj0d!0TpcYhw180lpn=JA!4KRKD;#1 zl_Go7Z=g-1=!lRpw^$=7z@f6})@HXVu?0kU-(f8U=343l?FpdM671{0c^l}ybh_0W z&~p$l;Mc>GxFMoQ9l(*5m5Ebs?+7qWZ@HiC!D;{t_I7Sg3a{NJk z-d~v*wQB=6;td#bghXtZMl%deLqAGfo0T*f2~BL_AsPlGhU(Qw1$l0m=^RA9b{b-M z5wg>`z>WEb5kZ&pdNZH45XSV=F03Y?t9vFm5Ts`#8@?erAW)n@FgOmxH%La#An;5K z1RzyVC&p*qKtF*oM9cKPwN%nozDqbn+Ej;v2=cq0YYFQLAFDL(Oh2^Bp8s$ zugr<&1bY4Jb?{te;Wo~kIqgfTF8Ws!cIhApgl!coSn+erPz$12Y83D?=ya%VYdi)` z<5wel=?)>&5IrZLS z{I-?ea_ts=6mnhU`Q8G=0^HR>u6N#{?Eqq_%i8d0X?n=$G@d^+sm9<%>2oU9bRo$b zI7xXA%8b1&7=I?mP(^4t+C*)ZVUJ4fb|)ED3k6zTved9R1qRY`6eLb`YOm4`1MLV- zI7ISjkZ9omDd5eGW@1V-uG5(KSTU94Z|hqE;O)gWenNZq8*@iRrcR805!fj`eS=4d zQ9Ogx8s3me->8%=UE3?gr_Zo+G-Dipxf3acAS3_96d8$X-Idd9ITuzTpXM-{=YvUQh6Rn@5z?Dq4qOD88=e`w8kngwI8frb2go( z6l|u1aFV;%w%$TFuBp}7g%h6ePhJwb|4it9Kn)D?|<5 z3VdZTs2-z*TuYRx?|oAeG-&K+QqUmH_B;~nvZ+PP{>`tSo61+;;O@Q*Y(LT zC9K)Tp z4iI?S9gT5y0#&U?Ooc@KLc8GN7ERtcmG`rVa?SRg{^$j4QV@F?rSok^wW5ZhA2yf> z=rIdxSWrN-CG1PxzoiwF^jeH#b|4cJW>%x~`-HlsYcp1H#Yy1;EoKe#wZAkqf8Z7_ zM+8Z)IiM~q@62(v`2Kyaw$sC_?K=>nFXRbCeOF1jI}RrSY&_OPbo*H7B9An{o3?qjy=;7Rxj=Xf9qW-fWVZ#kDIAv@9%TvH|Y zBExP#;pdH*c^LLGQ3BLLx=nMr!}TlQM$}zB0*Mxn#!GfG{sE5?qMhp|>N-3%L^1Kl zAjee+f;N+P;mJ3IVnp;_PvmBFoI`3SjZQl$RY?zLZpUqY4zCZmJa{?m=r~mtYZy1p 
zpa%92;m(v1(|(uT8=0wE!4ixN~mw&u<5Jpgoo8Y!K#2YB%Z@041JKe=M`M9LwE$>6q zYx;EG3Hz#*xLd61+M?DJD)iN5$E0`T(w`5{W`dt`Zxwk~p8T`{%RrwJp)CQ zcD-D4zY@=Ps4r?Bk7k-JqhPpDI@5UFdxbmO1uZ9&Sz#F>PG2f#`#k-NL?C(%zHKC{ zgY*aC4ikQ@3fdB}3za?3tz|L`p{yNEn9s!P#y2U0G118SxS^D!&d;WPvlZ1PiT<&G zb3AHgTH<^3PV{Myfnk14$5eGkp;jvTj_&sUo3uDW4aGp1`-rr3t9Pn%8`q_Wo0IPf zulX~cqeNiJ+cb6jrTiV5ak3Y7KiPlv^c``7>;frDy%q;4`56DsQYKnsDA*B9w8nFQ zW+sM%m2H(G{Ud?&_EAnxqXM^Ofs?FlkcAO!U%0Nm4d^4Ds^o~6s8ul7@F$`oagK=L zYiCUSkWd9-(>TTuO>L;ToC$*jUggywRpwl69HmBkZn!K*?CN#{-3l%$sdlPOg%Z5M zK~)HF=VUm^g_quDSM}>hk2{i>Qr<^%ywzi#(V^(dvK;tHwf=EZ-|i3TME8D-U#q-x zRP*4B4A~_}z`yb=*U^HOe14#fh@qAqy)ZE*P%0`Tm_Jgn%$cJi)gel5nPeLLq&ALg z3qro6Z>X+%ew5vxmObc!rg2l{{2u2+YR^Q73Tq=yIY(9sgo}%TLqMeFcJ!bOEBm&Q z1Hw6zB}Iq<0-4QwR#>3tsNJFbhsDLf&^b~32nlJq#*}PZSHsV`n*7Om0)3Bg!8AyA zYz7P-pG2A?av8<a`a2qaIT_sLHhQMs7z>CdS=5u$}}_2{}0;kytuv zE)NV+M0)8Pts^N&>+tYY%^?6auFx$tcH3hy-C-u=Ywd^E1+bETMeZTjY(oL#zBHyMXHWaW3Jp^&d&+7yKJ zogUMc$hd7mVpYL6KJ3yeEopGK5TCD;+0qCH$>9&*a;*|t@=m#%ME+mwy>(Qb%bMRqrFER;a4J6=Hl8CTg=#4V5{;QQmi5 zC7ua7m0I|e;`pEQM-H_ZLjnF=vh=#|($>xxc7QfsI_Alv47g#YB|9ni=d9o4H9mp+ zxG7%^wr7koXwSM<51MkYksHJb@#+=|S9!WQOxF6f918{qT@71?cBLPg6Ys{dDaZ|2>ZyxTLHpVj95I5^3FtJmDbIJ5 zQVpzT|4D-URM5SCR_U#9FD3YmIo{AL8h9S%K4JF~`3u;k0?y&OY%)h z))j~~wyeyow?()NG=yWU6z&1j5cio(exjX8xDCNzH}vz6UV zRBaWnz8 zkAVJPRX;3@zf-=wExav*1c1OmK|w=7!azeq!@|PAA)p~5z{4Y8p`gA)!^OtO!^OtI zAt0e8Cm^CG#=-f-N#YL> z83u$8gby5)2n1*?1C9*()&rUi0s;yN`r8ZS?=Mg=a0o~!Xc$;H`1c@Spy1$O5a5uI z5D>t>^aB141Ogcng^+qGHYpkT zCkjd?W)@a9b`C)yVG&U=aS25wWffI5bqzx!V-r&|a|=f&XBStu&+h&KfkD9`p<%Ic z@d=4Z$tkJXIk|cH1%*XFs;X;h>*^aCo4UGtdi(kZ28X7mXJ+T-7Z#T`Hn+BScK7xV z4lgdRu5WJd?jIgOLBRhO4)FfBfPNPbGB6%62ncWp=wI=Gg1G{Jz>y&!2^pYJ_~oJX z?NNyUelTbPF&K~Y;S zsr{uc2{A|PF#cLwk-^Gs zfd$Zk#Hd1h(~Oda!53MAUe(xHpxKq=in6RJmSF+X3l_6nx$grv+<5*5@+jHb+cfZD z!*5)tpC~V*fF>bvPcY{pX4QA31k06*PJpVqnpls$j806UiZWVcwgY0X{S8E?nTiV% zdBooCgm_sN@rR*m}S5qt~`7_6E%Mz!&{nd$W z?LZ1`znjg_Vzj+RVXqJ1T#PY@zuCQ;gJPwdTN4#7QLI9TtwqJi|u+ 
zyx|4H`@Gq6M*@_7HtOVYZl64hgGZ$%ee>+5*fo*u>embLyQhL{i#AVb_X~+?(jUhk z7&Q__xSN{Ny+PhUG-S9V_r?2Ym^5aIr)Uh#FM9~M=sTctsv~T#i@iifell5-&*1g7 zJZaCIF)ML`zu&VE@h8WZ)wt#bUsyHgAgztKOh||cjfh%-h(&wasx?LT8nMSa?R^KZ z{JM{^gK3)V5}uA9|LCXzZ)V(N=Q%K_-7FPcVE6?;M%P6Yma_EGuI3Z`}xB)o&;4#B1x^C}n%!&o%W+Y1@=?7{6%yvw)`z565 zermYg>LBL$`&9a{GNsc_b}aq=&ClagwX)j$0+US~oY5Gv9B>2s_ z1gOaT|I*TpmHpqiav1-?(oLr9{QWX~yKwpwIB4Dh9t9qk-lOt6iXT73UL9foZs|5Q zHnARm090(!eVYFu*U9o7#rgTh_!;i?+2!RzxUb%)d}iYl`@+-N-unDJeC!Z(?xb(H z7R77SD8eFE2+Y8bA7h0FK$B5z?W#O=E1h%@&sLw=!?7NEx8 z1$3?`AwS^dpsiF*VDx=q;VDBIa3azz%-#M3iUPkA$Q~1JAQwKM+DCL7Np@Zs|C&cc zf3paaFsKe+jbQBVWg*>t9r*uDFjVCgRB0CBG?MnCZtjw zPSaeTs})9@>AJ4)O#4QDl{XUp8+nS4uA0nR)VUteC>VM^4wlNPF15YOudlNkqWKXFSW#QSaa-=+_L?&DcOr`L!`M~rF|N83vd?Vg>*Z!`2Lxacd)yIds4L4ee zhadvVXxrh!*2;b0Rt`x>+4y9juYUs+OBo&hz7f0+tYlzo&*tuJAMk2Va_12qMGTw# zfg2+lh7zq|4z$})$WIevuuHA)C?r6N{4!8nB2tW?a$jVJLW0wFuyhj%> zw^TgythNXVl+nH+ba!yOUmI-{AwtTw`2C&82-(~?n6+!&{Ry!XZTv%`=v6+nSM zp8lREH$@tpNW6e1ok>g;W4TKPnks*)9^S->c4;?@2sL_On6(ekxr%8hUFsTW>PN49 zpXg8hNygx_f-+2MpS~?S&7ttlLY3}}48TlLT;6mlse4W_Qwu?YY8k3#EZ)Yskr7dBe5&58ag?!M0BbAI z&~%fZ)LfL=h2m}bmIiUgUnzcr_bIKgez1D*Ekj;2Wu@ARs?cNCI?#2zuasr)o@?*$ z=-%G*Y3tte8C-fLkL!=chV2hp-~Usq1MBaM^4!v`l}97z=n-1(C>%t(fCjLcdgJGC zR14f4pl&{}?B>;?mH{5FT@h*2~i#Sk#@v*b_?2$33m z=#5X~>*JU0qQm1}=Fg8n+{{G1@(d;44gnn1^8-8T?AQ5s8hB&d}bN~mkqyf?hRh@rRAsxM^ z(X@DG&j{usI&+sVU!v7Jk6YdxhFH|`D&#nd4Be_>D{KZV?a3Z5l@N%0d*M6%jBTR5 zkP>7LA)%4|Tx8;$EAvYwT`C0pIXrWO8Ifafau(mbr_XwcCZQYzvGv7}y2&4}Y*-e% zK@hj&e&muq*3hV|+p0 zfL?NWb^YA{K}fE$D;Y-7jkyjsR%fy!qI(OO94F_3`3JR%zU|%_Wi0pxSo;%S zbf!5As@AF5Bmw9d{mg4b)&qo0r%qmvE2uJ z{BLaciCF178UJI$?;oZ<^i03s-*ZdnPBNAIxHwm4IlxZ>`<@6c^phVS2_XO`#&K*A z;^W7PPCam_W#7SMBL_4T0?;9LNh7ORdz?Kcyk*9{k#vvtr#FlY4&7PJVi(t&r~=jY zm$KH-%PrD}gV<*V{?}vzj}`*%Z*S!oMp2kX1y@sXY40$g<{-XK#R)GSl-wIBxG@p3 zVQ=v^jjx<(ty_>&Evf9B6kpDLeoV-KR(G%|)R1~wWQDgFt*+?M z1A|VeOhFTdq$DFz$v5NI#lvY}2c>UoDSi>{4j+}Do6Cy?&Ahj3p9vWnd%Z56?{ZM& z+)KjTcM8&C5Z?={?_qs<;^-3*tlGr(u!*3S$y41%f~XknXJ*IIL*ry^S+CW1)9 
zSax08yG`hYX1c`jvW{u18!#=sQa=J43z8bZxG)1lQl~Ys8d^+qxE&*t{k(SQs8JM> z9}Q?2F{ZT)?A0I~ib^sfm#jkl@_dnf?Z)p8pyN)7OCOVvIaM^4j-yIy7SS-B7pRT{ zkL|Wuu}ml7T3A7Uep<(WdpW!FdAWIbdl|pueRX@gIU45mZb`I-UvFW5xY#<0IxFY( zbbH>m+PmtHn=W{I*?Ot1w|FLgf#UcGt9U2wNQ}0L=4I_CqOLcY2)V+HXQ@o|eNYIY zF`zWsZadad)rz7)6K?I4%tvx?RppXIAk|k=pFvECGKPTrS@i2v&ga36FWYW1mvQQz z02+?6j~U%PKfx7IpbLU^+IgN|PiFCbTArTDqj=kRpH8kW?#^u6ygXhq+j-xBC$HCi zI3LH?Ghc7^(&#eBU$5?fJ`gR>SN&h{*T>dc6JMTm%j;d;^zA{rTWR38UV0W|ZW6E_ zA2%O|sJ}{VS5p886^Zm<8kHC_2~q9Q6w0)^R^qSFP$H#9y<#G>{{`yDsC*)KL)w#{T5o>A}jci9*!H&IZw6ue$G3!(}#d5+&MULNY#Grc|A zpH6Ex&)Ruj_juc1Ugn$cYnAJt7P!spbxe#cE$6;c=~{$RcIs4n2C)yvh=jp?h6)Qc#wj7y6sVs_v;qT~ zbI_~@1Dho(1-hOdf~$l*If5%Ws5nxQ@kb>iSzIE`n)_?M5zlo}9Q|~P94u>94nh>B zrIGb|wtGD2&H>Ut+uZrRUX5->%d^*ccpp!$w(e}->9qdzesy|YPBP_fYrlV~uYP-I z*L{1wI;k(0Va1*5=#>eBa=BrR7#l5Tq=X|b!)p$bE&S6Bw1as4`(Xn4|a?+OBf zg%*L0WjOj7Q0podXU7?=@pVY>5xR_KWc}^@^*V}o&HJ(8Y1`=Ggz6%={*tx4PM6EL z)ua01W^G%ktOt_KcU?hx6rma^3dy=SPQ3 zY2w(>xZDS5N7NUkORl&klI=3_fSM~c)X93Z^h|FP_{NV4gqDhZ>+!I!gM6XBgqV&H zIFy-!xI5f9d%UUdab)0-2GT^+PrkLqERU9A(YQ`MLDTW96xdRzuD5eOoS*U$wN}TC@fX4^&UjymHm5$9)V3i$fZxu6pT54gByX-GBqV+ws@;zbi*Ckcx79i0 z@uGHfszkkB=qGj;rCXon`K=J#D$#Mylhia?tImcJYd%u&P~${TcZ))>u$|kEj?H*%~~xj z($y7{eK%-YaEvJnX<9H6m*Wi8>SF4sCpk%n>S{|Ndh1FK`K%In{aWX6xrjP8yf;N2 z1?=e6&o36XKBk+i%dOSZ(qzt|XN4}$29tz!)b!`|y$2RtB;-K4}GRnU4Il&hQ+-`|$ zPTTZJb{#9$)9h3gYb08Ge7N7WrDsiK1LI1=r;0TiLhFU^@x3jx!zLadT?Rm$HMR?) 
zUD|$yI01H>D0BdM)MB}gO}kg?+ru;}N2~iiKm9j%?Ey*!v&pc@i1_P`JgDFuN*u#wpU8OtnLhg?M$lv<4@+!eNpu_v~y^_;OpvVKL zIwa8siR`naI9ltaw;kORswkjZQ-vti$dr%I%DA~v8B`_2=_t?_bia($*&8e(yif(x!2 zQX{6HgCU|-?#ZL*S!tCXj8t%{AXb$4{LXb}8E)gXPKjdYz9o!osAf z6wwyP)3?;qFK1EwWV=?XtSc$$4ZDf4fe&@PfyME&B1pw){H>xojihmrj31<{s~2l- z(ui|Ci{Z6m49c$QaD;AXfbR zxrYuy^;|853AOc0Xkgiu@&f8}&}=0hBq(%y@-p8cr^r2AIY`oS`S1;?irs5E#AuCh z(z=^H}X>^X#x7Ng1T9X7>(D;$Vp@yj1MKw3};-duUON2xQVbX z@tc`h_H7F)A@NgJeueSrsm#rV3N-YC>kixbNQpLH=fT_9gZ@aL_NWQ6A1bwFxl3L~!xJ2-HK6NDsD=2|0-p z9ni}KtVl_4$tXN8&!{lTz_mV^0Tt=q6Guu2F?z*9&Fo;K?~21}F4u|VJT)}Fo@H9e znF}$@@L&!qn`?MVEiOGb)dK5CYb$5@gAGR+0S^-|UJ@pH;*M7-1924_vTnLV@!1?@ z5ZQgxT3b?PwH*0z$D<83muo1gNyLVYPVCwo1iydvConxs7w z{almXrBY?I8C$Byb6S?Nn?UDy!w>P{3fnM3_)>`JLt;~Nv#Bl`y6!8!1Wcq4RJ453 z;M`>dtWul`!QXnd$=V2qK9lWTAdT0SL8f}5L`#J!H9PPn*fNE8c>l);WZ^vr!@CykX=!ks6+1x_Bd`mNuXhO zRi#joxK|)dYpguRFUC#C50so4`7|XvIz9f$cRRtKRtuDQt@5NXih|lPfb&BWWD|sG zF!bKf3)0XH;m|Dhm<>pVhAuWwdj}(P>^3$?)epf`5-@`ip<{M^#*hyMfubbPvMaX3 zgAx85njLa>h^*<}gV8Buyd zS;Y0G*Zy`RxlvusUR0w&c(l5lKhSdpn1o7`kr`taA7vDL+}}py)GZ6t|3WRN*19S2 zHPt=%XcUQG-cAcG|6IXE+)G``-ayG*q9-u7Vr8J>dH*~+AOxNQ8B-zq>y|$N+$+wr z)qtCRat>6yY{BU5Ja^*4uWsbnOwuEy6n+GD%l$xt(B36LMkhB0Q|%)Ol69aosf5sp z!XOP#d+O-u(sxpYgTbBzyO>xs;+39_5iS-piA_;xF1xYPYaGd==s7h z@j)FC`Om&5TpHl}j=MAj>))cGSx3)M(L&$#otcJO*m`0si9)|e*=O^|+XS1ZCk6v| zREmaOhK^H;4xjzQXU4CuxL%3cccuOL-$clwIXPV;@ggeNAW|DVWJ#Z^pBLhn- z(MKj`bVlbS$tjn;Ow+C}vEEic_RiA<>+<41KZajylwQ`4M|pc??R!ildXRp007>0~U!|}-nkY`1oqsl;^CdFAX zSc6((Ru-eubnj+m{IHf6z7WXaR#ZccsPfXIw>7QZJcnOChd*x~9L6bj z<@oy*M2Xva@A!DWh4fn*IAp^Pulc2j=nV2X5lxUT0Wp4iI7oJVSOi@W+Qc1YZWXFr z9gHfYobrfi+49;+e8~qV9}!xXcf{12E9!JSbI{i~w^yekTUO0iz1A(vR+Z`pY46V)mbXjx=43`k1SYoA;X}hw!W>!ahx<&jDhDfmEZqkMJr^@KI?rDg)9$J0 zO4i>_xWKT$NY$}(X5sN_EUg{$a1wsQL5ajimVF01K4wNObGw~xI*MrLjYyr_vBKKD zKnxoa#%vb~5bE_^F^E_-km5ERUCr*qy8vky;&Vs7U0ZFM&3n3Yf2J%~pJnE7x`CXl zS5ukw?=SFr6vcrO*{F@Uy-@cZ(ikoSbuw)qzaew_T&rx#d%267mcifivq4!BdRwPm z`iOg(^F;s7oqOb=<IR&GHj-`mx~_Gvl!ZgFq*H4;>tiXU-gfBHO~Eu 
ze2+$D5}czgS@q}Bvd~p;E{FYGo8D7d-tV#u^jh45!E#sc17w}FNj}rWR-CM6fZ7x6 z$#s_inme<$Lqp+Tz!yoTW*Zl)Hezao1x!SvG*x-)ayTol0ZgBUNeG~i)AWy15U!4N4M=nuFY_M3A+#`~HgjR9DO)^rDNr^d z`fl0L)(X8gVKKD=Wz%Fi7LCmPVI z@NX#~+S@NNP(Yz8*Qr{Zh*7Cm!;2@CrOd9-w@gDex_4Lo0FBkQLdmpoD4(}=r#087 z*xI!gnk1d*wS;Qi@!e&)uN5hlod4b#<2%G5C-_x*R9bA3^rgx|&m|?0e2sv6eQ1S$ z9;4Lu1O(t$oVIALNmOGveGDDsPpj+bbURlqY^l~<)F^uJl#BIXk_@q(9EhO-wI>MX zNU$JP;Zv6)OAWDW9TEmXEZC7mX}C2lg~BLxMVqW;UGY^MRD|S^%#Y_X-Zzp8lMzb2JmgZA(+LSleEjoe6yb-@TDg=Fs_fA`4wx~b6(`vC$ALKG_> zUBg(rB4hvnCq;i93EFD}QhZcObK=aOxHKp8-ct2DB#K@jzzMP99IlejAbdkdL(WxM zvj!U=#`$TSE4PEFp@S=Tf~aN!Ks^qIYZx89b?8^i|5HkfxHF>?BLx%eho#p?+g|0l z1`?wWWTxsWS}x6N`*DifEAHfFJG8eJ;8k}#b>CO03A9*wa6hD#;iJG0H0$Xo>F;2b zU}eAz(D~CD<;@zt0WZJL=p+%K;$qrrEgtQuFi&&(Si(36wGz5!b-+6!9z1p&-0pmRmw z*o(-yD{f;R+raVbH^Ak_4Rgu@pQQ&K_?eL7x)Rl!Q`%2zGrLGVn#8QshGD^ONoQAfKiyH62K*qD$$0*SJqcd=nmWJAYZ#(O~nWP~+I5}^1w3V4y zq_r9?H8m!N4$Rjkkgv3XWavv!!oEQwf*Pkk{q0;R)I)YTb(CxO!zkz5_&x0*iRcyD zYiU}xQMjrn`iRx$Y51lWHCGvx=KH|)LZ?_}`V$v|_d1Csj)}uIDtBCB^3E%)22MN(Cqu_ru^DYx z@|0%X-JrpuGqBqT-IECfp;=3P?Y=H3iR~yW2{aX(`lh&k8i0ha5jaxwI9PJC?5=2{ z=LNwjhVDM(Z*``T(;!1S%$LJeY+wN_^mNL0!{r%l{4m(c+6}NtzGI$Q)bssuEzcd*7`-7o(+7QUeckebXq!3rLPVnciwJ2*z*nuTL497pgPR{#`=5G0rY zT&S=}vnIt7ZJj2#rsI3i%DTbD#M~V# zFa~A-=;nbfgfnEv>dp60zJb+|3{u@I^6*A^pq29?5WZawS@JvQg$`;)Db`|s!bVuv z7M3GDZbyM%PYrW!?w06eqg)kL_rL^JU-jppi@#7a-$*GVb{x8NE+G+eh_$pa>Ws5% zt&_s-2TWJLgL-Z$Kl)&`j|L|Y@NpS>KPOwtQ;{GUg1g?1d5;f$j{se?B#ilRWsSb3 z%>Ug)-|E2Xx}83SjXTf&t5~#dG7nnO0SbuII~HN+!|v6Y3Afkw(B>jX5BhF~#4?-9AD9Zj^O!;s{9 zvC5CI4fj#lJjZ^XHlx;?y&@O+#tz7AH*(g94LkM+xd~ zBEI~&)FqMj{gfZrsl}gv67VmoOd8iQpjQC=8Cql;|l%BKp)|IMD=$t6Iua+UgE4)>J42XWLG(7{zx)^*wW~G;JmSIJV zYD=Ro+yb*y_oQxbr)uxmo>mA5Vib&al-NsylqDENyDhzscNPaReyXa0s41G$jR;u* z=fsCV5{C{je zkle)gWEFt`26vCI)t8B$Lje^%e;4J)E_Uu=YuDHa{Uo1 zN;E*Ku}+M<9RqPK2IfxY&y{`iY{!8-NH7lqvohVojqnq@sGhAirQ6r_Fy7K|Ip%OX6&}Zre;KikWd$*&!xGcf$F+JoOE4{X9LW1A5SL;Qje#Ka zgh5#pefu8>Xy@TR=6nsE6AnE5C@_OSaM(vrBPi+IPX9DitP@v#kM8igku$d1s=+}O zBM^}<*G&%0-CNG$# 
zEN^nYX6q$MW6yG?*N5tyG>Iwd*lo_xe#ttT@x&T;`#|iT&D=qBRoLM+L&lIb!ue43>+3J$hZ@KJMH3nP5dH~kPMGd_c!1`8UX~MS zjfhn8pAef?P~--Y`vcoT^lWY!1YYLsYpe6u1__z9)se7T{WCHjQT4Iko35^*b3PK3 zmLVg$PBOYqF}lwAfA*F0PMjOln3GAZ6Qkjb&yT2XYR&Z1_T?c$2oY!C7X`rqztQ7y z-=$5}6P1P!@JngcaihMxb`s0~L{_3lw)-Oc+II9qZ*@|1P^`HD&OrzRNr4vmmX4<0Eg+BDo5kbVZ5L0TCVDg>Uo*{DS{pdXm zY<^A{k#rJy45&y*HVQ06t}YtAu~Y`)4Bt}f_@gv)gc%kuJoSk#n$Z>>J8h(OrnO{6js*S^I0N+-xn7fp5rGac3eOlcVAb+^`1E$IbA1SD>{#sb4X=XFLAM~V zs^K0{1Vq0#u2&ov&(+Xg0b(=Vbi65p_sqkcg@ryI_6~x~#Dj0uPvsLgBRNe7Y6ptB z-WF7VJBa=oK@x$X(s#danQiEBdb@>)b@N}`_p@`tskND%Ba}h=e#}L3@M$_nmnKq3 zvC@p|+f%eX)RiaUUt6^H&$RRdAcwbm#El`xGe$~EARX?pP(ITpU`eSV;o(%{ris|f z!9!+G<#fm7h;}2W!1;e8h&4LK*U~M3rd6bOGI=H%^f9JMA3|7OEahTr5Y`IKOucGz z8$HpnwfgK_SeldtY}APf@G?6wfw%$5z>lFPfG(*NtN{V(uD3Trt!*&DP&QbhbuVw~jqo?ZG=-vj#ox zt7X-W;?$1ff{2)CVAL(`MK84{3*O8)dp|%g3@B9>q2K5Y739pfZHAHSvqz6o5SDJn z5A{vQWieZz=(8I+jG3Fy#?_>Dn9N?U_4nt3si+j!)oY@Kebn+k;9}q-8CBPC&XenQ z4?YelmLEp*lx+5T3&9ON=B^LG&fpuTc8?uc@RQ#QqiTcP@m@ooiv;a+;UOq2$il_g zM$C=9|=Q`Wx21d-F0l$}NRQ3iSs38LV?U~%N9%zWbms(GkWhSba( zzc3e3QWCgg7WgiTSapD1Kz?>vm8zh>TVh8Vh28qxuOFNF;`q9AaMH{?Pg5#P-`Rh9 zPGxIxaC>mlPL%G;pg5`o0CqUWLHJ~M&bhJ5Eb%9!D2$fYZGNTQ3vo!6{Q`Ao#88Ni z_E+*4fA`a1;%=#xoRQ;~h~pQ(NKk?TKEr-KLl2+mn21#1m6exTw?T*PtJ^VmnEKeR zwsdfWT?||u9r`Kbu0)jJ*3$EnMbO+geav9KsgtCNL9OOLTO09t81Q-v8r*R~(*~4U zvaBzjzTGY(#W+!rLE`u_Ht-&Gv^ZAqx^1yIeZ%l59&O9-q@;j%Ya9#dllpl< zg(sDJx6&Uj1yOs~(Vx#CC3E~UdFxth<-TUVmQ2*BfdR16!;ZF%W^jyVb&O`P`K(CL zfDMyx1EI`XqcRP6?wwItoluxv5eGNRfvQr5<}~AQKdg84SUtMIOOqmbKMg-jLp!U6 zs!D7YZWWmA?SK)dZIzn%u3^@O5>1=iO>Ld^c*d=ti)OYVA0s2f(lZaqvk_^9bz58c z2q(@;MGdK0A=u1Zlr}C0GeqB&<6Ij&1Tq#n5SxvCuT7+zF3zy&Q9JEaqsECZtoS6F z4YEo=L!>DDzelB+9LhxmPfMabEC{qC(!5{LIO$LtL7UQP?t z@6khhlFGDPi%Og$M+3j-NXNJ-&cjp)MiUaPY~wt&q^fzCYc!aD`8GSM%}l*ALo>NI zJ0p*1RA6$NZG7;<5^lbMcXOH*0XGu~4aPDcyXh4zZvs|;vG(7Q*guXI=mT7Vc8R-$+UH~W08D?3r)_Ke+ZnV=(gV3I|CgBwJz;BtH!2KOsWU?_f zf)U-}C1AYDqjRd$eWHkA%JnzX6}Jlh4TWI(KR^^`zo3lFq-QF{w2Q%|WV}1Uy)?0N z=+A#Sb0V566n_!qF;!y~KrHX!1#Hx4Y_7*V_u>J^drKkb 
zTg6aA1-5Z{jhOIxx~%ag7Klqkr@DRu>_x^ea@Cd8Iq<#mpJzcyDH(DE79%%g0actxRpAXrYzTP@W zvhto_k1yRaE2*oBw=@Ug$YK&W^fwlsdP>@I9f0S?WHJtOiXA=p1nO4P>&kM7ZKCtR z4NQS&>TpEOWk*RUrhPS4M~))5)s;ac-*xwFQn#_*4>S1uzHOruf%6Nac3|a$2vyW% zk|ny~n9t+v+)_mc?vdE>=h`+hSvwiT&vX&wZnZ2S;7@%Iz%#`-@5Wk9Z1{H@%25yG zwa2X;Mr$`>G$EXK^K(B(pPx?6lxMH0FEgpoQHFL!CCxRFH`RpJwOkYG9|>-rRgi4VMjOCY0+s?Q=~ha5uD@gZeF>2FiJ^;126~jRiKH*A@nsR$+g7 zQH}@`ntsE6Ko^IMajB1v;~tCv0@T8%|46fAH1(ga!3G=$bu!=ssL+g?rGyl zeZ+V|Z;$&(_m1J=;adJ+p1oImvpK(cj^?CidFSh0c%j`QPsaVpe8vmq2c~(Hq}G6b zqFB&4 zhL)<5twY|0gkk| zCKvnkx;ZdOtai!%lY%Q^BRE^ZT`AG&C@DHjG4>fw%wQdK0=5iU#u8rvzVsIPRYBQ;yrJ4IB{Z~q z)Jv{U$`9A3tcteSF-|pg(7wITu)Mly(3eH}p*BS7QZ9#@>F{D=_^GImvzm>_-&QC8 zOoh$%J4=(l%8;Z7vI?2$f58ixfv8j98}QEh7fGytMil)$NzxD~cQCbeFb5Jt?P>J? z4ZM);zfWxa8y}RFffd+(AfEF#2qV)kq~u@2V*UwMmhBJDF#j|tk^%6GcgYI)1?^-2 zf+BwbFBw_?U*Vzj^eoK3qsp@VPJaIv2c8j#H)mn_ZN!-velc!YSXurmf%nhG{Wp>< z6SE0DJ1Ywlv$2T*1A_sJ5k0#;vjLO7k%57U5vw6113=#pNX%uS`vuUowV~5@F#Q`r zmJtB>x9RZ*7lD7e=h=Sa#xk@2ij{$xGom16Y6-1iu#Bzfj-of6zhy(_IDt z0$v%JepLuYreDZrM&@4(;=lHB`k&7cU}yb1$oW4VFaz6fdBMQ;D=!EDEWg!70R3MC zEdR$T!u|)%*+1RXzojz(I3mE7{;k>pSXlqs*!)SUWB+$B=diNR{Y!44PavVdprM;DSu&z{w$Y&H(UP0Y3BV-!yxt|mGbc=(>U3r_qyX<3gy zANug#i)lC9jtvZbQ?_Y7DHNEE)GNt@x6LNeQP|Lb(|Q=1_mSSP{J8UYd9{^^pI#X9 za+(?9Zt*mpl!@=OvEKdy)Z0A|^YXkNmp?rAzn%HHYUEiuHNEcmaJ_C^utxcKujtZA zF&oo3*v~y2=)U%EwZA1DWoEd(=oTgE9&5PIdz=+bPf$5yv?(NUoNtPLXP%xHDl@hfq_10abB%%{b zb6}Oy;6l{G_-jdJXnr@-T9RtcO#@CUr<|or38<@_n7NjELvm%jJ%$8wR8B){Ry|a! 
zHI-Vl(v7^eCMi^WkdqN=O-;w(5g4Y{GI zDoIDp90j0C>bZ3Zf&x&lE%%#DRY^lhv<302Q%v=UX`M3sgYq- zsLdA>M6C~Ik#S!hoUMFu+3Se&pRGe@!C8!YTbvG0-xjjP{$;oYbJQ8(VN|*4( zC3gwwTAIH+{)DBS3h9-I8()b;eQEyyJodrw|M>W`KfnyB*B}4kL2mvKc!Q0$rCopg z`SIsMe{ucs|9$+`AMJ(z`}*@g|Fi%0)Bp9CSHAw@FMj`*KcS6_%B>0G+4*|TTyxGN z=Tg0)|Mn9a`)Ib1Cb_hcn5~iIl0gO5#{5i4+0ia6rD%6x>A5?BM0>U;a~ef?8VXH` zOL4HYVpd7(xvGOo=>pK8u1c4TJW?OxT!bdII8YRebxWma=?*Hjl9>)g2N|rVQB6*% zB`r;)I#es@)lwG`A&0So2YMpPn5e;s@?w5faDa?#Z<&VKdp?qJ4Q25Q>9S}BLu89 z${J?U3MI_*gB`${NQqc%rKBvArW@aYvPf%(FaRmD(jgx}y^maPbZ&(c zRdCVa2*9`%?tyavlg2j=1FSV(>}k9R*r@E3nk?H&O}SI4k+%UXG-?k*02mi;Bgl%u zMlF03D?qE(R)Pc?Yz(8z5>>#a82d|8zTY0_X#f)IYQb7HSfkcM|JtAz8}-?gkJO;G zh(GBwGFWS2;@YSs$}AIYjv|9rZr8!2ftv7`7%x7+p0d<7Uv}CUl#Q^9ga%SBJrLVq zz4ta5#koSk*yMy=%wsTaq=C&0W^`~Sb%r!(Yh_%AaSb+LrW_Vm6P2EI3Wy7XK?`*k z5EmMQaVb4Wm&35kn3PUAG-%aYuOqAugV_XWwr?_x5C81uhwFgz@X-~g(W6yj_ecMn zfgU}zCd5Ibt*Mji!>RiwlE}=JsUsIMWouKqp?#{|J_3P`c0PnWdOVW+X#B$*Yh@^h zht?9JH)h*w&E(2z+tj%vE#{4D*JSfLjTl?wrZ8lCZmoq=YLw%;IR&iDb#tw;Qx>~D zD?wb(ay{*jd5R!(rQUjtCkq>9)`$AJ!fUmEt@f|g{t7 zHOA6a)tS;@N|haE2Qq;5US;BJp4}KUw>CAM9rFODg9kax&Vx+hRHjLYd30n7XZTpu z519Zexyj@UJB@}WOIhJK)CHvEW{O2+jHT#?DY|Tjp=C;3Ez=~xX=t8L+7YPz zEZS#-9cQX0dM(%a+&rDMarJ774h`WP)5L7>aIJ!`{^85R)|HZ>p~^lE1)vl^4yDjc zAwLeSw+^9#v7E%AVfHu=fbc5#_+E)cip7 z#?#dNI8+1oLvi6WG)f8i-BH(@JW-oxp+WMX0`X4|61rsEw<3S@iCfm|pB~gGVYw?U zYbv%OSrBR6Ze8CVg*)NLuYu-!3sOf1tfvNRy>Q2B%;PGJC{gL#5^{rI;EnrQ*q!gp>elK9MG=NjJzylKXtu^cLR5T8qo0!i}H{3t%N_A-W3dfW-`u z**x7|)|MFDQOs^D6+={9x?lo;dDE;V*{il=8aZj1U*8_odm`@A!wb|UHCQL7UFw`_ z9eN)DbxI^fC=8X_NK|T%Q=-SY=EtN~^VJtttd-rX9XX^9GxA4~2q5IuGJ<7rifunwY z6csE~qfJN=u;~!K3USgtxL-v$4OSgjfq^_P%S-Kv26wd8Q?2;90*@6e08@b(&2mqz z492O4!tsEehjF#Y*aR_XWAAWIqr$`~jN5YVXs0K!4ok&O4}-wQN)vUgaiz9IK3r$) z;lm~A_*8sC*d*Ys{e()wQ9!#wOO2AWTw|BuXkv3j2!rd4El3_$m_#@2L_JP-z|ge4 zXTZzAUPbWAMjz*#I5WE5h~`|9cGw(W9)~2HC1r)Bb;!ox`3kdgk(QSfl-3iD%fchQ zrrdJ9HPuJzHp^S5!)%Z|!tn4(ixdjeuy!!ZFkQ|gRgy6RUYJVinc-5i@yD=fIzcZ7 
zOf$74KC(p;2LJj5HsyHA(k5?sXfr@)YcopdHLz3|-)6orkIkfKzJZ$qVh-PL1Rn9& zn7+x;rb&>vNx3PLH<|tN8U6K%`)|rnVT!{-b8Yia1r}(#R8k*Agi|nF>lbYEC23=> z*tIqor8TuIe*DHQNjAe+YbtP3u&#R&JBwhP z`cScMMiPSA%!#-TJE*JXje(U4izL;~=(^&@zR;zVI%S+S%7Ety8z`6DZ7y(HDRzLW z89xeDhxR-$2sF}dcMc~Ss^t<8vuZb(%?n+N84K_YGN>I@2#yf6+p%a>OpGs)Bw5r@ zQ57fv%S%nQRjxMhmI#L=!l@E5sl%!rnwP|+!t*)B3D}p15a`him9-h2F3E{V{Rp-8 zs}BGB<$(t}H#;-M4>%g>MM`)Vu1cM&)>|)_fA4BXuxmV(lN8aaVcfN(Ix4SO&ly6} zlqVS`E)iFMC1_Fz)9jY64ZHLJn2S#LUvFN}J{x5J^JM@CvP8&F>iIini@x2EY z0AqF#EN}|f)K#*x#pWr7pH%g;`^A0CsllHrFsZw`U=tYG8SAfVH#S|24@VaqmP(Kh zE{S17jhiGv_Hv(ZPnly{`@AEY#t!E45`Qh&pEF;vmC{pdt>5}9Elr1+II^OkcD>>Z z3cQP?-sWnD*wx-XNGY5u+>0XaN-<*Juw3BaWv-Et9ylf`?TS>x0VRB$B7JcPU@KK* z1e`(Qct0`>cHq5^$y&vbLz=o)ck4V%hz&Z9u_LpZg__%qtw~aW=cD<&Z2=(6OxqO& zg{ql2 zj5=(gBI>$@vZxguxscnaKNlIIo?YaM8hO(-YVO4VQNvF@iA%v@sJLh>){BeFV$rw^ z9bOLW)nfX%tS#Y)3*Zr>xKu8Iv$b?dAOj9~ZtJR!Ctl9F1j*lXF948t+8 zDt2YCzQKcZm6}tjcJ~HL7uN>WELkFK@fh|CVe5*qXB-z7I4{z`#uoQU_6asErQ*Na zDV)7iH7N-;((}k%=AaD3u)DH(UP}kp+1+Ti3qCh#i;q70+_v45w+Y`mxU`-}dIeIe z#D)frUU80i>vCX$!r|N*8w_5l)LWPXXLE|=a+@s^;f|pNw7`xOQ`H-LFhR5SQ6rWa zjGDo0n-)At)&j}+9bxg}eo7UG$xz3;E$oduL^yP6*zvo@FuGoo+QiQ!Mot;GKw+|@ zre>dD&Tg~}SitsSVUIJX>ub6yzV;m|#e!^&a4f$0}wf@o~?xdi3WZ-`!^CV?bDBp2o0%k4TU`{xJcJbgrS z-j8jx{QU4=|Mssh|8Pzl{_O9YUVr&Nrv~Aw-a+w$GMe9^q&^os^FvOv#|M_85A5BI z*49K`KCrdn)I4XQoU7zqd(JYVKkR38$np4UanM`{6yzH?Ag@Ho?3lBH7c;4 zZ8Twu^3$A_<}7V~mN507zU}^t)#uWd6?)!2;{@Vue{Q%`E*HMyva&g>n9GXTy(Vrg zwCnO)02EiC4zwHIB6d-V>l?t#5u%9QsEWNT(_^BjU%fiaJS%Juti@KiZsMze*;OvC z5_`29Wq;Z3d>v>v-D)?(bvTIC&ItVu*+JwtxuyAKa{-jimjtUV?xdT8w3zr=Up{7b zmk6jMzvpI$7CXo6ejh#?%(!YP-EYpV#j<^Y#=c_*)O~@5-^H`T+!tuz)_wxq5s0&2 z?oX(B?%)(*Isq8$Vp^YYZI09w(7UaG_0KlLfp*b9i=2m*Ybqr|5QD_(+V69P@ zMWbR)5_Fp{pwpdVV+OD$;zOMn#3Ai;&a*_nZ@0V_0oE|dT5iof7+ z?#PM3MlDC6`g|KMJAI2@w1z+rx1Sis!>=diX($n^hNB7hI>{o{h-4l6Xh ze_W*8(}9lnPu|`?WShfr1qn5nd;etr=I4>?^V=f`{(d+ph~4PYBeQRE6|~gN9%WEV zdp1FVg;@txxA7BH;>6=v%2R1!+?Z^s*+`q9jTZ%v)((^o>V5OXS_H~rowZurc`U3e 
zn;Xyavzaz5Q-{05dbXwDEPGp6!ulA*Dy*bi0LxuQ&zk&W8#q{b>Q{C@&u{aRYV7L? znEAM1Dw%Bii7kM68Nf!(kV?gtM~TxAH$&a=vJ#*F9wwF z4gzt)Py!j(pd-2Wb#-cs2It&DE6ujryv)wo9abhO(t8+~t#zu)YO)CcCM;ENQ^hKqb9Fm& zBE=SC6%DupNNI4nVHwcg%;|U2!|VWe>)Ujj955}FW2fvVp$AoG*Rw?^&;t60z5yFl zbXg2GNN8z0P78A}wU7CSSp#M!u3HQfKR`P|C$E4L8b!6gY<3M1S#D# zT+(bS*?;+0*>8U?0_L^heQkJO8{XH3_qE}DZFpZB-hYo9-oN{GN9g?g2%SxG%F2-O zyLnXp(>XfN?8oMr=WjSp2kZ~Oe3VYjJzmT=llcc?ZF-K5os7E-trzYSXJg-qmpel0 zbA(8~Wb`}{QhxG#1e-h%^80X~Fp*>T3?*mD^S9r9{W(J_&Ch?B|JC_k&6#i|qmEqs zcWZmKrJ=p~TMdN57y+~2`}}PZbB_U$ZY2`#`}eVTG|D&liZ9KyGp_{HEq=1;{BBNd zp^nZ_?S`s`-!!~SKDGB`vFXWtuATP`Rg16JJ3md-QfOOG`}RGOZv;|={tsDiD9x6{ zt|I2?6pR`>^x(9pU;M;k%iQ^cwim{+pAfawou@?|uuu zXx|Vof*X{X3TLE5w_blpqgPm$YxjG}W}rHUw2&Ek%o zc|{&)$vD=6t(KFADdjqnM|LXWwa8(89q$3!SrM)UXlb^cA?q9s!eJ1;wSbi|47pVk zk}cw8N}>f3bpUd${$THxh%CbkkOz#_l5nQR4kI-?A5fLz(3;>~bE$I5y&-xSFx#C1 zCJkPan&F_;Y=u?dymoSE?lwg<0~%_s);Rh`huE(fteAH&fQ^URz%gF9#H!7pO_<{c%ak&?7@bK2s$yB} z7Is<&Ix|Hi0@giv@?g7VPBq)WHdz20+`N}bzLsmM#ZFEFp#dF)D3!w!E#rK)u>{ru ztZ`n9H<=7p50SI-+n6uUp$VKfv4spXkm|>lajGIIQ`0%u zg%m+7LUlwDu^~?ogJ8%rEVFC8)Ms37pog?n;h`VtCO9Y48-d|rSbXmrSV4M%uiX_{ z1|Bwb^4sDq0WbURH*lfCr_z|~Fe{X!ZSewIYsR6UQ|SnRW{(+-f})fN2Wvs1OcAzf z5_LpR5qf~ry&#`DH%czR>boizZS`$c6#V^kP!PM-ph`mjLFw2)?TF(jCz}w^71Wq5 z0@*eH(RYRk)9V;dFn!KeVW zZBP6)tAf^6;ATO5?pT)jm=Ei7Mm$Y{H#p2tW!GDFjs;)vnsLHr8csY#4KwZy5n^NN zvEc{=o|D|_HWP50#x+$T$<<+qLU%{}reaAfpqZ<{OHL9=R$tuU&!-(Q&1y*_O#S=* znh|ePrrBZ{Ow5~HdTv8}nQ4_Qs(_TpXM^mM9?;bAt9YA)w+Jrgj6kn4mqMJjG4}Jq zbNQX`j?}o}l+1us$FRr;>0~Muzr%qXOD9d$%Oh3stiuQ-g|D_liPc=uo7g#I_Dc<3njy8m_ixKfmT)VlTmKso`BNZ*Q>=Z+#BA-b$ zm3wgB%r$(j&XYC`x2nA^6V8x<>r;li{f*kKq>ALsE$~G{&Qj1Z@DE~Ehh3UQPHUWc z9m~=%mPEiSR~|jEZi6d+ucO?Uyulg2AaB4ChbAeHp@W0iG=U=PZsTkLYX~Q?1j|Ph zo7|*zeIDqXPTiO#1Xj1Kpc@&Pp1$^4)uu~G;xlCLn=?J5wL;qf(oPN zAWa&U=XS#`lz!#vwApkJ1bHBb#vzxCgh=rlVP)X69|Ip2fEtE^imZb5&sMd-Q;KuR zVw;!1eOObfrN}*8iq*$#pTN(ES8!u1a=s#{2C67X2$r~k6U87)%7%T9i`!DCi06PB z4AKdh*#hieWG1ur&z8=5OZ|E_D2LT#)FP+zsBhLVqh4Cmi5l!oJnFiIqNo!WDueEH 
zoT~?ZZ6&*?6P59zVpQ9Q-ci9X&WH=aWT3cWIHz?FN!n|R@g!D-zjWQ@=qSza>BKGY7Mu> z4J*H;9Ba6M$Doke?E;tJa(Y~r=md#+9Avq6EbMrg1sEU9m2*S9#BLBFLt2qCzKc{D^F!Irs7s_M|UN1HG7t9T=2TzxjISDBs3)$u833ZO5K?nA}(d3eyKMD5mK z!-*S$f_K+n9=NMv2cL1i0ZiKL<{@=$9g$|l&~;j?&l-wG9Eiw0HUw2OM6KCUn!@;R zh!-|tK%-KX>{+FeapaEy;N*-z9W6G!FAoGT>&QgtYv4KvNf}Dgp@I?uEsfUfPL3>= zO&NYFF!XIG)<8k-u~7|NO_Xu9M6Dxd7Q71@%x8F7;KUK@;=~XX`?xB?FfAeX+hgEW zbK~kOOE>zJMzB#~uxf+yB4hB5&8eZAi;r-91hwn|97LA++SR=!BylXOd64?DkZLeO z>0`d(zZ%0}<8~Tppq^qKyY<&_2UtqcI5yvBgO$T9AYVR9d+RoTQ_ zH;R^moc2ihKA<8R2Fn%JOVM6E^2%_K3f6be#E zHN^Axf;(&^+P5@ixPxWf$J^dhk;txi;Q;QMYPRD^-1G`OZMa(kDjZ`XSAcH}1_FY7 zM=eQfx4kzbV**wq95$!!1tl^i77xwrX=EgfthjY6-6Bdg^;$gm+T55#><$a8+OT2J zgl=?`ZN?5Qf;?(zZnEqrxFAfEDK^8nA4KtlTZgqI9$hpJ4e_eODg&G;2yAYv+YlyO z5nxK$qCFQ{sO54362?4I3$wPV1_@t(8a}V=?{a|wsGe+oXw!^!C8l0U_whAc=g*c(< z*7QNPB1Mo*9Z|%KEXch#6zbv7PTTUmTLf00?xufuV)IF8X>-)A&&_*hbb&|rECM&5 z=?1pncnRXM@fQRsU^y@%lLHs#;kzCQFW#C9oh$}C12Ps21Kq<9le$fH zp&%f}S?h-&}2cqZLN2cgP5U9a10qM$H~rVZRgG*>n#4b;u0< zI-}b|KJ$okTQiPJTfj}g*HYyNuSb*LmexR-L#{(ZP&J!fg4?-{Tz$#5rbXMpM5N-5 zZPcsdc(YouFXR#H1HLLaPwcr{ehETS2}{vEk|N^{wabzcfSHT~2g7R(34SWh765~_ zT}|;6n8j>Ds#?~lNE}2~8 zmr*M;AFf#uaDaVVl7V0k*8?q!47D|*k2X3s2Prd3*_kmw+AJXncEG)1&yA*#M#_xY zWWQ!xFtm~)qojaQfGq%fB`?a5gbFNNiy)Cgo(tX;+Q5NQ{4TSiVf#?d2Hvrthl8 z#(RO{_u(&+$EhBo-vr=}u!F^g+8e$n6>jo(2zIC9qU<5Ixc#<&yK~8Aoxo4CzsY%C zjW05!bLoxhlb@?bqEFWMf~|+v=4Wi@rh4?*%hx`H5y}LCfQH zUx(q173p-{NNH02r-vjaKOcn)zs#Etq^D~gH`7P~*lR;#F=R&UnX!=9rmf5>D7wXlBY4UU_V%3_A`PP?A~IUHm$E`IpYu?C&)Z}05F98klD|?^IY@bI5mc! 
zFTmEN{X6Go&5cxgU2Rb5^YGa+srtCG9aSOHk>(qN)mk-_+ie1-+6`uFWi1;DgU{|4 z?375^guWIF9>Ez?Wq5A^(Rr{h9#p#Ehzd!mRXt&+l^rhXXsxYq8Mc!aaE&!3Bn1U{ z5YgFN>)nS}Mw{<<(JDq%Y=)XYI$-Q7p2cMeJSHr1wD53E29IT(Yj1|G5gQ^G0gv0m zod|;mBEIt(`D`sZ>5XyO=J<-*~ zC}JON?scdSg;E}d2A)iV#ii1EiHiCZk?AvUt_rWW?y_KYB77U!jn8cRICtjI{aJW5 zwfF1nn@k<##h;)9Ov=z7y6BckF)6CPlJ!R&OCp|Sm8{OD1941t0EOhmlO_viruyqd zZ<#WHG=<|(Z&3r_ekWW2EgA#Ve4KZ1vtIn>c0Q*je#TFJ&TX=4Ex)~6s*$cKiU<+J zzV4Wi#a$QqD^Uf4BAzuESX9D}ydJoj*N%16}tI2aDAkkch;(`-(WM zYg7k=>ms!W6xOTFqlK}S$l2*`;0*fo6z6xwv@vmpu9sZCDP29PnKM43{esMq)2(IM zb39{^jSCH(xEP#!;?>z{;h^F474dZjbufDdeJHSuBQTRQuoWf;}1 zDXN+6W38=MU)2~kq3RV|Td8z@(~JTgmn-HDjLa+-qNzVwCTm{MSORoHP6B*Fp2jz1 z#x?A~e`*YKJd|0OM;ugOE6Xh50}!q_2YI$|tMyYHT5ZQSYXVh=nJ>F|@gTAWK{yM} z6)!Vnj@ScfJCM?NmLtO5EN=0J=j64zMa^L-pM;8_Vt9F}2>%B-1W zFtN{`(HGT~N^@v!;Fw{pj0v+!iPoDr+6}0GbLo^>)yc+KF4 zRk2~K7nD+`CQt=Rc;Tt)FgO8PzIT+U;t3;92YiZI0h-x!39our1~czg|Ke4&{Nvjr z6wlfZh9XkuCfMMyqMQ4@Uc&~CNsL=JH z${nEZtK0$hzRI1T+8880R=ESv%8msz3W}P6&jY^2G8Vt9awk?M?Ox?hta{tM%AJ7O zK2*5_^nH~(z}{841L#AQJ3$#Bc((|Hvb`5J);Onre zZ3zgg-Us!jyl&gas=C{`dDJ1u;_JBK+^k2G}kU6%PF5!;1hE``zoYihaE9Tjjkt;?$eLac{f$=FL*O_GmgGRUF$&^b%U5%?WxB{d8 zfWg>{DF6yjV9Y(^$ilj2%N4>711nsmp&SHddplcE4tu-81{YuWQn6Hq2->J*KKB;G zh|GHk`RE>4hZWRgHUMpfmvM@huF^D>%zII84r&FO$Y7K`B};>wbJl6ET5)N>?AXU) z)y{((stVU>50o-{Mp5FtY>95~;_R^YX{~nbFiJ-}kXp0|43TapzkoLBZfC29$&`*$ zAT0}amA*eBhfQ5y-tZ+U%Kj}7lIoXABB4FNy0Dv(yJv=To(h#PMz-3;WA$Kj0ZdsO z-FwnV>0ewEZ|r!ztKs9GSY#AOZK!Cm@a=t%)P@QL%AjySX&UJVP^0#a(kMEB^{@t> zV5_T9IP#7aC%`FGWHL4jLlAbl*%2_|P9?A{V6B-=92lGn{MyB3W{Lt}G;F`SMFudY zR_jf+0Ih6Zqgx6A3xc}8>U`e6fW~s&-ZzGQ*Aog^m(|lAQ}^tQx3MFV1s;)cmcxn& zOdKtIVDC$yZNMAlHFig^F9*p2e4RXt6gYYrh@m_rBizb?%z?OoHEKLdQ8*Wn1s3#j z1!e(EU=szl0;~^@ra25qR6X@wU{t`wuzXmWEXUR?ps|^skfDq0?JXo^{ztV(aRQ8K zR%hFS2WU0k-@-|jRE$^KrfGXwU)~-;~#ni%;fVXyL^VL*bzmWcF=ZcDHz4%V+O~3Zz z8Gx`lcp4`QL;Iz)PV|k0Nw=o--#oPw^NoB2+m zITSjEqw#2VH3y&2;`ephGNGY{h+cO9g6BtLg!#b3qZZHDjrMv~Bo1ljz*+oRIDk}` zZ&nH0ZZ>fuBYx9nw>wBCU;&mEvsdBxiE!}!0^)*%i*>kKwk)BVt+z7>)ZrUeVfc&} 
zQ87Jx#NV1&L}e^2u+A*T%=J#Adpu0_{q|=z>!KM`J(Gf%RjN>RmlK0(lsaK`TmaS?O<71$9`44F+pN zEDN`imeg{YZ`-kxIMl8#V5`jIDN>q;&2?XV05(sh>pt-U(2uX{J{coj>8$ELLn9pJ ziD$i)ecA>n64o}^*B$^BB)UPdb6kAPm-BQkm3_`fNQr|@MpHWvEDlnbgs=l~aHtNO z?(MDF5o4P#`&~iXN0ETp#Yb!V(9&Rj@zMG|-XuFQM4V{fC!i>kW;UJYh|fM8P#N1Y zKS{M`G6H1n7Wg!i8OO5kJH#%{4{*=WEMb!!^qwi?x{ie7WOH4I#|JH_xNK+42YJ}g zF>|}484-wnYJCWjWu0zsx>+@BI|h#g#=7VS=7$xK`AWx)V~`-L_jjxXTh&kM4-e)x z5A+YKB6F9gXNpXXDiWh+j_ePM*pk*CDFFyE*>`zD%6IjB(Uv^hnk>=uFP`|SyEMP; zk4Uq8g*E_cy|&dGqAgnil~=wnj7l9Ug0uNEdNcenyD(4NA`Dq?J3;{T8xF^t8qC~W z$3k#thcWZY%I1^+P<>{bV1NxvQG7^i8za}Hfd>E;8{%da+g0hfF%@9zhGTP|@5r2U zCyxST*+Bv~115e$ao8R})D*vhcmQTmkU@|PHt#o%$N@TV1{9>yV0Q2Co!Uf)e|mca z5kC(eRd5Vu2Ib=s3B;=>=OTTK$bSx>eUJ~>>HXHESQ`Tuo)cuK)PJp19Num4cyh5lEN`1v26vd^_GQ9Yp`$rF_PfJ8lg;~53NF~O%4d@2T?=l0LaV5v_j`0R(Lo%c^GvS+0I z1_ht$t7ktvMM>|!8EHrPWK%_+A@P|we?x^oRj;2#`*ZdBTyUNt@za9y4W<8^3FkNT z)l*zO9q!Y0@Tt^3ed8JLKF8Hl8SLLMQOb9iC`ho~RFuDB5q-T?{!Rm{rB^HlgWev;ctl`t$b2H$n7WlM6 zc}lnTR6RT!)u-*wAJCXj?dNA4`)_Ml_utUsep$mWYxrdi|L3)a>DkObPxI#n!Bd9& zv|;`EKAykvX&HQ~8J^PPDb1gA$+JFw0J~3@)TgTNvzFU8mb*_|_0O7Gy!&-CNWaJ8 z9>`IDw}QTDTea`C;Z|9fA6RC7$$Ys5fc%!;CZCP6MbcRurw+9ub^fKnlW2cIKJM=6 zX{~Ch!>k4|U`V!)^Izn-^=iciZECmcjl9e&V9548VWsq$?-sW%N-d{8s?5S|sF>}8h$L)IYkUm2@>fYnth&2mViKvT7%MR0T#^8?XYp98gfh;pwEo5i64 zWmR)V*KSp$0a7oU0g!t2ZA+g^4ys=99Qk|;vLiaQl$n88knhl8*^AW;6wtM~C~~{o zuraIB&3jrcSGZM`1>($U8MA6$Y1UBOC(J8xBf&6IVX{N5W;b9c1uPSGSv@5Gh@u*G zeL=0dx+)CMM-kuZe8a1s>9eD#NNH7~3q`jkZcdMq`CvQDMmIopC`hXoug5g5bj9*ig{fQ?%CE>mMz&F-7@jd7RI^_}Mn zQ?TE{7q2e7TI&w87sqCBjKvsY1Ev>UV7bENXvWuXH*lMW*0W;}bA0hf>eR+EX znV*jy#cZ?)ekfhkhXyE0P$`5^mdHz9gi76P$lojy3%5b@sxKLp(j&X++^%XJyAucM7G*#Y#b+fvM3QQNL&4 zy6c76tUSAb&D3)XAT)`myLs|-v^qiSTk}^eg>;$kSUTz6Ji8$BD$n$fHBCIu?Bv6I zHLG=n)T@eQDE57LBNH@s^LWSVYqa^!Y`ILl*_&1u%~C=ef(6N2Lk+buKccNZA3!qA zNjA&=y6Up6^lxQmBi!ZOETP=Q?EPh@Lc8~5NkKue5r;(-NABMopLEcfcVRlD^D9|B zo81r*uSL?bl;NqyyTbe39vy_*r!$LUgZ+`MEDXFn$4rPPB%G}R2czPpXLdyvYxY7S z>!T;4U606lF`wpScQU+hu__*GL_qc)sI1YCGoxKv#F;`DBU;icq;zb9MPjuYRKl(; 
zTVm7QqAfpcna*y#r>-(`WsW0-l2SqDgfJ<~=&bCeO%4VS0$0^e8xOjW3L{e+BVra3 zStwiefJldwYpn;Hz7nB|a=G^C zx-)RU`G4&IRixtZ099ta2dJ`@_7#Z{s(=-$J%-{i*RMpV0$O2(hu|K-vg(LoKybKk zV2xXZs=48N)XokpVlE#dQ~~=Cp$g#p2vxv5595h`f@A;6*m%OE0QKCCC!ES+HQt4& z0=9BRo(L@q;JAmV0`xva6|j#Xs(`)^Q3dRMh^oOpgs1}gK17u{TZk(D&Jb06yoIRR zGbdi|G;<>BVuvRN&xZyQpIAMg0iQDT%T%4?c{hA34lxyj`gqzag);XEX(RvU$DByOd$({LU(0MyfP9(Dn&O-`G9 z1<;2e)k^$6NEM*>L8^d#2vP;m<2sLk0Om&wH@pn4Y-kbRAof=QL39NNB17$9W5QneDwKY zh@O#$qL7d-rkm>@& zAk_s6A&;l+6CF8+F{PX zc88|QJyI3iaf?!IQc~?GTXdRkC{_%Fgi$nPqiuEELsXksp_mdQR2z~4V=ohM&=e;= zR_ZCBt0{8Dp$68$Yg0u*k{&3~v?cbUl7UtYR}~m;peSIiTGvi?1KgrhC*&5Kx?c-y zwwn(e83uBAcMmx5_?}phhJb<~I2&0(!ZuQaFb4DovE76U^1TUq(+K|?BU1Z!`fsuH zU*&^8KIemfx_d>W{+l??|GSJxeR*)dJh*?22lp$U@i%bo{!bsz_!*SEf(yS}YJc0{ zLjK-V8$9^e7&iRLR2!$eS8(C?n?nE5g9~4V&C9U)HHOVAH0?Jq&|aZwzkz}FOQC7M ziB9}Y1I2!WOWz-i6NBgFb|*(+vbXbvKe&?v_THVGYk}_&-`&Xp6)PQf{OsN=+aF-XYNj3sLF9w`=ud8m#N4bBT z9_9Y!=21Q$J&vuXz2gBpd%ioApCK-X+B1>kcpImDTyzg|jw|m$;`aGm> z$~X0nuW=;v@k%}naXiWo;~bCj$YRH%+?nz2QSMB8^RJ$~{m!G@rQqgK?!t2SD34-w zJj#y%x}W$F()GO_k8+^oEqwXlQ4Uz6&e5Zs;pD&ktIr6RPk)>U4#CZxHcT1*`y1H{ zH9AtYoyT!02dr7)6-b4$ym#a=<=%lyjr^9_0XWqZ@abKmQJP zSZs(CvQV6nUE$^b&ZC@L`QT9w*n5w1z}V@2pm>o(}q9w;D8w_fL1n@-xz)oA!)?77H^E`flSW=taln&9R(U6vfT4{E#Q8W4TKLMZfYXF!z_2&tKD`?56_-6MHEU9LJvvC%I8C@@C6j6uG zRkGxio4cm+NK!gnXmb@5LZ%Bl8`LL8H#Mi0r^9J!j8Zw->acX3j;*{WS;*pbB`i0dE-W%|-X1R@LfHM_UnfJJB=G2PPeTFq#W|levE)Y&o8$wbJ z&Mf4Cu`7<)t>BAMZOn@f6`H5q>E*CMR=j#1YY3hyS8S~*3US~2Mct!D-tQS<*F?3~ zE6&95MC~?b>}AB>j@ZGjPEmvYU=bDU^N_aDEr6jGx0{5AFsM0i`78qMXnO89*`L7) zo9gR#=M9aGOeWHMcg}8H;=lr1?g<124pR-RzK{|{Vj(XIkE7X~!h@JCf<$qgbo(H* z$2r^md+e>r=P>f|@8ACQjL-1mRq^Yxei)9xidqf5SiFUgXf&-I70T=WZ} zC_PD$(i_(Hk#ZF&y8>z1;6?+w_vDhTea!HVQL*+-3u#vUIR}!aJjzsX`-Xl8yqo!Ubm&ut)}Fh7L%ByD&{-M@Gqr zRKUpwcPL$0K$Uwggl$@>xfe4_dsQ4hV5h~Sv{t-O21NTqiEN*R2x78mRg_B z`%j1Np^d`;tG`Tv0o*#n*vL0GlL5js0tbXS2Ntk47J>o~1YAZYIEX(?V2EEtifnM` z8QI_vKYaZ1h>wI9Z_R~H76YCE84HGi?%|Z3WlSYew54%(cXxMf+})kV-QC@3xVXE! 
z(>OHl?(PjVc5%1i&3lu{-RozqMAR%ywC)G75^R3kC|-98N;l z+P*Sb19+=guuV*g=c{7Yn+1(iIKd`|%d7?MA;+3TB%u*LpH^NY>h_DL!r}F+{S0@9 zHVhmZ#;}ddCgKhExW0Jj_%-SU<(21w)V32wL?tSrCzn&Inu8Avqxi$`H4>|)f}Pft zu5ppM1QC15pPiT&D%~nLjo|0@0h8@b2PpCM$|i&^UMA~1y2w)^Pl$+TXB{%rlo%{xn$&+#BKaa`B7a3rCGOokJCKut>)#{V1TbfaOe%2?GwRM^C zZ=yX|O4<`m-^m@YT?t@8pIO^B5@|_fP?9T`IW-8t{%t4wT`pKk1 zK{GAfVyn^*F`)m!inK4@oP&|Jr%vNX!$tc4PgaDr{v)BGe70*iuIBg7f1x5WF8_&& zG=(uVqw1DubeR{yrZ=|BbS2vloCgFr1>yJD0|67kZ3d7#$d8T1V-$jzW#0Qs<|s`! zYlty*RFXI6%)K3iXJVxM%}{vMi$>0szYWOdyfbj882<|v32N>=-W+rAv)CN%V2=dO z4pdk67+UU{ospQ@ErL>+|3m)i);l^s#pzo?=FSieWFpbYRe){+u7P667;@QZs7pLdL$5o-g2Ele_J6a-#PE}Py5hX>`a|34b47VuTg-`EHT_gveK!3>v?b# zbnjAg6le-**i|HeB*hX`tv|V$T6IYu^rU!FMFLd`k2y>f>Ba0xjdJ8Oo|ysfN7X)f zT4G~zVE~-mdviSMfIG% za{$UY*&f>d!KtxuNBrM$k@POm7pP7kF%niaHwkLA`xBwpsnmN+Z0QeI zpN9O5OGS*Z72SQ97fIg*uJ!AcyVMyb$vpyNeRBf6`Z9XZRQdZ$i@bq({QsU)l6h-n z`iEPOT&pl^-?yp#7Wp?ST`7+44xeSANChd=o!hTT{}_}`1wE;{Vom69Sibz6X*pqF z`MqZRZ1BbMb&$y=#2r{RknnQqPxCoy0PhkcvY1n+1@RU6??Wx5-QOyLrbAEB3g)ni zGgj?5=LdoYT$1e30ROGB>`e0({g&Eqw+y2;!}-5g^L9BKPoo%FyZ*Fm#6b-vljUWZ z5yBEMGH}e+c^`@d>J8zdS#*EL!D+}P3JFiR&G@~IvG9@VV+gESP=v5%Q97wIFOP*-LhH3zucRC%2>iH8kCgD= zJH!{P`;iK=XM2w6`?)P&99)uat4klqpi?wtFfa#_ZO!q7p*v2$YmvcFz|MXmDM!z7 z=jm_(I)sq^5@jiDDLJQr+GlY*C(*7sAATXk!=>UshEf!$oqJnUbToa!b zFi-f@a2LiTR1fhIo4|%xXi$sb30v1nh%0Bz8*C_q`p{S>HF!q({Ex*IRnLKG05OP0 zXYY^s3P>3}V@;wkaIWua(*Q^r+Xm)8=nUMI;04_Ttu(A``81HFyF{XI?U6!j_7AAJ z3CGS%6?nGYD*a$fP}7`tNFbE_85)2V3Zx3xQ%&C@l+a=lt>#7lsx=>(45orQP(am& zg*}-7^0<2D^Yju`JU|FXeb9l02K8#=oj9a(Jru#lT8WCLf&IMTl*=bXa?2{Gv-t*b zx6Itx-WLYl?WW9dZXq}MSI=DhDXx5dcmFRP)>uVi;NQ|(}`#fTO=fsjsfmYOa-&DGcL1DM`bhZHW2Ualn|C;zk&jNH7(c-VF`F2>n(4%SC? 
z`^#&l8B{M*A;qL?4BT%qpB-yiPVQ18!Twk_S0}%z#cNyrBEcD;tDwr&aoMcG&yXoZ ze8miM#>Iyxhx2g`Z(|@HLD}D{>&@xMHwUzCd_!MB!9oA4evHwKv{QT2pz z-Hj%NkLLd4_RDpwV^#%^Dd+65z>LeAcfyaqGUwr;teaWA+s*bOfV~}pn1~oyryJ1c z@4RXwQG#FLuYEcaZn{^0O>U7iK6tB4#PChIY^KhrPqd48$p^9TDHpjwQ|kGYYdQ=Om)eCXf~rt2b5 zgpl)ELAn$EeLQmcER$2n+L)ZKoexVc^DkT+zRolVzP@PmAPD6vTNTwbdBWo8T+>3r zjTH%E)5;JCG06N=7czYf@;c0&470z`V6H`6DwcUbVg;D_jlvN_9_B$84Q#Z63LQwt8o zZrbl8Zn)mNpL%=}|ozOEZW&^U)*_~*#lCpV5Di{=_xaA$u#?`p7-~42XQ+GEAr;K~Y9c2K0VE(joFZ?bO z*47==0-(^+^&ES$Tljsm2hRl7r>}8+uNsQnX6KE_G1s&|=^9uGY@$EOS0o=pKu~+h z>lZ+SQOJD(!P51KJy;?o7X=zEB)5GeZBpdr2yD%qO-lvlyHG3m!-)H1h2Rl+3a{6< z4SW2VAuO2z7|j1r%uO+v-%-L~NOl#)t?`Fzzi3j6pPBl@SAdrKDVJbWvabPbR$<#F zbA*Lg1?G<=p*7+U^N-3-K5FFYf-CtXhhDOk3F@|dh0<|Q zBMvvj`qz6#IJliD40QF%WzgIcc+ib}M(`Q>dYl9%lM*{I46v~Y(ulsUlVPVPsGAd? zq1+hQNvYAOz_iXc_uTf!a~!payi9?IV^b`4u=3y}1F6yIm`jXf4DLPoCbFqUmKDW)p9)@oS6Tix29&N})5xH*}%$as3JCJ}-d=RX& zw(i9S07d!|ywgc(CE?W>3A(VC5>v{yh7MZej@7A-_MAUnLy5cuS65EJoZ@Ph$c z#c2OiEe;%@-FDh->x+OaB(GpYU&Aa9hAE1P1*i>fOwn^i39Gb27$-2%4I)#s9|)w& z*MiG&M-|>gu$jRmYyG7OAKpxUwIeYc>(EOY+Kq7CHncKC#M;gsY&K}x zfBvbpRzq{J-A&2Zq+4H@BEd{wqz!8`_k^p4G7d<4fg5iK$#ov%;$_oYrc9~g4c!lh)Onwh^|2(EKo_CZ79ruR?SJz>=Z=eayuLTGj<`OC1L0-2#>Yc{B zc{hc@L!KG)rtI3!v<;T9x5P{A8fb3>oi}-as3{dE!BJ$`^2{Oz5C|vzmCYD|+d^&6 zHC^OIz>1hCz;)SqW0H0=q?~KjpX~ofjaqXf+@E@5#0h~G^Yan15Zzmndb+V6Y}AQW zRtlEP%6+EFA?1FWMtCMfH?ItxY7kE1ay|c7nhDXkDsNk+vJFD}k)CF8j%VM95s_`_ zg31qNV}Z?tN|^}DSiRcT=^A4Q!#^`Kj`>cWADmCeE#mn>F8pq;ZDl`ZH`{0Fr~H5! 
z=Z4|ciamzDo%EHt^ZRy~9Y%5IrElq<6=MAlO7VGj8eZ+x!?W>RBW^=oI?PMWI zlW5{}|DLfsPrZH~&F_m#Tin!Jh9yv!E8D@gXyAj9n5O?KDT+e=A*R;&@n?wkAB%W* zHUrmomhBK6@ChpJ3;b4B3a@YjXw{0(0wX~p-S*nyIpBWlGLYQu7BrV)|~p`qyCWl$5(T}AjF#`-}l{g3RXUVkT3Z1}MoUu7##T8#}!PCk+0 zSG$n`GB~KA>KO z4Lw_)?a34k$u|7SCAQ2JnM_*dX4qgq_mIf!FN;366AF1je7b@&vre>#E$e z^|M7nOAUV=F@cmO#D;~M$|UhtiPC13=j-Fs1yIr0AN_!H-NL9X3z|xD%+Ut(U(=i5 z*{wMzmr9uVmCFaLv|8MP&Yo@Qin-M^)chu#ZUuBdor!2GGtWaaVRPDVauU517};jw zw%%&|e*>D=hz0?In^?@~>1z1EG*Qj2QHHcEt2JD&Mt21MB`2hic+ZfkzpM_WJ^-U?8Ar5~s-iT4guGyYL8#H-WiiWwUEG|Eu19w_M`%ZSqtufa9U~%mN2z@-halqyfL! zuo*oas0p>A-i6$EVZ{KzayKp_#^HBQw%cI){`*}nNv@KU&&3C^!)L2oEc05nld-I7 zfDBNQnQAet)W}2>ZNH!M@O4& zhP|j|9%2<7Vj*Wb*5{sfDr{#`4<6b?VNrJNu~c1-PmkifIHc%I4< z+D-1MSAYP~A-m3e;D)nX{_1)WONvo&xD=XHO%#w_tE>y(j+`j9x~2-&pQyo#SUBjjrbyQ=2u-sPDkBI@*HJD?sJS=k{FY;BxcNRVRN62;Xk0E8X)p z0>ptiSi5oe(M78e$UJc!{TC$d(x6bbwb77{YD(r%%=;LJIaq@vk-PHa*h15P(Z9K< zED7`?Lr;wqcW$PcAo7~iohN> z?{(geLr@CMirG)L`w9+zoGqDdiP1I|x%wL6mI6l+-HuqnWY6Lj8Fy^ElFo9OI6>!**^YyXeP8GAb<=e|*;8#LHEptIt?gM#cC^W*6GPdq z90~(xR_EiV^?FIIh>Kh&!D|hWm|gG8f5Z>MKq@__=t{e_%hz}vWxTT*uVsMA&bKw@ z@YSUXIQAa3%e(iB@|z{3rAij`gVX-w91Ea9#cN1V)RfjTMe;L)$5uGWB7no7itJaM z5v#tv5O-{jE-XVw&+CYXkTxwL0Z!8}UW|6_BzI>h0o7&*VZl^R-Zf+{Y*wGY29N#t zcU!m&VJ|X?fSWzczO(WxiCGmLL$E(q5Q`Omqc&`u% zu4fXD`ZjB2ur4n}7#eO&2gBFnLRLA%5aB*iZVv^}hh(+K%23k)y!Whiz3E7a%)zsZ znXO1c%!}|UzakD|anf9Yn)^+IypfDUTwp4$en3k_aGsJUfa$-QH<{}Tgz-;=wWjlA z7Dx`3mGKG%u9FLBp!!Ki8R$u=ZvvaGvx2Lh?a&rvCX`nO~#r& z@FTZK8S*v<{kCQa9vckjG}I9wj7GpgZP2Gy)BKdSiA9h-jwW^+6mwx2tfi8?3!7sAvmU}k_@(`+|@Fjm+7S*1_ioTL7k(se}Qc-{w;XPMmD z&d5g971%H{qT2;9|JgZ)eP5Y79(%_bUO84dH3{T3U3^{%Qn62%3Gc&#z7=>L`l~NN z(B8l_QwDE}*d{DZ4K9~&e2d3A2|<>C*U=7kRJ~1Ynbme}=V?SVwSespcdpwgrPDX; z?V9-j6{~WXb-@mO2jXufZ=K5!Y>EC=p#RqN;H;>~dv-po%8oJKO#gWb^=KlMQudby$sD@b9SWuwqT`|NLt6gp~0p;c?q7f5y&0K!F)*x`fR) z$GUuTq|SqoPKjx(CiIvRpBmUVG)oGi5cZKS62s|UB!zkW_4pc!jp`v29|ON1bK##*A*JeLhL-(J__0m*4y??tX6!$`!4OI zAnVR%Ta^1v?RXP6#V-a1D}HXBKYLzmlK=kKqpV@-j@VPGeh6dm3_TM$?_lGFSg!Z( 
zR&sqN{&(oi&PbO)%jmk_9Xp(aSk8`2Cp3!Cq>aK^&N5F5#neX^$^DP9fH*ou4IP{d z*CvmOv%206+&gRMsoqqo{Tk$0s}C^Y_1wt2I0bgy_ci&IhAODC^cPBY*=K2veG&k{ zQd58z?}(#99cO-X)n+f(UI9*1pZJa75W$Rl8Vq4i6Uu zwejB!dU+xg@0e(`zPJ&p;Qv{kWAG8>#G33^V;Xc_e;B)mx!fi*8q% zN};r9FLJtMEQqY*IH@1rj1^dFbW?QG zzQh*bQ>I?%ZmQU38T2~gcp-{d<_jpr>nF^ae<~r`838)bV{HMEUGgGV&GPMH*$s*{ z8!hJhoj&w9l^ebT^}KYHCw%JfjHopaME!8a1KqiqshbmNcAf$E(4R-*QtDNuemnD( zFfsTrM-RRtf?wimRU?NoBm=A5?q9lrgXPmZ@AOMPy>6=?tHew9mieHzhF=Ae4at*h z=0iS9;lmB&nbcrh(VX3)lg}dh(LT?IaM3oUe28>KjHnj7HUaK3$T4NY%pZxdTP8p0 z%FG7di&XRKB*XfUf|I`sG@P)J3&kV>Ij0g~f2(1uKTyoHS9x5?wxWU`3{;mfzuV-k z)5QofMGXteFpFZfA5i_;i#5idlz)9>oosKoAa4@wMQOV{RbRoOal$%gM5HyEe+?R; zj>BFnzjck1#fse(-Q>pj4M-pAoR>92#+K=3i`rz9hups+MxM@ai~Mt_hsozS)(wtp zBPSASBQQjJ220Z1B1R85D^T@phHFA@H&L-J3T1b@30ty~~4 zgIFfy_&V>E%8`)Ldm>>CK%i?lk;rY}Qi7YB>1%s=I=+1Obrtk8m5XY_G9Q0CQa#y5 zUg&`KiMpYZL3vi_U}5C~#MA!nK&9F|-pETjsaXzMGjyUB{%-F#{lRll_mdE8IPIQe z`w@$icfnH5FNQmF=9kX!^_N8Qb}+{okhFna+vloz^>2(NxJA*MO%Vjg9Me1tu^E0n z7aK-`T#XM#)_OtQh$k@&na^Q1q3%igZ>+7g_SHB|U*_6`_(=D+8Z)^4qzt2<5($x9 z_#Ss+WKcRFQU_FvXNl?=1*_GUv7MS$x2j*tr$gLvsi2~uW)L?Re`C?^2v_ewLJR+t{2p7eH&N$~g;_(y?&i%tr{g541(p`Wa=xO*wup5Sxa0~PBK+0k{c7x zC@|c155{EQ+UShu1nzeqR4(;xdxJ$!F$sdiVxbw~0UF+4VEx-^Vb!b zjIuG}40r!uN@~!}Z}b6>F~s^-Vy@AQ#C2S4ovCzdRqmcw=xe*DIMhw~k7X1{7&1<9 z;z8C0`fQOWp(C`x%gZ>w+Y?7XYN(hkbwwZLaw4IDW%G@Kwu?;A*1`j*z`ffLwh&x= z3VOr}m#j>VF%nru+`Bp?UrW4NP<3YVJ9N=D^&9u8PaQS$%I8h&@S?FX*u zl~)>Q_Y+wg{Tx3Sdf>5^b~d=>Al)BxlA1>~^@$zq5sWG`P)t|oQ@!^ydLW0K{9&FM zn82cG-LxDfaV7sCl_h&v+@*4P(!ROz;Sw*kuCHT*twY_V=eiVeGw~QlSSgl&E~Pu& z@Yt+OnEZ90Y+_;Yf-)t}p3nHva_Um;xil{U4)y{Eb5{L|X$w>ub^v}Kh zxci5n|0?|}(4-^?Q&CY@CE{I-2}#6QRfnJInhgr>%B0wG`U}BQ8P?qlctA*~m8H%! 
zN~X1PvGbrlmK8{KeG)$`Ux5wh6UXpLdz5O2QsRy9HkPiTMv@0J%vWpwY*OTKgk_|o z!QyXNRA~~fEuuJrt6=Z{Fm1T28KWP9jmVhmQ9WoQ9W*`P&`k(R51Qy^A^={20niL} z?e7+W(zwu1dXB2H-tDP&ga*0EV0T1~(t2S5FQZare>nBH~(Ge}@~`S0eQ6 z-ulM6%5|a-S%#rIOcU}+6IiTUNM^(&n>(IVmv@TrnX6rV84EpCOQsRa`wiwjM|>(C zWba1FC}Z6{#uW@Px#4H9G0>V_da8@oE1r)R1lwz(CrKbrXaBBoT!>vNlIcy$Rg_N1 zJ?f*7b`6Tx2cr^6!AqKV?t_k6n)+_%p^4*;FI7Uxhrm?PWBROPOXs~qr3mNon}r@~ zy{m)5!30*%(YQg&8KHKKo{}zf8~I|yPWeOAi46UE9uJm9G4>R*5+xxf#pS#u;$aXm z!qSq^0+)lKZPp_MO(Wq0tbTZgQNEYjLq|H=$Glj%Z>|&uZbBg+uC4eGG;Tg9;$H9Dyl+D$f83DE$(y(`DL0zR-5-FoM!hPB((o(jALn}b+VCVE z61aFg+qu?V%Dh+{fBG1Kc^e`bh2Z;x%R7ZKm!vW(#Nw;3FDV*@BueHkg33D$dTa6p z!8&YrdVrvB?O`NRf}wAr;8lAZK~X7F713!ZYv>&PAnt>(6@4XDc`?^en~0WkV!j@{ zLQNOenGGdl>i;$|D9vw&kd@d7miyOZAp8Q(RJiJ%Sxe*uyVd!Cl|HxG;pD2+N2y!} zZ4b>9tja=nfVS18%UQz#cN{0^6f<_-d5NJ5tGNPHY8dh84c*3)#9`=Gh%ti2n|>B1 zr*hvdKplbpyXs8uc1OQfJ)_-b+Xe!0=Ls*%k`gKnnd}2^$kL-AeF{P@pF^8(G=$~X z_WJbh*If0K*JV)?kE)wn_GJxp?(_<+Y z?;e|c{(GBD1jj*i%p3(~)NI6vf^}$fYGe#S9}E$k{ThK=y0@Rq>Fd5tJIuh~11d%s zmvrdIgvKzSH_e=0+GyCkRfi0#u+8iHEagW=tp3#hn2ac69QakeBVh$Ev}r?Vfur4g z1xpl=%sgvw8=)4a21#6eLNE-x{*)RbvkUWZ_0#T8!CD~4Vzu@#LJjT&jal=Hs=heT z(vQn+cHTE&L&5gK#0&@J!eR}t%F_;uCI&=F*s%gO=h9Bv4H^66-!`&HoT2YCF`gzk zj}G!W%Z@jk1L0!Us+1wfVT4HaLE}h7c~FsR2;)dX-l91K_h5?DZ$#mpAWoNgwb{3F zLbb-(UwP(W2u8%%&p-jn+tlDE9&)NJc;lMBn>i4+Wprtcm#P!&R}O^~gf=3Rz5 z$5*dN(EQ%56_qAiMF#%KAkxn`tmzO_w$y!28j_0_7fr$^^h{nFR=n~SLLFd0&ySO&PKMcZv!^p&!6wFO7;V& zf8_*C%Hy*$NoCJlg}WY54w$wE0bw0mL~9!1?NR8NU0ZS#ceNuA{i8z5%*YGrljX;N zg{$ABZCsz;)y}2MwOw7#|9*^B^$;ca!})T&C%pzJCwb2^AZJmcMu1r4c?x`}zL03Q zY)mDgy>fJIM*O4w>+xDRIc6F0t=rRoVeN_E@piI3e692`weN{m`sl`47VjbH-K_RK z%y+QY_kB28dA3K|iMC3D@%$uh=xd3`LU62Cm|b-u>J zuh;X>1jaxAW)-9nWH|bOCOh=ci{w(I4Gvv@vY|LjZxN^~3X@~(?Hb#Hrb&0+TCKLl zUp1Jo1-y%pl`S!G-ku-};j}@!R6(7FYir~H5WMpQrRV`rZ0-o8j~pNF;=jZVK_{sPMJVmjxi!SyaarOTs6G;A7}Eq zp3LOF4chLU_|J5t+ppzw63pNs=|l|TZuj2{u@(zu%o8HO0RYI7WlLS`uc6KlpA|M& zi3j8U^N-t zp5*wVErbuJA(Ku@6t=~~TXRGxInD>D+LxQICrhP2ABk?)Bv#w>wOSS3z@n)fQ?6~d 
zYbw2t&)4Zux#4d2*L|}rvw+{t^NjE)vtwQr;I>ELgU!Ao7Q_A+1FuO z0hwt!{;q@F&wq2p&#g3AFkVj`{S^jtJ%My`qO8bKVQ!WcLRG~UN`rjuC6{m-wp#m z1D-CQ%D+B#^LzdMA5NaQ1HT@QuY?T0KFyytuOjs}tM0dk=0Cfd(LeMbaRRL^!uc67 zr))9&0ICIPcMmqvBIKM_;njQ{2IRWSax1a78oBa}C$Nxr-^;+={!Fp()NSUk-OgPB zD%d>MAvc@2HnHET<`Llt{n>p!Jl{`O;=kJc_x1ic`Sl^x>;Gx>`mwrQ-%I%ZaKilc zkRQMM<@IoR9y3eK-TU!*u{!IUFKDL}>!~DgmX{wemPxoj0~jBULF?}p9Z;KmCL_|T z#>l#t4V7%IO0LJ(z?W=Mq35 z@veEfTs<)qyz2Go_r7bs_}E-#&d=-i?Rq&qoFU%bd4KtsUfs?A`gnR1>g^4X%Zn{sC@BhnJdwHA1r1$O~tQ1 zb1$AKNUX#Z4YxDbzA+Q;;4kwT*j!5&*zxC?UtbI??4Y!q9@62w6$%V!?dAUS=5=s_ z9l~7?U`9$e{GfPwyqrFnoi+G_^8T^9$(-+zFL3n!uKIHDve4D`+E)M2>(Bdq?eOuj zNv%uin8?XC>ql|i+%ka)NGeyU!Ql`EgfL^fQoQq)y6KRgoU4=N%GRVXRUd(chk54r z@=1Rt?K{r@$&tQ{rS3i{az0A8`aK0iBUA|!Suv% z4BdK?4D+VGS{?q;=K01Y;^kwf#b8NNG!{TNTBcq(DxLB@e7Y70s6|Qz8nf_T@O*C| zB9&*Na0z<~Qg*l3v;3L_Y}q`lS_o&6G#~S)E(I(E*G(?%6ddeht<_gs&K?a7Dg!^d z1KWGw7vWFa>X`#SZZ8faLy@mM|M&@dJ$VFr2Z;8W(^2Rri`XnhuL9045kIOFTYKt! zyRFv@R)u!vvhU$i?bZttahFuXy-t@W=N|y{WdJl?-GXvv2*L|O!uWls4f&Be#Kpcx zsPpm1`+lOY@{_8MpPw;et(9HU3khR2Jq3(DC_jcz zx765R=-_j=Fa9-H%xwU0Y!`h9>!>+f*L`42Ejlc>ygh*~S{17Qc-)-)5Fqlc)26aU zh0TJX=YD%-2f7VKL)a6{gn#@5E(+|&SXSlQ1H0Pdi5=NJJ-s58j&zrQ1x>2TdP$Ym zCe@t9vBc_T8ZdI%l=!3$6(m+1P3mHP5$D7IH9<-z-g$!QM>ngB8e^mUSvHkz*xt}0 zSfApK?;Z;F3Au9{P93k6M>wW^G4UrDLa^T2-d(pmQ<=VNbOg?$HT+^-va;PY&fkE{ z1{>a?8q;5OJU+hdI3Fm>B@fmSMUTk(YI@Q>Ddv!2w4@ver$5 zdXeV&`_&2)^1K3dvx;@92IpdN@pHoy932+)Q)lIM&gNZg7_d#1BX+`el%?=IKE}xj zzcV$B9XVd(CtL#VsAoPu?yHTDB16l`tu9XpM*sUG$;-;6Ir_7k6n)p%qhMpY zMSl8#XEGDd-BC$t9nCYSYwflm-?^UN+K9#1E#=14maJG%mIUX*U%7fQcAPSofAelL zo1wI(Eq9(>Rr|T64arMU=u>>MO zWh~kNCU&#ZWTJz9eVW^qrqntmY3W%F)sm_7+eq?mHa)sz>J8kza`R#m;ysML`IL5}&;cniJ1o|Up*=Km&@VN?Jm&vSsDwZDy@&IYO~ zDL^$VM>!GGFfCChS%y&oKrb&nC88EnvFbPsK;TG5hiMSb2o#?imV|P>2laJviWXqn zW5GN)Qb=}lRjAX7QFw(+Hn5p%5!%7ihmFQdg@iql^ zeHz1}nqx`|?sju_qj$NM0pSTT!bz1c%OovbGgXpSuJ-Hn#=J5*bsZK{2Np9krHRAE zSXGsXk1rlCT?Y40V755Yqo~M76eu=6&Qzn|&U=Y`Lj?-sxCFexT*m35Yu63-%r4T}* 
zuBt_NkH^@H(&(m14Oq<6MhOlZZ(S2}O)p7l=@9PMF!&IN`xub0A>Wa*XKwD)y<)JQ z_wKp)cIH0E(kghC26I%ef6IrTHXp27gvW@pp8Ztb+wUjHndCPkkM$%&GZ1^ zZtiAo`M+OZpDrbjenh^p*Bi>VaY<=~$$0wuDdmKqxc04KPzPp(29l8^K*1GtKrL=v zTR#@kVxeYwa670^%NulgV=vDWa`NUbZUq1F6uS3Ff9hk2aGohH*eKlZ)VpKcoq1bZ z4^K%P|0i|$$?mG%!Pe_}{oDaX7OB^lSDR;^mxpKjXTt>!3n(o8;N?u?|LEquJlEF2 z)zkKE_hV~j-BHp5y}YrXu6B;A0RlS-WvrItRLMkoYNTHUFE|aJevqXb>(HtHTG9Jr z;3@k-ng0m9ZUzWSAZ$)D8lbbY_JoQ&r0=KKnOaz!J^>@QY>=ya_I!?{uG07gPwM$r z4I>cZP-Z#W-Jc*EkE3hC6V@RY#vpiG`oPgo)K_tSaB034)LR7r$z01kY^v<6Qe4W> zcNZdN={x~Hj?nVp2P{Ki))R`y+{i7wQJQ~5M++M7XKdESvPDQeSc@A|{(k&X^F4qe zN}7eHu!EHK1O(|8{viQ12eG67)E6k&OHW4&9vu#H>rhq*dRhjrwlG)Gpel-2uSV== zk2S}Ru+q!2clK{)M4W@7w3%H@gk7lUdMOGVNzNUx1u3Cp;XwB)`!dKp4`iIg|JcfZ za~iass&&5NbP2vF5S-^ymm=G-ct|1@1JwmrDx?# z?dO$&=HcQuBj-`~EDK)-EYh>C6*%mk1aPT~yxjav4m!ZQ9=53OhKp2N2F~LqB;i#+ zFRY;N|BY+%6k8VtzifsF$FckXmj>~w;6-qhnShkSY#+hs8cp>cN%Kj%C4rT2!kFg` zItwZ8%D0tPn(?TumD0?Ake+mwnRKTb=HqBXp;Po-<}S+8+n(X-;WCa!vWA?2$tlLh`$r5Kewgbgnu`j7 zLf{CG+$_8|oe0rcBL6?Ee_8}e9z(PYDxn#{GZK-1=ztZNZw}AZI;)IkIYy|j68Fyijf`!$ zj^{~JK?3>+3vA;EOM~X}M2kl7N~z6Ht@pg?r^T zA-DFL)xv2MlFKSjSb z#Xp3AZk?;~3m`ojwy-_^f_nM${&fTe3=%E%0yF)XNFp+9w`TD)tifsMR!ZtwTr?v|h11du0qWyoKm4<0(QL6-~Vt z#Vs2aakYi*c3`7tg4|H^?VAP#C!+Y}NJ(>W{JF zkb68WVWt?}$%(?zqv$&fRvvg!d&*fVRcq4$As}TG#mP(w_E1{`oI%{W(W!REnO8tD zZR2C1AXK&+WKhgfa@0 z9dyse`NvqCCSozW5y!}UmhP#XOb8RR;f`)Tiw!T^zNiJ#^7u5|p3VM03+uyoF_39e z9!wlzXb37^sB#eS)3{G{U-jPtA%COdTt^m2!w}jMAU6AO;MLo4*|w&b2M{EF_azfn zWHz8QNd;tnmD*O!?6fa2)Q2<)N7tM^3cP2!uE8`x&kcfswIRvf$BuQ*kw}L9=efyG=a79pnR&gaf;2YHyIi~NDf*6_-dPQv;U3#z`3gE( zQ`*O&3l1V>pGd|ljdHhyVQEIbN$^9c5USGqTO(+@0F;h+MF)@CZ}9@Ti8q5#^1b-x zv7zOs$wlKfc{_E`ntp~X2CZR~&&gZ*QPEHkN|$V?ECySVK}bLx9Mph~9|xPs`E?Ug zZ4eP52}|-)|9wURl-N1kUtH*$Gnd>!towkAm5rs+u}Li0wUmic2~n$IRJ(V`y+_?x z+h|vz$6B2L9Z}Wx5_L|txiL7o3>M}koF5w+z_WS1wTG#Un7J_SJq$x%Lqt{k+c~K9 zJJAIvc{$KA@-D z6YDtbv_kHsGlb`9DnF4$aV%9Xe>(3wkBSJ6Mm3HNcSxn<(RC-R;3O%qAfotCAi-cm!j$qwlm#i8 z@h1QC_?$T)&y}75LHw(e#WW#+of)U`7a5%p=&@XD; 
z7Gt`xX1_oWB^Oa^rf6hHW&XvDqogFi2o)g~&8kR>N^&L4|K;>H(fqILC&s6&y(#

3B8PkYE)wGlvYpy#jSb@a42$yToOe0NWHPj_OaXqcLt_xuW@M6e4BT~joVDy3 zcOqaiO`2Mzi}{Y$3O3fI?PT^pRXqL}3(Hebyp$MGcmrnV6C#7LwH%nnY8X6V$tm%Nb_D5lfK*gNPGYr=Q`)A=lT7f|NDPG&;99c z*!Ed_ukT)a?X}lld+h^C-))W^v8vFmo2T-9n3l#TT^sASGVC9D6<54YJ!f(Y!lO5q zA5@xq=FeIKh4}Eu-8~oiYIDGA!Y)!v!XB7k zV-JGnvi_gs|%zTLG`Ca0E%Me+T*T{wI8XU?rw1(xQOn|Ci)uF06$wkh7o@!dOw%1Q~rBlif8NZr?n-H|4du%$T+ZJLe0_X)rp0MKBWe! zO%>sT;|<39M=G}ER+Q{4K3pXwclT*ZU473F8#YJ{7{`#i+e=ePCO5r)R#DQ^T~B3A zX6@IS_I+gR&H0~Z6{XiNSL|nVhEs4c#;4Djg)vr&-uI6T-L~M&Y}St-t)sVYUFr3E zJ>%D__cvQc_ciHR{K)QU<)>xmB5!f5peRTQ^b?Izq*Ad8{c{zF?X@q zk^tSiZ#P${9G_g&@_Lx=KOfd_km)~QPuszqWd=9z4C!86AX67y{mL)oO@pk?JTDba z%(sgfYg47GjKj9P8nQmfDd<+_uIloTets27W_y1u{W;`CRc!2^h_GCX^4zq^W_#~X z-4mbObA?q~){B)o$;ZDryLC@VsVY`|Xc)gcbd_RWFXQODNim}qDLBL|?%q_jy0O>q z2Ht|G^_w3yG$twhEQouQF=zZKmBDJO*2S&ZqQ%G>)IEB2?W(h3b{>~~PHnn3;%2Md z^9A1VRgWfosNZ-!OrC8$q1RqU#6rGoZdSca!OkXU*<6FS{B>5c30F!z!6U8V+nr|Y zDLoYK>bH0780&yAp4Y2Mzgsxz%ZL1_4$z2Mb7w|Bll0=elB&rqJONJef7=MX|kt|HKqEkolrFK z%%6lAuijee_A{J1$TH+t^vHxc_mk87>^%Q)&NbCeE04>&l4xsqlvJ8Sy2`E9AnOh=rFEotg_xUfuq9y4*e6U`Jq&8OO&@dj2AA z>hZ#(Gj^=o@v6lAPO(nTS@)~g!)K%$mVcO6;CJt2?{Hg=oAGm#70O@JuE>?lluul!?Y<8ULlcwp%K9h zb7iyfz9Ifm0pL8%oG&~U`&;q1Qfjr7VwWxoUE~!N6cm({6qUi%Emaj2)m}Y%cGKwF zTVJnlZ(Uu+KvM%of8znVxK1SrOc(wWu^N|$*4=qs!RXPlRhXVB`q(F{wIb1CoLl@C*MUuQAt^)yOfNy ztgMWjth~IO9H_Yz{F{`Vx_pm*OxrF!CwVFK57%I=+;Tv1z^IHIP1i>a1KHjYtCWU@oQ7_x9>k%ezr1?!j2xu_Y}W=&?}$grS_3)NgBlT*P>wpr(5iol zqjrYc`T6@yN>x@G44ACC)JUmj|0iZKORJOigML~ zil6bKfW=M;^)~&Mxm8r1(bU`SYqk6I;H_?~U{!q`ozI3p+}EzHxnLA}{JHVD?pjs$ zgWq~p>g0^v7?}NfY}#%AoGraHvijQcx~#qYl5gr3d8X8Pz|6931%{ihxtSdN;OYDD z=^GyZR#Vx<%n9DPPL~2hP37ht+N(5jMc<&wMOzx>zl5(G~DY9=KhA3DReq1)Z=RB=E&YsA7%Qy4nJwT zxPRb7#^(Fq_igE!WmNijT8NgFveQoPmLst#HS;Iug!a)Y*s0NlUq4hOF`?vEkjKh( z`Uk!Z_`Idnt?#jU&yHrEd}^OI`rVI@{g1u-ygA~<^M{#>w9-NhGU6B5R4yB<^ZVGO zE9|^+#|KVY|F(9V>LJy9*%hn9#@6f&V(01gi*w^nGI_OiYH-PBMa7yH=V9s(`<|R; ziyeaKB-=Ms8}XO+C#bR;oqlJikB%= zymqz>rm~rj7xI0)9jg8u(eG{yma^G|@eB?KO0Z=_MJxcia?}KdnKRImUh}Q35xR)- 
zW||{$2ap=K4G9S)#xjf#3<&}u2neZBp%HU^BN!ft>Nf|m3a%jPf#Jb1H??4JEm+{Y z1rPbv5)^=cwPmy~4Yoc}fuSLwFATG(t{{~N_{JC#5ET^`In2z=KQJm_fw!qoXfWVp zzOQ#kOlVN3Pk@)9HTX~1QTD&xQ7*H+qn7-CucLFlLcD#WNj>EJ+dX7*k$zdQnWmN& z43;^QVZmX8Ujdk-eIuh7W^)50eHKJU0xSVq!5})sgvm5#vMjhD1Vo6G#lPJT3%)4} z?FU##S?I4E&i}@ka)mvW6yst6dLdlQV|Ra=>5({SY{asWHxBOY>X>gez*doSNM+gd zhG(+h7cU;<#R;1B=kJ{U=ZCV-U3GFT449GY&@Iw=${Ul{i9xsDkE&{2uAm#XWLio2 zv%1*UmX^vmm95|3)$}}=GVJfd)|Q(C{`@WfvZ=B9oxa|_b+c^}m~&X6ya^<5DpDOUj=Oo8=v^{Q}oLJw6Seb2uaJbw&M< z)3F+7OqrkM(_N~W^3V7R7ivb>ZQgl@?JB?6c2x2Bp~jzv)H{z=zn#$>`RrNS)ow`- z|GsH?o%rx?^Phb;tY5W!EsU9cWBHOl!B(3$b$dB7EI~EtdcoU*wMj{4Qgt3u>Qa-W z!liy{*?cOj$z5)B{b%W~0!w{8d8JQ7?rcw5H7`B1R=)9_{@!5ecUrZXCY-I2DjTF6 zrPlkX+>~lvwxsMz@Egl>b6+a9vCQ0`L=HIi?sb)RzPrtVMc3M@M<$tdIW(ofZiN2V zB_}j(ykBzvSf%wdEqYRUJ}^wRVqx1mOPfC}ALCXZj~&%o;P12i%5`?@&$ljtk3T-< ze=ks2saaxoK=X5H&p8?K?gm{wOjP|MhK%ZWyh3gM%>9Pd2NI50a7OIatMjb7e9>-; zldtTs;>`U!@&b%j-~VP)P_ml&BFKLABcG^mt{E01wjE3D^=su5<&2fOj?*e+H%>p| zFndz3^<4W;hmKD%#{82P_Ghl@y4_``-q5P_Cj+tvdg{zAD(5n2mBbec3imMmL=XzGV`7cyD>O)sMZPafyT2D$8YDH$^Uf1K8N4M zEaS1egW={2tKG8u+;O`8t~^blpN2xpw=VN?cwRdPAC_^w^erJvr+36Nok=D|rvm?K zuqK`SHE!MIDGF!vdU_kpts2s}sX}E%!G?8(dFfM*?98)$UiHQGP^tX*M@LhR{w|%_ z(;)E4>SGsftjmttwC+*V>(y`bcPjnL%Q>#M?#Z|<|BOF++vM_|c|XTT?$aB2{qDFw z(TD#2j8Uv!E|;Wyv!GzD&3oIG3r2;%8Q>7Go$ug!iZ7o&w0m>;g&{$TllLEsJbO}R zdbOiYD9f$kkDrOoUbp>Ht0wljFpa+=FFiFf)A^h0#=2M2?(8t!ZFJ#aLeyF9wOvd` zENIxg|8{^`(QCiBXF4BpG9Mqk@BJ>pKHIVQtoma82`hEiJ(~9T`LBhW9fM2M%1Tnt zUR|y_l40AwzqI~9g>GBUY1p50wB0^Rug{l71NS5i$=}^DXK4ERA342`shTr2qYmj! z+OOreJuV~6_oA)qm(PcW^qHo6VQ6|y)*9X@#-3-zYG1qWyqNfSr{3d~`g`sw>y@T2 zo3}<<+GN5(`wiLWtf+fW`z^|ZCQ+mYQl zzotxnd?42)=~KToH1ge?@MR)3Y*nM*gs? 
z2ByQduc%M0%P6~bVXYnC%P%heg}dbg1HE($W6j)^<13gw1e-Edra&pa)u zeM3tF&%K{!@aVY5WToL2zrU6|Vf1}>t?~1e#>P3lSMr$g&P9EOmTy z-X!v3*4d&Lugh4Qccd3?HI+6_yjpuat@Vo9@{kn3rd-ZaXe$fOCPK`PveP zlhJd6kAO?b%$5R{x2K-gd^y{S^6!ein)AA?k~-0{vTI@ichTxNhEJ43TDV;HL>1}v zKlZ20o4t7TXg2G^-_^IgrA+@BG26yS>h?DOenYzK4?bV;xnUekjF_kQxH-pl1p zIkBT9x=hb@QIds$TmQ288&~)I(ck&o(SykkpFePjHP+tM13WIUrZynK!alpHZ=lm^)h@i zq2CaSh;GROYKD& z`KR0O1RL(z_-=3V$g6M1jXOBYDCGU&S&N<4rQKFK{ABn354pLfWxTfkq)2I1*DSa6_a`)F+^6SgG-L(!> zZA+L^I$E{7SLylroW3SxD<7p1e7o;LQttMOZ}JeQk){mtaJ1C|}ywrIx>^D~Z) zk7TUXyO;cPf5y@ohtIa2bMX4^zU9{Z)#=+xoJ;jy%yW8w{JrApg-&xP^q+g$>daMF z&l+ue<(l&K8e_~7HR8elUCwA2s(NY}UU~m4QRBlIO~VV{e;;l=VRr1B>Rb0CDg7Rw z?!V@P!;*_>tgnYgDBYNLSW$DS=i!O8UN_D=ZyU zlB%x1RqlJ^l*=7We(<1?dTZ9C^X?pvz2g?aw%%jtqPM_zOknHwQH_P}dC$HKwLg{o z#xd`8&d|}up6SE#Ue9}=tNqt))8*O=5swUucVBSMIa42<z?m(JPuiz*%>#T zyEx+0MC(e{^hA}yvnC-XJyV}vng6E8&2z_{CKk5(tuq~-J^Dud&sBHM{q8lsaO^2o z-q%4VRWq(Ro7x%wX;<_wv}KKk{;RyfyGlTV=ELp7AK-yXpP&44=x0`uzrcc_HO5x=2Yb*|{MN94i7eJ^#3^UY2hGiI@C=;V^*ao{D$bs8r# z&zb1Qhi=sE>h>_zeffdL zp+)b{2kM-@ee_ku-4|Dm>Mwbk(lC4aLGM7VhI_Av58wNpzdY1`#;-L`p8FIp8+D~M z{ov8xPmX)FdRyK&d0Dl38T(P|%7;94E5EO%XMUa2XZG;E5WS@SlIOdrE>q$%rGM;| zIpDBo=(Hiz3-;`oEMIDS@&xO1(NOPn-68)kitsz~n-kPMb;z7I5{sG>_VG|c8_nqAAwr1anNmIvecb1hJ*oPCoHQfIAtIWNj!@R3}FLue^ zB^6}yDpP*K=!qZphd(@UTr)J|uI+}!&&nc~zOp^8qggUBY=Q1TgOfF5G*7Cx?&q#s z@78sT#pQtPsRzw?x{>Mu`+s`f*%I`8@H(St+mxmJb+vYV-Yy-t?wU&Z+snsJB!>9c z%5;D8JYVHQ(UCmndRNbn>E`P)AI~^hytVs~-u*5Qnw0;hYo7+w#|inx8GemEMyA;z zm*=dSwB)R3Uys;Z(~4(Z82rkscA!J8(*C;qnK>~-4rxycmAz zo}Vzk+H~2Ti<>QP`yE=ij?t&wY06Vovn_scOO;0UiC&y9JN>Iv>C~BL3r*jw@{hJw zx$|_wtj!N!)Vpr1|M5=`-);*7E8mWDNx$~Heu!IMU+FOo-|n_8i@33`>WjgTUxHs> z7PS5GFuQtf$dTP9dWtg(XN0bD$ls;E=Ji_R>^WzXr)dsyKeQ<+$$j9m^qx1IN=}}9 z>Nr`+J8fP0ij$hvW1h_GK5K*Ss15G!RkD-5M4!H;S6|#eJbS>NV;gO1lruQdZ!&sV z%6(>R?SF6dqnNT#r305PkJ0bzxv}C_Z>z_%@>d_Sa$RpY?aAjYDhEB%b>^1o98=i7 z{$S?9?kjg+F*4L$pqwdtId!Me54Gl!TMx$k>ZKi_Ie*a{rt~qbt~;kxWyC3}ti9Ur 
z$Nu#RoAg8N6BAZ+W7+&J8sxaCc;{r!T<=LryW+<(8~k+V=gGu-XDb|4d_4GdG(#gj z-YZGvxYzktCK`%{)6IT`#TK*}226@`oRzGSH(}m@M~5`KPyKrA$HGajU*|Rcj9qjy zLvC-p@w&A4DS7+!f{dJA1wC|n@iEq^$|Wg(-M~-z=LdeuneBW|?Q`^`yw6)tr4|2k zEM-Y~TH4rEw{(-=?TB!0arahRlK%X@$(hL7^Ro;*XDZ6?xHj&0?-9)dUnf^?Kc~1? zZyd{Mrc->sCt*@E9ytB!GsN`N{8Ilpz85Aan$9%MdRyZgV0O)HP{q`m$Ek*I(+ZBx zocv%{|G}TGZQ~ZkOl~;*Eyb{+KG&DaUE3H+-0jtSHdOijyu#n`d4=KU z!l?d(7yeNCXUF}EKYnk&d+t$LW~`NagiEBc)WFGmHrlhEC`rB7QIdXNv@73H)ltXc zS`lOVhL=Ted$`8uZ1s=N>G3DzTB%XsEAA}!TA4Y~j9+8keJ?$5@sOu+#kGnDXY+37 zRjH^s^-xlCNt!S(Wk}4=y{wUow9AtYEY9q5T(7K>T{D@Zo)XU-R;)4pZ20avuauCR znXUu+aPk}mb5_}H(!T0=$=zg;N&h;pE#VDayt7S?p0#T#GdMOzy|S-+!Y{Ska`9dJ ze=2QhH2Ec!lyF>GeoMlR*3%AoIwKOcs5Aslu{rj`M{dT9%;94$ebD{1L;hOKKE}4* zA7gq~I|SbfC_5Yb=${^wp15@Tv~*AR8^1r?FxynVYFO6fq~1&CGY_b~4V&maD>2LW z?9m%fO1G>`hRv-vWn$~yMfly7gPmn>#KS@`tFz23_%-7b$A5VCN=rRSGA3`-1@odbmm#tZKjVyotJqQ-_Q*=N@{Xh zcJ1q{kd2kcYn?AdDCjR-u5@p=^qcaS1vgr*UbP5``S$7g>zHA&5A#-zHXVBZP-LB( z_DGKj7tA-_t69?U{nPawZkG;j|L%A3<);sSa`p`SSZr(Nxyz{WOp?vD$|oiscn?4|WW5mnP&LJk%dzh_#jepk4{@J&-`wxTVi)a2`e!lL8iA6Sqr9Sgk&ljLGZ6Q)tFqZxE(38| zBERt%Jp4C41Gy83lrha2;0yc?{IO>Efha;?I+Mu&UudQ2ObZ71!hh#7m?%Ny17kwL zeLqlp42OaEk^#Xnzzd82X34O`e`8uQz!&!KOdbP#f!|SQx!DBYxq)7zLSr!>Jr~?6 zwB)l8Up(T}k7Rf-(T)PwGV&1y3+)kuh2A}6uuv8mEVO9`@c*J!(9SVfXnPD6+7yF@ zQDI|L*ccVI;0B`|-~za&BN=QQv&rE6!6#~>R|HxL&S$}iLe_C1fmsKqG-6ptpn%R{{zI(u7;IdA{-;>S=K{<+OFZiwM2!AF zV4cHcfUp0_QRkpj0335c0?(Gfy2zo{e`eGut`_`-i@GI%InU}$+P2Kd6u*$kefq3!T$3v?wop#@)n zZpT($9yZ`SY`}Tgfb*~c=V1fR!v>s>4LBcLR{3b(8GN+c3_eDMk5S>HRERRc$Kivg zAy>Ri@R6qfGt*FLFST1qV4T7EH82F)^)PXyL8b*X*#a^_k$5vK<_tBhC@dwywCto! 
zgvLO)0`eHfOmhrmra6i-5niS_E_pD%mg4cnY#b4QBawN$0L$*+6gU@HX4o*J7LY50 zjmZVNGP*K2Siew>>B>N7u`2@|IM-Q1{WpXk8X<-0@i6xuEVOp@Eioo9FT9_jN0|*UaWkR%qxj7g45Y7zR zIq)eS8gL-?LuiNt!vkj>{Sb?T#7KakJPA$lSwPKFe2R-G2O@L`)9@_72H1YSZ!j9Q z;v5=+m4J~3hCDhT%4SXs4`8ih(T@}CfS6A7XQ1N2%!}okbF{bJOBt$4#i5&pJUdV*O zZ7T$0jDPYNa@mlKpu0~=gy9K^?O8j3DTtFIx6I^PLYQ`LUa&ekFE~^!^P!gM=Edm1 zXcSEXw@hqfF3J-RqD-gWgmOZdY`6!2iLpp2FF5l8wMWGwJXPqX=6DKuL04lkhg>!^ zYN#us6Y?UI$M$JJrNEgM6f9Xnl8U-LI^HtoEa`xxa;aK|z(h$&YPnrrgxoUQy)=+U zV_L#`1^Iv^%0hX;t#4#vr{fgSVF2C^*2fE2!*_$+=|Ex2U1+9 zyGIgXSifLqSqDbVl4{goN$v1l6u8e&Y%znM3R8RpBw=bnR0b$nNvu=ML5zPotGgye&LUql={9crL4WBTQFohH>N>*YAMCBCY zUj$RwK13ZiftM9extAN4EX^ik6W*rF)!qU}JEf}Yh#YDXZ0*l7>m0y+oClvx%KLKY(4Zjubb z2noEBU=ffwuc)RK3u4KFZnN;OHB7bNV?kqvvLFdd2jpA?%rAmaq9}ol7}{whEhc2H z@XTS(=$2HNGvIzey@7O)QtiJuka>bqlEhiNKCuw0N2Ud8&{F9U!7fe0y2FOH3>Ju4 zAgKc+&V{{@C`?qlBo?&bSf#!Me~Ql&Du(6n29-7g8_iF|nkU+lP}dHLwF{k>P~5B{n;1r&Al2aI6=3HnJqKEaJXc?V; z9hfQeG3C%=%Av)SLyIYg z7E=paOf6_JwV=h+f>ur~Xfd^*#ngfpQwv&5Eod>dpv9C+iz$~DQ!Xv0NV*NlGm1-# zDVG*gE-j{9T1>gLm~v?`wWP(=k`_}-T1+izF}0+{)RGocOIl1VX)(2=#8hxZhz9;p z;sMp5!~?29i3e1J5)Y^bB_2=>N<5$%lz2cj=<%SGF^C2du_QXB;41W}A~7>`ROoSK z(gTYG2GBJ`4=s}(TP8iYOnP*YQ#YFKBJK;iD)i{G=+Q+sYjh3KqsyX4mqm{*;%A`i z3q88j1}7xDYxMsZCD^U25ACu0oG4we<;8 zp+}e6{)DN}qf2dp!c^$drM5vKD%3_OT!mhCsm)NB3cc)78=^23dfBBmMPVxRvP*4@ z!c^#Gm)abKs8HLZa20ylrM5_6D)h2TZIi-O=w+AMDut=g%PzHD3Q?gpOyMf@vP*56 z!c^#Gm)bamsnE+VwRs9tp_knb7^s%?vdg2l8F=*Q^5|ukM~^O#UUqr(=40CF(KBVhJ;vy8WCa*YCwoJ=<%QyHX;o%=08-0TG*f} z^sqANaTR0!Lk-a*%cO@^jQI~WL=UbQ^PfmXjQI~$p+^_Z!71XM6NGW1f-lDWhZ>?6 zTNXXKV$6T2A$rLbWBx-_=w(-o`A?)G#{7q>(95nE^B?sSB&`&QK3gyjQI~yp+{GY`A?)G#{7q>(4#BH z{D-K}>s>MCKSYIIGl((&Au9BmL5%qiQK8oiV$6Rc6*1;NRE1tMh%x^mD)h1|#{7q< z(95nE^B$Vncj-+2zxt%cqxJ zK0Ug8dfDYuqAR$fA%0r~)u7}DszJ#PRD+Tms0Jl7Pz`!sD5oJrgHleQ8kAxJ)u7%M z!Br^51g=64EA_Srra})a^{EI|y)A;N(916Mwg{p^y)A;P(916Mwg{#|FT2#+BA5!j>{4%wU@G*o zOT8_Es8DZ<;41X8OT8_EsnE+Vbx;mWg@5k!SLHV3Xk 
zFT2#yIWQG^*`{7?7#kL%La%ql*su^4df63Y!$MT(Wmk+1OQa&khJ~uo%dQw37NSBgyJBouhzh;z zim_oKD)h1|#)gHc(95nE8Skg}15~l77o?i*IgsI1wSVN3$2vwn;MNk#$afYf;&myP_^*BRSsAmyW zg?gN!D%7(GszML07~7CYMT~6-RiQ^$jBN-}p+{GYZ3t1JM^}t(2vMO&SBz~4QK6Sz zF}5L*iWu7vszNWjV*HyB6?)kfV;e$L=w(-oZ3t1Jmt8ToA(4t0+YqWkFS}xFLx>8! z?254sAu9B;E5Df8i5=V(%UNt8*LNXV!ISs_4p$Ss?5p`<8iR((k= zi{5v~GVqh>@-3ko{AfmgNpxY3usr_M_uCeJvO$Ylp)oJW{H+W8J?$tcQ&c=k3p&l;7M-T+!9iX=tDpM3oTQv z@K8#7R_9(M0?NFD#s$>^59KDuNJ-)uPbY^xR);GlEHM}hdl(74D~e7}61d=BAtQ$N z@fgyuc3Az2nx&va#96?iPm_*67{J1^g=1XenbOE#U2Af`D^D(pah}Gf*kCK45$TU8qz+5)V`<@-ax`S zOqsSlVp_mFA+#hlEkqTa zb-1je%L0`RAjpI&303G(0T%k+ki;IKuN6SDiaj1Af~o~%PNTvtkuZ{qkcGX%g=-pY z!Nx`!7<|}UNThba8&mLfr`!AErUrn%U5(=0kSGI1LiEI7ACSZWBp2YRDFS_Tghc5> z=2z!l7h(w#vQjO5Sk_x%7#pc&@O&VoooQbeY8N}f>lcWM!jhL#iJia~3B(TT357a= zyilypz0t!G7-Y1(Q!as!B11_@8dZV35T5A4x+|ic!OnzePSZcWBUpGKR55F}T|o;M z9?bWO`hvuv3ijSm4-m^&5^oO3d&OX4371=3tiS`|s5MFw9Iz}wlMQ_*L{bCLlwe`6 z@K7&6kjPSPk)h{ZFjA6QM)#2{!L46ZUMY7fK&wEIA+uHzT@pvQMUK)()fE{ru;vlz zckYoNmSERFE9g`^Z|JEV7JBC2an1zqThq2owE>1wm9uCc`Ds@#1a`6hQrZfF^sy4u z3yVeEudvVuusgxK(4o>k4+Vx2B&VX*Cs3`>3_&@P)G{uo1UbUrAi`h+%O3W6x2Rk} z`zB83wIR~91vfK8)zxGchbl&%{8A6p3&mH_c%}I-K^Q7(Jvnn`V;%!iz zcL}n{fgToOq1zxyJ%EP|d;6pV6Oro4WOL|Q4;D&GQqzJF6`ThmOA@wi6B$u98SIU1&I`; z@eN*;CmP4JPcb=gFl{a52A?eq7B+_9C;D$B7JxAcsc6~6;2;Y z`K8V$22-I7AA_q<8TNe0;6_iSB&h%r-uU4TqGHgY60v$Y&?7c1v_F#Q3zQe(oh24_ zR3;)Ruzg1Rnv#GrB!iUh01oWFqYT6hD0B1 z>tL0Ke4s<3Oi+3llA@HMrKqDXJ+UBBre2C*L$`CU5wWn>aynwRP`OL^kS3=M%!G#n zd!VNyO;aj0==_29rJ{DbhQKfjKag*q!-TC1?nsejF2Y_a+P9cQ=P~vM6N5$hQW4ld zLbBMocwLsj=SsiwQ|$Sf=Fm2PHk6lwHEIIZ_2n^;9!6ZLNr9WBbtLazcB_ydkzf-`hgBnMuz}}i-kXzh#R&H7G@WMpZJkD z{(2q@RZGBghrgkPOA+{+(l%vVLURy(uC+rw6E2^)ek1eDhN3oDv=8~VFZ{w7LKfxg zvB2d48ABa?uNJ(IEj$#%ydS`+MCqzy!|O0@Ffr`nnd=qe?Hi5xcCaw$4hTGQYl93S z7m7q>(FTIID@Z~NI17(VqEe9f7)nr(Tqbl@M;i|=7(^^A3KLw)NN^UHNt|r#&c(R0 zCB?YH7Ko_J48$zMP19|Pgs=*Y53IwKCjUxG6k3? 
zjwnW}0~4N&1wf*WV*M4_LvSr%%E&yHM41{^)YKPRo`mHb-^56Sl{m00+GuoqM^6Yt z#|4Xlvr7Vx#f5@3*qw{gWn(d;9T_(YV~GhZer(!sNoet7qd~W>9ch}O|8* zml@V;QS}R*{jxh3*U82LDlzqru_*!_w3op?AgTZDrhw4y#yUmOb65`PcI>1<5z?m_ z6jARo@MIHrzic!JQxa38riJD?8;$f7Wesu^v1#MbC_$q%+_|tv5S<#5dfrZ1*k^2k z1ak$y%zz64m^)}45dnZ=-Pp7tkT6zQOlt=;A#w|y)HF~R&@go_^pq_r^b|H0MLp=i zsRlRQQLBbcV3>_|n0oQU_aqV)KOU?rX``7bMj9I|Fw)d9s61$mW6|DtgGQExT_8k9 zmW63(&<^T*ArGTDWIkaZ?2ya~7jUd|qVtSNT%}pGHzC_GB5VmUK1h8A`xK(_!|Ldb zRcNz`?FblHwsiN0XmS;9qwg#|!@phL`}z()?qFuLTDv1CG*zF8=t z4z)$_})o(~b*C;)J6^v!shEY$o`RaIDLw9UxNE!eL@l4ig(}0wMZz!$hKw`&@ue z7wZ~Xqyc@R7_e!>qhb1lXkN+CQSCP%cM}^WB}q&K8+g#=cE=jlU&^IUa8@9GsRK^lQkv8NT=7|8UWS*!tSQ2 zXyK*3y&%x9gxdpbY|b*UDFD0?SkyQ8iR~rWl*he5MZhi7!P(w2!IZ}aS9E5=%NCs} zk6r3z6Yj6DDX$2CE)%!(*asv@z<4CGlufvt!lt~O0`xTTv>y9_B)aWT2rXm0_aach zZZ6teQ}Bc(SgH5|C*X$E4ikiD5iJGU>&`d_;95Oq0igRP^mhifK>=cf-vwo37c;@% zxHw_!9;pYwkw7%sFv1 zC^9>GNr{QA{-|!j`4Oc|wA`@|Na`~r2f$C;K1x`p$o4zHgt*VbJ|Kzy=un>p!M-6; z%4XxeLi`y+d_&N<3J^soYzcQs;M|coGAFrD+izNUFmZLn#FlLa6IVw}Tpcm-Re*`b z_khtxoonDHI-9^xbSYpku_P4WCnk4H%ze%Px)|vr_=%Tet`Y_lcQ}D871+Py^2)@` zPH^)ZE5}Sl&<lz%u@CZ znTv=#xVZ+#8TrkleQ5vh=;D7x<-g;Asi4l`KoD)gA;y12yK@l!ck2DWi5FpNAglg= zwLW4P{l7WX|9|0&Ad&({;(xXEe?{_NS@{3AM4lj`0^cRbPKRxqNI3w(wg}yPf>fM> zpZGQiPjHQAd;Z_*>VN6}PfZ={z5qy>ifw}hDV>F>cd_j+o_ZHVB4Fdq!eWj@p=B?1n?V@ zpgo+f;3qysgkfP=e*~8r@Bn`h7>GeF2;LS3*<$qYAX^YHh}Xk{AOVg7eDMBbcTfmJ zfz-!`0&lwAc_(o0VI+Vn6@?T(xB|XnZ1#)#knt{H&Kg2f2tA~!_S2JzUJ<~dG$q0p zacD8!5)HAr^CxVhku^u?13$ zd8LTOTu_XQCb1Y{2fxKcC=ueEVHlA66oNYsP%+?A zpydff!0}Z3Vn-}S*uix-u^8z$zGM=Mk$z*D8i`GU(>hQr!aRrw0>vT-f`|riR3us( z41++$6Pg64%Ai=JHaKqu#bi~0oP~m7vYKInoQUl{bs}xRj^IduSPa+^oDdL;0kDFD z0%9?ut3-Rk2K^QsBM@q{LB9p33B+R1Z^7XLu^8zIe)2>t2Ea1$RgqW>fMsF_s>IP@ zqa7rx872qqpa`%aLk{SPXl=BEnAt^W5+t4mwMAwF2LV<@;eqoZq*!Eh&{axA;eioA zYGVs6p-FJ%5EP3@706}+j$K50!bLiWpUM!3fPhX=EW$h&K@hhHiM5fD$81X?u;39O zP%P3Dbd?gFQpj>7I0quo22r!1Sft+|qXj4?YkMZR1`Ucu`i-nROp!%^2@=kOVv!Ca z>yF^$iO>^d-2qa8+FrzBq=O<$23X>eVv!Dl>S*CXb~h$ngS-$r>3s5C_F%V0j>QF(@WC 
z$pdkAK!u4Eqmx9$hTx&I0ZYI|U>wk=cO1e?u$=V<~tBhgb~yEqLvOP>kS>Ki5KR z5BL zsj`8|PtxC@7-^EczyLH5cE}n66eH}A7Z^~CAV^jm03?(MX>x;NlnA-s01iNeyug6k z2s>mQ3o0W-$Z7@@qeRFEg0vw>ZE{bLOtGMt+!G{4tsuE90dM5a6euQ(CXyKh6q6PK zkdg!$9XzRsP^ysBsGyjnHPM`{pqM;ek-R3Ln9L56NCgy&umdu;fMPN`Naz_TCJO`y zne%wYPy&}oY$_-wbBSau1jS@7k?fkFn6##Wy&{4)c|ixoh=#y}SA>`#X%|7UNWVc? z7TCU#B^F8Y28u;`f{rD5_BZY-W+t$@UR&|PtZXlt92ka2vlU(2Pj4`A!~ay z#S3n(U!C6aOm6q6A|Zb5=#vcz(c%^rRMl)xnyHK0Tm7$kcJs7+=EHADo7H-wmQ z5xWY{!$G8tnEZGO6k;(l#Da^=1R}@~3vvVyivgE{Q~`uy9$GAd31Z&jch`wcA_gmV zkDss=2R>gg0RJCmT8G{k6JBq%0nA5efGNZ#0hVw4C;V$lm+V4F|sAaEWaqeI@opf@Q%F-fXG zF~TKzfdRz`-guB)JFuV_;gW2yg2s?xATKcJO%+f~Qh4YE2vAHKc~FedCa*ha7%nI# zLxrkdJaSh!44^TDdGfjgiV-f!Rxc<<1Og9D6mp4NQUJvw{RR>KpqMnd!6pT1lDxp6 z{$)^%t|X9?8kv%~kK8G6F7Q3G@l3WtPc{~D2|3ayM@1~~K^^I?p`jo`(+TW%(2)nx z%R^Su^=NYyEUQS*J_lZ;f*%gx~ zAG&60c>3uDBVSxBJooXm?oHpg#1MO5d;7*^ZZ=b6Wt=ax>`41O{G!?NUd)P}X$?9qhmBRj!$J(U#WywW9b_WU}_`KOYwtT$8Q1By+Fo@_CQsm#{&T zXWVbaZk0JVs}i+mU+?$wxm>~}>A8)+o%}PG{f+tErMsu^@0nGLe##sv`qRrsSxU|< zFgx~p!^^fPHHOoQhSpV1+9pSn!+Y7xt7uuZ=DharuZg!zY#vD6)Kyri$UfC@bF|^^ zIadO;E;15c-jtK>-9JlubKkPae&O>y>t<|to1`I=s$cIc`ye#QGpcrw|IW$v9y=E% zEeV}>vZc8E-SD96b~lR@%9gvG-}@xgbJ)dKb@m0^;iY9Wq>QsQM!%mQ(PwVz8#TW2 zW}Bm0vv0|-sx(&N&e$wBu6v%Uet}cX<2l{K<-*tec3i~D*rl(rnxW|H)1y}HXs=qC z?iG`zYHeyYA{btMSM-qUXKSl?Bw@d@9kWL!Pj-P$Vk1u_*}Ut>{2nvojfT8=>^N!D zy`BxX4=@wUcjg?@|FdsoTELgtDHFd({z+OEk{cW=clE5wU&g!5k$0??TI)A2S-tMo z=;{U43&Q*VtulOJxNPuIwtnFDqI;|A{rmhH60@(BDL>%`H}S2nRt~FRsE1ib$c4a* zg~8p9Wqug8_;g!}+LOkf1qKhoAKZU%|Jjt_6Bq6DR%Kd^Qry16Tr25!+Nk7ooz!&g zgf&t6m+A)ACl9WEJ-@Qi?6XH$#J*UAX=mN_RP}HC@Yr2G{Gz_c&x+iNp{rS+_Ij_M zb8?AsslluTuT!V2%gK&)WBeVtWn6^Mm1f1V^?e@o4<3`KVVfA%_r$_qV@?fP8`g(E zdhGhKv5V8sbqkttWA6Sn&kGt>`=6b*d*kI>esv`qvo<#UQT;vhpKe*s<5M48n|ZFG z;-8|6N}Tb97v*{8+)6F=(c>>w5G~V;$sT=uO8t22+Zr2#3Zd@~c^};NT z6h^|lZ9Y0DTIa~~J-bhieb}RDtdZ8|H>&zAT ziU-wFy!&Z|Rjki{`YvTa0^3}_>1WehFSWPcOfBDi*AtqqXq?|tn;Jd+=SyDE?eWHW 
z^Jm609^95Gx;-d0-YC{6VO>DJW|oFiOY*xK*P?1&d8SH=xAf_ow_fD>8l2&@CKA~it;&EJ`+P1{3;CEiOXEc)vb8c%N(rhx< zPn`Im##-7-eyG|m!=kgr9OJVkoKw2b<{SzlJMvZQ+2@ma-B`RTP?Eym|HrkLkq*c?|&ZJG*f)}TE>%Go|ZeAL< zi|vltTUo8sy)m%zM_BMp`?x1?fm;? zyOUJv3<7d=Ta}u0T9y1Fcq-Z@E}Qf0n|ph;i=5D9h9N6QV5t zsqt%-K5f^hO=@JpyjJOHue6(GPvx03FP)j>QKyh4eW~O}_<)c@Uup(dIlnBp-!f=v zx8SU}>uaY5qxmv=mP@rS9gMr4SXpfQGN*Ty-OHR$Efz}^ZscV* z_pMs_tFAJjpshK$YMnw|WtK{}l1q(7zvHj3x%z0_%TWr;oL=6tnhw65X&l}<_>aQJ zx?Ne?H_}31TaOt1GOsN*;rdF227}fvdU?6cU4M7^_`XkFUYFKk%L=RA=N~>(6H_sI zcEPau2hUXhs2D#xSHD5KwYT1=_1EJ^_P3v%zwhjt>+9Dx4q2-CvC*wnS#^Z-?3^xY zmajvbHRq4gUMipTN~x9gXG3ta@BG`z&Az|oT}o57+$wDzG(RW0wTE6lyLr%W&Ea)U z73=-qn~sQoS7$onLG{bC$)87QN0(?f%6T3(^X?P?1@zieY7W5FSPVsM$a!L?;2&+n!UDa9T8oc|}HvIc5qt=-Ee9Nfe*0o{!*N;>-s$V};*~q+}R@tbe zR{E(TDYrRr$@&4{Tl^vL?LoD4RB2k+=3FzI_mf+;c5l^CQy;Z2&u#Q!m)AjRH~RHh z+T&%99U-|fd802?FZMoK*&v%y9LyVks#)oGTt!_^-l!?9a+`V#Un*0OkQ{wrSHK08 zKsCegN2XGy7hTRj9lv78_i-;6juSP8JT>~~%T2kS`)?MRru$hRmAd$1mqzk@*MZ67 zG&CQ1vv!Q%YP{p-*4|J2tk+y$qP&z{wBg{ISHBUHu>(-l$*0y@Yi|hm_St@~VYl&QpR4u(*_Ja-`JJ@C z=-z8-+LL*iLkH*Id%w8qLsIdji1@{IZ09M{p7z>XnA0^#z6w+g*4i~S z?)$NfxV$+lEeoXInr7Q~Dcx=IQ8yy~x@`2AM|T@W^tL>;F?iVG^%F8raFb8mJ2y4@ z#jdI z)N^fawYkTAwXFU9H^1}#ykPfrnfxwk?4S{*smj{BHblqj=>O5Rek=8LQD{WKiMQrK z&dGN}?k$+N(=TJj?mH{{w#n$3$Bfu={iyH8g7=lxURR=T7B+lcx}-=+4t(NDy=hwh zksagJChvV4arxAVsd3tBaw^%NYaZjxkB5cFv(G2=^t)2Fxv-)2=Vh;1HeO#Q>@0X6 zKEo>H{0z2smX%)8eXqy?+Rm%jJ0A&MTe)X}n)AzrGHti6>9yKtTV7^0TkXDXVE0mX zBu8UyTT%9=Qp?XncOC+m5c5W-ms# z?0vZR-6i+zGU-3pu8l~mtRCAhV&ZA-RhNB2Gj4qt4Yw*RHmHU6&F!sL*_^Ft)_Jl+Cu;!=FD(5sYy5AY5;I_M)?a`o- zUCgcTt$&(Z!S0`J`Q}^Ur<60_j3z!AG1aWD{LH?>k3(-twPeWKvOcIM?i;Uo)M0{) zPk_sz6KU%21GoL)I4xR^vP0TIe6@^_~nfp!m^xH7awZXbCimDaW z)<$M+&DGTPEUad!ov98Ryf&GqTW}yQ=T1$DZTH^F7if?Q#*fu-EcZbMGA_3732aQN<8)hd?O3P(S%hA%0tZyr9 zPn~`~rT5*(m*r1cTYWZq92FxV3pl zoz)!m?XIfj`q8Wb|o!LL~=Y{5KJ_ z&Uu-aW_ua`Srigd-&7#q)~IIdvSoXpFBd-;M$9_uzW4a)jl;noKdOSCeEHnHc~kTZ zmq*W+|Lk9&eRb)^JI}(uc_f)d@1IptexchC@4gpj{JB;kfBw#%PougV$c~*jKQF8< 
z*RABBM!#KW)SG?2O#fywSL688A$5)5`*aTDpZ;4mmM1J)zb(|=@WWNt!_5x|{_zXZ z^RM_iTrtNtx_T&PFj6Ss7Y zZy!$&YPBdFxb*404O#w0&F}L4P7iFIV6<*ojpM=DH@4OpO>=BbGy0q1ntExg>dbL6 zx!HfbJVP_ftsZ~eu;|IqGe=5JPKnJ=(0=oR?RZD7M9OUNmUWEjHiK7e-crlq-Bs)n z7pFQPw|d3K4L1Yl%Ju&9^M`IPX(cX8HEoMms%^aO>M>(hb@x5wkoZ7R-6$hxTv48k z!vA6Jox>yP8nxeG;)%_PjgD>GnmC!*b~3STO>En?tqCT!Iq~UvUOnHp_xa8~=daV< z*Se}!t*TnJRNd?D-(8BKFbLWr6NEL$%yJ|qEU)$NG>p5BhC_;d@p`zmhLE*}=(R$m zQXSnA5mJy`knm1wh>jv5oFPZiN*k;EXIMxs7#N)XqH?cMDncYl(D72q^Ks$79qRN) zE{MP^LfHuU^n1U*KuO3=7JjW@Q)Nt3Rg70f5~Ndv4M&^R+e5P)L=?H^^8)Ej(@Ml0 zMbF|C<&48)6nI{JEAnbrA8%|g#PcK-kg^P3_*_O+imfo;O|GG=fm(9C>}I*nI)x)0 zr9`rnW1{4Q!?+G_mdf_?Di)GO2H#rbJ~$C8(@^CYKG=9JvIstKJsyzPMzo$toN6M$ zUie9ddEI&5l%-ID0Jn>;1|qP>%&het6GrFKloYp!4SW2BERuSurBV+JUTpCj2@*kN zwl;=+joED8k5fKt@BW;44cK|n1T`M<&X<~bi*}H4;za@S$W7sC(=%n(HKL9 zzeyI1NGx&Z`RGcAN9t?5dSG=EU`)-N^h0PixQ$X(WQ|eK9L>M;Cy7SfiAH!97}0H7 z{*L+?T*{vpE{aPlbDB^TX#<#0#`%0(fGy_Z<`i%}kqBwDQh*(dL(mB-w9x<$1|F`! zZaoxP#N6=>ExrI!k_k;xDTp+TP;xAev`_*CU6PncQiK#HEIX@U)agd!CSy;1RCtsj z0V*Mz?3Zj;mKXDrl!NKb(_Z~3{-|mET7o=TGg*6l4A~b+8;L6UD!D4DCNf*ODw!$? 
z-3;yI4suT5pLRIuk0suV(>ZieK3=q(bohFh$XWyTnXy-zB}VRM-tG=|0p4mJfVb#f zk~cF*m+=@+Fus6xv1qXA)!pi?`|=j~k>qXUL^D64T7hOs3~ z(BIg}M&$3#?)cRo8MD5spDX&)tvvC$JYO_l2qw-uyWq;}Gq0QJTfnWoYd*YJqc&Q; zyo~IMa2TIKK6IhT?@DTiz@$cx-f-V=w}8sFz{`Ft^xyEcFqrIrq8~PHL8)v(SvDT7 z+T{ZKvU?eC$6eKbt`YQNkLeD9T8kz5?gG$C1h{MoX;1uun&@YCLmIFBj5&&L2Lrx^ zBG^6PoO&zY`SHQfY>06oqV8e&Z0Tdug{BVotDhdkXA8H65O}PWCE&OIW?yH{SrG@V)~@3#gpodQ-{cx+Lw}-9v>+|iGaShQ6tLp zihJ{Gz{|_YQTP6T-XFPjU|RN27dgxGA&d|KNq9VwwOAQ8@mh8?Rm1ljDb*d_{nayp zye*jUsUhM=@2DlOIFe_*PAnidngXwxrwPnO6h*R}FBL;!@TfyE+>TZoN6b5o`Eu2q z*59lKt_&81^Hr13<8JDdyhpYK!KK5Y0$)P>MT6d8!JgdTwX@lWcU{coLUbB6=}Z!c zrSk+7q+Wx&t4iO6NUEy_75a_(zRoC2b1h&oZXpErRq3+i`}{QUkq zG%)vwB0tkObnxdg5%|TBK+R@sID9DwPcEzZ=kz%=Sj%cCk73lXIwg|sDk?NsETLK= zv>5gQLt)wa&{!pmkjKFa71$-)3rLI-Q_GOoSOaTjJ>EMcE7tAe zub~Jh37EmB3Ad3QZQsW2vk1n_SkSECnqQ1wM~uigK>0g{7UknZJ{mRM&Q9F9_p0(Cs!&>eKB#aRq28i@CZ<0gT|!br?2DC z&cANBLI-PIn!7Vqent&eGEHTY`E-bVtp;B1k1N&wQ>~a~!fJN$8^@}LO#)F9`Ir>D z0Buz^_avoRDof`mQpD-1-uOBp^CK2Q>b3v9V}SKGqm`9S1QABaFd#pBCOeb6J*XAN z943Z}hLzS?mOZ!)EV#Yuc*vu_5clB-&n@Wj5xw6l-gC$fy?z_IrGI##9N? 
zSL4Forf0ji^<+ll!h6hU6Y{XT^eE5W-=yqvUaFQe8hp2Xk(Z5C7uO#O873vaLcnqr zCEsA9Fi>AR<5!te|Z=P<#zL-vaM{ zFV^8KL%fzGnmT1;L_%kT5~eV8VQ4=CK>tOV5)xn?nOC|pGTS(#oLe_nRW}z*C3|lK zrtKkIV#)QE!*2&rRQm}iZO-_OTg1{RiU7&PWs!j~ z%QR`R=%(ZkFjK=MmP=S%8qu#W;>frj@3BV#YY__TOPro`P z;Yv$b2!A6@H_2IH60l%4f7$sQKgV<>925KO{hY;~1#)5Nnlxha@Y6%tNoq=@6MHO- zM{-k7RO7;2qzvJ5CHo0l8hGq+3U=eFfK=?2P&W~IEFn1D;}h)bAN^v81AP@FX3Oct>AtHeu?ezTjtX!hlPMkhH+*+t}o&g+ZfL zZP-92EL4=>fI*{v`|kn3KCiHh#v&oz={MSeT!A=cJjCWtXXJ(@@#)#6_~;^;k@k!g z;uKqo@N_9mV5m|4rtxFFKcyNLYq?Tx0$)?jVjv`NOe$az(tNw#_^Yy-Gzr+M=D-iu zA7KtVZq*o`c5;h0{SBy9&ojnwLL~-YFCp?RBNsg-jH{*>z`YrUvsie#OtFBS)hWtU zN_Msi*$?#@_*LLqM!i3=$P!Gw?p}m2pFMC?Hi3O$R93l|-5Lqt5drzhHhrl``AjHHpdeIr)}P_jj+c@;deKq`kB@`k9bExPcDte#?_0 zm3|#C?>3N1ETBCA?#U>ud|0Twm%E{d zjuF;esz}r97a|!5`K+4|=Y3L4rb8j--l!ds6LDF(sHg~S6RO&YrD;&}`dBeBaD4pa zejL}tT~JTBh5QWBs&nxjn!;jNP!X-de$-+jmPe$ARn$-fP2}a-QdWc+Le`&3^wGAC zCaNg3tHyo09PV+zuw*gra#^>evVzc1;CNl?)J4$^^AsfmF7MY1 z4i6EfXqkwV`e+A}ZolLH0>~G_Jibi^@_4LJBusWG=rSo1$kjwKTFjlQ4tt?+pC(CF z9L>(vA{JQWG!}6uo#|htr!!sCzce)k(=#FlYeE**yT6TgIU#WNbWyZ@_YouC=3G%g zausAW=`qsGMy58Tn2gIl7p&CPj9UviM#S=ro6YymA0IzG85ij>`VfMY@`*?*fh;=smnun|AqH9Qs2kjD@W1Q#Oc6# z@~}atRe!Gq4kawEo9A|YaXdMTcgG*@;Mz7#xNZ#p6%IRpCx|$1EwSZ50nM~ZlZks0 z9|q2zGgEFbmr`@T#b}l?;rnlr3ylf%r~}Il)ioNLP?{6?_P#2lc}$pAygUxFB#)Yx zj@!1SX7PFOkz#muZTqlw`-}U&N&aaKRj9r${21m2hR`jBTU)8q$JNmETqv=e!o z(QGabKGWv~$8>QOnUb;gO*3q#hq-!kTAM63=jWQ6;8!PUfCd2-Y*0AyelOuXz>{Go znOolt~Ag|O6Z33e_YUK=}n6flaL`8@tt_FG3wmu<@RP>R*562?hxzm@V773}UMLOD?EQg>tIg*)32WLs;amPf-lXcz;#2unN24V4HaRC9{*R7>1KjpD zJRO~%QPEZOt>qC%laskMSSbCZC&QGHp<#@icd;F+L^CZ|{s7z+>i)i1S@}R*+WD+Eu(#{LVAkQnyQ@mT7F>CA{=Q^=6 zyYt}nHJd|!HL1@yw?r|4X@V_^{fb+=>cBc!#bOa&#$vHNlCEQ(I9WDF>1idE zKjFVu+;$NyntpF4FTerJu$6R7Tb{NCITdgWNwPzthtDn3wv}w*w-hT6mM*{KPW2)3 z#%)cB^mq@B7Wb97Bo)o>a<`pinG@S72&>?>o|m7?n1`AZpX2_TTA>AcjnE2?F%gym zdy^ihOvt8)G(p~+Rw0=x`p_tvkpq}o#hk}MA1q>_9+#o=NUUa{dgjgWt}v;eMyoXj zdoOOQQm^g79+eStjg-t+SBs6M*@LewA>5qplvZeqsX*OK3~F#uf4o04#yymL{(A8p 
zUVUIVZIBhFo;YLq2z`LAz4B~4SAYGxJ@^oOvW_&g!856isyXR^O)^&S0Dv@@h%Q0- z4zQ~(H*1UCE%S?)_QZqPQcO`xYuqjBh-ZqKVATUJsY`b`tJ@~)>Y8gi`69?c{E^x~ zGFd);&oZhBdmP#xj@ZId@*83|uT=c$(kMPQk8$q09|%E|XY~YyG*7_*^8^X)9rBlK zsfQn>1BX3!OE>$dtA^_9^vzm{JqO6C)jbpYlcxAjG8X&^=>S2=aDu0LS##1i9x4xx zAC~GkybpW?!*@A!j)h0dGKf_b8`{V^zw%E#Dx_S8op|%u#and^Y3mM$oOEmtXj~jA z&v3YSDpDm<6F+}`>5$UNayk^jj~mUVhBc>wi`qO?SL_I4DNAz}^V zy~%lGHfnAqT$A7ZMnn68<3JBhi%V%eWi}Eog?p~>a7F1(VckMF ze?D_@eA_Kh9D_ZMEJC#b>9ACHiAQv4}`o4{~fF$_QqSkfM7aTQfvkC5$rfx3pyVtu~oFt*^ z#|726kE)3UimEqQXf$FFk2)8e?_S(d=<%8Gyrwh|p}AZpal7z;cs-}qRKLW%EX9H$ zJ+yO4?w^j-wa7Z5YQ`;-qAAMFl4PXXE8R(C9)>aD`BTRDD3aG%VWcL4B`E1D!H$cJ zR+AbQhsZ#?4J~QmanN>0 zdo8TV5uY5S=T~EkoBnl$htzZ+KpkQP{pp)If~EyUgi76jP!);n+}Vjb8NcH6341zfoL;ptf$)o;1t@Blo8J!a z-&=;oa28>cSjq&s%~jsm`ts(Ouiq!S9mN4hdjR$+f}Oos2Fz9}k5KHTQG&S`cmBAn zAKHT!sFj=qLC~!j<;?6OMev?E@MqB**Ldhra^#81qv+NG{8|NH%uOsUO@dFW%vf%# zho_l?++U~)Ih7^I5w6^3aG~lNOUb-v=Z~9t;tTc)ez`GuUtlO5#~E{GXTF1!&P>E7 zN{8=9lVD1`x7AZb@;=wD+uy}K2cKZi4dnYqvLd{NvA}>8&A;uyj>h1jZ6jV&g^TGE zr59Z7^&E6E8yHd7uEMSl&|r)}oWrahMrtkC%N1`}+sd~z=G6C5aafB|K!qoX)ZDs_ zs)NPn=?yrp4iFKUZo<}&`G3j)ATu+Hr#rvRA6>(sMv0|+Vwm!tI%20(RDeti32mR$ zpj_xFRia~BrcKZvF{PmmX6zcFN@>qh=`@^P@W)Y+K940owErN#7i+qiD$2(K{X)bq-S>oFj#^ig~YEI7_~gI0V6FXyo0Lu-uF8fFKQt zm4|nc;8Po0eL5SuX)2v8J3vh{Wl2tZc@lGy8)|Y>6{#8$K_~sii{cIb_~Smx5##;U z*M-OB7rZoyX}r%DmjtuLHc8L-n~|zd^@ea84Cb0No2Xy5RYHr<>_RZW9KO2)uDLqc zY(4vmIJfJvJFcop9flkcO?e*`yr3V~iTP1yra#>=gTB#BSh=L)Po$+aPI^bJEN6R0 zjg*hU{xC4f(inlpaTXnnd}1m}Y>+eG#+3IGy81~&r(|k+SjYYGdrg&{|L)3VhclI2 z)iQ`C#>Z`W(bvm$EjVk9St;UD@(lOQUE{;FILs`(Z|;C8!zeQki7KXn;^v05SkyvZ z!%I%jv#(Ugl9>F9)?4b640*-Xl}>PY*bgh2`)>icA%iMS4Q@PsJbh?lseRDOSem&^ ztR*-Zh;J)i9OlRi{FR2sdM?=`F2CL-Dt=sbzLNH;m(FuZvmP zyFQi-cGhcmH=Or|#xePih-`W8j{2h0NSj}zm6G`$B=6}St2+JmBNo5gbrTapS!?CY zR{fS}3b_LF*GxKhY2-C*{-1Grj?yE+SGKU^WJFF@Auy zF)2${+!8xwUuT6Pa}JZ+gVPkUhU%hMHNQ4JU(LHcOKST=o`+0GYtPDND6k&Dznr$7 
z?57l)3yEz34Ajjg`fwbbqBz?yu-(>YUqmjn>EG?;A0I`of9Rl3+0riwy+NtHlw5O2WAg=?@g#^d7Bn+bb4ktDD<4K;z`^*+bZq88CU3QJ4YDNQ{RSp8Dun_L< zmX7**<*(0dY4n+*J6cOlwn>OidM$L`e4idZuw&u&nt#h@PKAf$PNai|%-f_<{ZzFl zujFu-{c1jaV*>m%w>L!g*HwnfHSk`ETQp}o+3do!=ZQ;+M^V$2SHioxj48>wKOGmR zq(pN$M0!^$<&ew=Wg?>cXrq0hh89UU`=QBv##Wq{D(dJdM0+*4P%&dN2fL3(re{gE zL~8{6s9E~x2G^Pltv-CVpsnT(C@z!Vc>G*XqD{F3$Ve-YfJIHes&0yz?&!V%i ztCShhzOU|lR{YggzVwtYtG7?J>ot1nAl~wIs*&@9NY)qSna2;}gT2>bMv6ndflRBB zr`gnmxW#e*DOE%#7paI}I?ak$;zKr15D6!UYnwmQCJV?VeSDa_9CwO*2^UncLpmzY4=UmXaXX)=_BBtt9n$@q%iAbN{PJl{TE$nrj)z7Tkf$cH1K z_}$6-dhNB}Y3=-ECs=#Efs1iHH(d4cs2T8j3$)^&NjR65Bg?q_xUBPT?wNrIrUy7} z`=*^|*LXa9d+Ee@VPAcJc#P?|e}w2<^>J-~9U|e6KdGi~ujHc~5{AIbSukT1(ium!tc!X}w4>{UT~wjMhbme90h( zXzg3_;t5kd(cRaZH>lmF=OpEmxa;Tkqckv-kNB&Kp@FH5NP<&T?UBiIr3u|B^p8_` z2Mj4r<5qW0ci=C6co7mo;-4bK$zkVVOnNpKIeqAI6p)p%t7(BT`!)_iv?C4>sjNMQ z0OGc()aAz-SBeV_(wEJ^?11{<2z>dP$SCW^jA>gNf~jPk2NYxd){)H+<40qJ-V9~R zJ&f=(XU&$6bzB0s`j&^xF-FIe{(Ocf(f<2epNH`qn8ml}s8lq3X5XEyHi%~Vw&s;o z=d~>j^A%Jq-Cs^WTcmNN#+K!-l2xFf;Bn{pDSX}D?i5imWw3f!3g)K+Pxqi5?3`a& z;ylgeqP6VmH%b;Q94OfmB+cbhGr#jrO~s(K8!~gJA!kY1e%GWjMkFG`$N+wG>XjQW z;4Af1CCz&*sIuBV--uUJJ31r#9$|Xk|CGk{D*|>oiiiI#%D@SQD2ssGT_ufGLMrH1 z?Mta#cSGBYtz-H#GmOp4VV6hsfKSt_fY*v!P^5aO*Y2Lhad0vyoRHXBGiXe>0O8FsNeDOe?QLNa)d7bi4`V_W6{ZX4xrCT-QFE zGmh?TPTSOyx3ejC?7uwU{Bdx#|LDZ`o6=oG!o^n3cEhgR4g>llDTb&vyQmxQQHOvq zWk0r%x^+wn1C^;yD7c)o$3HOY5`Vs;$)p;hY3apU$_QasY2TC1r)cb}$wRv+EGxj7t;yr8KSY!6@9@%W=pd3{cK z-2>$0c-_XkCJh;ye*0w<7bi#EP4Rc+NyQ(%moUGbebH-_GMRPXno_T<)3m#YxBJ}Z zDO?6M&?bx$UD#hcM~yrjjgI(5gHCFK2JcwtnlA)Yap?`t_|8f@oiuqz!=Ue|&}r83g}&(I^|6+8V1$0=HNhm^u=&z%mFr8UcNh{%{tY^gyQ*R<^%T zi@)N4##i*5>}-Fcl%0&N)d*Rc>Hn~se?DlMP0gGL*#Q80Ae79^2*f;q=!UtG zpp7Yzj{yAR>_S4eZiHHNtn5tm?5xZ{Y>f^G%FqLC4uD=QKu(pD9`FY}WduG_kRC|) zaIgT4k^W>Fh)Ea&y_x>u3iEfe;t#v?UoQC{`~2s^fjj=i%D^%R88{mMIo^LQ0A*+3 z45S^fWP~L}{-pRrp8)v`;F^l9xTJ`zfgK@(49!cY@9kkA|8mM|G}W?f$KmMC7^|d7BdGYJ&>+rVbK9X z)<8BFSRcUhV*?_`Kr$HUnD@`LIzZltP>Y3ygC2<3{$)1s=X`)abp$LtHb!7QvjG0W 
zI{}0&9Kci@|M0(H2XNAJvH^fpAQ0!IXZ%w~!1RCK9GvX*oGhF`K$;zRQ~=AL3;_sP z*#PtaR*t_Q)IX_Mnf`Shf7UpeS?B>AK;~G75J(wmu>v^g**Q3YR%5Ij?DT&Ymgz2BB0@{wzb8>J1Gx4{rY^=aNfW8BN=wW)G`xWrg|FysZ z%p`yv0K}gE9mN4W!Jn1}EUAC*2+S}&Cy>GYTR#4Cd?40E4{YAR5@lgxp$Fo9K!gm) zqWvpx|2_zSkR6!Mf290Vh|KgH!0a>W09&t!`d>Bu?>_nm!24%|{XelkKoIWlMNvi9w2v-QQ6m_4I{8? z?(}>c`chXN$5!Y1I5txt-9l@cMJ(0ayXt-I$K`Qh4I^XjH#3+`*cy0_^j$gU_O)_3BINGLKU9AQC$zeBaz!|WBd+j>XZB`bU0^7yky7xU0$>-rX6_>9a?FIYH zyPgDomfu9#OiB!JXuzxL=xQ{is89(G-G5MgwtBnloKtbJo{--M@XEW<7Rn_AEr3!% zhcV?b$1jO+Du$v_=kY2EI1&FOfdEw^4J#2A!S`pL1*Ib9sz^l1vWs1G^L_N4lFm-q z%P);kk_b<{r-7gn6=fN=cz4MDk`z-wc1eXb0l-pncN!Sht^7(__jxmkf4+J*T@Nc-j>s!fQlVJsd-hXd1gBmbi%Bo8rzaHaRHWHjD^mW||zMD&~)u_e=H<>Gs$0tK3e2;g^ z51PYJ5`cU=qn!ePHe(Mz4Zx?^o8YA^?;N5H+bbZ83aE&P#^zNJXIK%>{IK1W)(Ynu zzkKoeb;-?s`7yl$TKws^e4wN?#$UHs1H5^g?TY>x_FBfqOuYY^7~hvz^<`8PsRH zaETww4IO-kgLWiH>|hG~YYLbZyq!kdG$5AXfUewJ`qVq;yRY9{K_ksHR$NXS;l0>< z4nZnPOAE<0-d`KIK`zulbl?J-n{LuqCPC&wkS$7ldeCjnW8hD2`&^&?P3ijxS$8o5uM^o9mKMS?)!S^+?9eg2BSpud%i zB%AW~4#Ft#A|lU_Bi+sNHkUe}JGiT#UMaB<Z9(eu3PvN2|1dFS~3Q_DKdXo-vA zI-<#wZR}WJ51~)xSRi`9^Zkc%_s5rlaK8_|pDkAl-F1r7A)?-&xklQV@$X@E&ml%) z-)0P+_t8Ijh|fQL=pd*kG-apdSIh+jsHnp7xh1*7AHbV^Xp%vybxgy3*LA(TyIfV> z?)nnDanHMo%HQ*bq-t5Bq}u+Z1U?O z+mZ39ZtJFePt33R$;#|?n6< zJ!k6FIAM~sNjc^OMnUgrWHsI^_RPCs`5_|fSESVswN5{;-8{~+cFFW4Juf>tu_Zai?zRy5 zSaRbQ6W4rG|D^gV+|ib)*IT{w(dRo@ZuE5ew83*p?VJ|Om~UD#epHLCi6eboa8T@a zM?0H8E`3*nyA>9Myq_AS>IR+eV*=+Qe2e4~u)DUlXvN!(sk(GiMOj9hlDAP$)}&N7 zkvOZkox(yx=Js0Oi66Of-hNxv4ydTn9?LjB-vjqE2Y2>wAXg!5I-=hf73u?b=Dp8z;#jYuS% zS$`8~tPn=57#Hz0h#TOlHJ%wY%Bl)@8a$wh}p+7d#V z43EK~J*w|SrZZD0JLp4MpvD%zDH8(6NzzV#H{iLK;{Ak{hrDQ}R3KQ0qkf2WJwKOH znln-NDMB%SZ%uFCc#LZ_p3F?FQi&JT+Dzfwyh3nUmuk7<>&ShlF1B8=#)1J+S(3#v zHRYY?f-Mc@O>A9Llo`IpAdUOTGux-xQ1ZaUW>-0%L&Z0lPBfmp*o)AEMQS@m8Hkk| zD;L}ZM)u(<_ES2O&L?x9%l(v=6b~B}hEA+5Jyv|YA%h@`AP@LRfI(ZoMP!WHFvS4> zA?^^_XJe0mcg|n2D#Cfa-%>5zCa(Miv-BkZW^)No8t_&RVC_Wwzf9USZJV|3Yt~*a zX*W#9Ck;$C@r(}!N%zzB)#$mawJkO?3oMR!S(D(I?+7yYe=K^c1qVpddRtsxkg~0x 
zKGeBYuMNk0`0ykR%I0E z`pRe-Ov01B5%b`7dWrq+@Y-$Fwo*K5OUWlbvcXIgiAnk!I_mVg66(jecJQnpW9o;K zUv-%Ix}S@Ud$ZLwTP8(C;a-_dCMFsu(|pDEH)2$h*wI^tAl1|sdE7jAH4gosi{=KX z0TqS)uTuN?W7}8+2`gvfGG2A$t*h0umco2R%f*95*F<_>#jJ!zFv|t~(7gqV*KR7o z4Qv|@4rH)WET(b!n^qhY((o97_7AFYeI7H9XH+_r|jJK{E_?k!XwL!M2)TJOFcJJ@cfmKhsQ z8P_{hCP^zDmk-a;Lje(yh6%|L6#u*Cn>Sv0VA<~OK z&@(Mj`IUpo8rMPY7{|jIC5HFoF6)&`*zb)(g_XDK1?RT7f!F1BomF-@Y2!`Mt@k({ zm^CX^X`LWx+qyL#>D`l|dWec=yY2X4@Z0R}NzLoMG>Xj&Z&T9y z%@uYJ>B*y8TgCky1!Lbqv2Qfx=GtE^#8H?GnYG_nKDT&>VHbU-Po5r;m?1A{^M>JS zR2y&i{<%;tC){RzLB~$>bezV!!S=13m7Ri$bMXf|kthkm;hhou1COxO*oe%&S;y-T z^EBW6%8;z7`RT7FNS2tYw#VwyvzusCR}}_BVeIahRnKVxY@Nm0Yu2YrxF=W8Uxfo^ zuAjrA=ERbo8v%*W$Ps~`bfc-b6eG~k_RvN0!WJ~c$k>M~meWEkmG1I3tIi4*(0KWa zA79_9r_%B*c&{4%VWVWojH6MmgNvB}d`Y{FlCY@#*yn8m}>DRsX+6fCg3{Dz5pS%HysNpewhbxKTU z*3MjPFMJni_&R=^R<;$YmcN*tHeMchZO*RURnoVA-uK(!$?IbOy#HpOJ9Yh=5FWi5 z%vTX*hCF29&QL`FM(sib$ABW@PMiR*gaEOQ7_pNRHrLTH(;Z-j3@cV9CAL7HY3>L` zDOL_Mu1p_?=_v0tSO_!DFZYfvSRcz9j-)ErmJyBo8Ka+Y20|O0S?4=U|1sF7$Q)aJ zH9HR4J9v?AbL?1tad+^vSfYnTjMdNN4?NW+E;18>)%rMFbli~L}smwA?d=H84jnZ`b zjNTH~B}CS7hi=j*t~c6EeoV=eDl5(i=XgYE(?{oorVHVnDy`QSL+Bm03OgkTkKa34 zQb^JQ;q3Iuao-dLELA*Y-`15Y(gMN~kd2U!0e2Mp`XCUZ5M#nc+D$P>#FCSQkcS*` zM0jAth(7=pctn_96NsQslspH@LbKv;>?GjBoDZ%MxZm(4qSpU`q(C3nCjU*tf9)8K zcF=bQQzO{r8LT-ki*MNBHIQ#O22I;9d-kve-gWM<1)smz`#0jN|HC#yMS-{3FheOW zzgTM9HEKnX%`2L#BD+s;yE((fz{WyU_AbHC2)!-cpAkvF(a#4Nt}UNG8?4J`8=(6L zXB)5*lj4g@gB4dCm$MDxPQxN_CafI7W34O|k!JRhh( z%0s-yTbd7XBVYgqpuV~Iu~*dR0wMlczj4odxU6#U-H@wip?))T$bH!&>k$01HSLzb zCxE$G@K{Tbapc_**?jE%3XyT_-Qn+g#QTb_b;R3Iw)LwfYO^2GD*xOKe$AX)ci6K5 z-#w(m$iK;eZ29m`dxY^qrol%_}g0}fuz1|$6w%d+gvbNiBnYpqn)mxyMg|Zx> zjtwv&c0bs*0rWHEc)buFsCqlZoniScBDOm;5HYzSY&$Lf6ao#%6xjJu9D`V$e%09W zwp$JUrm5kZJ(@*D%l6Ttv!@)>4R>5!2D5iu^fg!>@W1LbJm4`k@j8R0X@BVU8z)QP z&h$GqfOf*7%+EuLA6s=#pO&U;YHt5v1dhlzxZ~P|WK%nNeN@r@qwF$m?`^MPKon$r zvNmdT3Wt?xo=FJLhOLm|)Hw|1x|VVFCuP*vLC4RU{sUlgVa?Fb@i|9m0aa(Ty=j{Y znOXa2(qQpOXa(`#3&zlN=tW5($%qIg(gY=;5m2B=#T8;tx{Z!QHMk&Y)8tYEC2!F3 
z2(4)_g6I#bufqME)N7AW3wB}oWw%w+q$s*H?$Rb#HT_YN@G7?wrIOhKv;20>XqS+2 zl}Rxc2fvx-l8`q^(1uS=`Ljp?=Z*5onS-{)IIB;$a-vd-liVVhHE3FCI;ITsSI@?$ zs1r~I3`K*82MY_R;`7i_g!1vY)L`&6ar?^Kok|cib}a^fZsAoAKzXlf8f;=CdoF!J&E#Hj!xsO%LjFL8rQowFI#%c)g_?%ft*2w489!&|F zkX1<-Ch6A8458GLA|a%N3Q2MxA-E`dHc}o9*okXvw=7~Kk+yWAz`4whmq#j%T4Wc% zvDBP;d6%ZBBuh)8*LR|rQX%8`qu9o|N&A`oFk?A|vReO;fTt>xm>WT7%h+eCIC03~ z;vT1MRCgos23;JIWyf<2J3bgh^q%ox%xMMdW@n0mZXGobgytG1tFJsM#>{bF(CfI92A}ZR^OmvfNlNMBm-5g%GUFackEJal6o^Y{4Yx^K zp%pPI(Try$(?eDG(1H>^F)-)-`ij#jk3+|C8vb&&_pU0p=r9_g{kF;Cpl;|#oHBdH zEKbQCk)VQ+YilTJJv)X7uHGHq3>`m~m&73*CyhMnET2+N?Ka3YOTh*m-+!Lum@uAS zA)&{Zcz?{jud03ziB7{U?cdsAkwb#1OkIlug{e-h`V6kK?7Cq5WP`w(pw z!e}@fZFUeU(lY00Y!UOTogU$Oq9kYfkS39edpvEEFnL^0Q!ZC+ld|~|uf17#PWC*A)*vHHvA?(cKEfBeh- zCyLcyK68Jca{lj&;ono#|3A{L{^&hy0HEFj04jVyaSABG096}CR-n!YR7HSV$shfP z1*nk#wHO^jpzNi^$_(82M`+O@1Ug)5v9SZSE1&|#_}?igf3&9mfr0`w?fm;}xK5m` zMLz&3=#guXHbFFYZ$T1#2q_b%;|S5&5kwq^+y5&@UB}gVgt!f?j>HwCtqFmxo1zZ2 zCD;`re+dqF!;nSHF2Fs=u765{9Cb!4DCtPudl!qLOqw?a^ku9Ge4Gj7JuC}5Altxk zsUN;MvfDFia|MBIId79mSANRdD2DC3zB>Yr)=&3N4e_Sd&+pGci%wGCYP6K{kxq_B}Ngt!M0CbFLt zap4jadBvuA!cvK25)5%uBEuw{8m^RAQ z^lACz_92^{=q{nu!y0I9|RHLGjW(%|K0}nJ>9)jL?14jFP2`qf~V%P$PJDI<>o%kAxQ|MG<_G4 z0Kn(Le1eWZ0u>b(3?xLsLTAlM!yYuod*jT=(M3+ESFU&x2OXK)*}M~ip5W*0dQIS5(2u14APh|KD4N9I9cWJ{SGoPepl?a)EArMe@(jjQmDV~m zD-Xyl_{{VV-@%H)#`z(zUi9y~OTo_(48Be^mq6P`M1YK0s;2sMIY0fU5mgXK7M6*M zdR*3KumVVd>EMVI?XJq+JMbJ;@DBI)K5S<_S`O0MTF^N!`;GmmK0Wv~&>hGv9R=6x z31Xc|H-RK@J187XdR-w3Ln?uA4bR=Yv?%3eD{c_1_3r|LJ%aFbg#f=*V@!(vh?g8N zC}#gum@AUTAfz6@5Tzsc$Y0ZFU?rC)k)TDFa#P&baa2WiJe_AsfSq&ekCwxxbIi2x z?hx@x7M=}vh6F@ghD^83QTJlDbi<$6AS^m~ZU(IC;K&xWs-HmzP(Swd`e~=iFgUZq zQ91l}QzKuGg`+d}?wKa9Q-6C{5i^!t?r<K`tPwdZA~S@?Vn|ihW5Od=+DC``7MqBA-@Pzs>LWW0#8`6H=%43>a??0Ax(XkF~gE9l0&a z3;BhZJp7j{Kc!0nZe1Zq%ne zO<%El5IF+d?Lje&ihfu9roZU-vg-fL6ZpXy^*{sR^Ej>DfFoTCi95a3z;oTscl}ZU zmJN1AVA`WW+Y9S8D@*@Wi@0o%me-Hx6>Qyt&rbE;V8?5)E!f@n%Z|?GXy?^e?*V)z zY`3<1xJ!`tq!v`&e%N3OB%mhFCxWb`_i{FHwT!dY%W%N+H%{>t^u?g#?@uv12o-~1 
zUNH={n27o4DKMS}acuQwCj}hA0WA>R_DD2Wq`D*#qgd-fDABV42&_WsT|3&XVu)z5{WY~$$Kh4$7hN<$#3xFB%v}H!a}6|HEfyRjob;9` z6^0=)4!TANSs3z5Z-hZ=%+pmKiX#9QUc-NIY*7Y7Pa|*~fO-#G2>Nv&FDNZFS~NAR z@XC+}hY=M{A&4e19n(lLgr){5g_>k~SZG3x^u`bpBM(_!UvXJulYaFUNogr*Z&6^6 z(qX}X4`&vHK`a*m4vl~rz#zyC2{)UwVuMC6+L>psT}{oRADuV;s+VY*Y>3DZOw*KE zt$&JUnOSO-Io%gnq-&fgT?@^O=AEFXTjmyFKtBk!K(lmf2!n&R&Jih=WSGS%)eRBu z4Mp>2N{u#AJdbXl%7@20W$}qv1vW*$%?_qYrU_1(@wDN2&v#I|0~#afLR<#p!;el# z-s*|$>${z6MuGVKaktJZn^Ao#%x*W%?H%!6p3q8&>??B##zXGc(46$OFrGr6FKqVo z6)HzUQr-z;VX}STE2KQdGq#4->-JVVn7dvV%AW$EYNP;yB-r}u&zgj9?RBpoYCcr& z?^;2F_TP3wpt7nK*dMrcO4W7u@3h@j$1Gnms}{zSxpYb~*@3|==AQtAgZCCgyR&3C zUiv(GVbu{BXgZ!nFQmb{Oiu(arrb$rp1tarhhK)zq5azZ28P|en5oD=AxUlslLn1o z{Eeq<#BCwrdPQ_dSO5ka0Spy8c=8(-hsV`+^~tA1CxQqDP1 zHZh;dcz#^{z#I_R%i{CyPoQoU*dNyJp*eNhw&gnjo8}pZAm(-ozKTB26S{oo7)15C zb!Pm(n0w2xIG#7}7k77ecN^T@CAhnL2*KTgySsaEg1ZykEet+L@DPGNlmG77{XJ)& zJ=b36yg4uK^6KiU?&)cUZ+-6TBTXY4%l+M;cTM0%LHnZJj(I`I(<`I+uR zC*yz69A+17XqEktaEh|Wp5YPFbL!ixRpS%xY;S9F zWV8Cl>r`fHIf$@-@x@RrWG%Om?-3dv$-E}NeIb3s$eT>xL+bKQ6&WULz5BD7G$^%* z;-ZZnHhjUQ2ir{vq7)bni1ov_#sAGR%kqF%iSHM)Gn!!G*i@~j$%1<~%20q&L)F{{ zg{wVmOyd|6s`OkZ})2LEKS<~)tTBMNIz79m~f;~=E%cg zOCGcI!&i8QHRu<2ljve-tD6+1IZS3oz28UsqmX&AqOsv^Q!brl7K;KoE(;v7s#PQ= zl(o#vR=>ilKcc^1BmMa^h2%33UfujdEKj7Lf}&15k$eTm!Ojka$j%C{Ngnz;ZDvQqd=Bt|Wz^aD0(d^lL^AZ2f!q$7GctakcvrF$(0~ zQ+KOUbddxTkWykPAJo0yy#(4TS-?VKU?N25z0e^=Vqlcf;nLMSI_$j=3~pA`+r8mo zX;NS;6iU%9Rd~p~;k_K@D_jJ^k}C&97>JMHi^UUa3=Y8<#uEU}oZ*!yOhd9?mNRm} zk|1>No*=R$j!X)1>XcJjUA2FRBd*BQoL_daK|My}o;l5&Mj28k z3TJr5($13X7Y9@g!n0v+Ljk)3+d zV#C5L6@})aY~_Xf7DlpSqfoGHBOjg^w_caj+!4PP6`EmwEh{u@g31IkJt4ArcCCIddeMQhUqkTo_*94~9m5}c-)m4z26Mij+Zz6F7Nq|wL4Bg)hmBdB> zb`|6nw5v<#O-TKfqkQYoSm0*5T-W?=&V-J828ZiZJJz`&$x*mMqR@)sT^$zZfp8*FE zLN|O*c$Y(A_pICTQ6R$(ctcR`V++*Ps1BG^8X9$)+kuY0^x8qb1qysYCo6rvf!GA6 zaGk-RQq;sHzH5qtB3B@#u!3n+TqWpyy=0)?JEt!pswuxOL9Q|Ea=3I|;kP+)LGkl7 zK|%TRe5CEEqc=%6IDnd4S_8jE6*cT#c_;V91eu`+;HoiR+h3G7SO`5V>NY|*%6=Mgq#g2au5i8NdP4HATY0$u(wQ#oV5yzmnGhHf&(b 
z70AUu^6J!uWXQdXxbQ%(i}>ebvS{ouzeM3EKEFcYXeN?ysA?B7DUvXfqSqd?B#4cK zI$SiY0a2}RkPqo41L2;~UrxFUdlwsbC;C8I#cLnFq*&;NdSQ|DF&YFy%cJn^f~JoS zxg21$DA`(2`2I=A0;z07@|#@ES*2>Uj&_2W>-KsKXQ^B~iH4xRUYQjrX@4+mNCp}n z=K&q}@C5ex=osed;dF;tsJ2pU>10M;P3hOYynA;ZTK9|tXKox^uWGGEEsX|HAe3Z3 zW2YQ&B2Tir<{HK+(~jLOZGGo>zzJV zA)sQ`kUf`v`B5({l#ta@5>;pmG%h2W7)NEgOkLV6tEy%qTPj9>cmS~zn2~joY;%Vf zplF|s@{9)(%h)rs8jqnU)w?83>SSq&vxp!L#0go zeTdZzUc+caQuzmcT>K#+xrF6Oy_21kmR_HLT*4(s=qKNxfE`!>mnr_t%m$QbZBMbH zYJC<*x$JSv)Q0{R(E5^16T+k5ThHLu1-qp)Y8zMK5xb?T7_{jwM@vQoV)d^~@PIjs zIe@;OWE%zsEfq%XFjisx%DgX^n&`VmmjwsbnKLtvJ-5fvfXZ)A1aY#Zm)j(+9-aL? z#qBJ7mHwA*$0=r+TWNMA#tr=R&0lAk8?r?C$oxFdQ*W*ud8eNaL=)#u?l^xlkYZQV zHRMlA7Z{E&vlN~?oA7CBc=nd$%$?k1%&}V1pMzL4HCVfqDwosv9;dtE!J7xpd$kVzYSv=h5zwU;DaunN)Z-} zrXJtw2SdF0r~SZ_mY!xN$N2TRD`y)LZT)5uvxlFzN5@WcGu2gfjO=#|o6|=2Gf8OQ zDyegFH93osRkm@{0W*VWYTL57sV}(L2O9H9Fm2j5h~GVZew@V8fpEY2?G*GbZ0BLV zVYbc8P`~`ZoN1f!bpAB@sekRl^+(AXA793rZ+f=c*(M14T=@3OFO3bOEpfP`V{iSQ z_^P@jXY5L@-*MNx%cH3>(Yv~bH~J+CIf(tsd0Dp2HCh>IKC4cmDY?|=N{;?yR|pv< zo1iADL?(Z*+X(IQzy!b1QQ2Ja46G$*x2iKE(vqv z$Maq72o)#CkB7L}j{9pS%DKU6gatSwPfH3-f_UVT< z4Yr}~wfA(cBld!4mq9daQQzK)XKDmXi|H!)?~zkh9kHcrTH112F3(j2-b~>>{?V)| zC#yetykr{UE(sTm^*2*x?M7qL<@>C^c{yySe{6EmqDqTYmTrLtH@Pw%9D0u8Mp6;Z ztLVu#ZAUlz$@iF7eGVO0<%q$TU?RD%4}1d`D?6lbe9%s9{E$q~8N04mn$2X}?xoME z;PKUyxn{OQFrM8I!yi}M7(cE-m@3H%x1WsgIxkS*tnPSW(q_UaV~s0^ur1z&{WdQo zSBqM1zba-b$GTv*=~wvhHEZCy}J(l zFARDK=7cH6-W#w?@-o&lI;!>aeq@Od%xO9j5)5nHn+Q00g3;7j3uB}1$B*J0Q{tFz zSqpR{hExg;kldTg?%PPPm1(SIU2?4!(xeO0_jw_APLwEZ+HqBd6v2@|a}iFAxeQz* z`|?m1YUXx3ssjE|Bs+N;wmr|49VuP4Y9Bpx=o%_Ni@G)f7#`||N7jknqRi`>724gOWB)7Mq8>}VS3D;v_nF$4_Xsj5Gv0#QD5MnG(ntDJD$-U3k!7>Q#o1M<+EoXd`jRM( z(v@cMrK(@=G9oN=`CMKbzYTYMt7aFZD%U=pnwu5ci8q|vltrHx;}&5mZ+4Iw#244T z2kDKNt_|BP(4VGULC64>1@rd{%c|f&@rZYh1eDYOP&c8yCsDpm`BLfG}>xV!UbWFGPxXF75MGJuX`LCFuloBFy4%$zPP&QeFoyEDyvqb=q7F;hvzTFZr zin`;nZ$6>m3*FJ$VgW)2>9LhTGmW2pF@PWoI|*k6C6%@~m8@}SYC>fv4n;P9p@^4= zmVhQ|!7g03mh-HB{(z6MKjVRB;Gmu6)M?U05Tr~uDyE=O-?)xtZfi!q)TBGWtHeiL 
zx?#ra@ZAPCj_KQmQ>0DTrf~LjsKsdhf>fyrK~`Bbi$2;#5qx z=DBXOZ=T#-`f+%vs}>r=WtZkndU{3r%Boq{xoM_KeoOFV#iiSi^8&1 z(9i_l(hN-?l~JW9cXtzRF8Avq6wth2RJDvTgs)^{rlp0osbw79i-7J|_|w)} z3H$U*L9Ig96sopmaJ4iFd=tcSB^@he?IJ$z*%sbbcyWKM6x7@~)=DIE2_R3T$HnU& zJ-GGS*b`%kG__xYgcbO4sQ80BWJSnKO~qPOeLpH)+3H>ybZA4)ZVP?w*G>3K-mB7& z<Ok@+jZh6PZ``rzH8J(g|@MDQ9?888TyqNo|X!&Iy{4GuM&Lo#5eNTnx`? zDCw#jhm~i<4CzIqp0Rl>pe~OAabRYaVi=Bf&n>WEeR|@V``JC>wWS&P8U08S_B3zU z#^(`pTC-!@!3>R8#-=`V< ztmb0k@U-RjIi57>l0BQGWDYF9@s5qv1vwp7MvN93Ssr|Lt>+iUnYr)qh`?E%xpUZlCFc{#3O8*o8Vqp-9 zhk(cHf@xxoSPz7Pi^6bmb%!6>6Y(05T_ZVKYqPNTN$?_6>~8AUEh_rRDr=q+lSLD8 zL*Hpo5bF-o7}36VN` zG%;)AA&YB~d74+_h}Kliore7sXY}?h`;Ocga8}}n{xz?o%gatdX6084Ja+YN;ju}F z{f{E{ML4q2> zVgC-_euj6(5iW6ZM2sRW4hY%r>IWZI{)e6V^`u4}oe{K|NQOG; zUP?u8^Xx&h1(ql(X*nvEH0$rvvPZ1NP&hdOh7&*i6`iy$o@UhYX}->qkF3t#u2VO~ zM(C=&Nncb>pi?;Eys0$d3TjFFf?t?m1}N+^<-U|{U;q>dv22fMquug4D5q|uyD1gx zY+?3g@Ks~w%pqG4?hrh}Mo;APUUDmc9F5Knmr>gsz{S+Fj~(}6)Jj-fh*&U7dsE3$ zam_h;+UCFCeCZ`Hl>16WEid*_;bL>0dG5{4>10|?%qul_ z!i_89g>#kQS?*$1FYH?LVxXzP;tKNwQSysna)~q%qe}!bB#z8t9U5!vC8|b!urgiZ zj*aJ+3hQ3qqYB#n@+B;^E7`^^U#IxF@3T6$ALi6Umh3&(1#oYy6|jOfd90D)6UFj2 z1p~$`rMnyt5M(?=q_D_@ht zCZkI3>I`HBW~KrPo5D2b?bbX;dYY1P^ zXu-@|-~5;ZAJy@<*l_enmSQu=P+FY|F-p;WYE@mk0dXopK_Sa=ODxQZPqDJ<0+U8f zzeZS^ejPr%9qsK6sb|PA*$D;p10zWZ`NcjId2EwK%+?(&dgrUQV!;p%Rzsv?yeT&`J7Nq&cn(CSYhX;UJAwx(?*)b+N8CV$J;V5e+Ify|uN$&uM9X zdQPXEanz_@|DmY=^YK{>>-ci#9zr1Tu=iYst?c?S0Iw+GEl3N;?VQkceLWAzO}(v_ptlnkRSPDlJ3rPCzOsr=(@eJ7{4xJMGr+ zVf=s-=d3yAcIk+NH(3()YZC3q_zWgrJV%wWo$yPdnrh#}RD|TS?Haq@#>Zh?C08Fe zz9!ER`N7`pE7f*WznglY_os0=@))I&LX7Q3Dp*fUYMK&DMs<10+!99T`=9ddqg@~C z7-^{^TCN!xiBe_P30C-3X5%$d^~;Bta%WN{bxy++6)zYN3R2m-YybF+oLH@X{IoHD z1{(cLpAsXsARzfTt@69hI0$q=KwA*-j7CbMWV6!1ol3g)O*N=FvO}<+s8mG1o&h@%%2P(wpw*E3)#ey%iqGH)CpgRm}UT12N>XgZv&HYhQwS#`bsCXK3^Ihkyu{;O%Rg-nFu?o2+J7o%wokDc_!Uo@!VU#Rtd|4%Ro(uHP z(aG^LdQ@sUg;VGoe)`J^FA8|sUQoGRh?z_{??=rKhQB5A$a9xKaNcm_pyZtTj})C$ zE(r&u4w`W?qai1exp=^b(smy(#^!w%v%WKwlqN6W=M?T}Zm#Mz_I6a}7yj(zJq{Mn2D= 
zOTmpA8k?#!FBJs4uy>CD_pO*V=a_d(*}y^uJK8SVy+&JT+r_@wSMTwu2T6)|21+3{ zT#}ZyI2e5YW6_KD(~_ZQEamMZF#d-8&b_GeQ}aZv_vtzo-(=wj8;if?C8>R^C|$Zy zzmV(pXU~(|ZH|<;wYGd$q>l4wC3aux&l*F4y=f@PUy$k<3GtPco(VOQ5n@_OsMvy~ zKr4N=E_ZvwL;GHDR35|fW2CyD+7TZM^RF&c4##9dyZs86up%@Uze&@!*x+uR)Ct-a zrbbJ0OTs$MP12u;o4-4`7|304FP@KZmLvvvKXz^LnRY)M=kCyyhPlyje+G^GZt{98 zS9?909sjoD6+~=UYY`A$3*)tBaHQ~6J5)ijPUog=YAU}^C!bSy!(gTStY5FTK)}ua z{p;AFL0k15-A%omNet<09@PUDzCb|zCX8DYEc15|wtEeU19p+|gz=1f-U5nV+lN%9 zbqzCf!|~oFB&1N<=!+HWkF*}7mqLM`J^Z?p{yg6)`Sw1yv$ zX*@JQcFc6A=Y~ns24tSamC9T1D^3SOzZ=+2q`sSFT_QbtPu&N4&d zzA=Wl#y4wP$lXi<@C0^*?s<&elX?Crob8``UX81L`Ll7_o>r2q88dFg^;Oqwj-Zxr z?cn@Gt;IV=Ry8*7o0StD0@OXn??!@f(qU@R?wu+94XlQhnz$6z@)Uy-ChV;zVw(KT zWf}%%Nr^_sLEF?m$A!e*J-xJ1v!V!Al&R1;5uJtoU7D?gk*wy_G z-wFe!+Q;|f^ZSB$=b_3M}6pA#JYXxgGR|MpZfwN$C2~?jFvX7C5ja9Y4_}&D+J^L9XK@8113pxQ}DAt8Ts$95b!5G?pt6E5-@jges=iuHn5$Ze7jB}*@qoh zVn>1Db{o2`I*bNZwm<4sZs}c2o%E>}Tqe6t8WcpK9govYXqG>=^qNtK&3)c9xPJvI ztc;GXq)vXrju|=_kBX&xA-|t&{Qh6%n%w_#P5)o?#Q$d@wbcJEq}H@{wzqI~vig6i zrRD{5P;VRLq4#2`mc9$Fg^#&0W##`;bh|id#>i8;Nj$E1Mu;HSv+9y3z6f0fo?B+g$}&0onj7bhFXzgReY zoP1ym5P%a*;sK}r3bDcCf_oVL+#m250(@Yg5Lh72#RdizfoVTHd>r7(fED%sCk4p!pEdAqZ{U7A){C`*ee;{WEyNLg%l-W3CCLxRhBlt=fcKl~C zruuU#0k)FkzIjly3;Do4D5Xw5iOl~wKJF)-AI9At%3Zt^O|6G@Yvn=|u{LVK=gn5M ziknNEdE)y`3^u#kbW_GU2RoK%E6=Ydlf&xSE;RbG_Vq(%UHel=N z>9qDLG1iKgV~`od0VB1Ksv$_8#+3MZ6-B%>slDES!{ph=Fa4M4$eL3~%m9h=^7@b! 
zG3-}Pk)$}zv5z_F*80aV%FkCqrb{mQPcB9Xi5@webb4fCcol;eJ2p#^L|^aQ$z#YW&AD;=fRV{@cgO|5OhDuP54v|6LCMuRG+!KjiTI z|A3zTUvhZvzn>rQlkfk@ojf0all!0d(9d2(v((W|UjMlkE7Pr31#HEX$ktYS`K?cs zP71miP|i}ZtT>>Q$dV8$YfKEtrcfCfBMb~kfiJ}Hi_HWhu&qPx5;B+xCnT98$Xu+%-iJ4}u-Q`(vwCsLxe(K_A>93*j?$ zzj?fYfsO~k4-Zju8WQoAsgvdk;bsekBC@!ye*Qczbqoig1r4zgACKC$uW^q9!Gi); z!_VgVt>rB|ctPSMLd%Py@vN-9DLl=jLhOe?IGNe}G;Yzx4iAC~K8G;N(H3f<>{^6` z(1M3poO5v~Abx{gg(?bse7g`pZCSb}??X#^aO2K|Ckqi<~`a*UUF*pw9^Z-~X zPE%0^#VNzdGs*(pmv;zo37v!?~>FDbVp{DoB`si^_TkpmP>+F`Xt zA${fVh_M45wBfTt}!Bhoq*;iWDn|c0vNi92nd(~7!NZzrz6PYkZcR8xv~HGqqqHI%ca$Ey_+W=!pqL& z4n8CLmq(JUQH+)R?SP11Evy%J7w&@edEZyu?t)764hE2yiXdOygdQm&TJ}CyMKW^W z`g1_r4Cl|Ic$kZ_gv&G%WDKzALB#Hnb>Qm^yf((99)?crlsA&-3@Ca-=nRKlV+f-- znh9(pvo>Ps44%nPKwo3j?d|Ui8vp~@WE=~59QQc&;LpvWox&@Dn0m;GLzdV4!jR{{ z$sgA$*F5-l$JZ+F<}Wji;3;!AqP7pCd6Suh8z(msQ?d`LuA@3P61KZjb^|pZaRez* z)_{Wk=$T0f3hFqE)bvCNW5qY~5b;LL+QV8|uy;j>$5Pz73FVC_1|40jdz)sAYc?GL z@{F0o0m*oJqiuunc;0Eu@4oBb)6Y5A2ht1EUcHP6s&^Y4qpn1^{NBHv_1E*wB41;{ z6q7y-n5?6-538=jc7roRqU(rrgLl`GWio;2-4qDU$sNxf>vO+5zdh@ENnIn6gzN4B z_)#eNAsRk{w=JH}V|LQ%M(weJg1_w{kVO36Bk)J61Twrp{s=p`=JX#j4uT7cKz76J z3vam=@kh&dgX@cQ1q$)Q;{*LV5Hf*WMp5S&k>~OQlamQ0>Lf98I)~C<-wTpo-lrUQ z8p5gs??pY!nOQFKN7wNlIDPp6cty`tT2cUEk}gkjUuMK9p|~JMVQ6y?5#41;Hxbkrsv>#KaTCoOYaNWlmbyzp(P<`4+cMuL$|L z^G~fvFB~4!LQg~c5h@XMk;SlNqRDVQW0pql-o@_w*s3#oSVO++Q%>V`_P zs=Zb1HGWk4tEJa(RteVjX}+qa(bZ6$-GRDG`WI;4o?lIU)t=P)`Nrifi7eYkoT3Hp z-$AdQEYG!Gsuyv&Kd$V5z6ZXO-?6RnIuXf68AX|cEZtthGaTWM*1yFEhb!7F6oN>u}=sq@;N^JMhf#FsyL3k{k``5&(+#{ zkZFh5J92?f$jz|!SRyZ1;IiaU5-8~4E{9O;?4@vAsydFG^Ibx8i$E;zk1IQ{-}F83 z)$}a}#48+$#vLgA%1-mgwB668X;OzP&>Q7kWelBn4E<*uIoL-KcYC-0d%Ew-?!Co} z`Wvx1YrrG48_587wYAG#o{%lyOb%hk!2@CZ(kp6Tm%V1CmRf<9w!VhwprMWdSKwSn z)2O9vB9Xq9UQ%J^xqBjks1pcd4VZVqGEU)lHXwZtp%ki7sZpEuv09o13rR1s0;3Gi zAjUy?a3qH!)s7G_mC(T2R$*r0To{~FRK1GB(j}leuQe>gy+g93>4@~?8huL1DbmZC zJjH_&xuK~Nfv0(`7cz*LI|S!iHVPcLaLLc3v%#YmuO?na$-cFV}Ht>EihQeP;SG)fpgA)@^fhV_8XF6!ay5O^IQ!>8jx{! 
z;(}HkAC6F0QwX7HH2T8^sl=c_NTyc40JKYxt8=wtCEs7vZOBj(7S+u&5j{Oi< z7H4sa5|#qq@*#Y2g=h*Bh=FC`hzqxU6(43zgqmk5noVXv_h~rs7k(R^6ikiWsRLTfqtRL|lcAl98 zgH6gioIcRf|CDEE>_3g!CRhF%YR1NP)mwBs%^W7f3(c-`=}Y6a<0o*Iu~qtK%gg~D z-5C`LOK$Be8A-amS{pE>z5Qu3xP5=t*-nbY{7zRFR>?^_!z_QsVvP1%WqY)^4O_$H zx5^SKC5`y?(^_lEBMxX?Qr9D_VJf2> z1_s&DBcF$5UZmSi7CUYIT=@DNj_TWBq=D9&p5}Z>tw0qM?JbDbn!4Ly#DU?65Bxar z`^eaR0|GwGTa1|ayKV!}jd9)6ZjjraU;-pQ-TpAfaufqr1r@sDA=x1$(wP)W=fR#O zw>mjia-%iP!+^XK=5mzQ<1DlI-mANRYR@JFU#HxVuQXYcoa`i{6!DP6&pNq#Bq$8( zb!w@PR(6K6M}D0^41-m=2Ld!V0Uv74%n*XTM(bX4N+h>dQ9^C)Spz5NOh@9VHSqzj1vaqFofswFS^(`dU9}?D{K27#bsz!nOhUSHXz0La-$&C zm4;b3da9F5QZlsSx!YeOgvU(zhIQ7g4lMwieZvGg>X`JbhtccvG)Cjiv@vA$BVk~p zY1DWz#U?aXh^!}p71SXBhNiD06zvI!L*a^876>u!?ltn%h}IIT2Cj%mb-w^~i*j^Y zIn-xT78I1I*69*-O(EdFda3vzbz|EVk5Y=f)z!b;jTi|c%t_X1>uJj%%^BCnNh8F7 zKPk&ZIP?$pQ}Urj6GUkSatF;Yu;8Zwfy- z?+&4Ho)}F`p~_SRj?6QITd0HL>Gga_8DFYb%9>G{faq*De=nAmD++Rm=cnEXVxH+U zm2^)y5A7rcmKu9jhf!I_0Ozs|DmA3p=AE?iQ5@-`^kWL^(G^5CX26P8wu1xsYeC1J zmsnY%y>8Lzb`4G>!=MgHQ6WH)-hE8TnfY>G-I_y}Fm&ivF}1&a(1JJ1T~8a=KEOzp zj>~Cae$&Ao_2WVN#9;#fc0;<(gQGEeaD!LL8Jf4|b-ls`xQLf+pl}Y{Mv1`l&~}ze zYX0avNs}Xqv%Hu1q~p}AR^0@vU)u$!Svhp5_lK>hg2)qgEKLsAuK2TewsA#=+IK4T^``6 z>B;eRU1$SCTKjXb5%w>X_3C6dn9pa7RYkm^`I`{9 zW+k9r3sNFWBn=!j3^fW#bw0Hr;nV#|K!MTGB)f3uP->yi$837Bn?t#RmoR#<>q9Ix zw%bF$0wdt0PeIC^_FkCF622b1;%FX{C8!hd4uO>Ym83;btO z`>zk@g#Tcxi5^Un`+SU{r}w#KR8ze^#4hmvo_Zks=LRSc8$+p; zKF!heYByHYYP{E0B5DPPlY#}pgQ?>_w*+crN8lg5g@;lB!aiB_=r?8Q^yt@CxN17L zhk*sH*N377b->Aw1uRFCBEtGZsog%!S@g(0AMw@juB}AXj&IAd={>v`M^g2LvnNv_ z)GmROKZUagQ)LU3PofLyQH5`A$_nT`g{zMgP}M4KteDg)uFG=hQ++;+rYaU}kEKQn zR|6FU)Ov&&4-^>H6ur3$=nuTPBI!$gK8&Oa7aZP>3}f@DMc!F4s2K?peorMSu)3p( zrOy_2zoiMIKNVIuhz_F$5wZ51;|>vFnRKIRCgCgcM;pItL&hfe(>_Jzk-8Vs*<^; zZ9`zll4T8NuZ|EO{Jm@(48*)_iGJ4FQ*#h~tb$dCWO9Jas)8->StdzybSm+PZ>cgm zK=HF2{nXuGGI&{A^=(ai*=AXb-(efD+!|MnJEq{OTw)0-gBy<0$XZp~Nscq=8f+ zSBORwZUqAZP`j9qLLsh*{!&1ekOss&%44WUN?$Ppq1^Mm4AFk}s(M-fO7qKiO6f#I-rL?i{WhI&T4pl-|@AcOh| 
zvxxwMctO_~J8*?~L3522f{)lE4)lSV!(5jD>O*ZIz9bCrLd{{UE1DsP%)k`THAW9; z!ti3Pi<^mr&?DASU&DlqBMQp`HJ}_3g(b~UL!uCcrGXMqwTP|lj&k79Afajz>S)}= zLhzt|W9mr&O`xU`S*RP+21H;oFgkJvETCc$SZLhvLttT!v9sjN*g|ZeJP@`O%y>iS zpjr^V(_m1z!G~-iey5n1Hxq;u3xR_Yh57|^hOmrPMadCAparFgpoI-~vxODH1eN}? z30LIciWdqSQ452Co+BhUQ=Bm50~9UH7&d{N8B2%-lqEtO9Y@T7Jd_L!5+V-91bvbm zMP_hRaP9yC)G#6nHAmV21{51q*p}s)@0clCh!d1P3>PB*mZhTKm?>PyC#)>l0SPk< zNWX5nV%wK;tj0DuLvRgJ$l(KnFg-d=k~BqfaQXlQ6dn{J%y$^&pJ#OiH?}nMh2s9* zHAS{Bzo^Y9is&)q#O1`5C^E!RAW=i?pfq5XVYFbpS(- zvGBf?fvpN`Qs6QPa2YsnqJ>A?uZu^_&JY`al0G`-0q*Vvwq0=A^{;^s*#ge5fsGJ+ zSXXdt4~`qAEG#R*L!^VP9BdI_>jWDagU99d$>(Hf;J(aQBlMa)kL%9_$5ZEV?2}6I?0@5l^SHTbuz~A@4n4X<5KKbskJ+mG9Cbs?tCbru&4PgI84cw7c+GPFPPTLHQTrNof z!nNs~IJK#qxN|F&F?z6#TZ0W8%YoyzX1W7vuo3B}-k96h-cW)~!oF6cT(0HU^!@Th zhqZf})X8c($gXs?*wgWpb#upHsuVyFYXDlq`0dFj4#H&kh8q>W^JBlL>eEMzdL#pS zpqko7SrPb^m5EN}tCJLHkIBW0qH2s-blY=U3SGkA#GNBh(psq3)6w5V+ue=um<)Zq zn-l)aTy3+qZ(0jrnupxJZ;@lVWI+cP$&-AuR8fT)9N*GZ#UaYHq=Qgt4NBKb*6WMB zn186NyE2Gs`BxW-#9;c!v>$ig<(zCd&CJY1GFdqm@O|s_VOxKj3n~usgnawmGhpMG zFn@e}-06{<>kMw}ak6o|nRD{+qZx3}z3s@>mosg=KQY7G{F=6kmSDoSy?qWe!FKLXkgV`h&=UQewgA#!%#42%`5QkMpqC z#dkodlYyu{X!0ka$N6tSsoh}od~jzXe-O&!{O&-MY8d(}u74G_KZyCdn0+v+4}$!O z#2>^ClnR0)f5Lv8w*{wQ$OU0VKVkWUG_Q-Xfl`IR=m99ApTNziuZz(KqeLOd1>r>R zMIYz=fKpeX=)KUQ_wd)nr9dgUfv6vFAE-_DD@jWb};%5yl5+w zznxE_f9Au)?*g(vY>?z#Lij;!m&w;dWQe;f)O)ZZSOvS(JDnnn&X>uTd1TeRzvMl2 z618j}4D?+|yzJB1`y?k4fT+NbS+1XXHgH)ncfXIh>aLPDQ%h9jQV2OIgUgSgm zCiMym4WRMbQmy&|2c zlllX^d@4DI_FBU~DK$#Co4D~gnN(dVr*_CJz59us58lb47N^)JTJ!;RdE6NRWIvb+ zDX1%eVGG2Bhx5f~VGF(m`%ym?YYYIk2bjfv6mYUX)s}bG@dzS@sqU ziQ#gNuMeLdpxT;tW;U1$E7%4=$BPJYDmm}kAz-REIIeN$4REC{Pk0clpF z4~-xJW$5ncQ$oW-ZKH=7F@m5h8KOspL>W;@F0-^BVlhbMfGnedLydFT%o28;Y(_`E zts0f{eDYbglSH$`C@ddsSZ?(RCcySqDsGq}6EySu|6J-}dd{C4kdcW?K$ z?ysxvN~Kasx|8=_l1jbz`TCfxdwqwoeom)J6pdRWOXlZ^EG{A*CY8X~0J*J8Ior{v zF3**!O3ABA(PpuS%QaIHCLN%Cf};5TK@wX04DqVs?4^NT2Kht{dMwpsR#Ki=fxoxqOV=IkuUDa6q3=r-NmtYwLJK@EAtfjHQa{d$ 
zP1Nt)+G|IxHuF;T+7Da84xD%)G5#iS+-T;9=S!f@b<(a}B+)Qo;~@vfpxrd`^1j_;f4#<%@3<=v%t_t`rnlOl zxS9*sDizc`1$&)}LpVeCkbW8XS^2O2#u2_l0wFX*BngwPqlTqur@*5SMIg!AZ`O`o zUsA6-M(AAlo_KkDMyOIJ-4}%guLwPCBHmCAuFjO48zm3t;LppFvvQg_PE2?mT{7#@ zBZb?=Tgk9m&*VGSc?Toxgb=tpf+vh%>I`^VG$EygDLNByDGGh1pMqP!RjFek-tLiT z2ewuXg~1n^RrWz?Yto2Xg6R?DbvGYmSN}%U6=D_VIE9UcN!QY29o@I z1N$JijWE-m{>>bRYl2*CkAg_He1MvxX3`rSuXvq7AllhO`SJ`9mF{3>Rr-APhin&c zLROQ4TmF8ym_$=2o_wD96O0FU-^~XsF*4=*aL{nK|3e@2D`w&oz(nnF0*nN`s+`=#gBhpYv0*J$*11e*1uJ24o zVBifQYvi-(dF7UBNHS)ix6d8)q_|ZHo%MLplDbs0hOl~S_cnHklbtVzjB=_$jzgUg+koh|(gBw?!dgS76pcnIx_QH`)P|5oyX;+O zLWLLGG5mfk8f~nT#Dk4;*qpL?;lWF&!n9`8V$0&fL#W8lvpc&VN`bmZpua}lyE!5Q z=Nv4mIBeAiXS=nvkWt{KU!LyCXpX2qr@YijG<5QY75Ev;2?Bpnx`MN969}Qsgr0@} z)dE=SiYoMmfwN#{U`|ytKui59zh&iBCwo{DX=tOy_Fp|hbJnL`9r6^(vZ*RI*-#Wl zX_HonQ|F<)ljKh{VmkrAVcL<@#3BjNEMby3)hY81v?vVH#6zw~8g=S&)t0(}mlJg< z(lw`urOK;uCu8$kH3=98T!FW_$E6QCG3tf+=ET-{84mj>4@q0s;yn3PrXIJKgOQ?x z*r)Gv)FN85L338nE;qkAgZ+KEefM88YDjA~8o=|0?At*Cd&lvFk=(ZlBE)ONqcOQb zVp@Gt!M-yw7EzKJ*O3q(7-t-K@afl4#&;3&a=WsVv1GsJy{;0KNs|w|X6cnK<)Xrf zFXSESEii6|thdVsoyrT7phT7Mf$Qb=Zh|LR!LJhOVWNG9w_Fz>tZr>7*=V_;mqX@% zd-){HF)Y@~5cGR)uaDzgw^^O*I{U2qsq0)FzQ*pqiUgae0GtAyFYI~((Y_`Ht_osF zG(nle^qeH=tP>11dEcyka6*EMN%txxXWVm>)ZTcwJ%4bC#@a)T5zYcxPk+$0Xu zBlA$+Ir+1^tRvYMLq%FUnF+eVtbwItRE2M4dp|yo6P6E5kIwqUAlWk=z2HIo_=O7uk#?07?E_jRzjbvM1j1;#gt5)A9D;yjVOBzV^qrU|8n z9*6YOurfOl{A}*m6{V?7PgbkB)UZYCJ*p#n8RqeZT)|rYqL=;P zDEKj*@Kpwb)^>~Y{Xfv@Sz~Fj>uHbdk|!s^^71GFLHY-wJWLMf!ScHWxz^T6o? 
zbc=+kzBc1f;VKmSxRqCGi#Wpcd_b`Y5=kaZe?_~yHrE=MurVXEh4pYQ>ha=s{j_jA zc}DzxbW~Cx0DXV(UYWw1P>yD5x?)i7DLiBTCkuTYqp_xuyyPR*#pI zw~y~`YyRWB`rP}6x_HjO$Z4^B?dYN;7{Uf)r0zgBqN#oF+Ga-%LSp0$M4d!*2`Ta)OnOeN{GWuS z%)iONb&w4W(UHibd(krnkowzsYC9vASX?COFhl3p-a_Fd%Zf$Mul*^+MIjDRylwXE zP|;XbNSCjZK!(I^)m29+zW2N(kX(JjE$$|(a6B%#nvssfWEh;pwAC)wyN5oi;_sTd{ZhqB z&+=rSPen2hpgMMLhsF<-2`7~KI*`j$E01603aoh1 z60Ee%M8>23vL5jbIff$#1U-dX1_axU`!j5dS9UU~=2Vm!Bl52O2KjRX7@gbj@-K#m zIB&|(K{c@wmq98{dGU;xM@oqPQO1TEgA;CXPct63zQ?p^NsyuFQMIe-UZGDN1=mYk zRmaV==D3Vyw{Go z*!C187gtr7o-38;C9_}T=oE#a5IvO)RG~Bp+9jV7hS}a(_sM0r`P>bC;S969y!tj7 z;BA@wB2WGxS?!^COl4$+3)KOz=Wq11RQFH{txH*HQI?9vt1#J1 z7IMF1&S_>(QLUKE`!|#NJfC%_MT|%x0uk_@mWR{K zOH{`=kpdpdCad_+7?q})RThJGyQaEEi#>tXe`xP@COQ`=+ZkIu{z@B~>yqH1kymq8 z-ji%1l7Tdjl`KXko7w4QG^+EH9-yOMLAE{emlFVx6QXJ0TVgdv-65SQxDHZ4;(xgE z+2=A{=e?n&EGxF$S@O^FWOi#Q zd?q(hF)F?LqyHR&o{y^gB`qAbM3X>U-DPjNCKK3be83aO)ex;KWq11-v@2s6=4CbP zZmkuJMeKzN)Cx^pCpV}PpR6z}Ekh9<%&;6Q@%qr%gx_ZCTFY?_c0Hx(fJhEh#rrKM z{0(oe3}$%gwHkEOCx%$cNG7mGmk^K(DgKK=oBc_&-Dmpa=QL5Djri@P${%(8cF#t}LQr;GdV4Pw96M}W6H%)KDMVN@ zKy_EugX|OqdaO`GS?=or0#+}z3!WZ@rAB8pC|UyNCx($+6z`#YYBR9vTsmt-v4wIG z)K$7;CHEm;r`gv~Z4=||Rv~zSc~wczS=Z)`(EWQt+6G=8vZF}eRJ4F%+)!|U5zU(!>pkC<+05h46>!cYgis;)jj%jihiuVROdWs2uYq$tP z3fq2ruli@DyqC^NTusdV{2{^x(#lz0Qc#&`>=+nAN-?n$e<5?)gb?Dm`rs6(#!r6l z8=Ii{ngd=80n2V>?YiTU;O@vEr+uNV+8ij=VtapcUa%^DRgl&gD8%gv znOMv)<*u>9L|Jpu$g~lf^(oad>C$v@L-5PSPCZp92R)VRB4@yyCCLiv$^>$;UOdv_ z$GoJPO;}_PK_cA>SrWwZ@(D;rbdye`=~{Xb)e!wcG738Sy_k7cllmzFqX`1?azKRW zfr|aXnzG>Ls@Gf#`}D=j)If_qV42U>v#_PpiT&wW5mAdBj~7AAm|?l!a(GguP*XbG zx#H%;E%C?Vh1dm(&EvQuexkpw%d34`8TFFIjR;T>qn`zsPraa59gTz?NB7W6ji;O{ z8`D?>gBcYq-m)R4gg{&A&)l`_*S#m+xY@7a0U%IY&W#XuH@F#|b(r;tUhE`5g4Vpw zh!?lkuTsP;4@%aMSUBjfAWM4g0h!1@7BD-H>Q79*zI+DTDErtR1OK3s)THEkfO&w( z*_6Q$vokX#VU;0?To|_5_yoq~1z!_%hmsbUDQB7{Qkkq!QR!g1xtaY=#?0zGKlPW? 
zI@;XYZjxVkT^LHN@VYQNOAPzTpbK2Ai49JA70~ z)%Q7izf^7fNHNO_Pc1c1KN)_MR%(%-AnNPhw4Mh}u~1nkqtOkpyNiG7r!=L&m3K(a zxlBT>FE)1saoAV6T9L)mq6+xx3$V2XbZ+tc!qCSnG;COze!DO#F<*bdhy@RiAA3C1 z1bL;|0^>CiXr(Rliq8No+##Y&9;qz7Zpl;~fw0C+y`L)RW$zZHj5GcT>0NS1Pk0zJ zB~Ot8@BqRzY(~cwCERw_PP6elcDt@IiMYr_al7w$*!F|XB+S*14r{X?l1pm|xqNK2*y=>e$S3^DNWwu+$y{I-6kvr~vyf=db}(BL zMW7f`RmG>j1Opc@Y0VHRmQg8(878IJrB?YQ%fy+&oC~Eli~fXtflKND^???m#@wt{ zM3o*ErLKly%$TuXFl@+rp1_! z#t$37Pq*<`m1lLUeQv!EWA|o=Oy+NtFG_|J_@y%hgY!?WrrSVoE#E*hDOw>ZhD8NI z$lM$fv=Nfm7O+Vm@NP!oaor|LL}S(UL}fxbh7^s+dYELdT%^2rwhSdC-ccTMN-oIm z9qt*a{D>8ITHt$ryYpl%diu?@>g|Q@!k3E;qxybS7+tUpUQuf66j1e$N>i!C4Ux8U zrN_$21lgK8*M=K`Wr~cr1utr~Kfp!HG;3v~T{2r?(s-A<_LHTYxKBj zrcxg8+_zb$i|5U?PCOWY7|hX1t6Q;AW4bf`nCm&9Q&`*G4Lz&7JJZ+n&o3J4ZmWrl z9(jz#D)q`wvUbO?nkqG~vK>_2UTWAGoMSKbhC>sn=)~GDHgb37#zwBF|c+T3VyT>M> zxY~)fB6-0b1-Vqp6gxK~u?oCBm;H()PM(z}`XwEjZxtH5CL-It6VuQ!-BDVceSGYR z(s;g!RZ5UWGNtI7{<)2x>B-^kWAD;9V|07n3)__~ol zRZwnVy*ELC*PF7?IV=a3PF<=ORjQ+j)<`uLVqFsumr}3ZH)E+hFNu36ko2XvteDUjt;0?wAc^eTVn>>oL2PswJK z@p|M=dXz{;G^X}*OX)Yz{ALkg^H`_<c+_c9d{DVIMg>&wl4qGAu{`%lBH6Zdt&wOu=E|k(HxIw~H zDR7i%mFf&8iQj6Aaidk~G^%Kn#~XgB40I4RY~9medkiy4CumkB)2^UN(qzDo6r%;z z$BLS(=w&rHrt%3bdH!U+9W@|+l5yz*ea|n*?w*jZR(t!hl46mblD)FmqRFpa_hWtjj15(k;}gc>46 zclNwqbs178f`t{?*%}}UBNey4uHaHQ?vPT~|i8Fu|jllO!M!6OrY7-<$ zJI0MDq_klfj$!W}cC9J* zlJZc3l6sA(8>&oj`}Lvc0-Iq#m4Hvq21D_w?x6j!MGJ^@i+H(< z(ed2vYcVK5!_L7i;4+pEqTP0*xF&cY^t*i(Kqk=Pjs1G}Zzt5|NI^uM)2f;PKG^qH zmbhdt2uHU4werrYM4JLNI}|u)2a9?6X~Rs}8QdjGEM+S-dPcbI5F|RjCpQK+D#j6`{AA-}Oy35+9Kb*CxW`7fd{0(mQ)5>( zfug)Tj8MQ6G#+{Yo zJVwUl3VLrha_}pOjuG$OzU0$D2Bxf8gtj4zkZ_wwS2w`d)v>S-`|jomBz3nw$C38F zw%TFxyPUN`o8HoauA_&JzlM`=McEseI{{*tl{6)qF)dyy2u+JZu)WzZF01`Ik*qzk zG!Nn@{z-`)?9h#OkDphtInJoDRfYe&lN?xwbUgxb>6j9F5%NW)I{RmU7f&C1+&Q>( zXy39g5g`5mnv^$G+#VF7b9|zKfnA{GO2UTh)rs$RYG!95HzFL0kK2GU&$So?T~}5JCXu;+_;k^FD=$b|9ZAz36&I`0TWnM9>}* z3;3pNTim?NY;5^DEii>Q=;=uSr&W?c<8e?%?61ut(9O?(p5yfA>ZL-tJ)6koiv8&} z4UT$nIwwEqr%yo;_k2oYNuxTBIc3lLRV7E`QSp&$ 
za;C|X@zABJkxKytk?_KYW1w{lulYJB*EFB+_b_xZ_yw~#*U2j|rl!kyuZx?Nj%f>$ilWq!7L~DuMQ&Tx*5~j@Bkual(#R?q>DqZA9O}e43^ZKfwe%byF^2$gPJWHcS%4@Voh+;=+&Z|Gb~rUQPH<`gUeoFNrUGAY z3!Jz2-uaDPywJ8Jx)!a ze?OsI-qAoI7|+2CtHY&{38vH_7ttil&i4hp>eNy#nWvU+KyI4ZH)4&%OC1He5(Dm> zn-RXc3am^#Rz2oY$MO*_Z=eF^X0m$dd~pc-)KP~86ex}6pvH%iM=ZghZk=BPivj}d zUpM+4`IXnsqFNDFP;=sZh}x>db6+atP@O>xZ&X0Tc06!Gupc8&F= zvTK*@mg}w-mR%3Oa*}lGt@dw2{k4{G~9?9fr?FzmwkWgZz^#K2i!l z_hbnDL6ZOqf*$moy#|-U4A~5C0)at=;SP zFg&`c>B!YZGXgUgxt?M0EaL43KJs!{JLYl=d>p$$%4aUnGHb;y8M6Rf!1Y5w9Z&F5 zM3=i5bn9Hp!1sM_E&b^GIqR)&DWN>K@6ylP7`3rVg_j5lel|1BiN6Dp!fHxcqT};W zrF!dsoFNhdO?&c@J7mfKN(G8TqEk~vrbe=#lS@9pWZPlEhRXQOj>66WN3QzBc46bh zI9{)#hNx;>R$>9&*R7m!bwTDaQ*&xD+(oLt^na(#Hg7$kjUm_*32`>v9#kEg%W0+M zjr?BU^$QXl*(Al!r}%xIK=gZ@5ck^{NbOJ&p>Pao=&JYO%3&_Tj*9!N1LLBNnoCUv zR3`I}?z6wgyx@tjgNx!vWWjk1k!&QVg# z4R#yg>+eRZj(G!rt^4cW9xC5h&wjUnvRrIe;s7|3QUck6lRD&n7la{j(f|3PR$2k<)moQ`C8beFJ?_~{lN7*PW{%@QHqO8m82 z_1a~NDojxb(f4K!(kIbAf7o!jPF@+9k()F+Z}SOa0Rb63H}Swp{|0QZk5|fbgbc7w zURuRR7vV{|*XX)m*2_7Hj|ZmF>z-i}5&_zu>%^y{XJ;Kpjamr|`dqa3&rkHrMRk2M zS)09`VD{BJo7Y7;piMeVas(Uu2Fv31--r!zlpCO?jGUh#q1wI8C7#59l`ms;6e=oc zbw5Wbq#M=du2SNM$1KMh7BY%27ZaXl6WR?59%rSXvj_z~fR$(9uU<;Ni%B3EG<2`3 zBvtVWLYtNb!<czW9l_X3mRAnUBdWUL-bXC5Em*WNE+_p156UAe9zKMAR(*|i{N zeM;ZdZ~9h~&0@5?KgU(Bs%%%e6M=j%+Jb-r)Lz&WNPWAJ=j?noc;S>H6mN%3BIIUp zC2sl;bKby?nOfh;E%O+XLKFi4vK^h0^R8K8Ynp8EZN^>3pn?Gdn}D5EZ=O0jtTlc{zM9$m_T&AbnWQcN+YhE6 zyMAKkBM0b4OtS~STk(homjJU;iUo=BV-~!#w5svr%Qit56-5SKGW$ohUf358 zdDLt#OUzJVM|#3VPZetM5KeF*7gqhs%qRwMFupxc-B*$UfA8L%=(A%p1-Q%PM>;$HtTfarNNO{R`1!Qa1vj`6QjUofOKgy0$w znCDII+gt1FXEC%%+n&Q+M_bVS-5|(vW^W$kYJBwlfb$PH2JiNnWhq#&3(l-KD~2>_ zLN2oyDbOcDzMqAyLz?EI7zG^Ztf3CUgoc>h-y=$TT)Go16`Hls)hYhBso(77;ZI){ zl>=WMKq?;mp$Vz|iOZ<_!t|8fb zPJ-K@2PG8+Vm9(5tQTw}Ii_dbcK7DhjRq`#B!sObfCfW%GA~}?FxMxw2(sA0`1M5; zCt+_BjxS7{Y{fIX-y(J6lZZ>loV&Ak*;LfBclGA-#>kd0<_r??;}iVql@9PHZv*cy zhJT}hLQ!zJ+9xo%i9B_UnH8adB}7)%?Xm$&%@W*M{%yP#(KK%i@o?T^05( z>gRu>H~*G0`fp^@zYpy9r|Lnos!R<>r 
zV);^rzSy9z;8Hhpdrgx6(yHQO|B{e+*g3z#fMwi_?W|2j94zh3Nm%~%*F{Agy-0K! zIoR3093vh8D+wd}msrHc@ed;O)hp|N&atwv{!fGG3vH2fba49;Sj^eJ$f4*YAwe@@MRu&Vyk3Haas%>Vz$UoHR}2`2}DiHC)Q3-EQs;#wqMDZBra z@IQpn7bNv><@(=IH0<2}2WB)CA1sU_@%(|=Lx3HA3k8WTN=5@4^W6+b2&BkOdNjQG zX0We7Ib*>LOIWe$ay0}WW2LUK=c~s${$_SJ|L;~;H$sOx%I~GFj$vGlSGq#zeOq$u z(S;QrzdtT7R5zAL7S$*BQ)WL_y1INfJcg36$h9#FS6LmFCh`?kNu!rTW60_r!8a4N;lWB z2)27>b~tT3s{C*uD38dJPh+D)uUs@9Xojlgio`5rRLcx;na2v{ZGyfL;rrnQNhf0a z11DPsA=n~0uzglsSqHNaZJ5rYtPnSZV?2;0D=KsKoajN)f^V$7V4?`E*f3r$^gKhV zP@urMR}1LbbJmjd!}C1~s9udmV}ELD!!QefZuT|2&aJy4>P!TzD7~x!%oo~p+)&E~K$uCswzi5&FUGDFHs+RxH#?e15;r}#_{w3}D z-`Avn_kaGEam2y#Rqei7*?(gkv9WV;epN^kS2q`PV|zHS-0L1#FZGo_LZ5&$rlz^= zEHk#0X|q#BDqND3#6%Dol4MXc4D!YZgyiJ<@hS#TPf8{j$?Ekgck7)?*)n<2Kx6A7 z*_LA3Z?p~)$|?(ZWmwLX&u4%ply9#e&lis$&z1p7b29>0K64NGo`5rfXGLKU8VHo2 z`xLoJEMol|Flho|SFxM+QCOjGF979psB6#Z-U{=|_NU4M&}GE^q>YRkW$tTUH+rL2 zLJ~vYgT?RI8iLuFPy}g#v|K;S+-Vz8K_atS*c|Uqur+(V?x>7psaQTz{h0Z;32%ZRnB&H~48enB-Rt}Od1U_PtYg>sfLFMM82LIPXVG?j z@8Jc~iC*|1!I;6A*2iH75s4k>?K~yn8WA9nH*gDjz9yH^TsIHrj4fQH^JnqIHA8Uu zJCK~@nZ}3-Gzdf>wq9(JV_g_T^uj{jl-aFN8D$_Y@Hkm890Cq>+xkO0&(aBEKJHwA zxCGO;=l+TwBi|JdRh5BUMT3F1C;$Gu3CpL!b?t) z{R7lQdUJpUoEjv0I3)|kt2zs%AwHuj_FExYBpHGjqGU1FnGpe&5iQe5$olJxeN8LH zaK9i5qju~GEzHeMfYZ!q$HGZKOf`FFQ8|<9i{~0!W9+tq0ebA<(czd&z>e9+w;>19 zd0;Z7Ne92Xj=N^w4Vee6DOv_$<<$M4kmuJO$A7=d-7xRtDtAlZmygg>)VsINhlWu= zetJPlK}uhfp=6)w$KP)}#V^5s4gSWgmE|J2WzJTpbAT*ITqQe$ku%AS($;8a#==)f z(BNXf^-DVqWyem{gW*J-X!loo5d0_vb!{N1)efX=$DyNRK`Emd;mJ1$#%w2$W5phN zr58`B9fhgffFs9}kRUx`$1tF(H|tYiC8v;3h;gEfDqBvcJp48w z*@5v`bV$&=Z(=ZJ#~?eHA48cr*WibwX@WGSZK8*O4gaEz^>^q~Z~VT<1Ov+m3+r)v zxF%O{R9rwVnOx&LJ$(Z~(;qLjCO6sw{PZR?Y*VDM{AkPz0B#H|0U!K}E4J%9(%IbH z35u&Ts)e)N+(B>lwF;c($)DO>u1p5~q#w=8JlcQYnvFoQN-)7eI}$QWHOe7M)+fzh z{+?T5u2K)(Tgv%tfIeil>LxdcWYMVD35PC}qoG<-c;#e{w}@vNQ; zQZ9e!_k(wh@gEa<8=dj{2BK<-!f;JeL%WgRWQpVg(0>A&3jVr(eRSjN#;6xP%;dI` z$OYo{k*M+f+z&#GET@W1XhPm7hJe`WLghn*l z!EH%QdHo}DSYT5~im*Hr?P}pEyv2)##qe8Ug@#()a$zr#7AG`Y$9i1UTpVHh)L 
z7Qy#XcZKbytUax=kV0`o6~dV|K=h5edPPA@IG&?TFr>+#UclK4v9ZFau_7jv!2e2N zKIa7Db;^2a5|qRRV^%x!4RQI$w8HA$5=j z?;zL*5igLvBuJ_twn2>3jP}0FBRvyc` zPJ}5EC`g){;KsmE|3QRT_?;sG>cYjFwkPB>^|s{gTn=dZ znzoPj5L(Y7UV&9Sh-REsFR4vkgU$u-7y~#IR7sgEWxHUy0KI^{z+PNhWZkIgQbhiU z+53DqJD6zmUg&CluG^j6WxBEFNs1%zI*{U|Du!g#j+}DBCK{3K7M&heaFHg(6`3VR zpO9}%WFXH-x*A73Hh7@<^~OA*xTs)2S)i&6|NB7@xm4_&hBrUHT&U3Xf~_uSS1;PG*DeJaQuTqon1;}GM}*NdxrhrnfOYuHO25oR04 zYs~IYc4ETyA@Cs(&5G_C>f8)fsFc_Ov{{sRESow$aN_|Gj}4>rgb2zJ7gC=)852}e zr!*uY$u3h_M%!^JGYWLGWnKOX>X61yiu!6zFdp=+>Y|!#DBh7PPL&E{$FBLZRD~=u0M@ z)gHtF`#f2&xYmI7CQiPKmXGGS=m*Rg3ME?)>0l>CKpy6-jl})VlfXcs=8Swh($kKghvJNjMS{NM#Oj~G z^c^nFG~bNU^&XU@cb*S+sb$P5feRViR!Fp=mDskq8f6UK3%Tt8wz2bBmLUxJ=Mkez zK1cX%%Ba;yb6&3^^-`B&fngv93b){()C*-G>LOBe%uf?Gn`Yf+p~0xn@gafCFjr>? z>;v@f2-naZnqLA19#*EIW$k*rp;4Ph*#$VPhPtu$>rqnfXy4q7fsw2a&~^i9nR3+D z^r0O+@J`SULkJE>+EF=9Dedyz2Jdif`ySh2Z+9d_Bi?q$-3jB6)mqI=hOKTBBiM`v z%}xXF-?3-&R+f2cjW1L0&k|ynV|l8*U+SwSqe(-irl8W&5g3@@u#wL9nAH=Br{OF7 z4eQoYQf@rV0qpDq9wQblnF^922GnxJ!m!bo!KT?O+7$q*@ZhstwdutHEKV$&i&Uht zDx9lBM@L@1gbBJfAq&EV?a~@o+mE&@IM$k1CZ*Vw4Jj7?1SYpGEp zv9L$R&o>5iSRtfF?2M6u7rL`D`FVdF%~ZgzTRAmtsx)_fq_+p_%osoB?&qfC#<3Pr zPHxsmnAREDPrHwxK3AGKt4yW_LhO3N|oFm79SSl zFybt23sPZOcbw!N7Qaqh#*X6X6WYfntt=dprNxED(XsY+4DhBebt}i=2A2dsZeeq^ zRW#&2;ZW%lolj3h*)1WFmn+&AEaD&x+wc!q$WP!w!3B z+fPJpEON7P>yMfec$zXnzBg3(`9kDq1`mm3xJ*$m$p|3 za#@;~?gVEVySa9;#hx^snW_e%sc#8M#d1LV2nq_6Q)ZE4(-eeA;O3ReFiL(?z1QX; zpl2|nD8?sD*?^I}11268%N=sy=v7rXPtHfc#<4D<5o#VXF~LEsL+lYG7->4IkgRew zuqJ7;PAx@A%kbcp%+#29Rt*Ev(wfU;7ZbRl!AB(YeYr1UzxqlF8E zXGjF7%tc)R(ecp%;)LjG*>f^4DyAfNN`e?`@eSo(PL~a4+@?c=<>e6t>4LRBryfQA zvhoNEue3{BTXE%SD@w9lJY2ZV6|C~s2o)u!nVUH1a3-Q6dRzJvw-wY_S&QScKREoQ zLZfQT9yo-R*ofjJOJaFrVn!jxINI70V5>|wbGCF3zirhNRzYmFEtFp@-N;>KtA(Pk zweDR<#dT2Psb!y~7>`JZXfEywmV=89q>gG2FNt`SngC}O)?b`BaB?t_pn~IEOGYFJ zhm4Tqt%>yYmPJ^r$fZ{muOky%Qp&j<)Ii*CQb!eK&(BX)7UF#yJxq?cFeC5uCS7!> zm{t;RgY-|a8Dc_6{B)KoaKan3g@%OzRdy^nsN zR*(|iN|h{pH5(Z|Z&_1AqQl^*g+I+sC0VU&O7T&onpQPHoHvZ>>j50tQ41($`1*wr 
zyxvm>*MB81m3Dt6#&ALn3NV%)qX}lbu1%#%>28W(pL)=|W znge$jT}{L!m240yT#FIt#xuiFKZPL3`~ow{4$|7_htz13-ibch(wED>zw;IFSorT)Em?5G{ZzF79ep zwgy&|y9!oo6I0=ydP*$VR+bJ{Qr3LfCH##F-A!t4C=;Wl)2NFd997t@(}-*bc&5Po ze!;7f7yVyfYeFgc_|ywX7PB@O#d;gZ;H8MN0yxqkRcTr|2<)<^G(E5z!*;fZlf~?n zj3QP}E8(Gv8C^}A1CLY@DxITsrgL^?SK}0B4l>n^96g(>HE!az7E0?&>NXH(4vd5~ zmRacvF0qtY`V(2w%Zi6$V$cG^!Ju$b;9d-AJXG3CWVD>K;1z0hMGhcv}~C=G6yv0-4VWZgGbs%GV-tmHzQOE6FHRz*+u zitTKFKZ7Gt)>~E)UR6A&hqW9g_?^OZH!R)UZu&J`pQietc1M@e-u#8hWMRky=X zzeZax+Ls@I29vlm{_+iS_JYXrl0L14$< zCr9pIzN>tvU~1=eJRSHE7Bl75NSTzoFr1R}LjwJvJu0f+!w$?&8_aHgU;L?GC92*~ zh0F~jwWVR+igrtYO>TTpP85qK9ZrYD4cRWVKLHw&+9ViVJuwA;Bm#k2 zAI?s~zuF14n4>h(z;7;RKRcEQWaPcfBxq zqZBNL%rArhNibfiTgQ(CW7g({f)<4qghYxQgXg&~a_Sep1QL88qCK}?SyOS99ezTn zTrz5a2;1QU1$lf;BhQD}nHKo_R%qhBIYx`v_z+Ag{P#S_kay>S9i5(j;__b(szaa9 zQt68ykqaN!Fxj^eqR(PwZ>&MLQtUekWo|-d982KZvdD#TME$(pU(p%hkqh9llf6?a zU*8LZ3ZO*AURvtc(s}SqVPs(?!%Ov{PoAAMT)Y$FEQtW6f)IHBO@)B5A?%ALe&3+a z^oY5oy+LT99l3%V;2+jS@dC(mSLO32<#Sh>qP{TkfF|0#gEh&`3nrA83#Qb0{feS4 zmi*g6`A+z@1^vVertCGq*e@LGV!nDU|H`Ox~voUk{ zS^Uc$-cuVRdHrQ|a(DYU`wIF@-pPvNn-}C%WSJ#G4WeOzs*yldj|2Lo-#5LpgQ5Ys zyZErkG{81{YI{DJEsDh_7V8|j%{o&1wd7Wt#V7DZEGm10#Zz*7)Wy~ib$=;&dx}N( zZ;f<^RQ5hHTTqLuvYVo%wSmniDO_pl;)fjbfYw}Scm8#bWL82%J6>DyEc?7{9 zBgx~fuPk_>cJlS1HN zBM8s6w@nm+rxlTJvcc>#x=>`6qIZvux{2g16_3p~ex} z1@P|zetF;e;oh&FiNC%wdP=+>d}7X`ydPT{NS!mf7kl*?AqM!-DL#B3YxPDke{Ftk zd`cJ?uyCu~g{-zqlX3kmop<{S*v)xw+12@!w$)Q=eX;Epy3?XA46nCreUa@==xGw$=0G>c$zYm{s?$hGA zqhfc}Pj2iSvVMK;sA#N=$2Bc?RJ1T0)2bIA=)%y}~_a@*9$Y%^Lp9RszZ3Y7hI9!3jz~Mx~ z;5u6K>uNjl^Er%5MJtDCb*X(}_7>FDb`%tFn5;JJuNiEUJgQX z;N>7*lEXn7PJxspL2gQe+{9ZF$SD|a0>*MU1!Fl}8;yqlSvPl7I7~JTkItUK%vjCA zy1C84y5`u;m&|vw6}j3mJDRj_g_>v0p3lOTxv^+)Zf$IKuvYCDKI06qoSi{B*IP9WOR&fZ1x1Aqq3BxBRCBY54f-KY!^YEfGHe} znS-NZmf$E}R6MeHBr6iZi>%C6nJjW&Gd$qgPM$~>DZx=OdvFxOD^|9ygu8A*ZQ`%B zwY7D{%GMPt8Nw5Oxb+{oa5!8Si?r0Xt^n#|g_G-JH4~>dcIb3`WJBF-q8e=PX zwH%${8I2>Y!cd_EBjFcHg%T9QFBA(UD1%=p6G|eEkYAt`euKWkZ!*^Ub#kpgRcka1 
z{_dYZn3Dv7P(t9AJ~r*fmwF9)=(S!LV9-v&2qQncHEAbdhFLoa3oP16Sh;V5RXYg< zHti(ruxa1I0lW4sGT_j@MJ6(|Z{b9S_6@R-seJ<%oZ2^V!>N6ZY`C;)4;r{X z4d-h;XvBE!V?;4Q`xw(PLF>T8JMhnj3!LcKE@BYKNC~6Kcg89+Mm&aY1*GL z3)8fZFdI?rBh0~c?IX;^1=>fr5Hqw7F^~K6(WHHd1^9t>6bsR;eTYTeUyK&*L-vl8 zc9gvbrX6K(f@w#w6mzr>uncpx53n5bv=7kA{S}z6y^obxpuLYvuuyv+mtvvz9xlTo z?LAzMi?sJ}1uoX!!3QM&2a5a`{@8KFO(~e*j_uJ5_9l^C&p&h|>SgEly$xF0% zaRc{P<5KM%+=$Dxcd!PRYwut!uF&4WO}J8f8|}DCdmA_7YV94|f~&Q+@k3mry^UM3 zN_!i(;acr&ti!e1Vcd@Ew8QujuGbFZ$GAZ|ggdx@Csu2Ra2M8Shwu}u)ehrstkn+T z9^9lI!o6tM-okyjS$hjV#Vy)f_!;-_$F161cmTI)Z{b0#)84{ESf?Gt!?;~Lh@ay} z+Ce;mA8QBkDDKeygvW5F_9h<3UD}&?0zc8-#FN}#kGr)u@eACey@{uApY|p;aQ~P1 zsrDu|;%C~Mh~a+iO?2RX?G1F|0qqTJ!h_ly=)%L=>)4E+Yp-Do_qXB^?KQ;lsP-DR z;W6zsZ0G(CJfXdYop@4v6~DrI?N$7m`@8T9?N#i?Q`)Qe4K`@6;%WR+dlk=Mqjms$ z5YrCex9HIRh-bO~96Gf>;&<4j?Z@-z()MF7Hf#Iw0=8)T@glZr`_YZKwjaO8Hf=xl zasMT3*Y@LO?9}$-5BQb#3SPmlv_D`!ey#lhf5a~B4>*9`+RJ#A`>)|??Pa`|LYy0p4UebP#qul=xFKfTYNBD!*jX&cRts5V6zX$uZZhV3R+Kc!UuWB#i82A5z z*R&V$8D7_3#9#4-_9BjR|8x9FdjThKPs{%?@!fB6rW>3{yGW%`HzK&F5AcQXCxKalAk{GCjH z|DTZQN76EV`Drr!-G3m{-{ms>-M^FR?{Jy^P9K^64wvcgaGCy2ADRC4-^ujDT&5r9 zGX3y>piF=L|B_7K|6iH@Uzz?NC)5Aa*XjScGW{Q|)Bh{e|0~naEYpAY|A0&fd#|5} zCZnEznVJ3|MSvj4X~)0zl%eO}mGT?|mLbZ5EF1s#KTr7o!--~- zf#X1z%3(Orr5(qCj+GjaxD(#rIGs+`=>!fgm~=WFM4h0M&II{AbfU#%=AWsWjYF_}%!nPijcOxXG!I-NVy!IeUK&UDb}ENREpg@qx&o*I6}8LF{Z%~p%f zYL(chK~{?udb7@IIuo|OhfeQtWpbsEo-;l4dRy9Ybzy^p{i;QB_^vZlW3yRpHl58T z19FzlW`n`1w^`1Ft?!{TVdI-Lv$ey7t3v%~1Lp9x#vLuW3`SGi(L&zTu!vn%bm%5=dC z_>i~}JVP}um(%64xLiiSnD27AV9hkUT)F?wk^Jkr=IXMgmp`4r?*j$zzSK!|{l7HRV$_9k^+Q*y9vBGNgrX61udD-Ce zk+?D74ApqO*

J>oo)B{$8&a_H3&+`%Dz`J#_Y=#RK@_z?aIg!DjQN9bY#1*x>si zaijPQ)%biKpU>vYwE&jlT%QjPk1f~J@83C+f89BT4JzV`12vVSKvC4R4W9iH z8UHo1k~37Js=2DFs4Bz3AXQb7k*lb=XQG(zq01OuRmRtUX)ez|Mn*x}@ugk?oBL8E zZd9D1nu3D7f`W{K0vlkfDkvyGR$fLy;7r*19=fdhno6!13sX5xIGu%Q$5mz_%Gj^1 zC2pK`hH456LxqLT!a_S>uPH1nM0UtoSTN+@Ig)?fWluhP1Xqj$Q#mfUTm#dNtIUCH z?%6K~vKw`0sAk~6!hr+b0|z<)$Jql14urSRHE_W2f9FX4b?2Qiu8u3l@>EU^a&pSk zj;qXaHus~DxKV$GYRbz8mX~`5muCW*|EFf!2Wo0D-W!}m`Xa74#@~^u9{~(qbkC;_m zhf!UW9FcW#dQT(+l6XXbQ70ZD$kxl!5i0DYgJ2-{kb!WAl@rw`tIt=CSC2bcjhgC{ z)ylWkC#wgQ2J8WQLBJj$5b>=l?EW?);Tx#pZuTpaPVECRD!l>^{N$3@>TqLZP5<1= zTxt*vxpbOsYsOaZa~aQhzs!{hb;2MDnJ`GQ9R|Hlfx)CxJVvulaa(LU#btHKc9%6n z$aGn=sMBT5rcSpdhdMn*uaN06<_ei^i%+(@Eq>YVF&aId0vIv@gT?JGa9J~5E~}Fk zWC{WlU4dPWlP!@-tJPvL8V&Hc-7Xi5&df}^GQz5pWr2=>`woli4oiVGVy_x+eZWes zD+9(mJcc_w1&C|AH`}Y&cXDz1&?fcC`3yZx;hrPPk#u;xx>~7D_^4sg>V&VHtUg(7 zuPUiNS-nOY7`|HBvu2>11-8F&gGxzL(=yNItegz5K;uG5#I#rtU~u?tCY{XE#>G0F z!3~4OK;W(^uPJv_m4wU7%gXGjNa>)cDk9kWntMyWANR0?vRC%m@e>fsDYs zX-__PZtth0WZGTR$na@DnYQ7DdXm-q;RS30C1jNO;in(B1B^a=hvI5v7M?i}^~Aa?Q1)n$kg0Cn+c7$#uPr+0w^fXM$p?_JL@Zc0)lf>193Wpox#zb9|zd>9bf|hPZaL z%VwifStOEevCydoJIpKxS!Oex3NyhmIkczJ$EyspKv(b6z&O!d9-O6 zk0wz;nwFs{Mn}gY6B@f@HCyqBj9(gKaOkoSDh%teSG*Tjg1|edx4_+j$qPNj|^x{(@ zF+i56V33HJlD$TbT_c+(5H38m!(EC(ry{O>6tQ!1^>T8xa&nDp$0FH`7*iC-sV~PY zMJ%?#606I~>XL{mMI9#wnN(V&)LAxyLh728AQBJ?<8u`kUpS)ADLcDUsRsLU| zy>})ETQ7<_X*YbdALq>kM;Yvj#hsBMPqi?HvfFv&lZZi4S6#sA1n2F zt%^L=XJ#Yx#p#ZSnHpU#)vwqoRsBqy?`It<=|%}7uy76Y_mnXfrs)RF4hQ88MhrGP zO|^44Vsd2AslH5>$Lj8+Mi&zpCKH|NifbQ7IJVE!8slo#9&3#^7CC#kG+f>x?UHxs zp3^_?)r~br%~Pz4%yX<)IIhUJ$+6S%Ue0@-V>#wsrmY#&XH@jEyw{tP>GkI5y*UD* z`W&xd@hNfo$fogjVviH|X4X%zj%^~;Y&?zKjeXeNm}YlNgR#~13P2dklO6P0s8C2{ z#BASOLub+@bQKlpcA5`AxvhgUY18o@<7%wDEX$cG%Q{)siMnqqsH@=T z_kQ}sJy%}$Q?f1N%a>m{an9pUKRm;?VZ+Gk*}JdV^WKGve*Dw+jD4?vw4rhR&PUd^ z3<5!e_P&@U?M9fK=%Xb}+3pCNU$+;8u}s)(p;O6#pwVJ8+k8gj0H@C@`UZHV0hXY} z?9L`|s0w33RTpAZ!oWi%OvZPYunUf=ni{1?>2Xw*^gOFP>!?!pgv;2?=%ZA!WLfGg 
zH(JCx`!xF{9$`|}Vr60GoUE0W%QJ7Zv}fMrdDLPwnJrdPM}ihJVT8bb_+Ph3JyZ`Fw25YGMWS(%w~&auOMa$f+$!(%@$E7?4UcK z2PF?ij1Wbb&Dd)c4a+KeT6GI<#_dpYI_x< zx2mc&1H)osb1|rt@Xs49+FPBDDob43A2F3z33suSZG`*+sc+MFpDWw$~GbiU1|>uA?5aR zXV5O#3BBv&4fJO}e)id}-XUb>qr#SN&waG_Au7^4Pc8!CtKNY07<^R6S0ouJ8gZl| z8Rq0{J5rGhb8<=@W{YJTWAua&+yD}l}(*FNG) zXddALlc_%@@E9XIs!C4oQTBwHj2rCWYTCyA`kc#i+jH;Acs%17^8xdl9=##MZ7s|Z zGK@|~M#f&NEz_EjX|-A6^pQvgYdT_m&`PaVTf|9{-Pmdq$tz3-#)&&(XC1Q7RF){K zl-m?h`KM)=o69gafm?A?cS?r2*Qt)3WC(2JPFP4~r*-q0WSReTS$3K?_`6UB3*ri5*&A24|*D4(5fX{>pjJ z;_JFL+%oN!{!iXYUq88Z{0+D7Ci)dWJpQ|rq)lnRdC$Z5c8;&fqJMd+_mUaCCtiN; z_Rhng7^i(8I;GvnMIrgHkLL8-h@Z?P0`c_sMJ&W($&`E^DbJT_G5QD!6t>Xfdo9K1 zQW)vDxZZT}y_PGvG3wsmt^77cLQOr&o+d^@#f!2@tuErM&8}6aJEo|MggLr7`h|`; z>I(fz?`r*x-UIsmS$3Vu=CEHv1jti^Y%%9yu>db%-I`#b7E7jwyfTv;Bj!g8sZJ86 zyRf<7H2Nv%Lq7$5=%=7n;q;>rD2hUrxOQwSTXQSxii~kmxyhGeI$uf*`}7ve4qlIs zR7ET`u9>bSu2nA4r6d7dikXX5YlAC`RprWJeRjoZ{-$tx>y*%yed(5)#+R9S+Wh%VHR z@0w&C(~b_d{0ip5u2e8xNizG;n()mgchT5I4I`({q9b?C>pFSqOE3FeP3xoa`|sYk z5dX%z&@re!=|)e1Dnrf)U;8RN-J zGLs55WF}ccRuPe8TayfYzH+>2S@pQ%J?ASY*q(=38`x^M%3hUN+LC2Wo-Tt~AmF@4 zIIop6vqQ#^fK!Gb>w-faj*1px^DVtS^+RpjgzNr#llb+9Tkh<2^nMe6a|8K^Joi%w zn5=yuW=p%_LJ+0&*;9PeWroK$kf||OMh2ZaaG)dLlcoMXhsDPfJ~vVv-@7JeJ9tWluV}?052!o*G}Ov7V|bQj+XI>SkO&Zb%jx zkTo`IZ0LRShozFClw6Ig$yMSC{W8;X^GeGVuA9+LZV_+PUu(L-e52)ut{3gkW;o37 zxncJB20l2D^g-rneC_MQ*S-{AZ)xzg?lKU=NC%yVa3AP!ALwu&DG+Y8MbzNnQX7Fy zu~A!`+}>5@PO-i_#rp1)5yagZ6NnI}^ETzDfbvs7`6;7HeycOJRac!6Cv~nH^ju2U za0SDSC&$zJCcSueR5fu_vQzSPjLs!yN-uk}~B-Nz?LW zSQd?XHo@!}$i;%kBRl$YvEZ=C0WKEgK4PJYtx!lPG-PmcGnQH^W60p48JT^gL0{28 z7A{@<{;u60ExKsU4|`9%{(A3;+h^UlX#VP(E}XY^*w}TGuYF>}b*mm1JOl1t_~09d z9=z};1B&*n-Kl|)-M9UQOqqYf^)qL$x#3%F+`93PwOzOV2_)@(jI(?wq|;B?6tIf>8hLE<&9J>E+*HOTZW%{-NjS`yNMZWx;BV7*hCZD*wOoo6^7WFP^Wr~f_4Ky> zrm2#?4Mol#mUCWKBzQsAwBUup;;f5u<^`|Fx!QM2&dt7iv!2Y^ne$QB`|1fbT2!v@TeX@B2%>rA$;^0G$AD-#mhu!}Y8AK9-D?fd)CzCT6#EKaIm>H}xePNd;X zec()_GGae{v9+#<>DtZMT#({|f)p1Nq`07<)t=@8d&Exd>%ymTL634G$pK0FPcOMn 
zslXlRryWL6JG?2N%4*8~u#7ZCxdIeT)`SAIS~Js&Dy|9pS|QR3FtMB(L5B?P$21=u zG8pA$Sq?k5EQSatZzrewr5m!YYMFfXgrQ{Uj*GT@OLWiP)^p_*e|h++H|PtGuDGo8 z$*Zn@h)h3e`jwjkLN1HK5E!_>ew%I2~=^B;MpvRdPE4^ zB{|(S-CT*4JB1&#%hhCL)m50vC znd_m8%q>4l_>j!;BNvd@7IFUEKEA)YOEgCQrm{R}>4N zM*RAev54?#%IPjEN$%-MXvpA-p%rYJPHp8yCZ4QR$A(H?BfhLb`8z5fxwQ3O_qOaW zUU-#Y`t^-Nb7;JqEXa2(95-xu_|aLz7Cf--o~-USK6y+oEXXV|>F~y{pqNIUNQ0ALr%3sZYwfxm; z5Y3`Rv;`ec~ZeSSJ!OPDsQw!Ms~{$SkZg z6EjOIHr+lQU8QT&QQePi2VPCEPxkSqWyhPAxqFnJoNT412kx5coSvE^oKAb#fmG@& zGMu!8R@tkn)+l@SSoiE%BPGH?rKCP)np_|AO`M*PO4A#=M4O=3ZP$(g+LsCKyPR$N z{;~6P=;gRDC}Li$*Mo?9=*@)k6U-WJb}k(h4U%#a6xa!+5b7uE1X}(wZ9MpkllMLJ zI{C{zWAeP^()O>%ke$7?bUL|f+oeCe8I=7;Drvg39X8~Wgk*83ell9`&Gk`2?TXI^ zJv&e8{RihsC)no$BogsAxI(Id__^WKpt3cn!N||o+&qU-N@4|>onF?n?|MWz+nwX4vdL&RT8x71%*xEl$P#3a;0h3j)fFIay*EIz zjP?M+>=cB(2y`uJ@^nDqwrl7~Zen{t(PB+76i(Y2JC*JVBmi8 z)i0)B6J61I{uQ@(ukP(2RkuGnsBYX(7N5VN_eE*DGxxk%z5Dh&-rM_ROWB5@gX%tf z?ENnaee7)Y5kSh5w!=imB+gbX+U`hJhI6ne_zcDejV~FgQKHnO*Gu}-CQjdH;`FJB z+uWd6bvl_@*BGMZxb|7Z#4s)Mow>~R&rWu1DHDcmxv9-UEYw7kBEpoKNt2OpUAH!v z)Z{tU?uaqNIrx8?$cTy0q@JIOo;IIZiTO0E7NS}vSeh+ME#mN~JKVG^eL$6--=;)P zwZfD9sJN=BgikUFdpZPTTquuN*lPepuZLcue2QvL@eMPV1nlMZpgmxJl%QK$Xgb|$rq@m+hNZV!>|9iS(#0YY zJIm_NVrB;`mTYFh07CT!lNEXcHJW6GD3ijFV~T5EZ(&d-1l%yRL?wNdWvF#Ltru!_5&byJD68>qdXI3I?k@ck!s9yGL2XuRsYEj+iR#T3 zOR1#Klq9`*l5G-+5K8rWgVAKNSgcls%~P|Z%|RX8=@YP!L7kGSkCQ>0%?4via-DzH1=62gL9&pu?|$*QiUjS+OSoXnx!^LkmB@- zO?F24+3aPHrfT;|M(JD|9dD}6>61Lt#HhMD@xDy*%29f_mRfT)S5j-P-ZQ8aX=T*- zE10!!pw|w7Y6rLys*jl`*T?#wQdCU3eA!_&vcjY#eE*g}m9;3qt>IfLs;p%dJi58K z%374Pc!#6Qmo;Ho6I*73&6mZ*IFJ1ZDYYfl7Q1Q^Qj%o3hE|Y(J!mIEV!xZ@lM70- zvWJkFMB35Yu(7vM+WyUFx1TfNKH=N1$A~X{GekW6jS7l$wWFd}9D)9*5c0V0Dl%9M zh1r&z!U2}T!Ya#9XN6~2;n>0^OH<)O%Ywq@(ss*@1MbbbFXu^%vwy;V!i|MT6R&GL zmc73Jmh2t<_hj$u{|D#6{`%T1;$xc2&Ir}vIJL$Z!sOz37W2FO?r>4z;3~1IXsmcn zQG-4jzEHm)e2ICD`FZnKmaoG0iosSQDkb@YU1fnx_sjuH2G9ZC5^Ih1HtPdc%_==$ z-Dv&PDp+@<7JFM8to%(BE88+M=C`s=WU`ZRE8neJWwx8OhLYOT>gGhZxxs4n3a&U^ 
zzsX(1=`mt$a2FYkqo%s=%=CJ7NRI_|{fuQ^!8D*nX~9V4GNQL%o($HaQfXro1 zK_EY_eat(>A_-lQFEU}3&kRD$!j#W;*$kigaqUkL6YF+9?{Gd7U99>1IK3cZ?H6IE zhia%av@s-A#kJjCRx6zvifacFksaFcMDv>(LW8RKCfpYsTw1leiauCHs$9%s&nkE6 zQ#|J?aOahz4yj5~Tg#G!l8V?HO7eHf`()~uYh)^CrVh?B)8|}?dImPgR;~=WjZAiP zMaXSuGM8JjmCb?7Ee7(S%2O(dy*829nufzl)3Pvs)1~Kls*o$J@O$sE4gHaDP0z`2 z_(xKO-xvDNDBXB5hKTrjy^E^lVy ztW%dh>#X5c8h#qU4$;}3l^JV>R`6@c;C_ky(Fn?pI$c>#XJ(cw7!ueM7&=6# zp0jP?#+_%ko-<_88}mqc-P%=`=f>O@zjV{u^%E3>D{rUQHEYk38D$qOnE!An_xh=0 zezE%eYtPTLT5|FWj29Q56jyat^@}a@Z5L-*)8`-pKu2{8}+9POB(Wc)YTX7@c0L$vsl(W4dtnF>kn2>!(OwgFF8v6H!bNQ?sef zBqmn9CeE`acT$6&;OkTqvxqXHF{NG;GqJM?Mmj8q@sB*!w1U`ak_hbyBHYqoFNY(w zeG?PMtr-pAoW|1fRFn%T3Kx>+vDpnm0j0YMSw|iuF>;KEeo{lm6M@9FUe2_H8D?`C zdl63EgfH`mgHbo()J>Q^W^BblvsjmjoAsAuvQ|m9@9YY0Ht$RI%d5w+^%u`O5)SjV zSRaO96kpR*W3S>Cb@l?tvgKIj=^{qO>P#45Orq@J1{hgK!BkneswP}klX#z~+&;9t z%txIHf_eRfOjjRkV)5f$%R6RmTo&p5>{mM%(ZN%1zvQV$S6=dzwEg7gw~fDTZ)@+T zy$60q?%Fl==I$3>dbS$`6SSj3k2nH3v?X!&?$VB>Hsi+BX55g9*isQC6=749PS;05rC^pJ5!|XObYKngEN1hj`(#oI;yj>3Uo$RfsSisUFsNHXHK$@Zjfj2 zp7d@oL56)c56hGPIEbHfS4Mln;pNAf9dUW;M8aNGqBI>*nvO7fT}GOgMbj_8842L9 z+H4Nd%_&winH*g7nsjNCVR_0J{Zj(u7Bi)g}0K`uC4n>E-^ku=(QUk@+?~X%qs_;z77;)w4=fyX*=x5r5RjOUS*`B zrNA=SQfrZhWDfC8qf?BNGADcI(K*sw!|cpv?{5Eo>5mx)v){{jFY{B^pR?cN5;4o~ z59cs3TA#yYs%{|7w+zf0M*mM=*8(0zd9MF|?(@&g&SiIJ_LgLm>~2CfLP$11Fp@z9 zEC>k5)=O5Y3d&6hB4Py_ju+ITrS^EVtu1&tUfP}-NQ4!%=A5d}@xnuTs`hAWp-)rG z(PDXgXr30_>^c8GGrJSiljq6qx4W}Dll{N{yS(rBed$ydP;->Ip|2&rj`GU&%AdGD zo2TKafCj^KK+;qYPe{BDP&h$Lk=ba1sjMJckq!tBd1^G22WefMU{%#GRku`LO>5eS z4mI7@SLppZQ)lC?;#gPRz~)OQ5e&AoI)^uwPOPm?vR14UdI0N$?&Tc1ms{wEysO)v z*EbnLrRc${kZ`nYf8I#(ng@Srj!``~{|bMEKgySR25aA9p5|-G7AAoKU+ZjbqLCyx z<6}f4qqVt9e0CC7S1!a#SjzehXSYTavy|-~bqY#lK7$EG2(-%jSQ5_Vz80 z-}B?0XP(>*>HC+=rUIwuQpVeF{`>16y!|Tn%?rxMnObHR2!k5RBpPScfkc>EP8ZmM zxLjIEuVrr%S4u({M}Oq2E|y2_rC6#-5H8+$pFJHKi!oD;nbE0<*~Y@y?8FkIH@Y?& z!iR$iM#3P~$^XCTc`$F;O0oPql07S4L6L%kY`kI${!p>0B{!fzC017~4-cj@IedQr 
zhn;n>&OGD=#QXDRN5vmh;Vi-P(^`tRQ!P19UpzH)&6J(ZZ=M!?eer+B{|ZCDdlv@a=f}~$yRY3@`hZ#@&&c1m^*NZgo*aU8kcKjB z9{>ILKeXiY2XBB6-*dqYPh-3*_K&y*zedZ!uAs6BL zn8Z~C(jhh4#qw!VG6J5AL|=jjK|L6Q2t=~kF~5z#DgLQ$t(zE%6WJ35T%gcR3Ua$} zW%dQP*&$8mM4l5kiqk|R4s=e9!z|3UY~K#EeFd;F{sKT|5VypfGqjH8kXoeM_pL(? zcarUBcQWzD&JIkSOzUX3c;ctRxbSS>?%l!IotrQ2jn8Obdf|~H^bdCSt3AHwMNz+2}rMaOz zf0=ZHbQ=9F9GKkL+BBpZO>okJ3-&Q92SCv7lzz)(WfNVp?rUrk1x7FJuufWL3P7MZ6HM(;?4(?06x3 z6t3HGy^!S~cHDAnHu9=GiCsGFo>R_Sn^c$v4RxME)_Dq9=PIO?uS>n69#M~~WtFK@ zd(_1$t$Jofbv@POyh_Z98pCR*Hjtu_1im7t@=hh5NsVS(H`Qb3lwDM{EPV@IQq}LD zh=U>)XAIZ0jk+A`XivA>s?is>5dhGn0SG2o63Hi2BsY)>Tp5{MIM_^VN6^Viov_~M znsW731>9(b@o|!8HaUDM_FGnGhfXSsGX5HZRnJT97i`*gZzKRW5B&DzjlX)}rEfmH z^0#|_@$vVc-nRAGfBWW^XD^E_Nw;5j#q@!1!|o3sg>dJi{hzP-`;jd_rCWYA{L0(E zeEpZ$p6mdCKF%HlA^3)apPS|3!LXIXmBlv6?9)sqJ%>K1FyyJQ70p?KF6$whg+NWP zd`Lp_gm^~wX_f57Hg;29PK$QOv|L$)!y+_EFk{+;8fhlCA*?(ihEa4vl91$1jN#+N zA_kI44-vtAM2xwTWD(OUkg3vB9N#Rg#T5iBRX;K@^J?4N}HW`x&c}f)W6eZ;8 zS19d3^jgQX+)3Q9l>~SmBt!TW7j8anTf~|%9!ig8*OHxf5<@@{^F@(#bWwQTB@us& z-Iw(s`o8R`iCX1S_T5fW*aZ#8js>^?pAK+x94dkuhX7M8+d&pLp2YV{2q2nOi*(!2 zIu`STPLkN;LikR-w%(`{b>YJLj-fk-H~)OW&@F2(d7zs;So-Xt!c&ixuBIN}ar?3d zx0Mb73fv0->}C&QUY$Y&*{3`pm-d7zn~Ef9cGM+XKlcGarGU)=3JbxbUas7{0c@(e|b9DS%f?0!N^p1uPDj^!A{@o5$? zyO}-A05i-SW4K}FBtrov$&4_^7>03h25LD|Q8k9B8iVj9FvMvxp3`J7fPvMF!Kehn zYR4=R&Q`dM-K3hkyXQpKVPi?r1Fme!e$`L>0E1W#mqd_8#-1LpCPMGf?;RQ%Vm>)? z}v~xo~6G;Pe@! 
z!D}$r>D;$Xb?zI|PUBZPY~@t8j@`{3Wtqk7QT8ND*RdPeezwdq5a3o*$0*=WBE})y z(V5!~;4nCe{~1`-Lj1jIA!>XUf-IXG>j-Y3HRt3iOAeedr1JBKi(?OJs%!^{GW^i8B9;~^0A8D(uk^Gj9MeR7TO8&3G-xKEDx zl@M_}3 zV)?)Ag_4p>Go#F?_y_AxN%mcKEJ;~HvQdo0lOjzw)+V?xjwpD@HO8VE8cD-l={;#G zZCSBEdY2A$hUDoHlA{xbfaK{RQs_on_^_Jj6v@_QlC2YNKzEpZf6iWng~5oIWK~Nk zT`*M1N7B3EFiy@Cube4P&J@RVtB%hUCoU^au%dXeJnlHrxQx#k_xOl-5f=6Xs-w|6 zNu%dF8{POTly6MK5dblaM%95Huo%!-zug@`Xj`B;jl*Ogn69V%+>7~7*&*UG9rY)H z28gE8MYv_Ke%{1j%mGwM+~tE=3r?$4^>Jy@oRt@T)VC4fbfBjP*Dh-(bkd*K^W2~Bm7E}Xd?~95I~TV!bQ4m|2l~mX|A?Nx9wZUIe}bzPZo3N&OQv2 zW+{tQff5Hyk_W#le!TtZHJcx)yW`Ct|75VSch-h~9=hzhi?`2YG7m4h`kKoQKEJ=z zMEz*p)iWP{s`Lo8Z_AcTe(>GW`(Cv65i<*z@FS8{1X-F3QqO8d?T_@IgD2^+AjjZZ z&IU=z-KN1uwUNlNNIAkJg-{@58VQz%oT(r@y>UXw7Vz!-KqX{LT9Zhe zCXqNzs>Ep$v3F7fIf8)r1mGuenp7BVm*vCv~fy6%?3d}*kL&I;9ZU#P~o_ITJf&JO(+ zS)`z{^5X<=r%3oV;RUzi(wWlG({rL4znPpaA_4J;=Cq8?1>#UehO2hCUZxLZampYn z$V8B%S%at)>*WR&b#nPApDyiq;+79rJbsCWhFaEMbn`Pz=8@;;Y*^U7t#mVW&yDM6 zKlFC#B|w1-%g31}W)@ID6qd2R><^Q^#vtZ;Nd|;1&`K;t$qNRLqVhcMA|cORE?m#8 z7KEHO)0k;?M&@V>j0NVLNH5zfF4YP~!CV?y&#o7*)7Bg7&Fdn!!m!A(%9Zq`?4{^R zc^$oyU5VDo$VxE0o{*%_gj_S^Lp(#3TvN-3QgM=3#)-P|m8Nc<;F>N~;893KAi$7U zW={mSHe=Whq|&)I9s*wDlRV8==u-@aAC1E}>661yRvg9yo{|MT-3qus`|Nxm1>^um zGYn$Y2x|cnM5zcyaF*Nu@7E-)FIyPP z777#f%Nq%RAyC|5CoW@`iPx~#hzu-TPAWm-dnMgLLM6s>8vl7;W!}v*2=53_14<>aPrro4~S zNzJ$>=>ghIU|Nzh69EYXq7xJ<>Qks_MWGV;XlevO4cTY~>PJkHs8!N-J19T0C9GU1 z;y!jt>HMWS4n>LZE>TI*&L$Y_pgO96xo47GjC$8ZiV+0#fJxwpfCM?3OBCsIDqf5vkXTnd zA7+L_q?Wr$r+o^urZdq^3!KF-Mn{vHT&}YOP1^`KlhP;?z5CXizX1^34FL5zGYjaDbYt4*P6U-!h`Jv`VS|4tE3Oy%3+4NNNVCJ=q z+3bbu4W7{G2`Nt?$KVBR@PtNBNO?jHp2cI-c3mMfrDep#l9@0gO|FSyFts7tN{F{; zPjqqg>ge;)BT-I`)7~Q zbA+qW0(uU@$-On}YN(n-m}i_4A(H%m)DA{+?fC8O1a6Nw zx7eCQ$&8+862CK=#P5oda-1k3PVlR_F)iLK zAR^$8X>mH5x#cZ~yhR_(x5UV`)Hk){R#Q$XbB%NiTmJj+lTofU(#>_6WnAPN*}2_!g=ZE zBO7YX=$fGgHJWQsl5rq5^KraO!fyOv!XjEij5{kp-CJ}L2CJySf8XWiG z2~OS44M{gzvfHKvo zLsO<{GS!)zHr+Aj6~Y|@%~mbtL<5L;Z{59zE4fxNbbv&FIoQI10|0={Tgc*u~w~tZTjNuE7^x2xgmWAK$IbTKNQZ?M}$A8-HMD 
z>3a)XW7yB!Sw2o5V=^ED4>+7a3{$~jD@P@RW=8-yt#Xv#2XN*q9Xf|fvs zIU%n!eKDHp%PY-v#KH++jAWM<$tx{`kVlm=N{jettCCS#A(uvSk$;PT78cGZ75tT# zEjVmJYf%iVh>4TQ*vS~RA+{$r5G%(RTrK1%h3qMX>_iTOn3<>q4tM}qQXCPFiHzt4 z4q_#6a7hV7s0e&&5-*4(n-mG~FD{Bsfbv{)|BGp(6GV_&u=>g36jh-( zryGOjG)9gCMZkl^F^Ac*9TbRN7y<~=IsqhT$>fL{^ms*LPUS;Wrc>|GLl&_H)38kU zY<>5tCl_ndkfh(ZWXXf)4m~z>(fY-mH&YLl1|OI@Z^^Rn+)H&4bT@{XAtnP5b*)nu zCRA%!s6Yka1?7(`%Pn~FP@Xa?U@h=P4MMfHjM667jajS!kibC>0ag@PNUTTD?voDQGDa#a5h^WYx7R0M)B~hc2A{kU;GEG#Zw68)cm6XDi zL>fkmY1N{9_d^rTan1Ez*Z0Tw`{Va^b@ATsea^YheeV4{-urODcC&d)%&LP+qpob5 zaHMYGp2SYq{=6pI?&%z#t5(}_cgi`H zV)-TAqVaFnG@QTurE=S)eU&Q;3oiAnarX@_-~6aUdQ$gGXKM@|Z>v!g!sci?TX&8- zpzU^Bp4qp=%x!7*jf@k?Rp-3skLmnjhLZ5t5nHNMQDfc4i>eL%%{%^ma`K+v?CoNG zGoEiO&DQxkK5=}a;w-a1N@?*4k%5!UHtP+IP`<6TGhEhrkh5k-Gp)`M>-%q;qGPl? zLhWgntP@K!<|pS2-g43T*q1T6y@#ijdM()AbD!eLkfYSj znRW5@v)6qr;vV;|wOePDH15-|3ws0Z$JdNm?(pu&kWH%Pp}HDhSFbKJFu5DB{rUWP z{V#667W4AV*ZtG@S@k^*S9NgiPG~zHSr^nbIMt+b^_M;qt`}NQc39po)v#O0;UmU2 zQz!n_)ot?BP&QrEXx!Yl-m1{lUMY1*L{Y6~c%5?6%Fgdq9zC@vnR@H#vH@qW*u39a zGsLpvJ#Q1+wEiw1hHI(Z8}gx6^J=!?L(aV3;BLRwrv2-!<$31|Zb#4ayC=&(*6C%^ z#d=MjUZsoQk-nN8mPJ~68CHATtBX=_R(llGBld0b^K*oumTyg(%PN&vd4i$IIQ5Vl zuliTnbdDPAUs8E}Tyo!>nyZSpbSEBqlBK-yczw6Ubx~;p?_8NOD)-B)=#+40Rl2M7 zn|Gt%DFw`#F)lj#VUpgK5j7kri&_)!f_zSyXVvP-_+C z@Gk4+n`S64OBo*3(d)w0t6ltliQYRhvEQkvJ$a=KlU+jOA>(H3^>IqkXYao*?poaM z+s(!DQN%1_Y?jwSP<9>0yeeTdVZHJQF7e_~fLpu+l-=11)hW8NKlo7vE@ zq(E1#VZ@sO^%nvY#k8-bLhkO~J$r0Sw;z5|r<$20RFGM(oR0M{RoF=e>;FADe)e4j zHOtO{b3**)jEpTm_PZ;;-^#0GvWBu^`1<}k-l|=%-9=5aPQPGTo?4zhM3-cO1I5I! zl(qL1PbxGP#XKL`V4oejxa_tAf2MNzCJ%>*CqZ^o%%a@1z0Rcd&R6Z?;5>Bm)AZ7V zQ95Q{XI6&|P48zC`_y<$zEaef-*+0BS$ABYmg+EcgGP@Iju~#j>(6B=r|K(d`2~d! 
z&R>7&O-ZJ9YQ(dS#;)-lmmPDuIC=Ec4_mhG*sOW3>l#(-17>4S=H`w&XWkfV`PYZ- z$r-+igF1MfP~ID)5UH4Fw>^3AnZIq4G)DIN^kdkBC7o5bZySE=$&34Ec8kF|qiU;` z=S{viDKT)@O^+3wGiQGczQ1){#+G$`)QZL)%*+{OE}EaWnb5`gV_)SM({Q7Nc;;Ld zn{2*3%gEc|IBl-C?fKIOvY-oVOV@{VTlZjx)r1qfW{fk?>6zd&KWFcS`4*KuRdQcn zop3>YugAc+NxE{&?gx9Om7R3_dh*#**HFD{Llv|q*4|WBjVUknk%!prQ*9hlaDLK5 zm4MS5=bS#jIC2kHHug&L?$JGb?THQ%!=(PD?6c8DTIrJexryVaFU@|ksm<|ni~l1eU*C4clQg+;0OWgb186uPk__>^w*kZI3!=c#(Eq^L#n;T; z%X6Vz#*>^rO$h(q+koR3eU>G70eTxyj6P5B1TCWof{>8}EjE9Iv^9{vmc30|1F^l$ zHgBPh6wS<)k*tgqT7684 zGD>JckWr${@%!MpGMbS&ejg1paU7>FLXQd&J`!aNE#oQZD#7t0FJlCmqd?R81RMi7 z7aCQ_II-2lJPEk1-_xxYY6Wjujru-y(~iEsYqf|#%Ah|*%MgK;InpgO2zCO8EX>Pzv2B(&xba(3b&PxHPE4YwMS13_kC4ide7+b?uaf$(Y#%#5=#-xvLsdK2+S=Z_RJ*ROpeo>5(d=i1fjb<;iWm{rw zri`4aqpADT%wOrko-+skbUkda_MqL{4tU?&{xx-_mgaT`n;q->Y>7z|RSmnvj2pIn zRq7L?IrXc~7*;IXQtYO+_OI*IdDHxRWvh-Tm~L4*PiM&(!?hv($|{c^8rUIz&qLf8 zfzW>Ee@mm4e{1W2s^$N>tAk_+kgg6*r{CIIKo^zoDf<}<7P-61TQSST$8V{MfrX3Q z&kI_^NP7WG32MN}#*(1Rpa3)7{e1%bT;2U;hK2@Kem<_V+~qPy1FI<}G6Ng;K)KAo ze5s44`{ZvwjlcaghoMsg<)*XbE^>Eis3|S`)<6b&Vi+{HqG?iR8jLe&xBvr8{d@v^ zCFEvuo8AJku#lgNx4*B8pS!nf5VW?D$^8P{rD^_+<0L_`t$FTlC+>L4soe}vaWbMZ zI*o`6Tpc!l&+^;4r`9x%>SGpBG|;odhOLjY_k9&LbQmMST>bXligy8--VJY*)G9td z`S3(3JMW@WIHzIqVVo85OICGA;+-(2wtT|2>&JhK9#io~dAeYmPwVM>c#H{k^6*g0 zIxZ$f(PNBrdeGskMn+`nt~fU{J#THXRgz}rXRG77i)MS6)Mq~Q zFfe|s7S>b!!P}hunYG$^R6JP{`Tul zRxP=koASxisM~`S#xuan?o;23Q4?-uI_0Q_KAApxw{U)T%-iz#nUz8L1%Il3c(z*S z#fyE%OR^^!xt2!Li+x;=_vIUwcY0J4WMS!l>)9yn+odlrt$Fq+LRzA9omj z_&XrVJO24=SwZfJy?Nz%=}!jxg-sixoJPD^P}A7&;>2z}#t2hF(+AB8oM@8o$f!TI zjI``B;$)@!qU*j#1A?9&s7(_mtkx}AUy)p+`|EYH=&hcMyj~2tsWy7;fMI_BK zyOHqq%d+X?^yJoM$Btyz&(BNmRPlOcPyNc|h}FE!0r?*(x6>UnPB@P)bm+cDyUUT& zt}`=S=fC}$>SRj%=6>7d@k{Rn^}Q2Z){L!l%1u@~zxm}g!FPe)yR9!HP1JOja);wS zy7eubcPeSqyJ>L|?@O{~3_f%B6_HVJlrfvB=NZJ@`Ep&e?6Y%Gy}~Mc=dAKyJ4OH9 zg-L%A`GJ_e<&Bey8nao3BLsij#IdglNO%6F~5WGTke|6 z@~~+C9P5N3q0##ac4;0s?Djm=R*m~(e!-f)QJSSkuh>sYoIWJ%P*{Uf=J@-4SNv?` zbfq-PsOXLF$^N015i{PWEco+!^~Yl$_gF<m-xumu8yA{$ 
zbAKLPNdI0p^?~Y?O?8`1l2;X0#cr>9m7ZC=!neR_naZaZ&mM2K&@>cWHI$<--pvob z5gHuslJ=@&(J#4s^VWS~49~S^ASHt&b$J4d)7+6qMsqDDgAz!|g1QkZ|JX&zUb%w3 zwD+V~%2+nVN2R@4TXh2aH~U1kyRyJ) zKw(e0}i5@>EVvv4{ z`&xyhk3K`&<5#QbLYUL%`7oCUp+4 z2A1H?T$VHgXE&z}4Dqe|S#x?~(wv?&O=D}`Gzr!#ttICm85;7G=7wt|0bSMLFKwHW z5pXW>!MEH7JkhsyVSKxLY2|Mv$3{2P_m9(sIGJDsL6SCUG$9*{LQpUK&5cWr@|z?! z`+A5=nw_Gbzub7Ci=Wi@ZMw_1-vzRH*=Em6avTKhlBaLw>Y@3XjgP7M6blz$@ZxUJ z1jtJc?$*Y~*4xX~$ITrg{co+@&=zl^L^Ep|Nor_lI@%;392NcQbgGlS^{h7reGSxsM);I zVoSjIO}_&D+&xqzL{0^?=G#lgu`JEXJY?TtklQwWCi8BFu_DBR5Y0=ke_-${M}UyF zhH)Gz5|CfCg$cBjbF_xR?;K>kZD5>0&>S)!An+(H5{yOYlVBqBi<4e0^pO;S!3cqd zdPQqIhT#PUSyzHVMvUOWJ`kFr{hYu8lUvWniNGubkEa-dM`)3{MiR(A7>*>su(X*E zU=#snqxBj9gK7^3V|ipJ z%`#Cwb08WR%`{CSYk>UmB*JF~8gMe`-UN|lkac0K$RRLBWGL*+MTUbiOdFb6k)%-h zV?>da9F6q)N7eu)qA(cCpl8Vv42{YGz<3_vAB^Rt_KU68U-&AK%*5z?pcy2 zpwx%Z%u^W6BIqiLM+r1)4`3uXXkBI@&4Mj%mJ3@{3l0}&1rU6FM31p%0k z(kw7Mi_a%u_C;U`On*g6ggSLwUWv4Hwa^9z&O<=?$3Zt>s6e;D1L8z%U6B*8^AKqP zlSydXg~^LZgMmTUpz-~RG>7T52!(oVER^(7{(+yt^hgBtKzP7{WVAE39lroVj>>@u zVvnzj+h-BZ99tJO6O{vSZDXaL3IAPT`?5b?H!u@FX3m|XER z&KFo0$D`1Ak`)CK^W7rfPK?Ae6h;e=`z?`gNA9J-9o+{nqHz<@BI0r-;679oC@hW; z!9*bZgZX&Owu=HZdq!!2APzY<;4@?(ZSgpo649}c&VWTl@JNwI?Kt2;m+7{6u!d9{ zKx`NUnK*qkjnKyt1ckz2K5#17d>k4N!C0uYw8aA$f%!6sa^SMD4Sj$oT|lBR8qFa9 z56d+G27w(mmP2C(4oWB38URE6AmD+4!R8YQ6h^kATX2zz#($6sQD|NPFi5VDHDErR zJ|45NFc!110K;r7zy#?gNqYU0FGw&fcP8O@F^q+3JeJV>aaVlYTbP#AMKu_H&(kf3Ap;kr*z zEG`Ey7~>0+WKdfNWee1=!hS_G?~%MYnzI9a1R65{jK%Z~o&Z68GT_npx)6(E{DV{+ zlLH!_=t1~G!y*_@A+*Kz3-LXM2XQA3!}3eW5FzMC)`c7q%}oIhtAlWmiMK-wloHYT z7%VU2pnQPFe=whb*8c#8`&uY}qHzw)N22itjD=_lrG;g1nS>Y^(_aW3v2$ZNd~Y0s z);2i`v=GxHkPe&{tmXk@p(2LT3?U6RAC$u|KJ!pr!p62E_mC1}XAY$x90pbboe#=Q zI3A6~AQU{ggwxDP*BsL8yC3{^X@kJ9`Y8tyA!di*iUh3>ft*6HhpoX2I1H;G9LxxVLX6X6kQk06&mYF6%Q8jD$Jc=89squ@bIlrK=1!uSGa1Ba1V z91Cs+t+jAq!LYa(9>c@=2lWkn4cz8HMuOQAs2rm85)M31JM@WYE(Cbk9T31s%-2Gh z5a$8IV15R&EzAxXFc^)?Go%`*ji6bq_RrBQxIJ_}Df>fVEF{k;jHfUk01|}d#I(Snb!FflR6LRS 
z7y^pJ7z_%YD2$}B8VZO8#y{}1*crj~0%otEo`}T)aN=0L0JT?)2OPm-c9$arEDz;K zc&HWG2L$+-oPw64HC51j4!ySm7*_w|piGVF5u}nBuXvHd>^Kj$9hGqb@)=Zyz}#ax z4VfC2$3upL*0Ui?hK#x`58z2)5#<$6(O^(fJP0unxraMa3O!4JL12uIg(E^_jfdxs zaazDUx5AVAxp*yc_miGgn&q`pdKLp7m!Ik5BbPO~Zt3X@b8inHczUz>adv;Xi=Vvd TzMO}9P=-|*Hf-t)6P5n~R1xhC literal 0 HcmV?d00001 diff --git a/w64devkit.txt b/w64devkit.txt new file mode 100644 index 0000000000000..025b5aad40069 --- /dev/null +++ b/w64devkit.txt @@ -0,0 +1,394 @@ +~ $ cd "C:\Users\Caleb P. Nwokocha\Downloads\llama.cpp-master\source" +~/Downloads/llama.cpp-master/source $ mak -j 8 +sh: mak: not found +~/Downloads/llama.cpp-master/source $ make -j 8 +I ccache not found. Consider installing it for faster compilation. +I llama.cpp build info: +I UNAME_S: Windows_NT +I UNAME_P: unknown +I UNAME_M: x86_64 +I CFLAGS: -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion +I CXXFLAGS: -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX +I NVCCFLAGS: -std=c++11 -O3 -g +I LDFLAGS: +I CC: cc (GCC) 14.2.0 +I CXX: c++ (GCC) 14.2.0 + +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native 
-mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c ggml/src/llamafile/sgemm.cpp -o ggml/src/llamafile/sgemm.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c ggml/src/ggml-amx.cpp -o ggml/src/ggml-amx.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c ggml/src/ggml-amx/mmq.cpp -o ggml/src/ggml-amx/mmq.o +cc -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -c ggml/src/ggml.c -o ggml/src/ggml.o +cc -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes 
-Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -c ggml/src/ggml-alloc.c -o ggml/src/ggml-alloc.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c ggml/src/ggml-backend.cpp -o ggml/src/ggml-backend.o +cc -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -c ggml/src/ggml-quants.c -o ggml/src/ggml-quants.o +cc -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -c ggml/src/ggml-aarch64.c -o ggml/src/ggml-aarch64.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=nativeggml/src/ggml.c:87:9: Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c src/llama.cpp -o src/llama.o + c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wwarning: no-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c src/llama-vocab.cpp -o src/llama-vocab.o + +oreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c src/llama-grammar.cpp -o src/llama-grammar.o +"GGML_CACHE_ALIGN" redefined + 87 | #define c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN3 _WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c src/llama-sampling.cpp -o src/llama-sampling.o +GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE)) + | ^~~~~~~~~~~~~~~~ +ggml/src/ggml.c:65:9: note: this is the location of the previous definition + 65 | #define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE))) + | ^~~~~~~~~~~~~~~~ +ggml/src/ggml.c: In function 'atomic_store_explicit': +ggml/src/ggml.c:107:76: warning: unused parameter 'mo' [c+ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual 
-Wno-unused-f-Wunused-parametersrc/llama.cpp: ction -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c src/unicode.cpp -o src/unicode.o + In member function '] + 107 | static void atomic_store_explicit(atomic_int * ptr, LONG val, extra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src +clude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILEstd::string llama_file:: - GGML_USE_AMX -c src/unicode-data.cpp -o src/unicode-data.o +memory_order moGetErrorMessageWin32c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGM +n/common.o +(DWORD) const) { + | +~~~~~~~~~~~~~^~src/llama.cpp:1717:46: +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/arg.cpp -o common/arg.o + c++ -std=c++1 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual 
-Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unalignewarning: ggml/src/ggml.c:d vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Ii In function 'nc ude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/log.cpp -o common/log.o +format 'c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wn -array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/console.cpp -o common/console.o +atomic_load_explicit%sc +' expects argument of type ' - td=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/ngram-cache.cpp -o common/ngram-cache.o +char*': +c + std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iincludggml/src/ggml.c:114:65:e -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/sampling.cpp -o common/sampling.o +', but argument 2 has type ' c + warning: DWORD td=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual 
-Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/train.cpp -o common/train.o +unused parameter '' {aka 'molong unsigned int' ['} [-Wunused-parameter-Wformat=] + 114 | static LONG atomic_load_explicit(atomic_int * ptr, ] + 1717 | ret = format("Win32 error code: +memory_order mo%s) { + | ", ~~~~~~~~~~~~~^~error_code +); + | fatal: not a git repository (or any of the parent directories): .git +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x6~^ggml/src/ggml.c:02 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/json-schema-to-grammar.cpp -o common/json-schema-to-grammar.o + In function ' d -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -Iexaatomic_fetch_add_explicitm les/gguf-hash/deps -c examples/gguf-hash/d': +eps sha1/sha1.c -o examples/gguf-hash/deps/sha1/sha1.o +ggml/src/ggml.c:121:80:~~~~~~~~~~ c -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX 
-std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -W In function 'po ntwarning: + | -ariunused parameter '|', + inlined from 'th -Wmissing-prototypes -Werror=implicmoSHA1Final t int -Werror=implicit-function-decl|' at a tion -march=nat-Wunused-parameterexamples/gguf-hash/deps/sha1/sha1.c:265:5 + | + 121 | static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, i -mmemory_order mo|: +) { + | tu e=native -Xassembler - examples/gguf-hash/deps/sha1/sha1.c:219:13: m se-unaligned-vector-move -fopenmp -Wdouble-promotion -IexampDWORD {aka long unsigned int} + + | l s/gguf-hash/deps -c examples/gwarning: char*ggml/src/ggml.c:g f-hash/d At top level: +'e /xxhash/xxhash.c -o exa + | SHA1Transform mp es/gg' reading 64 bytes from a region of size 0 [ f has +-Wstringop-overreadwarning: h deps/xxhash/xxhash.] + 219 | src/llama.cpp:o + In constructor 'SHA1Transform(context->state, &data[i])type qualifiers ignored on function return type [; + | c -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -Iexamples/gguf-hash/deps -c ex^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] + 125 | static ples/gguf-hash/d(llama_file*, size_t, bool)atomic_bool + ps sha256/sha256.c -o examples/gguf-hash/deps/sha256/sha256.o +examples/gguf-hash/deps/sha1/sha1.c:219:13: atomic_flag_test_and_set(atomic_flag * ptr) { + | + ^~~~~~~~~~~ + referencing argument 2 of type ' warning: cast between incompatible function types from '' +atomic_thread_fence ': + In function ' long long int (*)() '} to '': +warning: unused parameter ' mo +igned int, 
_WIN32_MEMORY_RANGE_ENTRY*, long unsigned int) ' [ -Wunused-parameter SHA1Transform + 131 | static void atomic_thread_fence(] + 2055 | pPrefetchVirtualMemory = ' + 54 | void reinterpret_cast (GetProcAddress(hKernel32, "PrefetchVirtualMemory"))SHA1Transform + | ; + | + | ^~~~~~~~~~~~~ + + + ggml/src/ggml.c:SHA1Update At top level: +', + inlined from ' ' at : + + 2104 | ' + | ' reading 64 bytes from a region of size 0 [ +] + 219 | ggml/src/ggml.c:2105:5:SHA1Transform(context->state, &data[i]) ; + | warning: ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + In file included from src/llama.cpp:1: +src/llama.cpp: In function 'void llama_lora_adapter_init_internal (llama_model*, const char*, llama_lora_adapter&)] + 2105 | referencing argument 2 of type ' +atomic_intconst unsigned char[64]src/llama.cpp:18918:20: GGML_CACHE_ALIGN n_barrier_passed; + | ' + warning: + In function 'format 'SHA1Final +' expects argument of type 'examples/gguf-hash/deps/sha1/sha1.c:54:6:long int ', but argument 4 has type ' +ma_lora_weight>::size_type ggml/src/ggml.c:19445:6: + 54 | void warning: '} [SHA1Transform -Wformat=( + | ggml_thread_apply_affinity +18918 | LLAMA_LOG_INFO(^~~~~~~~~~~~~' ["%s: loaded %ld tensors from lora file\n" +-Wmissing-prototypes, __func__, +19445 | bool adapter.ab_map.size()*2ggml_thread_apply_affinity); + | + | cc -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -std=c11 -fPIC - + nter-arith -Wmissing-prototypes -Werror=implicit-int -Werror=implicit-function-declaration -march=native -mtune=native -Xassembler -muse-unaligned-vector-move -fopenmp -Wdouble-promotion -c tests/test-c.c -o tests/test-c.o + c + -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi 
-Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/deprecation-warning/deprecation-warning.cpp -o examples/deprecation-warning/deprecat ^~~~~~~~~~~~~~~~~~~~~~~~~~ n-warning.o + +~~~~~~~~~~~~~~~~~~~~~~~ ++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c common/build-info.cpp -o common/build-info.o + + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX examples/deprecation-warning/deprecation-warning.o -o main +|c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX examples/deprecation-warning/deprecation-warning.o -o server + + | std::unordered_map, llama_lora_weight>::size_type {aka long long unsigned int} +src/llama-impl.h:28:71: note: in definition of macro 'NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead. 
+ +LLAMA_LOG_INFO' + 28 | #define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) + | ^~~~~~~~~~~ +src/llama.cpp:18918:34: note: format string is defined here +18918 | LLAMA_LOG_INFO("%s: loaded %ld tensors from lora file\n", __func__, adapter.ab_map.size()*2); + | ~~^ + | | + | long int + | %lld +src/llama.cpp: In function 'float* llama_get_logits_ith(llama_context*, int32_t)': +src/llama.cpp:21259:65: warning: format '%lu' expects argument of type 'long unsigned int', but argument 2 has type 'std::vector::size_type' {aka 'long long unsigned int'} [-Wformat=] +21259 | throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size())); + | ~~^ ~~~~~~~~~~~~~~~~~~~~~~ + | | | + | long unsigned int std::vector::size_type {aka long long unsigned int} + | %llu +src/llama.cpp: In function 'float* llama_get_embeddings_ith(llama_context*, int32_t)': +src/llama.cpp:21309:65: warning: format '%lu' expects argument of type 'long unsigned int', but argument 2 has type 'std::vector::size_type' {aka 'long long unsigned int'} [-Wformat=] +21309 | throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size())); + | ~~^ ~~~~~~~~~~~~~~~~~~~~~~ + | | | + | long unsigned int std::vector::size_type {aka long long unsigned int} + | %llu +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/gguf/gguf.cpp -o examples/gguf/gguf.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c pocs/vdot/q8dot.cpp -o pocs/vdot/q8dot.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c pocs/vdot/vdot.cpp -o pocs/vdot/vdot.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o examples/gguf/gguf.o -o llama-gguf +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/ggml.o ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o pocs/vdot/q8dot.o -o llama-q8dot +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra 
-Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/ggml.o ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o pocs/vdot/vdot.o -o llama-vdot +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -static -fPIC -c examples/llava/llava.cpp -o libllava.a -Wno-cast-qual +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/baby-llama/baby-llama.cpp -o examples/baby-llama/baby-llama.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c 
examples/batched/batched.cpp -o examples/batched/batched.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/batched-bench/batched-bench.cpp -o examples/batched-bench/batched-bench.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/llama-bench/llama-bench.cpp -o examples/llama-bench/llama-bench.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/main/main.cpp -o examples/main/main.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp -o 
examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declIn file included from arati ns -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtuneexamples/main/main.cpp:4= ative -Wno-array-bou: +nd -Wno-format-truncatioexamples/main/main.cpp:n -We In function 'xt a-semi -Igvoidgml include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBU print_usageG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llama(int, char**)f le/sgemm.o ggml/src/ggml-amx.o ggm': +l/ rc/ggml-amx/mmq.o examples/main/main.cpp:48:9:g ml/src/ggml.o ggm l/ rc/warning: g ml-atoo many arguments for format [ll c.o ggml/src/ggml-Wformat-extra-args- ac] + 48 | LOG(ke d.o ggml/src/ggml-qua"\n text generation: -m your_model.gguf -p \"I believe the meaning of life is\" -n 128\n"n s.o gg, argv[0]); + | ml src/ggml-aarch64.o src/l^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~l ma.o src/llama-vocab. 
+o rc/llama-grammar.ocommon/log.h:75:56: s c/llama-sampli ng o src/uniconote: de o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/trin definition of macro 'a n.oLOG_TMPL omm' + 75 | common_log_add(common_log_main(), (level), on buil__VA_ARGS__d info.o common/json-schema-to-grammar.o examples/ba); \ + | by llama/baby-llama.o -o llama-baby-llama ^~~~~~~~~~~ + +examples/main/main.cpp:48:5:c+ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpe da tic -Wnote: c st-qual -Wno-in expansion of macro 'un sed-funLOGct on -Wmissing-declarations -Wmissi' + 48 | ng noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semiLOG Igg("\n text generation: -m your_model.gguf -p \"I believe the meaning of life is\" -n 128\n", argv[0]); + | ml include -Iggml/sexamples/llama-bench/llama-bench.cpp:r -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DG In constructor '^~~G L_USE_OPENMP -test:: +D M _USE_LLAMAFILE -Dtestexamples/main/main.cpp:49:9: ML_USE_AMX g (const cmd_params_instance&, const llama_model*, const llama_context*)gml/src/llamwarning: ': +af le/sgemm.o ggml/src/ggml-amx.o ggml/srcexamples/llama-bench/llama-bench.cpp:911:43:/ gml- too many arguments for format [ m /mmq.o gg-Wformat-extra-argswarning: /] + 49 | LOG(sr /ggml.o ggml/src/unknown conversion type character '"\n chat (conversation): -m your_model.gguf -p \"You are a helpful assistant\" -cnv\n"g m, argv[0]); + | l- lloc.o ggml/src/ggml-backend.o gF^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ l/src/ggml-quants.o ggml/src/ggml-aarc' in format [ +h .o src/llam-Wformat=common/log.h:75:56:a o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data ] + 911 | std::strftime(buf, sizeof(buf), "% o ommon/common.o common/argFnote: . 
T%TZ", gmtime(&t)); + | in definition of macro ' c mmon^ / og.o com' + 75 | common_log_add(common_log_main(), (level), +m console.o common/ngram-cache.o common/sampling.o common/trainexamples/llama-bench/llama-bench.cpp:911:46:__VA_ARGS__. common/build-info.o common/json-schema-to-grammar.o exa); \ + | m es/batched/bat^~~~~~~~~~~warning: c ed.o -o llama +unknown conversion type character ' tched +examples/main/main.cpp:49:5:T ' in format [c + -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-dnote: -Wformat=e larations -W] + 911 | std::strftime(buf, sizeof(buf), "%FT%in expansion of macro 'm singTLOG- oreZ", gmtime(&t)); + | ' + 49 | u n -Xassemble^LOGr -muse-unalign +ed-vector-move -fopenmp -march=native -mtune=native -W o-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggmlexamples/llama-bench/llama-bench.cpp:("\n chat (conversation): -m your_model.gguf -p \"You are a helpful assistant\" -cnv\n", argv[0]); + | s c -Iinclude -Isrc - In function '^~~ ommon -D_XOPEN_SOURCint +E= 00 -DNDEBUG -D_WIN32_WINNT=0x602 -DG mainG L_USE_OPENMP -DGGM(int, char**)L USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/': +gg l-amx/mmq.o ggmlexamples/llama-bench/llama-bench.cpp:1544:58:/ rc/ggml.o ggml/src/ gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/srwarning: c ggformat 'ml aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/batched-bench/batched-bench.o -o llama-batched-bench +%ldc -std=c++' expects argument of type '11 fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wlong intm ssing-declarations -Wmissing-noreturn -Xassembler -mus', but argument 4 has type 'e- naligned-vector-move -fopenmp -march=native 
-mtune=native -Wnolong long unsigned int- rray-bounds -Wno-format-trunca' [t +on -Wextra-semi -Iggml/include -Iggml/src -Iinclude-Wformat= Isr] + 1544 | fprintf(stderr, "llama-bench: benchmark %d/c Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/embedding/embedding.cpp -o examples/embedding/embedding.o +%ld: starting\n", params_idx, params_count); + | ~~^ ~~~~~~~~~~~~ + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -W +tions -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE|= 00 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX - c xamples/eval-callback/eval-callback.cpp -o examples/eval-callback/eval-callback.o +| ++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/export-lora/export-lora.cpp -o examples/export-lora/export-lora.o + + | c + -std=c++11 -fPlong intI -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm .o ggml/src/ggml-long long unsigned inta x.o + | g ml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/%lldg ml-alloc.o 
+ggml src/ggml-backend.o ggml/src/ggml-quants.o ggmexamples/llama-bench/llama-bench.cpp:1596:62:l src /g ml-aarch64.owarning: rc/llformat 'am .o src/ll%lda a-vocab.o src/llama-gramm r.o src/llama-samexamples/export-lora/export-lora.cpp:' expects argument of type ' li g.o src/unicode.o In member function 'long ints c/unicode-data.o common/com', but argument 4 has type 'void lora_merge_ctx::mo .olong long unsigned intrun_merge om' [()m n/a': +-Wformat=r .o common/log.o common/console.o common/ngr] + 1596 | fprintf(stderr, "llama-bench: benchmark %d/examples/export-lora/export-lora.cpp:268:31:a c%ld : warmup prompt run\n", params_idx, he oparams_count om); + | mo /sampling.o common/twarning: ~~^r in.o common/build-info.o common/json-sch format 'e -to-grammar.o examples/convert-llama2c-to-ggml/convert-llam~~~~~~~~~~~~%ld c-to-ggml.o -o llama-convert' expects argument of type ' + | l ama2c-to-ggml +long int|', but argument 3 has type ' size_t c+ -std=c++11 -' {aka ' + | P C -O3 -g long intlong long unsigned int- all -Wextra -Wpedantic -W '} [c t-qual -Wno-unused-functlong long unsigned int-Wformat=i n -Wmissing-declar + | ] + 268 | printf("%s : merged t ons -Wmiss%ld%lldi g-noreturn -Xassemb + tensors with lora adapters\n", __func__, le -muse-unaligned-vecton_mergedexamples/llama-bench/llama-bench.cpp:1603:62: move -fopenmp -march=native -mtu ); + | e nwarning: a ive -Wno-array-bounds -Wformat '~~^n -format-trunca%ld ti n' expects argument of type ' - e~~~~~~~~x ralong int + | s mi', but argument 4 has type '| Iggml/include -Iggm long long unsigned intl |r ' [ - + | in lu-Wformat=long intd -] + 1603 | fprintf(stderr, "llama-bench: benchmark %d/ Isr -Isize_t {aka long long unsigned int}%ldco o + | n D: warmup generation run\n", params_idx, %lld OPEN +params_count_ OU CE=examples/export-lora/export-lora.cpp:269:30:); + | 0 - ~~^D EB warning: U -format 'D_W ~~~~~~~~~~~~N 2_WI + | %ldN T=|' expects argument of type ' x6 02 -Dlong int| ML_USE', but argument 3 has type ' + | 
_ ENMP -DGGML_USE_Llong intstd::vector::size_type MA ' {aka 'F E -DGGML_USE_long long unsigned intlong long unsigned intA X ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o '} [ + | g l/src/ggml-am-Wformat=%lldx] + 269 | printf("%s : wrote /m q.o ggml/src/ggml.o gg%ld + l/src/ gg m -alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggmltrans.size()/ +rc/ggml-aa ); + | rc 64.o src/llama.o src/llama-vocab.o swarning: ~~^ /llama-grammar.o s format 'rc~~~~~~~~~~~~%ld/ lama-sampling.o src/u' expects argument of type ' + | i ode.o src/unicode|long int data.o common/ ', but argument 4 has type 'c mon|long long unsigned int. common/arg.o common/log.o common/console.o common/ngram-cache.o + | ' [ ommon/sampling.o -Wformat=long int ommon/train.o ] + 1615 | fprintf(stderr, "llama-bench: benchmark %d/ co mon/build-info.o common/json-schema-tstd::vector::size_type {aka long long unsigned int}%ld grammar.o exam: prompt run %d/%d\n", params_idx, + | pl /main/main.o %lldparams_count o ll +, i + 1, params.reps); + | ama cli ~~^ + ~~~~~~~~~~~~ + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-funct +-declarations -Wm +gml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml|examples/export-lora/export-lora.cpp: rc/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab In member function ' . 
src/llama-grammar.o src/llama-sampling.o src/unicode.o void lora_merge_ctx::|s c/u + | merge_tensor code-data.o common/common.o common/long int(ggml_tensor*, ggml_tensor*) g.o ': + o mon/lolong long unsigned intexamples/export-lora/export-lora.cpp:355:57:g o common/console.o common/ngram- + | c che.o commwarning: %lldo /sampling.oformat ' + co mon/train.o common/build-info.o common/json-schema-to-grammar.o examples/embedding/embedding.o -o llama-embedding +%ldexamples/llama-bench/llama-bench.cpp:1621:66:' expects argument of type 'c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler long int mu e-unalwarning: ', but argument 3 has type ' g edformat 'size_t- ec' {aka '%ld rlong long unsigned int- o' expects argument of type ''} [v fo-Wformat=long intpenmp ] + 355 | printf("%s : + merging from adapter[', but argument 4 has type ' ar%ldc h nat' [] type=%s\n", __func__, i --Wformat=im une=native -Wno-array-bound, ggml_type_name(inp_a[i]->type)); + | ] + 1621 | fprintf(stderr, "llama-bench: benchmark %d/s -Wn~~^%ldo or : generation run %d/%d\n", params_idx, m -truncation -W~params_counte tra-semi -Iggml/include -Iggml/src -Iinclude -Isrc , i + 1, params.reps); + | + | - ommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x6~~^| -DGG ML US|~~~~~~~~~~~~E OP + | + | EN -|long intD GML_USE_LL A A I|LE -DG + | size_t {aka long long unsigned int}G L_ + | long int E_AMX ggml/src/llamafile/sgemm.o ggml/src/gg%lld l amx.olong long unsigned int + ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ + | gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/%lldgg l-aar +ch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/eval-callback/eval-callback.o -o 
llama-eval-callback +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/gbnf-validator/gbnf-validator.cpp -o examples/gbnf-validator/gbnf-validator.o +c++ - +std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -Iexamples/gguf-hash/deps -c examples/gguf-hash/gguf-hash.cpp -o examples/gguf-hash/gguf-hash.o +c++ ==== CLI ./llama-cli -h for help. 
==== +-std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/gbnf-validator/gbnf-validator.o -o llama-gbnf-validator +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-fun +ction -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/export-lora/export-lora.o -o llama-export-lora +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler 
-muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/gguf-split/gguf-split.cpp -o examples/gguf-split/gguf-split.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX examples/gguf-hash/deps/sha1/sha1.o examples/gguf-hash/deps/xxhash/xxhash.o examples/gguf-hash/deps/sha256/sha256.o ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/gguf-hash/gguf-hash.o -o llama-gguf-hash +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/gritlm/gritlm.cpp -o examples/gritlm/gritlm.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler 
-muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/imatrix/imatrix.cpp -o examples/imatrix/imatrix.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/infill/infill.cpp -o examples/infill/infill.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/gritlm/gritlm.o -o llama-gritlm +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src 
-Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX examples/llava/llava-cli.cpp examples/llava/llava.cpp examples/llava/clip.cpp ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngra -cache.o common/sampling.o common/train.o common/build-info.o cexamples/gguf-split/gguf-split.cpp:o mon/json-schema-to-grammar.o -o llama-llava-cli -Wno-cast-qual + In member function 'c+ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX examples/llava/minicpmv-cli.cpp examples/llava/llava.cpp examples/llava/clip.cpp ggml/src/llamafvoid split_strategy::il /sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o -o llama-minicpmv-cli -Wno-cast-qual +print_infoc++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602() -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema- +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/lookahead/lookahead.cpp -o examples/lookahead/lookahead.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -': +Wc st-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/lookup/lookup.cpp -o examples/lookup/lookup.o +examples/gguf-split/gguf-split.cpp:290:28: + -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 
-DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-towarning: - rammar.o examples/imatrix/imatrix.o -o llama-imatrix +format 'c -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -%ldD DEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/' expects argument of type 'gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/lookahead/lookahead.o -o llama-lookahead +long intc + -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-fu', but argument 2 has type 'nc ion -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o 
ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/std::vector::size_typeg ml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o ' {aka 'co mon/console.olong long unsigned int c mmon/ngram-cache.o'} [ com on/sampling.o com-Wformat=m n/train.o com] + 290 | printf("n_split: mo build%ld- o.\n", o ommctx_outs.size()o /j); + | so -schema-to-grammar.o exampl~~^e / loo up/lookup.~~~~~~~~~~~~~~~o -o llama + | - ookup +| c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native +EN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/l|l ma-bench/llama-bench.o -o llama-bench + + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-funlong intc ion -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -D GG L_USE_OPENMP -DGGML_USE_Lstd::vector::size_type {aka long long unsigned int}L MAF + | IL -DGGML_USE_AMX -c examples%lld/ oo +kup lookup-createexamples/gguf-split/gguf-split.cpp:300:64:. 
pp -o examples/lookup/lookup-create.o + warning: c++ -stdformat '=c +11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DG%ldG L_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/lookup/lookup-merge.cpp -o examples/lookup/lookup-merge.o +' expects argument of type 'c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/lookup/lookup-stats.cpp -o examples/llong into ku', but argument 4 has type 'p/ ookup-ssize_tt ts.o +' {aka 'long long unsigned intc++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -'} [m se-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEB-Wformat=U -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/] + 300 | printf("split %05d: n_tensors = %d, total_size = gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o 
common/train.o common/build-info.o common/json-schema-to-grammar.o examples/lookup/lookup-create.o -o llama-lookup-create +%ld ++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/lookup/lookup-merge.o -o llama-lookup-merge +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declM\n", i_split + 1, gguf_get_n_tensors(ctx_out), a ations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/parallel/parallel.cpp -o examples/parallel/parallel.o +total_sizec + -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX 
ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/); + | gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/lookup/lookup-stats.o -o llama-lookup-stats +~~^ +ve -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/passkey/passkey.cpp -o examples/passkey/passkey.o + c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/perplexity/perplexity.cpp -o examples/perplexity/perplexity.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declar +-fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/quantize/quantize.cpp -o examples/quantize/quantize.o +~~~~~~~~~~ + | | | + | long int c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/quantize-stats/quantize-stats.cpp -o examples/quantize-stats/quantize-stats.o +size_t {aka long long unsigned int} +embler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/parallel/parallel.o -o llama-parallel + + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/%lldg ml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/passkey/passkey.o -o 
llama-passkey + +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/perplexity/perplexity.o -o llama-perplexity +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/gguf-split/gguf-split.o -o llama-gguf-split +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler 
-muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/quantize/quantize.o -o llama-quantize +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/retrieval/retrieval.cpp -o examples/retrieval/retrieval.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/save-load-state/save-load-state.cpp -o examples/save-load-state/save-load-state.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/server/server.cpp -o examples/server/server.o +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-movIn file included from e - openmp -examples/retrieval/retrieval.cpp:3m rch=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DN: +DE UG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/simple/simple.cpp -o examples/simple/simple.o +examples/retrieval/retrieval.cpp: -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o comm In function 'on train.o common/build-info.o common/json-schema-to-grammar.o examples/quantize-stats/quantize-stats.o -o llama-quantize-stats +intc++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic main- cast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggm(int, char**)l src -': +Ii clude -Isrc 
-Icoexamples/retrieval/retrieval.cpp:146:13:m on -D_XOPEN_SOURC E 600 warning: -D DEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_format 'AMX ggml/src/llamafile/sgemm.o%ld g ml/src/' expects argument of type 'gg l-amx.o ggml/src/ggml-amx/mmq.o long intg ml', but argument 4 has type '/s c/ggml.ostd::vector::size_type gml/src/' {aka 'gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/clong long unsigned into sole.o common/ngram-cache.o common/sampling.o common/train.o common/build'} [-i fo.o common/json-schema-to-grammar.o examples/save-load-state/save-load-state.o -o llama-save-load-state +-Wformat= td=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-t] + 146 | LOG_INF(o- rammar.o examples/simple/simple.o -o llama-simple +"Number of chunks: %ld\n"c+ -std=c++11 -fPIC -O3 -g -Wall -Wext, ra Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds 
-Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/speculative/speculative.cpp -o examples/spchunks.size()e ulative/speculative.o +); + | c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/tokenize/tokenize.cpp -o examp +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_^~~~~~~~~~~~~~~~~~~~~~~~~L AMAFILE -DGGML_USE_AMX -c examples/cvector-generator/cvector-generator.cpp -o examples/cvector-generator/cvector-genera to .o +~~~~~~~~~~~~~c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX -c examples/gen-docs/gen-docs.cpp -o examples/gen-docs/gen-docs.o + + | | + | std::vector::size_type {aka long long unsigned int} +common/log.h:75:56: note: in definition of macro 'LOG_TMPL' + 75 | common_log_add(common_log_main(), (level), __VA_ARGS__); \ + | In file included 
from ^~~~~~~~~~~examples/cvector-generator/cvector-generator.cpp:5 +: +examples/retrieval/retrieval.cpp:146:5:examples/cvector-generator/pca.hpp: In function 'note: void PCA:: run_pcaLOG_INF(pca_params&, const std::vector&, const std::vector&)' + 146 | examples/tokenize/tokenize.cpp: +LOG_INF In function 'examples/cvector-generator/pca.hpp:305:49:("Number of chunks: %ld\n", chunks.size()); + | ^~~~~~~ +warning: (int, char**)format '': + ' expects argument of type ' note: long int + 146 | LOG_INF("Number of chunks: ', but argument 3 has type 'format ' size_t%ld\n", chunks.size()); + | ' {aka '' expects argument of type ' + | | + | + 305 | ggml_format_name(ctrl_out, "direction. long long unsigned int%ld + | '} [", il+1 +] + 397 | printf("Total number of tokens: ); + | %ld~~^\n", + | + | ~~~~~~~~~~~~~| + | + | || |size_t {aka long long unsigned int} + | + | long intlong int + | std::vector::size_type {aka long long unsigned int}%lld + | +In file included from %lldexamples/cvector-generator/cvector-generator.cpp:6 +: +examples/cvector-generator/mean.hpp: In function 'c+ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/void mean::gg l-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llamruna o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/gen-docs/gen-docs.o -o 
llama-gen-docs +(const std::vector&, const std::vector&)c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native +nclude -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/tokenize/tokenize.o -o llama-tokenize +': +examples/cvector-generator/mean.hpp:18:49: warning: format '%ld' expects argument of type 'long int', but argument 3 has type 'size_t' {aka 'long long unsigned int'} [-Wformat=] + 18 | ggml_format_name(ctrl_out, "direction.%ld", il+1 ++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN3); + | 2_ IN~~^NT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafi le sgemm.o ggml/src/ggml-amx.o ggml~~~~/ rc/ + | gg l-|a x/mmq.o gg l/src/ggml.o ggml/s|r / + | ggm -allo|c o ggml/src/ggml-backend.o ggml/src/ggml-q uan s.o ggml/src/ggml-aarch64size_t {aka long long unsigned int}. 
src/ll + | ama.olong int rc/llama-vocab.o src/llama-grammar.o sr + | c/ lama-samplin%lldg o +src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/speculative/speculative.o -o llama-speculative +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/retrieval/retrieval.o -o llama-retrieval +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o 
common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o examples/cvector-generator/cvector-generator.o -o llama-cvector-generator +c++ -std=c++11 -fPIC -O3 -g -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wmissing-declarations -Wmissing-noreturn -Xassembler -muse-unaligned-vector-move -fopenmp -march=native -mtune=native -Wno-array-bounds -Wno-format-truncation -Wextra-semi -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -D_XOPEN_SOURCE=600 -DNDEBUG -D_WIN32_WINNT=0x602 -DGGML_USE_OPENMP -DGGML_USE_LLAMAFILE -DGGML_USE_AMX ggml/src/llamafile/sgemm.o ggml/src/ggml-amx.o ggml/src/ggml-amx/mmq.o ggml/src/ggml.o ggml/src/ggml-alloc.o ggml/src/ggml-backend.o ggml/src/ggml-quants.o ggml/src/ggml-aarch64.o src/llama.o src/llama-vocab.o src/llama-grammar.o src/llama-sampling.o src/unicode.o src/unicode-data.o common/common.o common/arg.o common/log.o common/console.o common/ngram-cache.o common/sampling.o common/train.o common/build-info.o common/json-schema-to-grammar.o -Iexamples/server examples/server/server.o -o llama-server -lws2_32 +~/Downloads/llama.cpp-master/source $ + + + + + + + + + + + + + + + + From 7dde288320d7ad2a0e68b5d94c26e5cdee1b6fb1 Mon Sep 17 00:00:00 2001 From: Caleb Princewill Nwokocha <47554663+calebnwokocha@users.noreply.github.com> Date: Tue, 22 Oct 2024 07:50:24 -0500 Subject: [PATCH 2/6] Add files via upload --- LLMCLI.java | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 LLMCLI.java diff --git a/LLMCLI.java b/LLMCLI.java new file mode 100644 index 0000000000000..881c6fb14a9ad --- /dev/null +++ b/LLMCLI.java @@ -0,0 +1,74 @@ +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.util.Scanner; + +public class LLMCLI { + public static void main(String[] args) { + // Path to the .exe file + String exePath = 
"bin/llama-cli.exe"; + + System.out.println("Enter -h for help"); + // Scanner to take user input for various commands + Scanner scanner = new Scanner(System.in); + + while (true) { + String commandInput = scanner.nextLine(); + + // Split user input into command array for ProcessBuilder + String[] commands = commandInput.split(" "); + + // Create an array to hold both the executable path and the commands + String[] fullCommand = new String[commands.length + 1]; + fullCommand[0] = exePath; // First element is the executable path + System.arraycopy(commands, 0, fullCommand, 1, commands.length); // Copy the user commands after the exe path + + Process process = null; + + try { + // Create a ProcessBuilder with the executable and dynamic commands + ProcessBuilder processBuilder = new ProcessBuilder(fullCommand); + + // Redirect error stream to read both error and output in one stream + processBuilder.redirectErrorStream(true); + + // Start the process + process = processBuilder.start(); + + // Capture output in a separate thread + Process finalProcess = process; + new Thread(() -> { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(finalProcess.getInputStream()))) { + String line; + while ((line = reader.readLine()) != null) { + System.out.println(line); + } + } catch (IOException e) { + e.printStackTrace(); + } + }).start(); + + // Use OutputStream to send input to the process (if needed) + try (OutputStream processInput = process.getOutputStream()) { + String userInput; + while (scanner.hasNextLine() && process.isAlive()) { + userInput = scanner.nextLine(); + processInput.write((userInput + "\n").getBytes()); + processInput.flush(); // Ensure input is sent immediately + } + } + + // Wait for the process to complete and get the exit code + int exitCode = process.waitFor(); + } catch (IOException | InterruptedException e) { + e.printStackTrace(); + } finally { + // Ensure the process is destroyed if still running + if (process != null) { + 
process.destroy(); + } + } + } + } +} \ No newline at end of file From 9b56176c6a0b782d18a8de85bcff0d77835ac864 Mon Sep 17 00:00:00 2001 From: "Caleb P. Nwokocha" Date: Sat, 26 Oct 2024 10:10:52 -0500 Subject: [PATCH 3/6] test --- examples/main/main.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 374ed47ad6311..b5f913e845de3 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -45,8 +45,8 @@ static void print_usage(int argc, char ** argv) { (void) argc; LOG("\nexample usage:\n"); - LOG("\n text generation: %s -m your_model.gguf -p \"I believe the meaning of life is\" -n 128\n", argv[0]); - LOG("\n chat (conversation): %s -m your_model.gguf -p \"You are a helpful assistant\" -cnv\n", argv[0]); + LOG("\n text generation: -m your_model.gguf -p \"I believe the meaning of life is\" -n 128\n", argv[0]); + LOG("\n chat (conversation): -m your_model.gguf -p \"You are a helpful assistant\" -cnv\n", argv[0]); LOG("\n"); } From 52ab61795489c49176b0fa7734edbf2777ca4ae6 Mon Sep 17 00:00:00 2001 From: "Caleb P. 
Nwokocha" Date: Sat, 26 Oct 2024 11:10:29 -0500 Subject: [PATCH 4/6] Changed "llama" to "jarvis" --- .devops/cloud-v-pipeline | 10 +- .devops/full-cuda.Dockerfile | 2 +- .devops/full-musa.Dockerfile | 2 +- .devops/full-rocm.Dockerfile | 4 +- .devops/full.Dockerfile | 2 +- .devops/llama-cli-cann.Dockerfile | 6 +- .devops/llama-cli-cuda.Dockerfile | 8 +- .devops/llama-cli-intel.Dockerfile | 6 +- .devops/llama-cli-musa.Dockerfile | 8 +- .devops/llama-cli-rocm.Dockerfile | 6 +- .devops/llama-cli-vulkan.Dockerfile | 6 +- .devops/llama-cli.Dockerfile | 6 +- .devops/llama-cpp-cuda.srpm.spec | 40 +- .devops/llama-cpp.srpm.spec | 40 +- .devops/llama-server-cuda.Dockerfile | 12 +- .devops/llama-server-intel.Dockerfile | 10 +- .devops/llama-server-musa.Dockerfile | 12 +- .devops/llama-server-rocm.Dockerfile | 10 +- .devops/llama-server-vulkan.Dockerfile | 10 +- .devops/llama-server.Dockerfile | 10 +- .devops/nix/apps.nix | 8 +- .devops/nix/docker.nix | 14 +- .devops/nix/jetson-support.nix | 14 +- .devops/nix/package-gguf-py.nix | 4 +- .devops/nix/package.nix | 20 +- .devops/nix/python-scripts.nix | 12 +- .devops/nix/scope.nix | 6 +- .devops/nix/sif.nix | 8 +- .devops/tools.sh | 10 +- .dockerignore | 4 +- .editorconfig | 2 +- .github/ISSUE_TEMPLATE/01-bug-low.yml | 6 +- .github/ISSUE_TEMPLATE/02-bug-medium.yml | 6 +- .github/ISSUE_TEMPLATE/03-bug-high.yml | 6 +- .github/ISSUE_TEMPLATE/04-bug-critical.yml | 6 +- .github/ISSUE_TEMPLATE/05-enhancement.yml | 12 +- .github/ISSUE_TEMPLATE/06-research.yml | 2 +- .github/ISSUE_TEMPLATE/07-refactor.yml | 4 +- .github/ISSUE_TEMPLATE/config.yml | 6 +- .github/labeler.yml | 2 +- .github/pull_request_template.md | 2 +- .github/workflows/bench.yml.disabled | 26 +- .github/workflows/build.yml | 148 +- .github/workflows/docker.yml | 20 +- .github/workflows/labeler.yml | 2 +- .github/workflows/nix-ci-aarch64.yml | 6 +- .github/workflows/nix-ci.yml | 10 +- .github/workflows/server.yml | 32 +- .gitignore | 8 +- CMakeLists.txt | 118 +- 
CONTRIBUTING.md | 6 +- LLMCLI.java | 2 +- Makefile | 388 +- Package.swift | 14 +- README.md | 170 +- SECURITY.md | 8 +- ci/README.md | 4 +- ci/run.sh | 292 +- cmake/llama-config.cmake.in | 38 +- cmake/llama.pc.in | 4 +- common/CMakeLists.txt | 10 +- common/arg.cpp | 382 +- common/arg.h | 12 +- common/build-info.cpp.in | 8 +- common/common.cpp | 254 +- common/common.h | 140 +- common/console.cpp | 2 +- common/json.hpp | 6 +- common/log.cpp | 2 +- common/log.h | 2 +- common/ngram-cache.cpp | 60 +- common/ngram-cache.h | 28 +- common/sampling.cpp | 136 +- common/sampling.h | 22 +- common/train.cpp | 52 +- common/train.h | 16 +- convert_hf_to_gguf.py | 140 +- convert_hf_to_gguf_update.py | 20 +- convert_llama_ggml_to_gguf.py | 8 +- convert_lora_to_gguf.py | 16 +- docs/android.md | 30 +- docs/backend/BLIS.md | 6 +- docs/backend/CANN.md | 28 +- docs/backend/SYCL.md | 76 +- docs/build.md | 52 +- docs/development/HOWTO-add-model.md | 36 +- docs/development/debugging-tests.md | 14 +- .../token_generation_performance_tips.md | 16 +- docs/docker.md | 70 +- docs/install.md | 18 +- examples/CMakeLists.txt | 8 +- examples/Miku.sh | 4 +- examples/baby-llama/CMakeLists.txt | 6 +- examples/baby-llama/baby-llama.cpp | 82 +- examples/base-translate.sh | 6 +- examples/batched-bench/CMakeLists.txt | 4 +- examples/batched-bench/README.md | 12 +- examples/batched-bench/batched-bench.cpp | 50 +- examples/batched.swift/Makefile | 6 +- examples/batched.swift/Package.swift | 8 +- examples/batched.swift/README.md | 2 +- examples/batched.swift/Sources/main.swift | 74 +- examples/batched/CMakeLists.txt | 4 +- examples/batched/README.md | 14 +- examples/batched/batched.cpp | 76 +- examples/chat-13B.bat | 2 +- examples/chat-13B.sh | 8 +- examples/chat-persistent.sh | 14 +- examples/chat-vicuna.sh | 6 +- examples/chat.sh | 2 +- .../convert-llama2c-to-ggml/CMakeLists.txt | 6 +- examples/convert-llama2c-to-ggml/README.md | 22 +- .../convert-llama2c-to-ggml.cpp | 144 +- 
examples/convert_legacy_llama.py | 14 +- examples/cvector-generator/CMakeLists.txt | 4 +- examples/cvector-generator/README.md | 18 +- .../cvector-generator/cvector-generator.cpp | 58 +- examples/cvector-generator/mean.hpp | 2 +- examples/cvector-generator/pca.hpp | 4 +- examples/deprecation-warning/README.md | 72 +- .../deprecation-warning.cpp | 10 +- examples/embedding/CMakeLists.txt | 4 +- examples/embedding/README.md | 12 +- examples/embedding/embedding.cpp | 70 +- examples/eval-callback/CMakeLists.txt | 6 +- examples/eval-callback/README.md | 14 +- examples/eval-callback/eval-callback.cpp | 30 +- examples/export-lora/CMakeLists.txt | 4 +- examples/export-lora/README.md | 12 +- examples/export-lora/export-lora.cpp | 6 +- examples/gbnf-validator/CMakeLists.txt | 4 +- examples/gbnf-validator/gbnf-validator.cpp | 20 +- examples/gen-docs/CMakeLists.txt | 4 +- examples/gen-docs/gen-docs.cpp | 8 +- examples/gguf-hash/CMakeLists.txt | 2 +- examples/gguf-hash/README.md | 28 +- examples/gguf-hash/gguf-hash.cpp | 8 +- examples/gguf-split/CMakeLists.txt | 4 +- examples/gguf-split/gguf-split.cpp | 14 +- examples/gguf-split/tests.sh | 4 +- examples/gguf/CMakeLists.txt | 2 +- examples/gritlm/CMakeLists.txt | 4 +- examples/gritlm/README.md | 2 +- examples/gritlm/gritlm.cpp | 80 +- examples/imatrix/CMakeLists.txt | 4 +- examples/imatrix/README.md | 10 +- examples/imatrix/imatrix.cpp | 54 +- examples/infill/CMakeLists.txt | 4 +- examples/infill/README.md | 12 +- examples/infill/infill.cpp | 108 +- examples/jeopardy/README.md | 4 +- examples/jeopardy/jeopardy.sh | 2 +- examples/json_schema_pydantic_example.py | 4 +- examples/json_schema_to_grammar.py | 4 +- examples/llama-bench/CMakeLists.txt | 6 +- examples/llama-bench/README.md | 124 +- examples/llama-bench/llama-bench.cpp | 136 +- examples/llama.android/app/build.gradle.kts | 6 +- .../app/src/main/AndroidManifest.xml | 4 +- .../java/com/example/llama/Downloadable.kt | 2 +- .../java/com/example/llama/MainActivity.kt | 12 +- 
.../java/com/example/llama/MainViewModel.kt | 16 +- .../java/com/example/llama/ui/theme/Color.kt | 2 +- .../java/com/example/llama/ui/theme/Theme.kt | 4 +- .../java/com/example/llama/ui/theme/Type.kt | 2 +- .../app/src/main/res/values/strings.xml | 2 +- .../app/src/main/res/values/themes.xml | 2 +- examples/llama.android/llama/build.gradle.kts | 4 +- .../llama/cpp/ExampleInstrumentedTest.kt | 4 +- .../llama/src/main/cpp/CMakeLists.txt | 16 +- .../llama/src/main/cpp/llama-android.cpp | 148 +- .../java/android/llama/cpp/LLamaAndroid.kt | 12 +- .../java/android/llama/cpp/ExampleUnitTest.kt | 2 +- examples/llama.android/settings.gradle.kts | 4 +- examples/llama.swiftui/README.md | 10 +- .../llama.cpp.swift/LibLlama.swift | 126 +- .../llama.swiftui.xcodeproj/project.pbxproj | 78 +- .../llama.swiftui/Models/LlamaState.swift | 46 +- .../llama.swiftui/UI/ContentView.swift | 30 +- .../llama.swiftui/UI/DownloadButton.swift | 22 +- .../llama.swiftui/UI/InputButton.swift | 10 +- .../llama.swiftui/UI/LoadCustomButton.swift | 8 +- .../llama.swiftui/llama_swiftuiApp.swift | 2 +- examples/llama.vim | 138 +- examples/llava/CMakeLists.txt | 14 +- examples/llava/MobileVLM-README.md | 158 +- examples/llava/README-minicpmv2.5.md | 42 +- examples/llava/README-minicpmv2.6.md | 30 +- examples/llava/README.md | 14 +- examples/llava/android/adb_run.sh | 10 +- examples/llava/clip.h | 4 +- examples/llava/llava-cli.cpp | 88 +- examples/llava/llava.cpp | 26 +- examples/llava/llava.h | 10 +- examples/llava/minicpmv-cli.cpp | 102 +- .../minicpmv-convert-image-encoder-to-gguf.py | 2 +- examples/llava/requirements.txt | 2 +- examples/lookahead/CMakeLists.txt | 4 +- examples/lookahead/README.md | 4 +- examples/lookahead/lookahead.cpp | 82 +- examples/lookup/CMakeLists.txt | 16 +- examples/lookup/README.md | 6 +- examples/lookup/lookup-create.cpp | 20 +- examples/lookup/lookup-merge.cpp | 2 +- examples/lookup/lookup-stats.cpp | 42 +- examples/lookup/lookup.cpp | 56 +- 
examples/main-cmake-pkg/CMakeLists.txt | 20 +- examples/main-cmake-pkg/README.md | 20 +- examples/main/CMakeLists.txt | 4 +- examples/main/README.md | 34 +- examples/main/main.cpp | 120 +- examples/parallel/CMakeLists.txt | 4 +- examples/parallel/README.md | 2 +- examples/parallel/parallel.cpp | 68 +- examples/passkey/CMakeLists.txt | 4 +- examples/passkey/README.md | 8 +- examples/passkey/passkey.cpp | 88 +- examples/perplexity/CMakeLists.txt | 4 +- examples/perplexity/README.md | 10 +- examples/perplexity/perplexity.cpp | 204 +- .../pydantic_models_to_grammar_examples.py | 6 +- examples/quantize-stats/CMakeLists.txt | 4 +- examples/quantize-stats/quantize-stats.cpp | 28 +- examples/quantize/CMakeLists.txt | 4 +- examples/quantize/README.md | 48 +- examples/quantize/quantize.cpp | 132 +- examples/quantize/tests.sh | 6 +- examples/reason-act.sh | 2 +- examples/retrieval/CMakeLists.txt | 4 +- examples/retrieval/README.md | 6 +- examples/retrieval/retrieval.cpp | 56 +- examples/rpc/CMakeLists.txt | 2 +- examples/rpc/README.md | 12 +- examples/save-load-state/CMakeLists.txt | 4 +- examples/save-load-state/save-load-state.cpp | 110 +- examples/server-llama2-13B.sh | 4 +- examples/server/CMakeLists.txt | 8 +- examples/server/README.md | 208 +- examples/server/bench/README.md | 18 +- examples/server/bench/bench.py | 24 +- examples/server/bench/prometheus.yml | 4 +- examples/server/bench/script.js | 34 +- examples/server/public/completion.js | 40 +- examples/server/public/index-new.html | 38 +- examples/server/public/index.html | 34 +- examples/server/public/prompt-formats.js | 8 +- examples/server/public/style.css | 2 +- examples/server/public/theme-mangotango.css | 2 +- examples/server/public_simplechat/index.html | 2 +- examples/server/public_simplechat/readme.md | 6 +- examples/server/server.cpp | 166 +- examples/server/tests/README.md | 8 +- .../server/tests/features/ctx_shift.feature | 8 +- .../server/tests/features/embeddings.feature | 4 +- 
examples/server/tests/features/environment.py | 4 +- examples/server/tests/features/infill.feature | 14 +- examples/server/tests/features/lora.feature | 4 +- .../server/tests/features/parallel.feature | 10 +- .../server/tests/features/passkey.feature | 2 +- examples/server/tests/features/rerank.feature | 4 +- .../server/tests/features/results.feature | 12 +- .../server/tests/features/security.feature | 4 +- examples/server/tests/features/server.feature | 16 +- .../server/tests/features/slotsave.feature | 6 +- examples/server/tests/features/steps/steps.py | 16 +- .../tests/features/wrong_usages.feature | 4 +- examples/server/tests/tests.sh | 4 +- examples/server/themes/buttons-top/index.html | 30 +- examples/server/themes/wild/index.html | 32 +- examples/server/utils.hpp | 122 +- examples/simple/CMakeLists.txt | 4 +- examples/simple/README.md | 16 +- examples/simple/simple.cpp | 52 +- examples/speculative/CMakeLists.txt | 4 +- examples/speculative/README.md | 8 +- examples/speculative/speculative.cpp | 138 +- examples/sycl/CMakeLists.txt | 4 +- examples/sycl/README.md | 12 +- examples/sycl/build.sh | 4 +- examples/sycl/run-llama2.sh | 6 +- examples/sycl/win-run-llama2.bat | 2 +- examples/tokenize/CMakeLists.txt | 4 +- examples/tokenize/tokenize.cpp | 24 +- flake.nix | 60 +- ggml/CMakeLists.txt | 8 +- ggml/include/ggml-metal.h | 2 +- ggml/include/ggml.h | 2 +- ggml/src/CMakeLists.txt | 12 +- ggml/src/ggml-cann/aclnn_ops.cpp | 2 +- ggml/src/ggml-cuda/fattn-vec-f16.cuh | 2 +- ggml/src/ggml-cuda/rope.cu | 2 +- ggml/src/ggml-kompute.cpp | 8 +- ggml/src/ggml-metal.m | 4 +- ggml/src/ggml-metal.metal | 2 +- ggml/src/ggml-sycl.cpp | 2 +- ggml/src/ggml-sycl/rope.cpp | 2 +- ggml/src/ggml-sycl/softmax.cpp | 2 +- ggml/src/ggml-vulkan.cpp | 2 +- ggml/src/ggml.c | 60 +- ggml/src/kompute-shaders/op_softmax.comp | 2 +- ggml/src/kompute-shaders/rope_common.comp | 2 +- ggml/src/llamafile/sgemm.cpp | 4 +- ggml/src/llamafile/sgemm.h | 2 +- gguf-py/README.md | 14 +- 
gguf-py/examples/writer.py | 2 +- gguf-py/gguf/constants.py | 12 +- gguf-py/gguf/metadata.py | 8 +- gguf-py/gguf/tensor_mapping.py | 54 +- gguf-py/gguf/vocab.py | 22 +- gguf-py/pyproject.toml | 4 +- gguf-py/scripts/gguf_hash.py | 6 +- gguf-py/tests/test_metadata.py | 40 +- grammars/README.md | 32 +- include/llama.h | 1070 ++--- pocs/vdot/CMakeLists.txt | 8 +- pyproject.toml | 14 +- requirements.txt | 6 +- requirements/requirements-all.txt | 6 +- .../requirements-convert_hf_to_gguf.txt | 2 +- ...requirements-convert_hf_to_gguf_update.txt | 2 +- ...equirements-convert_llama_ggml_to_gguf.txt | 2 +- scripts/build-info.sh | 8 +- scripts/check-requirements.sh | 6 +- scripts/ci-run.sh | 16 +- scripts/compare-commits.sh | 16 +- scripts/compare-llama-bench.py | 28 +- scripts/debug-test.sh | 4 +- scripts/get-hellaswag.sh | 2 +- scripts/get-wikitext-103.sh | 2 +- scripts/get-wikitext-2.sh | 2 +- scripts/get-winogrande.sh | 4 +- scripts/hf.sh | 6 +- scripts/pod-llama.sh | 158 +- scripts/qnt-all.sh | 2 +- scripts/run-all-perf.sh | 2 +- scripts/run-all-ppl.sh | 2 +- scripts/run-with-preset.py | 30 +- scripts/server-llm.sh | 58 +- scripts/sync-ggml-am.sh | 36 +- scripts/verify-checksum-models.py | 10 +- spm-headers/llama.h | 2 +- src/CMakeLists.txt | 24 +- src/llama-grammar.cpp | 400 +- src/llama-grammar.h | 118 +- src/llama-impl.h | 30 +- src/llama-sampling.cpp | 1050 ++--- src/llama-sampling.h | 30 +- src/llama-vocab.cpp | 416 +- src/llama-vocab.h | 138 +- src/llama.cpp | 3446 ++++++++--------- src/unicode.cpp | 8 +- src/unicode.h | 2 +- tests/CMakeLists.txt | 128 +- tests/get-model.cpp | 4 +- tests/test-arg-parser.cpp | 42 +- tests/test-autorelease.cpp | 16 +- tests/test-backend-ops.cpp | 26 +- tests/test-c.c | 2 +- tests/test-chat-template.cpp | 28 +- tests/test-grammar-integration.cpp | 30 +- tests/test-grammar-parser.cpp | 400 +- tests/test-json-schema-to-grammar.cpp | 14 +- tests/test-llama-grammar.cpp | 208 +- tests/test-lora-conversion-inference.sh | 18 +- 
tests/test-model-load-cancel.cpp | 10 +- tests/test-sampling.cpp | 124 +- tests/test-tokenizer-0.cpp | 44 +- tests/test-tokenizer-0.sh | 2 +- tests/test-tokenizer-1-bpe.cpp | 34 +- tests/test-tokenizer-1-spm.cpp | 34 +- tests/test-tokenizer-random.py | 86 +- w64devkit.txt | 312 +- 372 files changed, 8811 insertions(+), 8811 deletions(-) diff --git a/.devops/cloud-v-pipeline b/.devops/cloud-v-pipeline index af8c0cea6155c..8ba5f117e5f08 100644 --- a/.devops/cloud-v-pipeline +++ b/.devops/cloud-v-pipeline @@ -7,16 +7,16 @@ node('x86_runner1'){ // Running on x86 runner containing latest vecto checkout scm // Clone the repo on Runner } } - stage('Compiling llama.cpp'){ + stage('Compiling jarvis.cpp'){ sh'''#!/bin/bash - make RISCV=1 RISCV_CROSS_COMPILE=1 # Compiling llama for RISC-V + make RISCV=1 RISCV_CROSS_COMPILE=1 # Compiling jarvis for RISC-V ''' } - stage('Running llama.cpp'){ + stage('Running jarvis.cpp'){ sh'''#!/bin/bash module load gnu-bin2/0.1 # loading latest versions of vector qemu and vector gcc - qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./llama-cli -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64 - cat llama_log.txt # Printing results + qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./jarvis-cli -m /home/alitariq/codejarvis-7b.Q4_K_M.gguf -p "Anything" -n 9 > jarvis_log.txt # Running jarvis.cpp on vector qemu-riscv64 + cat jarvis_log.txt # Printing results ''' } } diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile index d5acd35e204d3..16d3d6b947eeb 100644 --- a/.devops/full-cuda.Dockerfile +++ b/.devops/full-cuda.Dockerfile @@ -26,7 +26,7 @@ COPY . . 
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ fi && \ - cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ + cmake -B build -DGGML_CUDA=ON -DJARVIS_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release -j$(nproc) && \ cp build/bin/* . diff --git a/.devops/full-musa.Dockerfile b/.devops/full-musa.Dockerfile index 34ba856d3d1ca..51b6061020b5e 100644 --- a/.devops/full-musa.Dockerfile +++ b/.devops/full-musa.Dockerfile @@ -19,7 +19,7 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ +RUN cmake -B build -DGGML_MUSA=ON -DJARVIS_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release -j$(nproc) && \ cp build/bin/* . diff --git a/.devops/full-rocm.Dockerfile b/.devops/full-rocm.Dockerfile index df496bcd2b7ee..620d7d89cf40d 100644 --- a/.devops/full-rocm.Dockerfile +++ b/.devops/full-rocm.Dockerfile @@ -9,7 +9,7 @@ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-co FROM ${BASE_ROCM_DEV_CONTAINER} AS build # Unless otherwise specified, we make a fat build. -# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 +# List from https://github.com/ggerganov/jarvis.cpp/pull/1087#issuecomment-1682807878 # This is mostly tied to rocBLAS supported archs. 
ARG ROCM_DOCKER_ARCH="\ gfx803 \ @@ -41,7 +41,7 @@ ENV CC=/opt/rocm/llvm/bin/clang ENV CXX=/opt/rocm/llvm/bin/clang++ # Enable cURL -ENV LLAMA_CURL=1 +ENV JARVIS_CURL=1 RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile index 2a06f82b738ae..62ee6f5069f00 100644 --- a/.devops/full.Dockerfile +++ b/.devops/full.Dockerfile @@ -15,7 +15,7 @@ WORKDIR /app COPY . . -ENV LLAMA_CURL=1 +ENV JARVIS_CURL=1 RUN make -j$(nproc) diff --git a/.devops/llama-cli-cann.Dockerfile b/.devops/llama-cli-cann.Dockerfile index db5ba2f25ea67..99c83c0b15dfd 100644 --- a/.devops/llama-cli-cann.Dockerfile +++ b/.devops/llama-cli-cann.Dockerfile @@ -23,11 +23,11 @@ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH RUN echo "Building with static libs" && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \ cmake -B build -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF && \ - cmake --build build --config Release --target llama-cli + cmake --build build --config Release --target jarvis-cli # TODO: use image with NNRT FROM cosdt/cann:$ASCEND_VERSION AS runtime -COPY --from=build /app/build/bin/llama-cli /llama-cli +COPY --from=build /app/build/bin/jarvis-cli /jarvis-cli ENV LC_ALL=C.utf8 @@ -41,4 +41,4 @@ ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME} -ENTRYPOINT ["/llama-cli" ] +ENTRYPOINT ["/jarvis-cli" ] diff --git a/.devops/llama-cli-cuda.Dockerfile b/.devops/llama-cli-cuda.Dockerfile index b75163b94435a..43f8b2cb9a471 100644 --- a/.devops/llama-cli-cuda.Dockerfile +++ b/.devops/llama-cli-cuda.Dockerfile @@ -23,7 +23,7 @@ RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ fi && \ cmake -B build -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ - cmake --build build --config Release --target llama-cli -j$(nproc) + cmake --build build --config Release --target jarvis-cli -j$(nproc) FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime @@ -31,7 +31,7 @@ RUN apt-get update && \ apt-get install -y libgomp1 COPY --from=build /app/build/ggml/src/libggml.so /libggml.so -COPY --from=build /app/build/src/libllama.so /libllama.so -COPY --from=build /app/build/bin/llama-cli /llama-cli +COPY --from=build /app/build/src/libjarvis.so /libjarvis.so +COPY --from=build /app/build/bin/jarvis-cli /jarvis-cli -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/jarvis-cli" ] diff --git a/.devops/llama-cli-intel.Dockerfile b/.devops/llama-cli-intel.Dockerfile index 79dba06a77d6e..cc3d64afef9df 100644 --- a/.devops/llama-cli-intel.Dockerfile +++ b/.devops/llama-cli-intel.Dockerfile @@ -17,12 +17,12 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ echo "Building with static libs" && \ cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \ ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \ - cmake --build build --config Release --target llama-cli + cmake --build build --config Release --target jarvis-cli FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime -COPY --from=build /app/build/bin/llama-cli /llama-cli +COPY --from=build /app/build/bin/jarvis-cli /jarvis-cli ENV LC_ALL=C.utf8 -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/jarvis-cli" ] diff --git a/.devops/llama-cli-musa.Dockerfile b/.devops/llama-cli-musa.Dockerfile index b5696794f1a56..69d13cc79cada 100644 --- a/.devops/llama-cli-musa.Dockerfile +++ b/.devops/llama-cli-musa.Dockerfile @@ -16,7 +16,7 @@ WORKDIR /app COPY . . RUN cmake -B build -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ - cmake --build build --config Release --target llama-cli -j$(nproc) + cmake --build build --config Release --target jarvis-cli -j$(nproc) FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime @@ -24,7 +24,7 @@ RUN apt-get update && \ apt-get install -y libgomp1 COPY --from=build /app/build/ggml/src/libggml.so /libggml.so -COPY --from=build /app/build/src/libllama.so /libllama.so -COPY --from=build /app/build/bin/llama-cli /llama-cli +COPY --from=build /app/build/src/libjarvis.so /libjarvis.so +COPY --from=build /app/build/bin/jarvis-cli /jarvis-cli -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/jarvis-cli" ] diff --git a/.devops/llama-cli-rocm.Dockerfile b/.devops/llama-cli-rocm.Dockerfile index e60c747bdbf11..2eeb794358221 100644 --- a/.devops/llama-cli-rocm.Dockerfile +++ b/.devops/llama-cli-rocm.Dockerfile @@ -9,7 +9,7 @@ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-co FROM ${BASE_ROCM_DEV_CONTAINER} AS build # Unless otherwise specified, we make a fat build. -# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 +# List from https://github.com/ggerganov/jarvis.cpp/pull/1087#issuecomment-1682807878 # This is mostly tied to rocBLAS supported archs. ARG ROCM_DOCKER_ARCH="\ gfx803 \ @@ -40,6 +40,6 @@ ENV GGML_HIPBLAS=1 ENV CC=/opt/rocm/llvm/bin/clang ENV CXX=/opt/rocm/llvm/bin/clang++ -RUN make -j$(nproc) llama-cli +RUN make -j$(nproc) jarvis-cli -ENTRYPOINT [ "/app/llama-cli" ] +ENTRYPOINT [ "/app/jarvis-cli" ] diff --git a/.devops/llama-cli-vulkan.Dockerfile b/.devops/llama-cli-vulkan.Dockerfile index 9b0dad8bf7a13..57ebafa9bed2f 100644 --- a/.devops/llama-cli-vulkan.Dockerfile +++ b/.devops/llama-cli-vulkan.Dockerfile @@ -15,13 +15,13 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key WORKDIR /app COPY . . 
RUN cmake -B build -DGGML_VULKAN=1 && \ - cmake --build build --config Release --target llama-cli + cmake --build build --config Release --target jarvis-cli # Clean up WORKDIR / -RUN cp /app/build/bin/llama-cli /llama-cli && \ +RUN cp /app/build/bin/jarvis-cli /jarvis-cli && \ rm -rf /app ENV LC_ALL=C.utf8 -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/jarvis-cli" ] diff --git a/.devops/llama-cli.Dockerfile b/.devops/llama-cli.Dockerfile index 7f741aa46ecf0..6a3137f281679 100644 --- a/.devops/llama-cli.Dockerfile +++ b/.devops/llama-cli.Dockerfile @@ -9,15 +9,15 @@ WORKDIR /app COPY . . -RUN make -j$(nproc) llama-cli +RUN make -j$(nproc) jarvis-cli FROM ubuntu:$UBUNTU_VERSION AS runtime RUN apt-get update && \ apt-get install -y libgomp1 -COPY --from=build /app/llama-cli /llama-cli +COPY --from=build /app/jarvis-cli /jarvis-cli ENV LC_ALL=C.utf8 -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/jarvis-cli" ] diff --git a/.devops/llama-cpp-cuda.srpm.spec b/.devops/llama-cpp-cuda.srpm.spec index 7425d3a9d7a40..09d41e9b105a5 100644 --- a/.devops/llama-cpp-cuda.srpm.spec +++ b/.devops/llama-cpp-cuda.srpm.spec @@ -3,7 +3,7 @@ # Built and maintained by John Boero - boeroboy@gmail.com # In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal -# Notes for llama.cpp: +# Notes for jarvis.cpp: # 1. Tags are currently based on hash - which will not sort asciibetically. # We need to declare standard versioning if people want to sort latest releases. # 2. Builds for CUDA/OpenCL support are separate, with different depenedencies. @@ -12,44 +12,44 @@ # 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries. # It is up to the user to install the correct vendor-specific support. 
-Name: llama.cpp-cuda +Name: jarvis.cpp-cuda Version: %( date "+%%Y%%m%%d" ) Release: 1%{?dist} Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL) License: MIT -Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz +Source0: https://github.com/ggerganov/jarvis.cpp/archive/refs/heads/master.tar.gz BuildRequires: coreutils make gcc-c++ git cuda-toolkit Requires: cuda-toolkit -URL: https://github.com/ggerganov/llama.cpp +URL: https://github.com/ggerganov/jarvis.cpp %define debug_package %{nil} %define source_date_epoch_from_changelog 0 %description -CPU inference for Meta's Lllama2 models using default options. +CPU inference for Meta's Jarvis2 models using default options. %prep -%setup -n llama.cpp-master +%setup -n jarvis.cpp-master %build make -j GGML_CUDA=1 %install mkdir -p %{buildroot}%{_bindir}/ -cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli -cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server -cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple +cp -p jarvis-cli %{buildroot}%{_bindir}/jarvis-cuda-cli +cp -p jarvis-server %{buildroot}%{_bindir}/jarvis-cuda-server +cp -p jarvis-simple %{buildroot}%{_bindir}/jarvis-cuda-simple mkdir -p %{buildroot}/usr/lib/systemd/system -%{__cat} < %{buildroot}/usr/lib/systemd/system/llamacuda.service +%{__cat} < %{buildroot}/usr/lib/systemd/system/jarviscuda.service [Unit] -Description=Llama.cpp server, CPU only (no GPU support in this build). +Description=Jarvis.cpp server, CPU only (no GPU support in this build). 
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target [Service] Type=simple -EnvironmentFile=/etc/sysconfig/llama -ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS +EnvironmentFile=/etc/sysconfig/jarvis +ExecStart=/usr/bin/jarvis-cuda-server $JARVIS_ARGS ExecReload=/bin/kill -s HUP $MAINPID Restart=never @@ -58,8 +58,8 @@ WantedBy=default.target EOF mkdir -p %{buildroot}/etc/sysconfig -%{__cat} < %{buildroot}/etc/sysconfig/llama -LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin" +%{__cat} < %{buildroot}/etc/sysconfig/jarvis +JARVIS_ARGS="-m /opt/jarvis2/ggml-model-f32.bin" EOF %clean @@ -67,11 +67,11 @@ rm -rf %{buildroot} rm -rf %{_builddir}/* %files -%{_bindir}/llama-cuda-cli -%{_bindir}/llama-cuda-server -%{_bindir}/llama-cuda-simple -/usr/lib/systemd/system/llamacuda.service -%config /etc/sysconfig/llama +%{_bindir}/jarvis-cuda-cli +%{_bindir}/jarvis-cuda-server +%{_bindir}/jarvis-cuda-simple +/usr/lib/systemd/system/jarviscuda.service +%config /etc/sysconfig/jarvis %pre diff --git a/.devops/llama-cpp.srpm.spec b/.devops/llama-cpp.srpm.spec index 4d5560089816c..e151b0aacc87f 100644 --- a/.devops/llama-cpp.srpm.spec +++ b/.devops/llama-cpp.srpm.spec @@ -3,7 +3,7 @@ # Built and maintained by John Boero - boeroboy@gmail.com # In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal -# Notes for llama.cpp: +# Notes for jarvis.cpp: # 1. Tags are currently based on hash - which will not sort asciibetically. # We need to declare standard versioning if people want to sort latest releases. # In the meantime, YYYYMMDD format will be used. @@ -13,45 +13,45 @@ # 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries. # It is up to the user to install the correct vendor-specific support. 
-Name: llama.cpp +Name: jarvis.cpp Version: %( date "+%%Y%%m%%d" ) Release: 1%{?dist} Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL) License: MIT -Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz +Source0: https://github.com/ggerganov/jarvis.cpp/archive/refs/heads/master.tar.gz BuildRequires: coreutils make gcc-c++ git libstdc++-devel Requires: libstdc++ -URL: https://github.com/ggerganov/llama.cpp +URL: https://github.com/ggerganov/jarvis.cpp %define debug_package %{nil} %define source_date_epoch_from_changelog 0 %description -CPU inference for Meta's Lllama2 models using default options. +CPU inference for Meta's Jarvis2 models using default options. Models are not included in this package and must be downloaded separately. %prep -%setup -n llama.cpp-master +%setup -n jarvis.cpp-master %build make -j %install mkdir -p %{buildroot}%{_bindir}/ -cp -p llama-cli %{buildroot}%{_bindir}/llama-cli -cp -p llama-server %{buildroot}%{_bindir}/llama-server -cp -p llama-simple %{buildroot}%{_bindir}/llama-simple +cp -p jarvis-cli %{buildroot}%{_bindir}/jarvis-cli +cp -p jarvis-server %{buildroot}%{_bindir}/jarvis-server +cp -p jarvis-simple %{buildroot}%{_bindir}/jarvis-simple mkdir -p %{buildroot}/usr/lib/systemd/system -%{__cat} < %{buildroot}/usr/lib/systemd/system/llama.service +%{__cat} < %{buildroot}/usr/lib/systemd/system/jarvis.service [Unit] -Description=Llama.cpp server, CPU only (no GPU support in this build). +Description=Jarvis.cpp server, CPU only (no GPU support in this build). 
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target [Service] Type=simple -EnvironmentFile=/etc/sysconfig/llama -ExecStart=/usr/bin/llama-server $LLAMA_ARGS +EnvironmentFile=/etc/sysconfig/jarvis +ExecStart=/usr/bin/jarvis-server $JARVIS_ARGS ExecReload=/bin/kill -s HUP $MAINPID Restart=never @@ -60,8 +60,8 @@ WantedBy=default.target EOF mkdir -p %{buildroot}/etc/sysconfig -%{__cat} < %{buildroot}/etc/sysconfig/llama -LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin" +%{__cat} < %{buildroot}/etc/sysconfig/jarvis +JARVIS_ARGS="-m /opt/jarvis2/ggml-model-f32.bin" EOF %clean @@ -69,11 +69,11 @@ rm -rf %{buildroot} rm -rf %{_builddir}/* %files -%{_bindir}/llama-cli -%{_bindir}/llama-server -%{_bindir}/llama-simple -/usr/lib/systemd/system/llama.service -%config /etc/sysconfig/llama +%{_bindir}/jarvis-cli +%{_bindir}/jarvis-server +%{_bindir}/jarvis-simple +/usr/lib/systemd/system/jarvis.service +%config /etc/sysconfig/jarvis %pre diff --git a/.devops/llama-server-cuda.Dockerfile b/.devops/llama-server-cuda.Dockerfile index a40e24205707f..435fe9e8d9bf9 100644 --- a/.devops/llama-server-cuda.Dockerfile +++ b/.devops/llama-server-cuda.Dockerfile @@ -22,8 +22,8 @@ COPY . . RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ fi && \ - cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release --target llama-server -j$(nproc) + cmake -B build -DGGML_CUDA=ON -DJARVIS_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ + cmake --build build --config Release --target jarvis-server -j$(nproc) FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime @@ -31,12 +31,12 @@ RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev libgomp1 curl COPY --from=build /app/build/ggml/src/libggml.so /libggml.so -COPY --from=build /app/build/src/libllama.so /libllama.so -COPY --from=build /app/build/bin/llama-server /llama-server +COPY --from=build /app/build/src/libjarvis.so /libjarvis.so +COPY --from=build /app/build/bin/jarvis-server /jarvis-server # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/llama-server" ] +ENTRYPOINT [ "/jarvis-server" ] diff --git a/.devops/llama-server-intel.Dockerfile b/.devops/llama-server-intel.Dockerfile index 9c355b664f15e..1d3cc936fe00f 100644 --- a/.devops/llama-server-intel.Dockerfile +++ b/.devops/llama-server-intel.Dockerfile @@ -15,20 +15,20 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ fi && \ echo "Building with dynamic libs" && \ - cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \ - cmake --build build --config Release --target llama-server + cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DJARVIS_CURL=ON ${OPT_SYCL_F16} && \ + cmake --build build --config Release --target jarvis-server FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev curl -COPY --from=build /app/build/bin/llama-server /llama-server +COPY --from=build /app/build/bin/jarvis-server /jarvis-server ENV LC_ALL=C.utf8 # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ 
"/llama-server" ] +ENTRYPOINT [ "/jarvis-server" ] diff --git a/.devops/llama-server-musa.Dockerfile b/.devops/llama-server-musa.Dockerfile index 193a6d77cb9ed..1c8e8938bde96 100644 --- a/.devops/llama-server-musa.Dockerfile +++ b/.devops/llama-server-musa.Dockerfile @@ -15,8 +15,8 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ - cmake --build build --config Release --target llama-server -j$(nproc) +RUN cmake -B build -DGGML_MUSA=ON -DJARVIS_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ + cmake --build build --config Release --target jarvis-server -j$(nproc) FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime @@ -24,12 +24,12 @@ RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev libgomp1 curl COPY --from=build /app/build/ggml/src/libggml.so /libggml.so -COPY --from=build /app/build/src/libllama.so /libllama.so -COPY --from=build /app/build/bin/llama-server /llama-server +COPY --from=build /app/build/src/libjarvis.so /libjarvis.so +COPY --from=build /app/build/bin/jarvis-server /jarvis-server # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/llama-server" ] +ENTRYPOINT [ "/jarvis-server" ] diff --git a/.devops/llama-server-rocm.Dockerfile b/.devops/llama-server-rocm.Dockerfile index 8553af75b61fc..a9192b3dbbc91 100644 --- a/.devops/llama-server-rocm.Dockerfile +++ b/.devops/llama-server-rocm.Dockerfile @@ -9,7 +9,7 @@ ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-co FROM ${BASE_ROCM_DEV_CONTAINER} AS build # Unless otherwise specified, we make a fat build. 
-# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 +# List from https://github.com/ggerganov/jarvis.cpp/pull/1087#issuecomment-1682807878 # This is mostly tied to rocBLAS supported archs. ARG ROCM_DOCKER_ARCH="\ gfx803 \ @@ -40,15 +40,15 @@ ENV GGML_HIPBLAS=1 ENV CC=/opt/rocm/llvm/bin/clang ENV CXX=/opt/rocm/llvm/bin/clang++ # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 # Enable cURL -ENV LLAMA_CURL=1 +ENV JARVIS_CURL=1 RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev curl -RUN make -j$(nproc) llama-server +RUN make -j$(nproc) jarvis-server HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/app/llama-server" ] +ENTRYPOINT [ "/app/jarvis-server" ] diff --git a/.devops/llama-server-vulkan.Dockerfile b/.devops/llama-server-vulkan.Dockerfile index 93c5e0c26e691..89811bed3e6ad 100644 --- a/.devops/llama-server-vulkan.Dockerfile +++ b/.devops/llama-server-vulkan.Dockerfile @@ -14,18 +14,18 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key # Build it WORKDIR /app COPY . . 
-RUN cmake -B build -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \ - cmake --build build --config Release --target llama-server +RUN cmake -B build -DGGML_VULKAN=1 -DJARVIS_CURL=1 && \ + cmake --build build --config Release --target jarvis-server # Clean up WORKDIR / -RUN cp /app/build/bin/llama-server /llama-server && \ +RUN cp /app/build/bin/jarvis-server /jarvis-server && \ rm -rf /app ENV LC_ALL=C.utf8 # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/llama-server" ] +ENTRYPOINT [ "/jarvis-server" ] diff --git a/.devops/llama-server.Dockerfile b/.devops/llama-server.Dockerfile index 02accc85e1368..cc39a213c173e 100644 --- a/.devops/llama-server.Dockerfile +++ b/.devops/llama-server.Dockerfile @@ -9,21 +9,21 @@ WORKDIR /app COPY . . -ENV LLAMA_CURL=1 +ENV JARVIS_CURL=1 -RUN make -j$(nproc) llama-server +RUN make -j$(nproc) jarvis-server FROM ubuntu:$UBUNTU_VERSION AS runtime RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev libgomp1 curl -COPY --from=build /app/llama-server /llama-server +COPY --from=build /app/jarvis-server /jarvis-server ENV LC_ALL=C.utf8 # Must be set to 0.0.0.0 so it can listen to requests from host machine -ENV LLAMA_ARG_HOST=0.0.0.0 +ENV JARVIS_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/llama-server" ] +ENTRYPOINT [ "/jarvis-server" ] diff --git a/.devops/nix/apps.nix b/.devops/nix/apps.nix index 0ecf19fc56d55..af01140753974 100644 --- a/.devops/nix/apps.nix +++ b/.devops/nix/apps.nix @@ -6,10 +6,10 @@ let inherit (config.packages) default; binaries = [ - "llama-cli" - "llama-embedding" - "llama-server" - "llama-quantize" + "jarvis-cli" + "jarvis-embedding" + "jarvis-server" + "jarvis-quantize" ]; mkApp = name: { type = "app"; diff --git a/.devops/nix/docker.nix b/.devops/nix/docker.nix index d607b4575772c..502070aa8a5f2 
100644 --- a/.devops/nix/docker.nix +++ b/.devops/nix/docker.nix @@ -2,14 +2,14 @@ lib, dockerTools, buildEnv, - llama-cpp, + jarvis-cpp, interactive ? true, coreutils, }: # A tar that can be fed into `docker load`: # -# $ nix build .#llamaPackages.docker +# $ nix build .#jarvisPackages.docker # $ docker load < result # For details and variations cf. @@ -19,16 +19,16 @@ # Approximate (compressed) sizes, at the time of writing, are: # -# .#llamaPackages.docker: 125M; -# .#llamaPackagesCuda.docker: 537M; -# .#legacyPackages.aarch64-linux.llamaPackagesXavier.docker: 415M. +# .#jarvisPackages.docker: 125M; +# .#jarvisPackagesCuda.docker: 537M; +# .#legacyPackages.aarch64-linux.jarvisPackagesXavier.docker: 415M. dockerTools.buildLayeredImage { - name = llama-cpp.pname; + name = jarvis-cpp.pname; tag = "latest"; contents = - [ llama-cpp ] + [ jarvis-cpp ] ++ lib.optionals interactive [ coreutils dockerTools.binSh diff --git a/.devops/nix/jetson-support.nix b/.devops/nix/jetson-support.nix index 78e2e40e03864..56f4c5b7805a5 100644 --- a/.devops/nix/jetson-support.nix +++ b/.devops/nix/jetson-support.nix @@ -11,10 +11,10 @@ { legacyPackages = let - caps.llamaPackagesXavier = "7.2"; - caps.llamaPackagesOrin = "8.7"; - caps.llamaPackagesTX2 = "6.2"; - caps.llamaPackagesNano = "5.3"; + caps.jarvisPackagesXavier = "7.2"; + caps.jarvisPackagesOrin = "8.7"; + caps.jarvisPackagesTX2 = "6.2"; + caps.jarvisPackagesNano = "5.3"; pkgsFor = cap: @@ -31,9 +31,9 @@ builtins.mapAttrs (name: cap: (pkgsFor cap).callPackage ./scope.nix { }) caps; packages = lib.optionalAttrs (system == "aarch64-linux") { - jetson-xavier = config.legacyPackages.llamaPackagesXavier.llama-cpp; - jetson-orin = config.legacyPackages.llamaPackagesOrin.llama-cpp; - jetson-nano = config.legacyPackages.llamaPackagesNano.llama-cpp; + jetson-xavier = config.legacyPackages.jarvisPackagesXavier.jarvis-cpp; + jetson-orin = config.legacyPackages.jarvisPackagesOrin.jarvis-cpp; + jetson-nano = 
config.legacyPackages.jarvisPackagesNano.jarvis-cpp; }; }; } diff --git a/.devops/nix/package-gguf-py.nix b/.devops/nix/package-gguf-py.nix index cca2f36a5bd4d..62b622332bf65 100644 --- a/.devops/nix/package-gguf-py.nix +++ b/.devops/nix/package-gguf-py.nix @@ -1,6 +1,6 @@ { lib, - llamaVersion, + jarvisVersion, numpy, tqdm, sentencepiece, @@ -12,7 +12,7 @@ buildPythonPackage { pname = "gguf"; - version = llamaVersion; + version = jarvisVersion; pyproject = true; nativeBuildInputs = [ poetry-core ]; propagatedBuildInputs = [ diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix index 5d7d7ea5ae2d0..436f9c82315b1 100644 --- a/.devops/nix/package.nix +++ b/.devops/nix/package.nix @@ -33,7 +33,7 @@ useRocm ? config.rocmSupport, enableCurl ? true, useVulkan ? false, - llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake + jarvisVersion ? "0.0.0", # Arbitrary version, substituted by the flake # It's necessary to consistently use backendStdenv when building with CUDA support, # otherwise we get libstdc++ errors downstream. @@ -103,8 +103,8 @@ let in effectiveStdenv.mkDerivation (finalAttrs: { - pname = "llama-cpp${pnameSuffix}"; - version = llamaVersion; + pname = "jarvis-cpp${pnameSuffix}"; + version = jarvisVersion; # Note: none of the files discarded here are visible in the sandbox or # affect the output hash. This also means they can be modified without @@ -132,12 +132,12 @@ effectiveStdenv.mkDerivation (finalAttrs: { --replace '[bundle pathForResource:@"default" ofType:@"metallib"];' "@\"$out/bin/default.metallib\";" ''; - # With PR#6015 https://github.com/ggerganov/llama.cpp/pull/6015, + # With PR#6015 https://github.com/ggerganov/jarvis.cpp/pull/6015, # `default.metallib` may be compiled with Metal compiler from XCode # and we need to escape sandbox on MacOS to access Metal compiler. 
# `xcrun` is used find the path of the Metal compiler, which is varible # and not on $PATH - # see https://github.com/ggerganov/llama.cpp/pull/6118 for discussion + # see https://github.com/ggerganov/jarvis.cpp/pull/6118 for discussion __noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders; nativeBuildInputs = @@ -166,10 +166,10 @@ effectiveStdenv.mkDerivation (finalAttrs: { cmakeFlags = [ - (cmakeBool "LLAMA_BUILD_SERVER" true) + (cmakeBool "JARVIS_BUILD_SERVER" true) (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic)) (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true) - (cmakeBool "LLAMA_CURL" enableCurl) + (cmakeBool "JARVIS_CURL" enableCurl) (cmakeBool "GGML_NATIVE" false) (cmakeBool "GGML_BLAS" useBlas) (cmakeBool "GGML_CUDA" useCuda) @@ -205,7 +205,7 @@ effectiveStdenv.mkDerivation (finalAttrs: { # if they haven't been added yet. postInstall = '' mkdir -p $out/include - cp $src/include/llama.h $out/include/ + cp $src/include/jarvis.h $out/include/ ''; meta = { @@ -219,11 +219,11 @@ effectiveStdenv.mkDerivation (finalAttrs: { broken = (useMetalKit && !effectiveStdenv.isDarwin); description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}"; - homepage = "https://github.com/ggerganov/llama.cpp/"; + homepage = "https://github.com/ggerganov/jarvis.cpp/"; license = lib.licenses.mit; # Accommodates `nix run` and `lib.getExe` - mainProgram = "llama-cli"; + mainProgram = "jarvis-cli"; # These people might respond, on the best effort basis, if you ping them # in case of Nix-specific regressions or for reviewing Nix-specific PRs. 
diff --git a/.devops/nix/python-scripts.nix b/.devops/nix/python-scripts.nix index 392e9ffe41bf5..7c91fd9022e48 100644 --- a/.devops/nix/python-scripts.nix +++ b/.devops/nix/python-scripts.nix @@ -9,7 +9,7 @@ }@inputs: let - llama-python-deps = with python3Packages; [ + jarvis-python-deps = with python3Packages; [ numpy sentencepiece transformers @@ -18,7 +18,7 @@ let gguf-py tqdm - # for scripts/compare-llama-bench.py + # for scripts/compare-jarvis-bench.py gitpython tabulate @@ -28,7 +28,7 @@ let ]; - llama-python-test-deps = with python3Packages; [ + jarvis-python-test-deps = with python3Packages; [ # Server bench matplotlib @@ -40,7 +40,7 @@ let in buildPythonPackage ({ - pname = "llama-scripts"; + pname = "jarvis-scripts"; version = "0.0.0"; pyproject = true; @@ -61,6 +61,6 @@ buildPythonPackage ({ src = lib.cleanSource ../../.; }; nativeBuildInputs = [ poetry-core ]; - nativeCheckInputs = llama-python-test-deps; - dependencies = llama-python-deps; + nativeCheckInputs = jarvis-python-test-deps; + dependencies = jarvis-python-deps; }) diff --git a/.devops/nix/scope.nix b/.devops/nix/scope.nix index 478e8c4228afa..4b1b4ff090bd5 100644 --- a/.devops/nix/scope.nix +++ b/.devops/nix/scope.nix @@ -2,7 +2,7 @@ lib, newScope, python3, - llamaVersion ? "0.0.0", + jarvisVersion ? "0.0.0", }: let @@ -21,7 +21,7 @@ in # Cf. 
https://noogle.dev/f/lib/makeScope lib.makeScope newScope (self: { - inherit llamaVersion; + inherit jarvisVersion; gguf-py = self.callPackage ./package-gguf-py.nix { inherit buildPythonPackage @@ -34,7 +34,7 @@ lib.makeScope newScope (self: { ; }; python-scripts = self.callPackage ./python-scripts.nix { inherit buildPythonPackage poetry-core; }; - llama-cpp = self.callPackage ./package.nix { }; + jarvis-cpp = self.callPackage ./package.nix { }; docker = self.callPackage ./docker.nix { }; docker-min = self.callPackage ./docker.nix { interactive = false; }; sif = self.callPackage ./sif.nix { }; diff --git a/.devops/nix/sif.nix b/.devops/nix/sif.nix index 7a5e1dd0ffc4c..cc43dd75680e9 100644 --- a/.devops/nix/sif.nix +++ b/.devops/nix/sif.nix @@ -1,7 +1,7 @@ { lib, singularity-tools, - llama-cpp, + jarvis-cpp, bashInteractive, interactive ? false, }: @@ -10,8 +10,8 @@ let optionalInt = cond: x: if cond then x else 0; in singularity-tools.buildImage rec { - inherit (llama-cpp) name; - contents = [ llama-cpp ] ++ lib.optionals interactive [ bashInteractive ]; + inherit (jarvis-cpp) name; + contents = [ jarvis-cpp ] ++ lib.optionals interactive [ bashInteractive ]; # These are excessive (but safe) for most variants. 
Building singularity # images requires superuser privileges, so we build them inside a VM in a @@ -22,6 +22,6 @@ singularity-tools.buildImage rec { # Expected image sizes: # - cpu/blas: 150M, # - cuda, all gencodes: 560M, - diskSize = 4096 + optionalInt llama-cpp.useRocm 16384; + diskSize = 4096 + optionalInt jarvis-cpp.useRocm 16384; memSize = diskSize; } diff --git a/.devops/tools.sh b/.devops/tools.sh index 24dcfd35079cb..a5a56c8231fab 100755 --- a/.devops/tools.sh +++ b/.devops/tools.sh @@ -10,9 +10,9 @@ shift if [[ "$arg1" == '--convert' || "$arg1" == '-c' ]]; then python3 ./convert_hf_to_gguf.py "$@" elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then - ./llama-quantize "$@" + ./jarvis-quantize "$@" elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then - ./llama-cli "$@" + ./jarvis-cli "$@" elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then echo "Converting PTH to GGML..." for i in `ls $1/$2/ggml-model-f16.bin*`; do @@ -20,17 +20,17 @@ elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then echo "Skip model quantization, it already exists: ${i/f16/q4_0}" else echo "Converting PTH to GGML: $i into ${i/f16/q4_0}..." 
- ./llama-quantize "$i" "${i/f16/q4_0}" q4_0 + ./jarvis-quantize "$i" "${i/f16/q4_0}" q4_0 fi done elif [[ "$arg1" == '--server' || "$arg1" == '-s' ]]; then - ./llama-server "$@" + ./jarvis-server "$@" else echo "Unknown command: $arg1" echo "Available commands: " echo " --run (-r): Run a model previously converted into ggml" echo " ex: -m /models/7B/ggml-model-q4_0.bin -p \"Building a website can be done in 10 simple steps:\" -n 512" - echo " --convert (-c): Convert a llama model into ggml" + echo " --convert (-c): Convert a jarvis model into ggml" echo " ex: --outtype f16 \"/models/7B/\" " echo " --quantize (-q): Optimize with quantization process ggml" echo " ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2" diff --git a/.dockerignore b/.dockerignore index 064b7c7be86d0..a07624cfd185e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -12,8 +12,8 @@ build*/ models/* -/llama-cli -/llama-quantize +/jarvis-cli +/jarvis-quantize arm_neon.h compile_commands.json diff --git a/.editorconfig b/.editorconfig index f88f8da67cd78..ec03eee394d99 100644 --- a/.editorconfig +++ b/.editorconfig @@ -24,7 +24,7 @@ insert_final_newline = unset [examples/server/public/*] indent_size = 2 -[examples/llama.swiftui/llama.swiftui.xcodeproj/*] +[examples/jarvis.swiftui/jarvis.swiftui.xcodeproj/*] indent_style = tab [examples/cvector-generator/*.txt] diff --git a/.github/ISSUE_TEMPLATE/01-bug-low.yml b/.github/ISSUE_TEMPLATE/01-bug-low.yml index 54785854f776e..281fdb74ff70f 100644 --- a/.github/ISSUE_TEMPLATE/01-bug-low.yml +++ b/.github/ISSUE_TEMPLATE/01-bug-low.yml @@ -1,5 +1,5 @@ name: Low Severity Bugs -description: Used to report low severity bugs in llama.cpp (e.g. cosmetic issues, non critical UI glitches) +description: Used to report low severity bugs in jarvis.cpp (e.g. 
cosmetic issues, non critical UI glitches) title: "Bug: " labels: ["bug-unconfirmed", "low severity"] body: @@ -8,7 +8,7 @@ body: value: | Thanks for taking the time to fill out this bug report! Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. + and the version of jarvis.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug. - type: textarea id: what-happened @@ -24,7 +24,7 @@ body: label: Name and Version description: Which executable and which version of our software are you running? (use `--version` to get a version string) placeholder: | - $./llama-cli --version + $./jarvis-cli --version version: 2999 (42b4109e) built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu validations: diff --git a/.github/ISSUE_TEMPLATE/02-bug-medium.yml b/.github/ISSUE_TEMPLATE/02-bug-medium.yml index a6285c6f05bac..9a4f564e37aae 100644 --- a/.github/ISSUE_TEMPLATE/02-bug-medium.yml +++ b/.github/ISSUE_TEMPLATE/02-bug-medium.yml @@ -1,5 +1,5 @@ name: Medium Severity Bug -description: Used to report medium severity bugs in llama.cpp (e.g. Malfunctioning Features but generally still useable) +description: Used to report medium severity bugs in jarvis.cpp (e.g. Malfunctioning Features but generally still useable) title: "Bug: " labels: ["bug-unconfirmed", "medium severity"] body: @@ -8,7 +8,7 @@ body: value: | Thanks for taking the time to fill out this bug report! Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. + and the version of jarvis.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug. - type: textarea id: what-happened @@ -24,7 +24,7 @@ body: label: Name and Version description: Which executable and which version of our software are you running? 
(use `--version` to get a version string) placeholder: | - $./llama-cli --version + $./jarvis-cli --version version: 2999 (42b4109e) built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu validations: diff --git a/.github/ISSUE_TEMPLATE/03-bug-high.yml b/.github/ISSUE_TEMPLATE/03-bug-high.yml index ff816b93769c3..cfa23d4afbdfb 100644 --- a/.github/ISSUE_TEMPLATE/03-bug-high.yml +++ b/.github/ISSUE_TEMPLATE/03-bug-high.yml @@ -1,5 +1,5 @@ name: High Severity Bug -description: Used to report high severity bugs in llama.cpp (e.g. Malfunctioning features hindering important common workflow) +description: Used to report high severity bugs in jarvis.cpp (e.g. Malfunctioning features hindering important common workflow) title: "Bug: " labels: ["bug-unconfirmed", "high severity"] body: @@ -8,7 +8,7 @@ body: value: | Thanks for taking the time to fill out this bug report! Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. + and the version of jarvis.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug. - type: textarea id: what-happened @@ -24,7 +24,7 @@ body: label: Name and Version description: Which executable and which version of our software are you running? (use `--version` to get a version string) placeholder: | - $./llama-cli --version + $./jarvis-cli --version version: 2999 (42b4109e) built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu validations: diff --git a/.github/ISSUE_TEMPLATE/04-bug-critical.yml b/.github/ISSUE_TEMPLATE/04-bug-critical.yml index 7af42a80b3b93..e88543452a79c 100644 --- a/.github/ISSUE_TEMPLATE/04-bug-critical.yml +++ b/.github/ISSUE_TEMPLATE/04-bug-critical.yml @@ -1,5 +1,5 @@ name: Critical Severity Bug -description: Used to report critical severity bugs in llama.cpp (e.g. Crashing, Corrupted, Dataloss) +description: Used to report critical severity bugs in jarvis.cpp (e.g. 
Crashing, Corrupted, Dataloss) title: "Bug: " labels: ["bug-unconfirmed", "critical severity"] body: @@ -8,7 +8,7 @@ body: value: | Thanks for taking the time to fill out this bug report! Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. + and the version of jarvis.cpp that you are using. If possible, please provide a minimal code example that reproduces the bug. - type: textarea id: what-happened @@ -24,7 +24,7 @@ body: label: Name and Version description: Which executable and which version of our software are you running? (use `--version` to get a version string) placeholder: | - $./llama-cli --version + $./jarvis-cli --version version: 2999 (42b4109e) built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu validations: diff --git a/.github/ISSUE_TEMPLATE/05-enhancement.yml b/.github/ISSUE_TEMPLATE/05-enhancement.yml index 58fca73183d41..b33f44a627b41 100644 --- a/.github/ISSUE_TEMPLATE/05-enhancement.yml +++ b/.github/ISSUE_TEMPLATE/05-enhancement.yml @@ -1,12 +1,12 @@ name: Enhancement -description: Used to request enhancements for llama.cpp +description: Used to request enhancements for jarvis.cpp title: "Feature Request: " labels: ["enhancement"] body: - type: markdown attributes: value: | - [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggerganov/llama.cpp/discussions/categories/ideas) + [Please post your idea first in Discussion if there is not yet a consensus for this enhancement request. 
This will help to keep this issue tracker focused on enhancements that the community has agreed needs to be implemented.](https://github.com/ggerganov/jarvis.cpp/discussions/categories/ideas) - type: checkboxes id: prerequisites @@ -16,18 +16,18 @@ body: options: - label: I am running the latest code. Mention the version if possible as well. required: true - - label: I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md). + - label: I carefully followed the [README.md](https://github.com/ggerganov/jarvis.cpp/blob/master/README.md). required: true - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed). required: true - - label: I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new and useful enhancement to share. + - label: I reviewed the [Discussions](https://github.com/ggerganov/jarvis.cpp/discussions), and have a new and useful enhancement to share. required: true - type: textarea id: feature-description attributes: label: Feature Description - description: Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do as an enhancement. + description: Please provide a detailed written description of what you were trying to do, and what you expected `jarvis.cpp` to do as an enhancement. placeholder: Detailed description of the enhancement validations: required: true @@ -36,7 +36,7 @@ body: id: motivation attributes: label: Motivation - description: Please provide a detailed written description of reasons why this feature is necessary and how it is useful to `llama.cpp` users. + description: Please provide a detailed written description of reasons why this feature is necessary and how it is useful to `jarvis.cpp` users. 
placeholder: Explanation of why this feature is needed and its benefits validations: required: true diff --git a/.github/ISSUE_TEMPLATE/06-research.yml b/.github/ISSUE_TEMPLATE/06-research.yml index 3ae4e9f8caaa4..51e4baf6fffa7 100644 --- a/.github/ISSUE_TEMPLATE/06-research.yml +++ b/.github/ISSUE_TEMPLATE/06-research.yml @@ -6,7 +6,7 @@ body: - type: markdown attributes: value: | - Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22) + Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/jarvis.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22) - type: checkboxes id: research-stage diff --git a/.github/ISSUE_TEMPLATE/07-refactor.yml b/.github/ISSUE_TEMPLATE/07-refactor.yml index 3a68d3d5355d6..0a8a58fccd0ba 100644 --- a/.github/ISSUE_TEMPLATE/07-refactor.yml +++ b/.github/ISSUE_TEMPLATE/07-refactor.yml @@ -6,8 +6,8 @@ body: - type: markdown attributes: value: | - Don't forget to [check for existing refactor issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered. - Also you may want to check [Pull request refactor label as well](https://github.com/ggerganov/llama.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too. + Don't forget to [check for existing refactor issue tickets](https://github.com/ggerganov/jarvis.cpp/issues?q=is%3Aopen+is%3Aissue+label%3Arefactoring) in case it's already covered. + Also you may want to check [Pull request refactor label as well](https://github.com/ggerganov/jarvis.cpp/pulls?q=is%3Aopen+is%3Apr+label%3Arefactoring) for duplicates too. 
- type: textarea id: background-description diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index eb8c4b472df4c..fa85823fcdae0 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,11 +1,11 @@ blank_issues_enabled: true contact_links: - name: Got an idea? - url: https://github.com/ggerganov/llama.cpp/discussions/categories/ideas + url: https://github.com/ggerganov/jarvis.cpp/discussions/categories/ideas about: Pop it there. It may then become an enhancement ticket. - name: Got a question? - url: https://github.com/ggerganov/llama.cpp/discussions/categories/q-a + url: https://github.com/ggerganov/jarvis.cpp/discussions/categories/q-a about: Ask a question there! - name: Want to contribute? - url: https://github.com/ggerganov/llama.cpp/wiki/contribute + url: https://github.com/ggerganov/jarvis.cpp/wiki/contribute about: Head to the contribution guide page of the wiki for areas you can help with diff --git a/.github/labeler.yml b/.github/labeler.yml index 89436740d1ffb..7e5e48b35ac22 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -67,7 +67,7 @@ script: android: - changed-files: - any-glob-to-any-file: - - examples/llama.android/** + - examples/jarvis.android/** server: - changed-files: - any-glob-to-any-file: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 997c6d9d05397..c1c783730f652 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ -- [x] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md) +- [x] I have read the [contributing guidelines](https://github.com/ggerganov/jarvis.cpp/blob/master/CONTRIBUTING.md) - Self-reported review complexity: - [ ] Low - [ ] Medium diff --git a/.github/workflows/bench.yml.disabled b/.github/workflows/bench.yml.disabled index 1c8787ef78f7e..12f092afcee5f 100644 --- a/.github/workflows/bench.yml.disabled 
+++ b/.github/workflows/bench.yml.disabled @@ -1,5 +1,5 @@ # TODO: there have been some issues with the workflow, so disabling for now -# https://github.com/ggerganov/llama.cpp/issues/7893 +# https://github.com/ggerganov/jarvis.cpp/issues/7893 # # Benchmark name: Benchmark @@ -27,10 +27,10 @@ on: push: branches: - master - paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp'] + paths: ['jarvis.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp'] pull_request_target: types: [opened, synchronize, reopened] - paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp'] + paths: ['jarvis.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp'] schedule: - cron: '04 2 * * *' @@ -113,16 +113,16 @@ jobs: set -eux cmake -B build \ -DGGML_NATIVE=OFF \ - -DLLAMA_BUILD_SERVER=ON \ - -DLLAMA_CURL=ON \ - -DLLAMA_CUBLAS=ON \ + -DJARVIS_BUILD_SERVER=ON \ + -DJARVIS_CURL=ON \ + -DJARVIS_CUBLAS=ON \ -DCUDAToolkit_ROOT=/usr/local/cuda \ -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc \ -DCMAKE_CUDA_ARCHITECTURES=75 \ - -DLLAMA_FATAL_WARNINGS=OFF \ - -DLLAMA_ALL_WARNINGS=OFF \ + -DJARVIS_FATAL_WARNINGS=OFF \ + -DJARVIS_ALL_WARNINGS=OFF \ -DCMAKE_BUILD_TYPE=Release; - cmake --build build --config Release -j $(nproc) --target llama-server + cmake --build build --config Release -j $(nproc) --target jarvis-server - name: Download the dataset id: download_dataset @@ -240,7 +240,7 @@ jobs: message: |

- 📈 **llama.cpp server** for _${{ github.job }}_ on _${{ env.RUNNER_LABEL }}_ for `${{ matrix.model }}`-`${{ matrix.ftype }}`: **${{ env.BENCH_ITERATIONS}} iterations** 🚀 + 📈 **jarvis.cpp server** for _${{ github.job }}_ on _${{ env.RUNNER_LABEL }}_ for `${{ matrix.model }}`-`${{ matrix.ftype }}`: **${{ env.BENCH_ITERATIONS}} iterations** 🚀

@@ -249,9 +249,9 @@ jobs: Expand details for performance related PR only - Concurrent users: ${{ env.N_USERS }}, duration: ${{ github.event.inputs.duration || env.DURATION }} - - HTTP request : avg=${{ env.HTTP_REQ_DURATION_AVG }}ms p(95)=${{ env.HTTP_REQ_DURATION_P_95_ }}ms fails=${{ env.HTTP_REQ_FAILED_PASSES }}, finish reason: stop=${{ env.LLAMACPP_COMPLETIONS_STOP_RATE_PASSES }} truncated=${{ env.LLAMACPP_COMPLETIONS_TRUNCATED_RATE_PASSES }} - - Prompt processing (pp): avg=${{ env.LLAMACPP_PROMPT_PROCESSING_SECOND_AVG }}tk/s p(95)=${{ env.LLAMACPP_PROMPT_PROCESSING_SECOND_P_95_ }}tk/s - - Token generation (tg): avg=${{ env.LLAMACPP_TOKENS_SECOND_AVG }}tk/s p(95)=${{ env.LLAMACPP_TOKENS_SECOND_P_95_ }}tk/s + - HTTP request : avg=${{ env.HTTP_REQ_DURATION_AVG }}ms p(95)=${{ env.HTTP_REQ_DURATION_P_95_ }}ms fails=${{ env.HTTP_REQ_FAILED_PASSES }}, finish reason: stop=${{ env.JARVISCPP_COMPLETIONS_STOP_RATE_PASSES }} truncated=${{ env.JARVISCPP_COMPLETIONS_TRUNCATED_RATE_PASSES }} + - Prompt processing (pp): avg=${{ env.JARVISCPP_PROMPT_PROCESSING_SECOND_AVG }}tk/s p(95)=${{ env.JARVISCPP_PROMPT_PROCESSING_SECOND_P_95_ }}tk/s + - Token generation (tg): avg=${{ env.JARVISCPP_TOKENS_SECOND_AVG }}tk/s p(95)=${{ env.JARVISCPP_TOKENS_SECOND_P_95_ }}tk/s - ${{ env.BENCH_GRAPH_XLABEL }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 423173b975897..d73089ed81b2e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,9 +28,9 @@ env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} GGML_NLOOP: 3 GGML_N_THREADS: 1 - LLAMA_LOG_COLORS: 1 - LLAMA_LOG_PREFIX: 1 - LLAMA_LOG_TIMESTAMPS: 1 + JARVIS_LOG_COLORS: 1 + JARVIS_LOG_PREFIX: 1 + JARVIS_LOG_TIMESTAMPS: 1 jobs: macOS-latest-cmake-arm64: @@ -55,7 +55,7 @@ jobs: sysctl -a mkdir build cd build - cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL_EMBED_LIBRARY=ON -DLLAMA_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF .. 
+ cmake -DJARVIS_FATAL_WARNINGS=ON -DGGML_METAL_EMBED_LIBRARY=ON -DJARVIS_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF .. cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -82,14 +82,14 @@ jobs: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | cp LICENSE ./build/bin/ - zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/* + zip -r jarvis-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip - name: llama-bin-macos-arm64.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip + name: jarvis-bin-macos-arm64.zip macOS-latest-cmake-x64: runs-on: macos-12 @@ -112,8 +112,8 @@ jobs: run: | sysctl -a # Metal is disabled due to intermittent failures with Github runners not having a GPU: - # https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313 - cmake -B build -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF -DLLAMA_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF + # https://github.com/ggerganov/jarvis.cpp/actions/runs/8635935781/job/23674807267#step:5:2313 + cmake -B build -DJARVIS_FATAL_WARNINGS=ON -DGGML_METAL=OFF -DJARVIS_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -140,20 +140,20 @@ jobs: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | cp LICENSE ./build/bin/ - zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/* + zip -r jarvis-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/* - name: Upload artifacts if: ${{ ( 
github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip - name: llama-bin-macos-x64.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-macos-x64.zip + name: jarvis-bin-macos-x64.zip ubuntu-focal-make: runs-on: ubuntu-20.04 env: - LLAMA_NODE_AVAILABLE: true - LLAMA_PYTHON_AVAILABLE: true + JARVIS_NODE_AVAILABLE: true + JARVIS_PYTHON_AVAILABLE: true steps: - name: Clone @@ -177,7 +177,7 @@ jobs: - name: Build id: make_build env: - LLAMA_FATAL_WARNINGS: 1 + JARVIS_FATAL_WARNINGS: 1 run: | CC=gcc-8 make -j $(nproc) @@ -204,8 +204,8 @@ jobs: - name: Build id: make_build env: - LLAMA_FATAL_WARNINGS: 1 - LLAMA_CURL: 1 + JARVIS_FATAL_WARNINGS: 1 + JARVIS_CURL: 1 run: | CC=gcc-8 make -j $(nproc) @@ -230,7 +230,7 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF + cmake .. -DJARVIS_FATAL_WARNINGS=ON -DJARVIS_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF cmake --build . 
--config Release -j $(nproc) - name: Test @@ -239,16 +239,16 @@ jobs: cd build ctest -L 'main|curl' --verbose --timeout 900 - - name: Test llama2c conversion - id: llama2c_test + - name: Test jarvis2c conversion + id: jarvis2c_test run: | cd build echo "Fetch tokenizer" - wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories260K/tok512.bin - echo "Fetch llama2c model" - wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories260K/stories260K.bin - ./bin/llama-convert-llama2c-to-ggml --copy-vocab-from-model ./tok512.bin --llama2c-model stories260K.bin --llama2c-output-model stories260K.gguf - ./bin/llama-cli -m stories260K.gguf -p "One day, Lily met a Shoggoth" -n 500 -c 256 + wget https://huggingface.co/karpathy/tinyjarviss/resolve/main/stories260K/tok512.bin + echo "Fetch jarvis2c model" + wget https://huggingface.co/karpathy/tinyjarviss/resolve/main/stories260K/stories260K.bin + ./bin/jarvis-convert-jarvis2c-to-ggml --copy-vocab-from-model ./tok512.bin --jarvis2c-model stories260K.bin --jarvis2c-output-model stories260K.gguf + ./bin/jarvis-cli -m stories260K.gguf -p "One day, Lily met a Shoggoth" -n 500 -c 256 - name: Determine tag name id: tag @@ -268,14 +268,14 @@ jobs: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | cp LICENSE ./build/bin/ - zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip ./build/bin/* + zip -r jarvis-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip ./build/bin/* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip - name: llama-bin-ubuntu-x64.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.zip + name: jarvis-bin-ubuntu-x64.zip ubuntu-latest-cmake-sanitizer: runs-on: ubuntu-latest @@ -304,7 +304,7 @@ 
jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} + cmake .. -DJARVIS_FATAL_WARNINGS=ON -DJARVIS_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} cmake --build . --config ${{ matrix.build_type }} -j $(nproc) - name: Build (no OpenMP) @@ -313,7 +313,7 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF + cmake .. -DJARVIS_FATAL_WARNINGS=ON -DJARVIS_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF cmake --build . --config ${{ matrix.build_type }} -j $(nproc) - name: Test @@ -487,7 +487,7 @@ jobs: # TODO: build with GGML_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know # how to debug it. - # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7131777249/job/19420981052#step:5:1124 + # ref: https://github.com/ggerganov/jarvis.cpp/actions/runs/7131777249/job/19420981052#step:5:1124 macOS-latest-make: runs-on: macos-latest @@ -505,7 +505,7 @@ jobs: - name: Build id: make_build env: - LLAMA_FATAL_WARNINGS: 1 + JARVIS_FATAL_WARNINGS: 1 run: | GGML_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu) @@ -517,7 +517,7 @@ jobs: # TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know # how to debug it. - # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584 + # ref: https://github.com/ggerganov/jarvis.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584 # would be great if we fix these macOS-latest-cmake: runs-on: macos-latest @@ -539,7 +539,7 @@ jobs: sysctl -a mkdir build cd build - cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF .. + cmake -DJARVIS_FATAL_WARNINGS=ON -DGGML_METAL=OFF .. cmake --build . 
--config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -570,9 +570,9 @@ jobs: cd build cmake -G Xcode .. \ -DGGML_METAL_EMBED_LIBRARY=ON \ - -DLLAMA_BUILD_EXAMPLES=OFF \ - -DLLAMA_BUILD_TESTS=OFF \ - -DLLAMA_BUILD_SERVER=OFF \ + -DJARVIS_BUILD_EXAMPLES=OFF \ + -DJARVIS_BUILD_TESTS=OFF \ + -DJARVIS_BUILD_SERVER=OFF \ -DCMAKE_SYSTEM_NAME=iOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml @@ -600,9 +600,9 @@ jobs: cd build cmake -G Xcode .. \ -DGGML_METAL_EMBED_LIBRARY=ON \ - -DLLAMA_BUILD_EXAMPLES=OFF \ - -DLLAMA_BUILD_TESTS=OFF \ - -DLLAMA_BUILD_SERVER=OFF \ + -DJARVIS_BUILD_EXAMPLES=OFF \ + -DJARVIS_BUILD_TESTS=OFF \ + -DJARVIS_BUILD_SERVER=OFF \ -DCMAKE_SYSTEM_NAME=tvOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml @@ -629,7 +629,7 @@ jobs: - name: xcodebuild for swift package id: xcodebuild run: | - xcodebuild -scheme llama -destination "${{ matrix.destination }}" + xcodebuild -scheme jarvis -destination "${{ matrix.destination }}" - name: Build Swift Example id: make_build_swift_example @@ -705,23 +705,23 @@ jobs: matrix: include: - build: 'noavx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON' - build: 'avx2-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON' - build: 'avx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON' - build: 'avx512-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON 
-DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON -DBUILD_SHARED_LIBS=ON' - build: 'openblas-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'kompute-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' - build: 'vulkan-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON' - build: 'llvm-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' - build: 'msvc-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' steps: - name: Clone @@ -807,7 +807,7 @@ jobs: 7z x "-o${env:RUNNER_TEMP}" 
$env:RUNNER_TEMP/sde.tar $sde = $(join-path $env:RUNNER_TEMP sde-external-${env:SDE_VERSION}-win/sde.exe) cd build - $env:LLAMA_SKIP_TESTS_SLOW_ON_EMULATOR = 1 + $env:JARVIS_SKIP_TESTS_SLOW_ON_EMULATOR = 1 & $sde -future -- ctest -L main -C Release --verbose --timeout 900 - name: Determine tag name @@ -827,15 +827,15 @@ jobs: id: pack_artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | - Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt - 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\build\bin\Release\* + Copy-Item LICENSE .\build\bin\Release\jarvis.cpp.txt + 7z a jarvis-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\build\bin\Release\* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip - name: llama-bin-win-${{ matrix.build }}.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip + name: jarvis-bin-win-${{ matrix.build }}.zip windows-latest-cmake-cuda: runs-on: windows-2019 @@ -865,7 +865,7 @@ jobs: run: | mkdir build cd build - cmake .. -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON + cmake .. -DGGML_NATIVE=OFF -DJARVIS_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON cmake --build . --config Release -j $((${env:NUMBER_OF_PROCESSORS} - 1)) -t ggml cmake --build . 
--config Release -j ${env:NUMBER_OF_PROCESSORS} @@ -886,28 +886,28 @@ jobs: id: pack_artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | - 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\build\bin\Release\* + 7z a jarvis-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip .\build\bin\Release\* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip - name: llama-bin-win-cu${{ matrix.cuda }}-x64.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-cu${{ matrix.cuda }}-x64.zip + name: jarvis-bin-win-cu${{ matrix.cuda }}-x64.zip - name: Copy and pack Cuda runtime run: | echo "Cuda install location: ${{steps.cuda-toolkit.outputs.CUDA_PATH}}" $dst='.\build\bin\cudart\' robocopy "${{steps.cuda-toolkit.outputs.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll - 7z a cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip $dst\* + 7z a cudart-jarvis-bin-win-cu${{ matrix.cuda }}-x64.zip $dst\* - name: Upload Cuda runtime if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip - name: cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip + path: cudart-jarvis-bin-win-cu${{ matrix.cuda }}-x64.zip + name: cudart-jarvis-bin-win-cu${{ matrix.cuda }}-x64.zip windows-latest-cmake-sycl: runs-on: windows-latest @@ -963,14 +963,14 @@ jobs: cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" 
./build/bin echo "cp oneAPI running time dll files to ./build/bin done" - 7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/* + 7z a jarvis-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip - name: llama-bin-win-sycl-x64.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip + name: jarvis-bin-win-sycl-x64.zip windows-latest-cmake-hip: if: ${{ github.event.inputs.create_release != 'true' }} @@ -1060,13 +1060,13 @@ jobs: - name: Pack artifacts id: pack_artifacts run: | - 7z a llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip .\build\bin\* + 7z a jarvis-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip .\build\bin\* - name: Upload artifacts uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip - name: llama-bin-win-hip-x64-${{ matrix.gpu_target }}.zip + path: jarvis-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip + name: jarvis-bin-win-hip-x64-${{ matrix.gpu_target }}.zip ios-xcode-build: runs-on: macos-latest @@ -1076,7 +1076,7 @@ jobs: uses: actions/checkout@v4 - name: Build Xcode project - run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build + run: xcodebuild -project examples/jarvis.swiftui/jarvis.swiftui.xcodeproj -scheme jarvis.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build android-build: runs-on: ubuntu-latest @@ -1098,7 +1098,7 @@ jobs: - name: Build run: | - cd examples/llama.android + cd 
examples/jarvis.android ./gradlew build --no-daemon @@ -1261,7 +1261,7 @@ jobs: # sudo apt-get install cmake # # - name: Configure -# run: cmake . -DCMAKE_BUILD_TYPE=Debug -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON +# run: cmake . -DCMAKE_BUILD_TYPE=Debug -DJARVIS_SANITIZE_${{ matrix.sanitizer }}=ON # # - name: Build # run: | @@ -1300,7 +1300,7 @@ jobs: # - name: Upload binaries # uses: actions/upload-artifact@v4 # with: -# name: llama-bin-${{ matrix.arch }} +# name: jarvis-bin-${{ matrix.arch }} # path: build/bin/${{ matrix.build }} # # windows-blas: @@ -1339,7 +1339,7 @@ jobs: # run: > # cmake -S . -B ./build -A ${{ matrix.arch }} # -DCMAKE_BUILD_TYPE=${{ matrix.build }} -# -DLLAMA_SUPPORT_OPENBLAS=${{ matrix.blas }} +# -DJARVIS_SUPPORT_OPENBLAS=${{ matrix.blas }} # -DCMAKE_LIBRARY_PATH="$env:blasdir/lib" # # - name: Build @@ -1355,7 +1355,7 @@ jobs: # if: matrix.blas == 'ON' # uses: actions/upload-artifact@v4 # with: -# name: llama-blas-bin-${{ matrix.arch }} +# name: jarvis-blas-bin-${{ matrix.arch }} # path: build/bin/${{ matrix.build }} # # emscripten: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a953cdac907ae..fee3e9145be21 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -37,21 +37,21 @@ jobs: strategy: matrix: config: - - { tag: "light", dockerfile: ".devops/llama-cli.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "server", dockerfile: ".devops/llama-server.Dockerfile", platforms: "linux/amd64,linux/arm64" } + - { tag: "light", dockerfile: ".devops/jarvis-cli.Dockerfile", platforms: "linux/amd64,linux/arm64" } + - { tag: "server", dockerfile: ".devops/jarvis-server.Dockerfile", platforms: "linux/amd64,linux/arm64" } - { tag: "full", dockerfile: ".devops/full.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "light-cuda", dockerfile: ".devops/llama-cli-cuda.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-cuda", dockerfile: 
".devops/llama-server-cuda.Dockerfile", platforms: "linux/amd64" } + - { tag: "light-cuda", dockerfile: ".devops/jarvis-cli-cuda.Dockerfile", platforms: "linux/amd64" } + - { tag: "server-cuda", dockerfile: ".devops/jarvis-server-cuda.Dockerfile", platforms: "linux/amd64" } - { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" } - - { tag: "light-musa", dockerfile: ".devops/llama-cli-musa.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-musa", dockerfile: ".devops/llama-server-musa.Dockerfile", platforms: "linux/amd64" } + - { tag: "light-musa", dockerfile: ".devops/jarvis-cli-musa.Dockerfile", platforms: "linux/amd64" } + - { tag: "server-musa", dockerfile: ".devops/jarvis-server-musa.Dockerfile", platforms: "linux/amd64" } - { tag: "full-musa", dockerfile: ".devops/full-musa.Dockerfile", platforms: "linux/amd64" } # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete - #- { tag: "light-rocm", dockerfile: ".devops/llama-cli-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - #- { tag: "server-rocm", dockerfile: ".devops/llama-server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } + #- { tag: "light-rocm", dockerfile: ".devops/jarvis-cli-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } + #- { tag: "server-rocm", dockerfile: ".devops/jarvis-server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } #- { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - - { tag: "light-intel", dockerfile: ".devops/llama-cli-intel.Dockerfile", platforms: "linux/amd64" } - - { tag: "server-intel", dockerfile: ".devops/llama-server-intel.Dockerfile", platforms: "linux/amd64" } + - { tag: "light-intel", dockerfile: ".devops/jarvis-cli-intel.Dockerfile", platforms: "linux/amd64" } + - { tag: "server-intel", dockerfile: ".devops/jarvis-server-intel.Dockerfile", platforms: "linux/amd64" } 
steps: - name: Check out the repo uses: actions/checkout@v4 diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 368dbdbe5dccc..e3344be63ad39 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -11,7 +11,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - repository: "ggerganov/llama.cpp" + repository: "ggerganov/jarvis.cpp" - uses: actions/labeler@v5 with: configuration-path: '.github/labeler.yml' diff --git a/.github/workflows/nix-ci-aarch64.yml b/.github/workflows/nix-ci-aarch64.yml index 0da6acdf1c81e..7473135ef5c79 100644 --- a/.github/workflows/nix-ci-aarch64.yml +++ b/.github/workflows/nix-ci-aarch64.yml @@ -47,8 +47,8 @@ jobs: extra-conf: | extra-platforms = aarch64-linux extra-system-features = nixos-test kvm - extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= + extra-substituters = https://jarvis-cpp.cachix.org https://cuda-maintainers.cachix.org + extra-trusted-public-keys = jarvis-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - uses: DeterminateSystems/magic-nix-cache-action@v2 with: upstream-cache: https://${{ matrix.cachixName }}.cachix.org @@ -56,7 +56,7 @@ jobs: uses: cachix/cachix-action@v13 with: authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: llama-cpp + name: jarvis-cpp - name: Show all output paths run: > nix run github:nix-community/nix-eval-jobs diff --git a/.github/workflows/nix-ci.yml b/.github/workflows/nix-ci.yml index 8ecbbe53b4ed1..3a748d9acf4d3 100644 --- a/.github/workflows/nix-ci.yml +++ b/.github/workflows/nix-ci.yml @@ -34,8 +34,8 @@ jobs: with: github-token: ${{ secrets.GITHUB_TOKEN }} extra-conf: | - extra-substituters = https://llama-cpp.cachix.org 
https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= + extra-substituters = https://jarvis-cpp.cachix.org https://cuda-maintainers.cachix.org + extra-trusted-public-keys = jarvis-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - uses: DeterminateSystems/magic-nix-cache-action@v2 with: upstream-cache: https://${{ matrix.cachixName }}.cachix.org @@ -61,8 +61,8 @@ jobs: with: github-token: ${{ secrets.GITHUB_TOKEN }} extra-conf: | - extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= + extra-substituters = https://jarvis-cpp.cachix.org https://cuda-maintainers.cachix.org + extra-trusted-public-keys = jarvis-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - uses: DeterminateSystems/magic-nix-cache-action@v2 with: upstream-cache: https://${{ matrix.cachixName }}.cachix.org @@ -70,7 +70,7 @@ jobs: uses: cachix/cachix-action@v13 with: authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: llama-cpp + name: jarvis-cpp - name: Build run: > nix run github:Mic92/nix-fast-build diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml index 699ac095d6c83..29943d52e2dc3 100644 --- a/.github/workflows/server.yml +++ b/.github/workflows/server.yml @@ -21,10 +21,10 @@ on: paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'examples/server/**.*'] env: - LLAMA_LOG_COLORS: 1 - LLAMA_LOG_PREFIX: 1 - LLAMA_LOG_TIMESTAMPS: 1 - 
LLAMA_LOG_VERBOSITY: 10 + JARVIS_LOG_COLORS: 1 + JARVIS_LOG_PREFIX: 1 + JARVIS_LOG_TIMESTAMPS: 1 + JARVIS_LOG_VERBOSITY: 10 concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.run_id }} @@ -41,7 +41,7 @@ jobs: include: - build_type: Release sanitizer: "" - fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken + fail-fast: false # While -DJARVIS_SANITIZE_THREAD=ON is broken steps: - name: Dependencies @@ -99,12 +99,12 @@ jobs: run: | cmake -B build \ -DGGML_NATIVE=OFF \ - -DLLAMA_BUILD_SERVER=ON \ - -DLLAMA_CURL=ON \ + -DJARVIS_BUILD_SERVER=ON \ + -DJARVIS_CURL=ON \ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ - -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \ + -DJARVIS_SANITIZE_${{ matrix.sanitizer }}=ON \ -DGGML_OPENMP=OFF ; - cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server + cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target jarvis-server - name: Build id: cmake_build @@ -112,11 +112,11 @@ jobs: run: | cmake -B build \ -DGGML_NATIVE=OFF \ - -DLLAMA_BUILD_SERVER=ON \ - -DLLAMA_CURL=ON \ + -DJARVIS_BUILD_SERVER=ON \ + -DJARVIS_CURL=ON \ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ - -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ; - cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server + -DJARVIS_SANITIZE_${{ matrix.sanitizer }}=ON ; + cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target jarvis-server - name: Tests id: server_integration_tests @@ -155,8 +155,8 @@ jobs: - name: Build id: cmake_build run: | - cmake -B build -DLLAMA_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include" - cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target llama-server + cmake -B build -DJARVIS_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include" + cmake --build 
build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target jarvis-server - name: Python setup id: setup_python @@ -180,7 +180,7 @@ jobs: run: | cd examples/server/tests $env:PYTHONIOENCODING = ":replace" - behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp + behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags jarvis.cpp - name: Slow tests id: server_integration_tests_slow diff --git a/.gitignore b/.gitignore index 1092d097a7542..cf5abf6ff55de 100644 --- a/.gitignore +++ b/.gitignore @@ -48,8 +48,8 @@ build* !build-info.sh !build.zig !docs/build.md -/libllama.so -/llama-* +/libjarvis.so +/jarvis-* /vulkan-shaders-gen android-ndk-* arm_neon.h @@ -57,7 +57,7 @@ cmake-build-* CMakeSettings.json compile_commands.json ggml-metal-embed.metal -llama-batched-swift +jarvis-batched-swift /rpc-server out/ tmp/ @@ -118,7 +118,7 @@ poetry.toml /tests/test-double-float /tests/test-grad0 /tests/test-grammar-parser -/tests/test-llama-grammar +/tests/test-jarvis-grammar /tests/test-opt /tests/test-quantize-fns /tests/test-quantize-perf diff --git a/CMakeLists.txt b/CMakeLists.txt index ef0932a7b9277..db4944fcb677c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories. 
-project("llama.cpp" C CXX) +project("jarvis.cpp" C CXX) include(CheckIncludeFileCXX) #set(CMAKE_WARN_DEPRECATED YES) @@ -18,20 +18,20 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) - set(LLAMA_STANDALONE ON) + set(JARVIS_STANDALONE ON) include(git-vars) # configure project version # TODO else() - set(LLAMA_STANDALONE OFF) + set(JARVIS_STANDALONE OFF) endif() if (EMSCRIPTEN) set(BUILD_SHARED_LIBS_DEFAULT OFF) - option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON) + option(JARVIS_WASM_SINGLE_FILE "jarvis: embed WASM inside the generated jarvis.js" ON) else() if (MINGW) set(BUILD_SHARED_LIBS_DEFAULT OFF) @@ -51,41 +51,41 @@ endif() # # debug -option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON) -option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF) +option(JARVIS_ALL_WARNINGS "jarvis: enable all compiler warnings" ON) +option(JARVIS_ALL_WARNINGS_3RD_PARTY "jarvis: enable all compiler warnings in 3rd party libs" OFF) # build -option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF) +option(JARVIS_FATAL_WARNINGS "jarvis: enable -Werror flag" OFF) # sanitizers -option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF) -option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF) -option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF) +option(JARVIS_SANITIZE_THREAD "jarvis: enable thread sanitizer" OFF) +option(JARVIS_SANITIZE_ADDRESS "jarvis: enable address sanitizer" OFF) +option(JARVIS_SANITIZE_UNDEFINED "jarvis: enable undefined sanitizer" OFF) # utils -option(LLAMA_BUILD_COMMON "llama: build common utils library" ${LLAMA_STANDALONE}) +option(JARVIS_BUILD_COMMON "jarvis: build common utils library" ${JARVIS_STANDALONE}) # extra artifacts -option(LLAMA_BUILD_TESTS "llama: build tests" 
${LLAMA_STANDALONE}) -option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE}) -option(LLAMA_BUILD_SERVER "llama: build server example" ${LLAMA_STANDALONE}) +option(JARVIS_BUILD_TESTS "jarvis: build tests" ${JARVIS_STANDALONE}) +option(JARVIS_BUILD_EXAMPLES "jarvis: build examples" ${JARVIS_STANDALONE}) +option(JARVIS_BUILD_SERVER "jarvis: build server example" ${JARVIS_STANDALONE}) # 3rd party libs -option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF) +option(JARVIS_CURL "jarvis: use libcurl to download model from an URL" OFF) # Required for relocatable CMake package include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) # override ggml options -set(GGML_SANITIZE_THREAD ${LLAMA_SANITIZE_THREAD}) -set(GGML_SANITIZE_ADDRESS ${LLAMA_SANITIZE_ADDRESS}) -set(GGML_SANITIZE_UNDEFINED ${LLAMA_SANITIZE_UNDEFINED}) -set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS}) -set(GGML_FATAL_WARNINGS ${LLAMA_FATAL_WARNINGS}) +set(GGML_SANITIZE_THREAD ${JARVIS_SANITIZE_THREAD}) +set(GGML_SANITIZE_ADDRESS ${JARVIS_SANITIZE_ADDRESS}) +set(GGML_SANITIZE_UNDEFINED ${JARVIS_SANITIZE_UNDEFINED}) +set(GGML_ALL_WARNINGS ${JARVIS_ALL_WARNINGS}) +set(GGML_FATAL_WARNINGS ${JARVIS_FATAL_WARNINGS}) # change the default for these ggml options -if (NOT DEFINED GGML_LLAMAFILE) - set(GGML_LLAMAFILE_DEFAULT ON) +if (NOT DEFINED GGML_JARVISFILE) + set(GGML_JARVISFILE_DEFAULT ON) endif() if (NOT DEFINED GGML_AMX) @@ -97,23 +97,23 @@ if (NOT DEFINED GGML_CUDA_GRAPHS) endif() # transition helpers -function (llama_option_depr TYPE OLD NEW) +function (jarvis_option_depr TYPE OLD NEW) if (${OLD}) message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n") set(${NEW} ON PARENT_SCOPE) endif() endfunction() -llama_option_depr(FATAL_ERROR LLAMA_CUBLAS GGML_CUDA) -llama_option_depr(WARNING LLAMA_CUDA GGML_CUDA) -llama_option_depr(WARNING LLAMA_KOMPUTE GGML_KOMPUTE) -llama_option_depr(WARNING LLAMA_METAL GGML_METAL) -llama_option_depr(WARNING 
LLAMA_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY) -llama_option_depr(WARNING LLAMA_NATIVE GGML_NATIVE) -llama_option_depr(WARNING LLAMA_RPC GGML_RPC) -llama_option_depr(WARNING LLAMA_SYCL GGML_SYCL) -llama_option_depr(WARNING LLAMA_SYCL_F16 GGML_SYCL_F16) -llama_option_depr(WARNING LLAMA_CANN GGML_CANN) +jarvis_option_depr(FATAL_ERROR JARVIS_CUBLAS GGML_CUDA) +jarvis_option_depr(WARNING JARVIS_CUDA GGML_CUDA) +jarvis_option_depr(WARNING JARVIS_KOMPUTE GGML_KOMPUTE) +jarvis_option_depr(WARNING JARVIS_METAL GGML_METAL) +jarvis_option_depr(WARNING JARVIS_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY) +jarvis_option_depr(WARNING JARVIS_NATIVE GGML_NATIVE) +jarvis_option_depr(WARNING JARVIS_RPC GGML_RPC) +jarvis_option_depr(WARNING JARVIS_SYCL GGML_SYCL) +jarvis_option_depr(WARNING JARVIS_SYCL_F16 GGML_SYCL_F16) +jarvis_option_depr(WARNING JARVIS_CANN GGML_CANN) # # build the library @@ -132,18 +132,18 @@ add_subdirectory(src) include(GNUInstallDirs) include(CMakePackageConfigHelpers) -set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER}) -set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT}) -set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER}) +set(JARVIS_BUILD_NUMBER ${BUILD_NUMBER}) +set(JARVIS_BUILD_COMMIT ${BUILD_COMMIT}) +set(JARVIS_INSTALL_VERSION 0.0.${BUILD_NUMBER}) -set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") -set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") -set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") +set(JARVIS_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") +set(JARVIS_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") +set(JARVIS_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") # At the moment some compile definitions are placed within the ggml/src # directory but not exported on the `ggml` target. 
This could be improved by -# determining _precisely_ which defines are necessary for the llama-config +# determining _precisely_ which defines are necessary for the jarvis-config # package. # set(GGML_TRANSIENT_DEFINES) @@ -158,25 +158,25 @@ if (GGML_TARGET_DEFINES) endif() get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES) -set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h) -install(TARGETS llama LIBRARY PUBLIC_HEADER) +set_target_properties(jarvis PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/jarvis.h) +install(TARGETS jarvis LIBRARY PUBLIC_HEADER) configure_package_config_file( - ${CMAKE_CURRENT_SOURCE_DIR}/cmake/llama-config.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake - INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama - PATH_VARS LLAMA_INCLUDE_INSTALL_DIR - LLAMA_LIB_INSTALL_DIR - LLAMA_BIN_INSTALL_DIR ) + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/jarvis-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/jarvis-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/jarvis + PATH_VARS JARVIS_INCLUDE_INSTALL_DIR + JARVIS_LIB_INSTALL_DIR + JARVIS_BIN_INSTALL_DIR ) write_basic_package_version_file( - ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake - VERSION ${LLAMA_INSTALL_VERSION} + ${CMAKE_CURRENT_BINARY_DIR}/jarvis-version.cmake + VERSION ${JARVIS_INSTALL_VERSION} COMPATIBILITY SameMajorVersion) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake - ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/jarvis-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/jarvis-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/jarvis) install( FILES convert_hf_to_gguf.py @@ -190,27 +190,27 @@ install( WORLD_EXECUTE DESTINATION ${CMAKE_INSTALL_BINDIR}) -configure_file(cmake/llama.pc.in - "${CMAKE_CURRENT_BINARY_DIR}/llama.pc" +configure_file(cmake/jarvis.pc.in + 
"${CMAKE_CURRENT_BINARY_DIR}/jarvis.pc" @ONLY) -install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc" +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/jarvis.pc" DESTINATION lib/pkgconfig) # # utils, programs, examples and tests # -if (LLAMA_BUILD_COMMON) +if (JARVIS_BUILD_COMMON) add_subdirectory(common) endif() -if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) +if (JARVIS_BUILD_COMMON AND JARVIS_BUILD_TESTS AND NOT CMAKE_JS_VERSION) include(CTest) add_subdirectory(tests) endif() -if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES) +if (JARVIS_BUILD_COMMON AND JARVIS_BUILD_EXAMPLES) add_subdirectory(examples) add_subdirectory(pocs) endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4c882c254cac5..d24987c935c10 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ - Squash-merge PRs - Use the following format for the squashed commit title: ` : (#)`. For example: `utils : fix typo in utils.py (#1234)` -- Optionally pick a `` from here: https://github.com/ggerganov/llama.cpp/wiki/Modules +- Optionally pick a `` from here: https://github.com/ggerganov/jarvis.cpp/wiki/Modules # Coding guidelines @@ -22,7 +22,7 @@ - Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a` - Naming usually optimizes for common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963) - Tensors store data in row-major order. 
We refer to dimension 0 as columns, 1 as rows, 2 as matrices -- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$ +- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/jarvis.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$ ![matmul](media/matmul.png) @@ -30,4 +30,4 @@ The Github issues, PRs and discussions contain a lot of information that can be useful to get familiar with the codebase. For convenience, some of the more important information is referenced from Github projects: -https://github.com/ggerganov/llama.cpp/projects +https://github.com/ggerganov/jarvis.cpp/projects diff --git a/LLMCLI.java b/LLMCLI.java index 881c6fb14a9ad..30969d99a7782 100644 --- a/LLMCLI.java +++ b/LLMCLI.java @@ -7,7 +7,7 @@ public class LLMCLI { public static void main(String[] args) { // Path to the .exe file - String exePath = "bin/llama-cli.exe"; + String exePath = "bin/jarvis-cli.exe"; System.out.println("Enter -h for help"); // Scanner to take user input for various commands diff --git a/Makefile b/Makefile index 719f45d167463..ad411dbdf8d18 100644 --- a/Makefile +++ b/Makefile @@ -1,44 +1,44 @@ # Define the default target now so that it is always the first target BUILD_TARGETS = \ libllava.a \ - llama-baby-llama \ - llama-batched \ - llama-batched-bench \ - llama-bench \ - llama-cli \ - llama-convert-llama2c-to-ggml \ - llama-embedding \ - llama-eval-callback \ - llama-export-lora \ - llama-gbnf-validator \ - llama-gguf \ - llama-gguf-hash \ - llama-gguf-split \ - llama-gritlm \ - llama-imatrix \ - llama-infill \ - llama-llava-cli \ - llama-minicpmv-cli\ - llama-lookahead \ - llama-lookup \ - llama-lookup-create \ - llama-lookup-merge \ - llama-lookup-stats \ - llama-parallel \ - 
llama-passkey \ - llama-perplexity \ - llama-q8dot \ - llama-quantize \ - llama-quantize-stats \ - llama-retrieval \ - llama-save-load-state \ - llama-server \ - llama-simple \ - llama-speculative \ - llama-tokenize \ - llama-vdot \ - llama-cvector-generator \ - llama-gen-docs \ + jarvis-baby-jarvis \ + jarvis-batched \ + jarvis-batched-bench \ + jarvis-bench \ + jarvis-cli \ + jarvis-convert-jarvis2c-to-ggml \ + jarvis-embedding \ + jarvis-eval-callback \ + jarvis-export-lora \ + jarvis-gbnf-validator \ + jarvis-gguf \ + jarvis-gguf-hash \ + jarvis-gguf-split \ + jarvis-gritlm \ + jarvis-imatrix \ + jarvis-infill \ + jarvis-llava-cli \ + jarvis-minicpmv-cli\ + jarvis-lookahead \ + jarvis-lookup \ + jarvis-lookup-create \ + jarvis-lookup-merge \ + jarvis-lookup-stats \ + jarvis-parallel \ + jarvis-passkey \ + jarvis-perplexity \ + jarvis-q8dot \ + jarvis-quantize \ + jarvis-quantize-stats \ + jarvis-retrieval \ + jarvis-save-load-state \ + jarvis-server \ + jarvis-simple \ + jarvis-speculative \ + jarvis-tokenize \ + jarvis-vdot \ + jarvis-cvector-generator \ + jarvis-gen-docs \ tests/test-c.o # Binaries only useful for tests @@ -52,7 +52,7 @@ TEST_TARGETS = \ tests/test-grammar-integration \ tests/test-grammar-parser \ tests/test-json-schema-to-grammar \ - tests/test-llama-grammar \ + tests/test-jarvis-grammar \ tests/test-log \ tests/test-model-load-cancel \ tests/test-opt \ @@ -65,8 +65,8 @@ TEST_TARGETS = \ tests/test-tokenizer-1-spm # Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned -LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot convert-llama2c-to-ggml \ - simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama \ +LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot convert-jarvis2c-to-ggml \ + simple batched batched-bench save-load-state server gguf 
gguf-split eval-callback jarvis-bench libllava.a llava-cli baby-jarvis \ retrieval speculative infill tokenize parallel export-lora lookahead lookup passkey gritlm # Legacy build targets that were renamed in #7809, but we want to build binaries that for them that output a deprecation warning if people try to use them. @@ -74,80 +74,80 @@ LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding LEGACY_TARGETS_BUILD = main quantize perplexity embedding server # Deprecation aliases -ifdef LLAMA_CUBLAS -$(error LLAMA_CUBLAS is removed. Use GGML_CUDA instead.) +ifdef JARVIS_CUBLAS +$(error JARVIS_CUBLAS is removed. Use GGML_CUDA instead.) endif -ifdef LLAMA_CUDA +ifdef JARVIS_CUDA GGML_CUDA := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_KOMPUTE +ifdef JARVIS_KOMPUTE GGML_KOMPUTE := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_METAL +ifdef JARVIS_METAL GGML_METAL := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_RPC +ifdef JARVIS_RPC GGML_RPC := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_SYCL +ifdef JARVIS_SYCL GGML_SYCL := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_SYCL_F16 +ifdef JARVIS_SYCL_F16 GGML_SYCL_F16 := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_OPENBLAS +ifdef JARVIS_OPENBLAS GGML_OPENBLAS := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_OPENBLAS64 +ifdef JARVIS_OPENBLAS64 GGML_OPENBLAS64 := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_BLIS +ifdef JARVIS_BLIS GGML_BLIS := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_NO_LLAMAFILE -GGML_NO_LLAMAFILE := 1 +ifdef JARVIS_NO_JARVISFILE +GGML_NO_JARVISFILE := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_NO_ACCELERATE +ifdef JARVIS_NO_ACCELERATE GGML_NO_ACCELERATE := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_NO_OPENMP +ifdef JARVIS_NO_OPENMP GGML_NO_OPENMP := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_NO_METAL +ifdef JARVIS_NO_METAL GGML_NO_METAL := 1 DEPRECATE_WARNING := 1 endif -ifdef LLAMA_DISABLE_LOGS +ifdef JARVIS_DISABLE_LOGS REMOVE_WARNING := 1 endif -ifdef LLAMA_SERVER_VERBOSE +ifdef 
JARVIS_SERVER_VERBOSE REMOVE_WARNING := 1 endif @@ -211,8 +211,8 @@ test: $(TEST_TARGETS) @failures=0; \ for test_target in $(TEST_TARGETS); do \ if [ "$$test_target" = "tests/test-tokenizer-0" ]; then \ - ./$$test_target $(CURDIR)/models/ggml-vocab-llama-spm.gguf; \ - ./$$test_target $(CURDIR)/models/ggml-vocab-llama-bpe.gguf; \ + ./$$test_target $(CURDIR)/models/ggml-vocab-jarvis-spm.gguf; \ + ./$$test_target $(CURDIR)/models/ggml-vocab-jarvis-bpe.gguf; \ ./$$test_target $(CURDIR)/models/ggml-vocab-phi-3.gguf; \ ./$$test_target $(CURDIR)/models/ggml-vocab-falcon.gguf; \ ./$$test_target $(CURDIR)/models/ggml-vocab-bert-bge.gguf; \ @@ -257,7 +257,7 @@ MK_CFLAGS = -std=c11 -fPIC MK_CXXFLAGS = -std=c++11 -fPIC MK_NVCCFLAGS = -std=c++11 -ifdef LLAMA_NO_CCACHE +ifdef JARVIS_NO_CCACHE GGML_NO_CCACHE := 1 DEPRECATE_WARNING := 1 endif @@ -320,7 +320,7 @@ ifdef GGML_SCHED_MAX_COPIES MK_CPPFLAGS += -DGGML_SCHED_MAX_COPIES=$(GGML_SCHED_MAX_COPIES) endif -ifdef LLAMA_DEBUG +ifdef JARVIS_DEBUG MK_CFLAGS += -O0 -g MK_CXXFLAGS += -O0 -g MK_LDFLAGS += -g @@ -336,25 +336,25 @@ else MK_NVCCFLAGS += -O3 -g endif -ifdef LLAMA_SANITIZE_THREAD +ifdef JARVIS_SANITIZE_THREAD MK_CFLAGS += -fsanitize=thread -g MK_CXXFLAGS += -fsanitize=thread -g MK_LDFLAGS += -fsanitize=thread -g endif -ifdef LLAMA_SANITIZE_ADDRESS +ifdef JARVIS_SANITIZE_ADDRESS MK_CFLAGS += -fsanitize=address -fno-omit-frame-pointer -g MK_CXXFLAGS += -fsanitize=address -fno-omit-frame-pointer -g MK_LDFLAGS += -fsanitize=address -fno-omit-frame-pointer -g endif -ifdef LLAMA_SANITIZE_UNDEFINED +ifdef JARVIS_SANITIZE_UNDEFINED MK_CFLAGS += -fsanitize=undefined -g MK_CXXFLAGS += -fsanitize=undefined -g MK_LDFLAGS += -fsanitize=undefined -g endif -ifdef LLAMA_SERVER_SSL +ifdef JARVIS_SERVER_SSL MK_CPPFLAGS += -DCPPHTTPLIB_OPENSSL_SUPPORT MK_LDFLAGS += -lssl -lcrypto endif @@ -381,7 +381,7 @@ MK_CXXFLAGS += \ -Wmissing-declarations \ -Wmissing-noreturn -ifeq ($(LLAMA_FATAL_WARNINGS),1) +ifeq ($(JARVIS_FATAL_WARNINGS),1) 
MK_CFLAGS += -Werror MK_CXXFLAGS += -Werror endif @@ -420,7 +420,7 @@ ifeq ($(_WIN32),1) LWINSOCK2 := -lws2_32 endif -ifdef LLAMA_GPROF +ifdef JARVIS_GPROF MK_CFLAGS += -pg MK_CXXFLAGS += -pg endif @@ -448,7 +448,7 @@ endif ifneq '' '$(findstring mingw,$(shell $(CC) -dumpmachine))' # The stack is only 16-byte aligned on Windows, so don't let gcc emit aligned moves. # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412 - # https://github.com/ggerganov/llama.cpp/issues/2922 + # https://github.com/ggerganov/jarvis.cpp/issues/2922 MK_CFLAGS += -Xassembler -muse-unaligned-vector-move MK_CXXFLAGS += -Xassembler -muse-unaligned-vector-move @@ -574,9 +574,9 @@ ifdef GGML_NVPL OBJ_GGML += ggml/src/ggml-blas.o endif # GGML_NVPL -ifndef GGML_NO_LLAMAFILE - MK_CPPFLAGS += -DGGML_USE_LLAMAFILE - OBJ_GGML += ggml/src/llamafile/sgemm.o +ifndef GGML_NO_JARVISFILE + MK_CPPFLAGS += -DGGML_USE_JARVISFILE + OBJ_GGML += ggml/src/jarvisfile/sgemm.o endif ifndef GGML_NO_AMX @@ -627,9 +627,9 @@ ifdef GGML_CUDA OBJ_GGML += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/*.cu)) OBJ_GGML += $(OBJ_CUDA_TMPL) -ifdef LLAMA_FATAL_WARNINGS +ifdef JARVIS_FATAL_WARNINGS MK_NVCCFLAGS += -Werror all-warnings -endif # LLAMA_FATAL_WARNINGS +endif # JARVIS_FATAL_WARNINGS ifndef GGML_MUSA ifndef JETSON_EOL_MODULE_DETECT @@ -637,9 +637,9 @@ ifndef JETSON_EOL_MODULE_DETECT endif # JETSON_EOL_MODULE_DETECT endif # GGML_MUSA -ifdef LLAMA_DEBUG +ifdef JARVIS_DEBUG MK_NVCCFLAGS += -lineinfo -endif # LLAMA_DEBUG +endif # JARVIS_DEBUG ifdef GGML_CUDA_DEBUG MK_NVCCFLAGS += --device-debug @@ -920,11 +920,11 @@ OBJ_GGML += \ ggml/src/ggml-quants.o \ ggml/src/ggml-aarch64.o -OBJ_LLAMA = \ - src/llama.o \ - src/llama-vocab.o \ - src/llama-grammar.o \ - src/llama-sampling.o \ +OBJ_JARVIS = \ + src/jarvis.o \ + src/jarvis-vocab.o \ + src/jarvis-grammar.o \ + src/jarvis-sampling.o \ src/unicode.o \ src/unicode-data.o @@ -939,19 +939,19 @@ OBJ_COMMON = \ common/build-info.o \ common/json-schema-to-grammar.o -OBJ_ALL = 
$(OBJ_GGML) $(OBJ_LLAMA) $(OBJ_COMMON) +OBJ_ALL = $(OBJ_GGML) $(OBJ_JARVIS) $(OBJ_COMMON) LIB_GGML = $(LIB_PRE)ggml$(DSO_EXT) LIB_GGML_S = $(LIB_PRE)ggml.a -LIB_LLAMA = $(LIB_PRE)llama$(DSO_EXT) -LIB_LLAMA_S = $(LIB_PRE)llama.a +LIB_JARVIS = $(LIB_PRE)jarvis$(DSO_EXT) +LIB_JARVIS_S = $(LIB_PRE)jarvis.a LIB_COMMON = $(LIB_PRE)common$(DSO_EXT) LIB_COMMON_S = $(LIB_PRE)common.a -LIB_ALL = $(LIB_GGML) $(LIB_LLAMA) $(LIB_COMMON) -LIB_ALL_S = $(LIB_GGML_S) $(LIB_LLAMA_S) $(LIB_COMMON_S) +LIB_ALL = $(LIB_GGML) $(LIB_JARVIS) $(LIB_COMMON) +LIB_ALL_S = $(LIB_GGML_S) $(LIB_JARVIS_S) $(LIB_COMMON_S) GF_CC := $(CC) include scripts/get-flags.mk @@ -971,8 +971,8 @@ include scripts/get-flags.mk CUDA_CXXFLAGS := $(BASE_CXXFLAGS) $(GF_CXXFLAGS) -Wno-pedantic endif -ifdef LLAMA_CURL -override CXXFLAGS := $(CXXFLAGS) -DLLAMA_USE_CURL +ifdef JARVIS_CURL +override CXXFLAGS := $(CXXFLAGS) -DJARVIS_USE_CURL override LDFLAGS := $(LDFLAGS) -lcurl endif @@ -980,7 +980,7 @@ endif # Print build information # -$(info I llama.cpp build info: ) +$(info I jarvis.cpp build info: ) $(info I UNAME_S: $(UNAME_S)) $(info I UNAME_P: $(UNAME_P)) $(info I UNAME_M: $(UNAME_M)) @@ -1009,30 +1009,30 @@ $(info ) ifdef DEPRECATE_WARNING $(info !!! DEPRECATION WARNING !!!) -$(info The following LLAMA_ options are deprecated and will be removed in the future. Use the GGML_ prefix instead) -$(info - LLAMA_CUDA) -$(info - LLAMA_METAL) -$(info - LLAMA_METAL_EMBED_LIBRARY) -$(info - LLAMA_OPENMP) -$(info - LLAMA_RPC) -$(info - LLAMA_SYCL) -$(info - LLAMA_SYCL_F16) -$(info - LLAMA_OPENBLAS) -$(info - LLAMA_OPENBLAS64) -$(info - LLAMA_BLIS) -$(info - LLAMA_NO_LLAMAFILE) -$(info - LLAMA_NO_ACCELERATE) -$(info - LLAMA_NO_OPENMP) -$(info - LLAMA_NO_METAL) -$(info - LLAMA_NO_CCACHE) +$(info The following JARVIS_ options are deprecated and will be removed in the future. 
Use the GGML_ prefix instead) +$(info - JARVIS_CUDA) +$(info - JARVIS_METAL) +$(info - JARVIS_METAL_EMBED_LIBRARY) +$(info - JARVIS_OPENMP) +$(info - JARVIS_RPC) +$(info - JARVIS_SYCL) +$(info - JARVIS_SYCL_F16) +$(info - JARVIS_OPENBLAS) +$(info - JARVIS_OPENBLAS64) +$(info - JARVIS_BLIS) +$(info - JARVIS_NO_JARVISFILE) +$(info - JARVIS_NO_ACCELERATE) +$(info - JARVIS_NO_OPENMP) +$(info - JARVIS_NO_METAL) +$(info - JARVIS_NO_CCACHE) $(info ) endif ifdef REMOVE_WARNING $(info !!! REMOVAL WARNING !!!) -$(info The following LLAMA_ options have been removed and are no longer supported) -$(info - LLAMA_DISABLE_LOGS (https://github.com/ggerganov/llama.cpp/pull/9418)) -$(info - LLAMA_SERVER_VERBOSE (https://github.com/ggerganov/llama.cpp/pull/9418)) +$(info The following JARVIS_ options have been removed and are no longer supported) +$(info - JARVIS_DISABLE_LOGS (https://github.com/ggerganov/jarvis.cpp/pull/9418)) +$(info - JARVIS_SERVER_VERBOSE (https://github.com/ggerganov/jarvis.cpp/pull/9418)) $(info ) endif @@ -1079,13 +1079,13 @@ ggml/src/ggml-blas.o: \ ggml/include/ggml-blas.h $(CXX) $(CXXFLAGS) -c $< -o $@ -ifndef GGML_NO_LLAMAFILE -ggml/src/llamafile/sgemm.o: \ - ggml/src/llamafile/sgemm.cpp \ - ggml/src/llamafile/sgemm.h \ +ifndef GGML_NO_JARVISFILE +ggml/src/jarvisfile/sgemm.o: \ + ggml/src/jarvisfile/sgemm.cpp \ + ggml/src/jarvisfile/sgemm.h \ ggml/include/ggml.h $(CXX) $(CXXFLAGS) -c $< -o $@ -endif # GGML_NO_LLAMAFILE +endif # GGML_NO_JARVISFILE ifndef GGML_NO_AMX ggml/src/ggml-amx.o: \ @@ -1115,7 +1115,7 @@ $(LIB_GGML_S): \ $(OBJ_GGML) ar rcs $(LIB_GGML_S) $^ -# llama +# jarvis src/unicode.o: \ src/unicode.cpp \ @@ -1127,14 +1127,14 @@ src/unicode-data.o: \ src/unicode-data.h $(CXX) $(CXXFLAGS) -c $< -o $@ -src/llama.o: \ - src/llama.cpp \ - src/llama-impl.h \ - src/llama-vocab.h \ - src/llama-grammar.h \ - src/llama-sampling.h \ +src/jarvis.o: \ + src/jarvis.cpp \ + src/jarvis-impl.h \ + src/jarvis-vocab.h \ + src/jarvis-grammar.h \ + 
src/jarvis-sampling.h \ src/unicode.h \ - include/llama.h \ + include/jarvis.h \ ggml/include/ggml-cuda.h \ ggml/include/ggml-metal.h \ ggml/include/ggml.h \ @@ -1142,37 +1142,37 @@ src/llama.o: \ ggml/include/ggml-backend.h $(CXX) $(CXXFLAGS) -c $< -o $@ -src/llama-vocab.o: \ - src/llama-vocab.cpp \ - src/llama-vocab.h \ - src/llama-impl.h \ - include/llama.h +src/jarvis-vocab.o: \ + src/jarvis-vocab.cpp \ + src/jarvis-vocab.h \ + src/jarvis-impl.h \ + include/jarvis.h $(CXX) $(CXXFLAGS) -c $< -o $@ -src/llama-grammar.o: \ - src/llama-grammar.cpp \ - src/llama-grammar.h \ - src/llama-impl.h \ - src/llama-vocab.h \ - src/llama-sampling.h \ - include/llama.h +src/jarvis-grammar.o: \ + src/jarvis-grammar.cpp \ + src/jarvis-grammar.h \ + src/jarvis-impl.h \ + src/jarvis-vocab.h \ + src/jarvis-sampling.h \ + include/jarvis.h $(CXX) $(CXXFLAGS) -c $< -o $@ -src/llama-sampling.o: \ - src/llama-sampling.cpp \ - src/llama-sampling.h \ - src/llama-impl.h \ - include/llama.h +src/jarvis-sampling.o: \ + src/jarvis-sampling.cpp \ + src/jarvis-sampling.h \ + src/jarvis-impl.h \ + include/jarvis.h $(CXX) $(CXXFLAGS) -c $< -o $@ -$(LIB_LLAMA): \ - $(OBJ_LLAMA) \ +$(LIB_JARVIS): \ + $(OBJ_JARVIS) \ $(LIB_GGML) $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) -$(LIB_LLAMA_S): \ - $(OBJ_LLAMA) - ar rcs $(LIB_LLAMA_S) $^ +$(LIB_JARVIS_S): \ + $(OBJ_JARVIS) + ar rcs $(LIB_JARVIS_S) $^ # common @@ -1183,7 +1183,7 @@ common/common.o: \ common/sampling.h \ common/json.hpp \ common/json-schema-to-grammar.h \ - include/llama.h + include/jarvis.h $(CXX) $(CXXFLAGS) -c $< -o $@ common/arg.o: \ @@ -1199,7 +1199,7 @@ common/log.o: \ common/sampling.o: \ common/sampling.cpp \ common/sampling.h \ - include/llama.h + include/jarvis.h $(CXX) $(CXXFLAGS) -c $< -o $@ common/console.o: \ @@ -1224,7 +1224,7 @@ common/ngram-cache.o: \ $(LIB_COMMON): \ $(OBJ_COMMON) \ - $(LIB_LLAMA) \ + $(LIB_JARVIS) \ $(LIB_GGML) $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) @@ -1246,7 +1246,7 @@ clean: rm 
-rvf ggml/*.dll rm -rvf ggml/*.so rm -vrf ggml/src/*.o - rm -rvf ggml/src/llamafile/*.o + rm -rvf ggml/src/jarvisfile/*.o rm -rvf common/build-info.cpp rm -vrf ggml/src/ggml-metal-embed.metal rm -vrf ggml/src/ggml-cuda/*.o @@ -1269,75 +1269,75 @@ clean: # Helper function that replaces .c, .cpp, and .cu file endings with .o: GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1)))) -llama-cli: examples/main/main.cpp \ +jarvis-cli: examples/main/main.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @echo - @echo '==== Run ./llama-cli -h for help. ====' + @echo '==== Run ./jarvis-cli -h for help. ====' @echo -llama-infill: examples/infill/infill.cpp \ +jarvis-infill: examples/infill/infill.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-simple: examples/simple/simple.cpp \ +jarvis-simple: examples/simple/simple.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-tokenize: examples/tokenize/tokenize.cpp \ +jarvis-tokenize: examples/tokenize/tokenize.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-batched: examples/batched/batched.cpp \ +jarvis-batched: examples/batched/batched.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-batched-bench: examples/batched-bench/batched-bench.cpp \ +jarvis-batched-bench: examples/batched-bench/batched-bench.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) 
-llama-quantize: examples/quantize/quantize.cpp \ +jarvis-quantize: examples/quantize/quantize.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-quantize-stats: examples/quantize-stats/quantize-stats.cpp \ +jarvis-quantize-stats: examples/quantize-stats/quantize-stats.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-perplexity: examples/perplexity/perplexity.cpp \ +jarvis-perplexity: examples/perplexity/perplexity.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-imatrix: examples/imatrix/imatrix.cpp \ +jarvis-imatrix: examples/imatrix/imatrix.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-embedding: examples/embedding/embedding.cpp \ +jarvis-embedding: examples/embedding/embedding.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-gritlm: examples/gritlm/gritlm.cpp \ +jarvis-gritlm: examples/gritlm/gritlm.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-save-load-state: examples/save-load-state/save-load-state.cpp \ +jarvis-save-load-state: examples/save-load-state/save-load-state.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-gguf: examples/gguf/gguf.cpp \ +jarvis-gguf: examples/gguf/gguf.cpp \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) 
$(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1354,92 +1354,92 @@ examples/gguf-hash/deps/sha256/sha256.o: \ examples/gguf-hash/deps/sha256/sha256.c $(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@ -llama-gguf-hash: examples/gguf-hash/gguf-hash.cpp examples/gguf-hash/deps/sha1/sha1.o examples/gguf-hash/deps/xxhash/xxhash.o examples/gguf-hash/deps/sha256/sha256.o\ +jarvis-gguf-hash: examples/gguf-hash/gguf-hash.cpp examples/gguf-hash/deps/sha1/sha1.o examples/gguf-hash/deps/xxhash/xxhash.o examples/gguf-hash/deps/sha256/sha256.o\ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -Iexamples/gguf-hash/deps -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-gguf-split: examples/gguf-split/gguf-split.cpp \ +jarvis-gguf-split: examples/gguf-split/gguf-split.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-eval-callback: examples/eval-callback/eval-callback.cpp \ +jarvis-eval-callback: examples/eval-callback/eval-callback.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp \ +jarvis-cvector-generator: examples/cvector-generator/cvector-generator.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp \ +jarvis-convert-jarvis2c-to-ggml: examples/convert-jarvis2c-to-ggml/convert-jarvis2c-to-ggml.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-bench: examples/llama-bench/llama-bench.cpp \ +jarvis-bench: 
examples/jarvis-bench/jarvis-bench.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-baby-llama: examples/baby-llama/baby-llama.cpp \ +jarvis-baby-jarvis: examples/baby-jarvis/baby-jarvis.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-export-lora: examples/export-lora/export-lora.cpp \ +jarvis-export-lora: examples/export-lora/export-lora.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-retrieval: examples/retrieval/retrieval.cpp \ +jarvis-retrieval: examples/retrieval/retrieval.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-speculative: examples/speculative/speculative.cpp \ +jarvis-speculative: examples/speculative/speculative.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-parallel: examples/parallel/parallel.cpp \ +jarvis-parallel: examples/parallel/parallel.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-lookahead: examples/lookahead/lookahead.cpp \ +jarvis-lookahead: examples/lookahead/lookahead.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-lookup: examples/lookup/lookup.cpp \ +jarvis-lookup: examples/lookup/lookup.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) 
-llama-lookup-create: examples/lookup/lookup-create.cpp \ +jarvis-lookup-create: examples/lookup/lookup-create.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-lookup-merge: examples/lookup/lookup-merge.cpp \ +jarvis-lookup-merge: examples/lookup/lookup-merge.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-lookup-stats: examples/lookup/lookup-stats.cpp \ +jarvis-lookup-stats: examples/lookup/lookup-stats.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-passkey: examples/passkey/passkey.cpp \ +jarvis-passkey: examples/passkey/passkey.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp \ +jarvis-gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1450,7 +1450,7 @@ rpc-server: examples/rpc/rpc-server.cpp \ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) endif # GGML_RPC -llama-server: \ +jarvis-server: \ examples/server/server.cpp \ examples/server/utils.hpp \ examples/server/httplib.h \ @@ -1485,7 +1485,7 @@ examples/server/%.hpp: examples/server/public/% Makefile echo "unsigned int $${NAME}_len = $(shell cat $< | wc -c );" \ ) > $@ -llama-gen-docs: examples/gen-docs/gen-docs.cpp \ +jarvis-gen-docs: examples/gen-docs/gen-docs.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1499,7 +1499,7 @@ libllava.a: 
examples/llava/llava.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual -llama-llava-cli: examples/llava/llava-cli.cpp \ +jarvis-llava-cli: examples/llava/llava-cli.cpp \ examples/llava/llava.cpp \ examples/llava/llava.h \ examples/llava/clip.cpp \ @@ -1507,7 +1507,7 @@ llama-llava-cli: examples/llava/llava-cli.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual -llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \ +jarvis-minicpmv-cli: examples/llava/minicpmv-cli.cpp \ examples/llava/llava.cpp \ examples/llava/llava.h \ examples/llava/clip.cpp \ @@ -1542,7 +1542,7 @@ tests/test-arg-parser: tests/test-arg-parser.cpp \ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -tests/test-llama-grammar: tests/test-llama-grammar.cpp \ +tests/test-jarvis-grammar: tests/test-jarvis-grammar.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1616,7 +1616,7 @@ tests/test-rope: tests/test-rope.cpp ggml/src/ggml.o \ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -tests/test-c.o: tests/test-c.c include/llama.h +tests/test-c.o: tests/test-c.c include/jarvis.h $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@ tests/test-backend-ops: tests/test-backend-ops.cpp \ @@ -1643,12 +1643,12 @@ tests/test-chat-template: tests/test-chat-template.cpp \ # PoCs # -llama-vdot: pocs/vdot/vdot.cpp ggml/src/ggml.o \ +jarvis-vdot: pocs/vdot/vdot.cpp ggml/src/ggml.o \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ +jarvis-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) -c $< -o 
$(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1667,17 +1667,17 @@ examples/deprecation-warning/deprecation-warning.o: examples/deprecation-warning # Eventually we will want to remove these target from building all the time. main: examples/deprecation-warning/deprecation-warning.o $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) - @echo "NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead." + @echo "NOTICE: The 'main' binary is deprecated. Please use 'jarvis-cli' instead." server: examples/deprecation-warning/deprecation-warning.o $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) - @echo "NOTICE: The 'server' binary is deprecated. Please use 'llama-server' instead." + @echo "NOTICE: The 'server' binary is deprecated. Please use 'jarvis-server' instead." quantize: examples/deprecation-warning/deprecation-warning.o ifneq (,$(wildcard quantize)) $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) @echo "#########" - @echo "WARNING: The 'quantize' binary is deprecated. Please use 'llama-quantize' instead." + @echo "WARNING: The 'quantize' binary is deprecated. Please use 'jarvis-quantize' instead." @echo " Remove the 'quantize' binary to remove this warning." @echo "#########" endif @@ -1686,7 +1686,7 @@ perplexity: examples/deprecation-warning/deprecation-warning.o ifneq (,$(wildcard perplexity)) $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) @echo "#########" - @echo "WARNING: The 'perplexity' binary is deprecated. Please use 'llama-perplexity' instead." + @echo "WARNING: The 'perplexity' binary is deprecated. Please use 'jarvis-perplexity' instead." @echo " Remove the 'perplexity' binary to remove this warning." @echo "#########" endif @@ -1695,7 +1695,7 @@ embedding: examples/deprecation-warning/deprecation-warning.o ifneq (,$(wildcard embedding)) $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) @echo "#########" - @echo "WARNING: The 'embedding' binary is deprecated. Please use 'llama-embedding' instead." 
+ @echo "WARNING: The 'embedding' binary is deprecated. Please use 'jarvis-embedding' instead." @echo " Remove the 'embedding' binary to remove this warning." @echo "#########" endif diff --git a/Package.swift b/Package.swift index 3a17e6c349b01..2832bcf5c3caa 100644 --- a/Package.swift +++ b/Package.swift @@ -3,10 +3,10 @@ import PackageDescription var sources = [ - "src/llama.cpp", - "src/llama-vocab.cpp", - "src/llama-grammar.cpp", - "src/llama-sampling.cpp", + "src/jarvis.cpp", + "src/jarvis-vocab.cpp", + "src/jarvis-grammar.cpp", + "src/jarvis-sampling.cpp", "src/unicode.cpp", "src/unicode-data.cpp", "ggml/src/ggml.c", @@ -45,7 +45,7 @@ cSettings.append( #endif let package = Package( - name: "llama", + name: "jarvis", platforms: [ .macOS(.v12), .iOS(.v14), @@ -53,11 +53,11 @@ let package = Package( .tvOS(.v14) ], products: [ - .library(name: "llama", targets: ["llama"]), + .library(name: "jarvis", targets: ["jarvis"]), ], targets: [ .target( - name: "llama", + name: "jarvis", path: ".", exclude: [ "cmake", diff --git a/README.md b/README.md index 8fe1f4b4b6a7a..e5cf87597c0c4 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,30 @@ -# llama.cpp +# jarvis.cpp -![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) +![jarvis](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) -[![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml) -[![Conan Center](https://shields.io/conan/v/llama-cpp)](https://conan.io/center/llama-cpp) +[![Server](https://github.com/ggerganov/jarvis.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggerganov/jarvis.cpp/actions/workflows/server.yml) +[![Conan 
Center](https://shields.io/conan/v/jarvis-cpp)](https://conan.io/center/jarvis-cpp) -[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) +[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/jarvis.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/jarvis.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++ ## Recent API changes -- [Changelog for `libllama` API](https://github.com/ggerganov/llama.cpp/issues/9289) -- [Changelog for `llama-server` REST API](https://github.com/ggerganov/llama.cpp/issues/9291) +- [Changelog for `libjarvis` API](https://github.com/ggerganov/jarvis.cpp/issues/9289) +- [Changelog for `jarvis-server` REST API](https://github.com/ggerganov/jarvis.cpp/issues/9291) ## Hot topics -- **Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggerganov/llama.cpp/discussions/9669** -- Hugging Face GGUF editor: [discussion](https://github.com/ggerganov/llama.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor) +- **Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggerganov/jarvis.cpp/discussions/9669** +- Hugging Face GGUF editor: [discussion](https://github.com/ggerganov/jarvis.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor) ---- ## Description -The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide +The main goal of `jarvis.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide variety of hardware - locally and in the cloud. 
- Plain C/C++ implementation without any dependencies @@ -35,7 +35,7 @@ variety of hardware - locally and in the cloud. - Vulkan and SYCL backend support - CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity -Since its [inception](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022), the project has +Since its [inception](https://github.com/ggerganov/jarvis.cpp/issues/33#issuecomment-1465108022), the project has improved significantly thanks to many contributions. It is the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library. @@ -52,22 +52,22 @@ Typically finetunes of the base models below are supported as well. - [X] [Falcon](https://huggingface.co/models?search=tiiuae/falcon) - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) - [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne) -- [X] [BERT](https://github.com/ggerganov/llama.cpp/pull/5423) +- [X] [BERT](https://github.com/ggerganov/jarvis.cpp/pull/5423) - [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/) - [X] [Baichuan 1 & 2](https://huggingface.co/models?search=baichuan-inc/Baichuan) + [derivations](https://huggingface.co/hiyouga/baichuan-7b-sft) - [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila) -- [X] [Starcoder models](https://github.com/ggerganov/llama.cpp/pull/3187) +- [X] [Starcoder models](https://github.com/ggerganov/jarvis.cpp/pull/3187) - [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim) -- [X] [MPT](https://github.com/ggerganov/llama.cpp/pull/3417) -- [X] [Bloom](https://github.com/ggerganov/llama.cpp/pull/3553) +- [X] [MPT](https://github.com/ggerganov/jarvis.cpp/pull/3417) +- [X] [Bloom](https://github.com/ggerganov/jarvis.cpp/pull/3553) - [x] [Yi models](https://huggingface.co/models?search=01-ai/Yi) - [X] [StableLM 
models](https://huggingface.co/stabilityai) - [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek) - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) -- [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) +- [x] [PLaMo-13B](https://github.com/ggerganov/jarvis.cpp/pull/3557) - [x] [Phi models](https://huggingface.co/models?search=microsoft/phi) - [x] [GPT-2](https://huggingface.co/gpt2) -- [x] [Orion 14B](https://github.com/ggerganov/llama.cpp/pull/5118) +- [x] [Orion 14B](https://github.com/ggerganov/jarvis.cpp/pull/5118) - [x] [InternLM2](https://huggingface.co/models?search=internlm2) - [x] [CodeShell](https://github.com/WisdomShell/codeshell) - [x] [Gemma](https://ai.google.dev/gemma) @@ -111,36 +111,36 @@ Typically finetunes of the base models below are supported as well. **Bindings:** -- Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python) -- Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) -- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp) -- JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp) +- Python: [abetlen/jarvis-cpp-python](https://github.com/abetlen/jarvis-cpp-python) +- Go: [go-skynet/go-jarvis.cpp](https://github.com/go-skynet/go-jarvis.cpp) +- Node.js: [withcatai/node-jarvis-cpp](https://github.com/withcatai/node-jarvis-cpp) +- JS/TS (jarvis.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/jarviscpp) - JS/TS (Programmable Prompt Engine CLI): [offline-ai/cli](https://github.com/offline-ai/cli) -- JavaScript/Wasm (works in browser): [tangledgroup/llama-cpp-wasm](https://github.com/tangledgroup/llama-cpp-wasm) -- Typescript/Wasm (nicer API, available on npm): [ngxson/wllama](https://github.com/ngxson/wllama) -- Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb) -- Rust (more 
features): [edgenai/llama_cpp-rs](https://github.com/edgenai/llama_cpp-rs) -- Rust (nicer API): [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp) -- Rust (more direct bindings): [utilityai/llama-cpp-rs](https://github.com/utilityai/llama-cpp-rs) -- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp) +- JavaScript/Wasm (works in browser): [tangledgroup/jarvis-cpp-wasm](https://github.com/tangledgroup/jarvis-cpp-wasm) +- Typescript/Wasm (nicer API, available on npm): [ngxson/wjarvis](https://github.com/ngxson/wjarvis) +- Ruby: [yoshoku/jarvis_cpp.rb](https://github.com/yoshoku/jarvis_cpp.rb) +- Rust (more features): [edgenai/jarvis_cpp-rs](https://github.com/edgenai/jarvis_cpp-rs) +- Rust (nicer API): [mdrokz/rust-jarvis.cpp](https://github.com/mdrokz/rust-jarvis.cpp) +- Rust (more direct bindings): [utilityai/jarvis-cpp-rs](https://github.com/utilityai/jarvis-cpp-rs) +- C#/.NET: [SciSharp/JarvisSharp](https://github.com/SciSharp/JarvisSharp) - C#/VB.NET (more features - community license): [LM-Kit.NET](https://docs.lm-kit.com/lm-kit-net/index.html) - Scala 3: [donderom/llm4s](https://github.com/donderom/llm4s) -- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj) -- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn) -- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp) -- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig) -- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart) -- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326) -- Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp) -- Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift) -- Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama) +- 
Clojure: [phronmophobic/jarvis.clj](https://github.com/phronmophobic/jarvis.clj) +- React Native: [mybigday/jarvis.rn](https://github.com/mybigday/jarvis.rn) +- Java: [kherud/java-jarvis.cpp](https://github.com/kherud/java-jarvis.cpp) +- Zig: [deins/jarvis.cpp.zig](https://github.com/Deins/jarvis.cpp.zig) +- Flutter/Dart: [netdur/jarvis_cpp_dart](https://github.com/netdur/jarvis_cpp_dart) +- PHP (API bindings and features built on top of jarvis.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/jarvis.cpp/pull/6326) +- Guile Scheme: [guile_jarvis_cpp](https://savannah.nongnu.org/projects/guile-jarvis-cpp) +- Swift [srgtuszy/jarvis-cpp-swift](https://github.com/srgtuszy/jarvis-cpp-swift) +- Swift [ShenghaiWang/SwiftJarvis](https://github.com/ShenghaiWang/SwiftJarvis) **UI:** Unless otherwise noted these projects are open-source with permissive licensing: - [MindWorkAI/AI-Studio](https://github.com/MindWorkAI/AI-Studio) (FSL-1.1-MIT) -- [iohub/collama](https://github.com/iohub/coLLaMA) +- [iohub/cojarvis](https://github.com/iohub/coLLaMA) - [janhq/jan](https://github.com/janhq/jan) (AGPL) - [nat/openplayground](https://github.com/nat/openplayground) - [Faraday](https://faraday.dev/) (proprietary) @@ -149,9 +149,9 @@ Unless otherwise noted these projects are open-source with permissive licensing: - [ramalama](https://github.com/containers/ramalama) (MIT) - [LocalAI](https://github.com/mudler/LocalAI) (MIT) - [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL) -- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile) +- [Mozilla-Ocho/jarvisfile](https://github.com/Mozilla-Ocho/jarvisfile) - [nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) -- [ollama/ollama](https://github.com/ollama/ollama) +- [ojarvis/ojarvis](https://github.com/ojarvis/ojarvis) - [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (AGPL) - 
[psugihara/FreeChat](https://github.com/psugihara/FreeChat) - [cztomsik/ava](https://github.com/cztomsik/ava) (MIT) @@ -173,24 +173,24 @@ Unless otherwise noted these projects are open-source with permissive licensing: - [AIKit](https://github.com/sozercan/aikit) (MIT) - [LARS - The LLM & Advanced Referencing Solution](https://github.com/abgulati/LARS) (AGPL) - [LLMUnity](https://github.com/undreamai/LLMUnity) (MIT) -- [Llama Assistant](https://github.com/vietanhdev/llama-assistant) (GPL) +- [Jarvis Assistant](https://github.com/vietanhdev/jarvis-assistant) (GPL) - [PocketPal AI - An iOS and Android App](https://github.com/a-ghorbani/pocketpal-ai) (MIT) -*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)* +*(to have a project listed here, it should clearly state that it depends on `jarvis.cpp`)* **Tools:** - [akx/ggify](https://github.com/akx/ggify) – download PyTorch models from HuggingFace Hub and convert them to GGML -- [akx/ollama-dl](https://github.com/akx/ollama-dl) – download models from the Ollama library to be used directly with llama.cpp -- [crashr/gppm](https://github.com/crashr/gppm) – launch llama.cpp instances utilizing NVIDIA Tesla P40 or P100 GPUs with reduced idle power consumption +- [akx/ojarvis-dl](https://github.com/akx/ojarvis-dl) – download models from the Ojarvis library to be used directly with jarvis.cpp +- [crashr/gppm](https://github.com/crashr/gppm) – launch jarvis.cpp instances utilizing NVIDIA Tesla P40 or P100 GPUs with reduced idle power consumption - [gpustack/gguf-parser](https://github.com/gpustack/gguf-parser-go/tree/main/cmd/gguf-parser) - review/check the GGUF file and estimate the memory usage -- [Styled Lines](https://marketplace.unity.com/packages/tools/generative-ai/styled-lines-llama-cpp-model-292902) (proprietary licensed, async wrapper of inference part for game development in Unity3d with prebuild Mobile and Web platform wrappers and a model example) +- [Styled 
Lines](https://marketplace.unity.com/packages/tools/generative-ai/styled-lines-jarvis-cpp-model-292902) (proprietary licensed, async wrapper of inference part for game development in Unity3d with prebuild Mobile and Web platform wrappers and a model example) **Infrastructure:** -- [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp +- [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for jarvis.cpp - [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs -- [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly +- [jarvis_cpp_canister](https://github.com/onicai/jarvis_cpp_canister) - jarvis.cpp as a smart contract on the Internet Computer, using WebAssembly **Games:** - [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you. @@ -201,8 +201,8 @@ Unless otherwise noted these projects are open-source with permissive licensing: Typical run using LLaMA v2 13B on M2 Ultra ``` -$ make -j && ./llama-cli -m models/llama-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -I llama.cpp build info: +$ make -j && ./jarvis-cli -m models/jarvis-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e +I jarvis.cpp build info: I UNAME_S: Darwin I UNAME_P: arm I UNAME_M: arm64 @@ -215,12 +215,12 @@ I CXX: Apple clang version 14.0.3 (clang-1403.0.22.14.1) make: Nothing to be done for `default'. 
main: build = 1041 (cf658ad) main: seed = 1692823051 -llama_model_loader: loaded meta data with 16 key-value pairs and 363 tensors from models/llama-13b-v2/ggml-model-q4_0.gguf (version GGUF V1 (latest)) -llama_model_loader: - type f32: 81 tensors -llama_model_loader: - type q4_0: 281 tensors -llama_model_loader: - type q6_K: 1 tensors +jarvis_model_loader: loaded meta data with 16 key-value pairs and 363 tensors from models/jarvis-13b-v2/ggml-model-q4_0.gguf (version GGUF V1 (latest)) +jarvis_model_loader: - type f32: 81 tensors +jarvis_model_loader: - type q4_0: 281 tensors +jarvis_model_loader: - type q6_K: 1 tensors llm_load_print_meta: format = GGUF V1 (latest) -llm_load_print_meta: arch = llama +llm_load_print_meta: arch = jarvis llm_load_print_meta: vocab type = SPM llm_load_print_meta: n_vocab = 32000 llm_load_print_meta: n_merges = 0 @@ -248,8 +248,8 @@ llm_load_print_meta: LF token = 13 '<0x0A>' llm_load_tensors: ggml ctx size = 0.11 MB llm_load_tensors: mem required = 7024.01 MB (+ 400.00 MB per state) ................................................................................................... -llama_new_context_with_model: kv self size = 400.00 MB -llama_new_context_with_model: compute buffer total size = 75.41 MB +jarvis_new_context_with_model: kv self size = 400.00 MB +jarvis_new_context_with_model: compute buffer total size = 75.41 MB system_info: n_threads = 16 / 24 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | sampling: repeat_last_n = 64, repeat_penalty = 1.100000, presence_penalty = 0.000000, frequency_penalty = 0.000000, top_k = 40, tfs_z = 1.000000, top_p = 0.950000, typical_p = 1.000000, temp = 0.800000, mirostat = 0, mirostat_lr = 0.100000, mirostat_ent = 5.000000 @@ -271,11 +271,11 @@ How does a Website Work? A website works by having pages, which are made of HTML code. 
This code tells your computer how to display the content on each page you visit – whether it’s an image or text file (like PDFs). In order for someone else’s browser not only be able but also want those same results when accessing any given URL; some additional steps need taken by way of programming scripts that will add functionality such as making links clickable! The most common type is called static HTML pages because they remain unchanged over time unless modified manually (either through editing files directly or using an interface such as WordPress). They are usually served up via HTTP protocols – this means anyone can access them without having any special privileges like being part of a group who is allowed into restricted areas online; however, there may still exist some limitations depending upon where one lives geographically speaking. How to -llama_print_timings: load time = 576.45 ms -llama_print_timings: sample time = 283.10 ms / 400 runs ( 0.71 ms per token, 1412.91 tokens per second) -llama_print_timings: prompt eval time = 599.83 ms / 19 tokens ( 31.57 ms per token, 31.68 tokens per second) -llama_print_timings: eval time = 24513.59 ms / 399 runs ( 61.44 ms per token, 16.28 tokens per second) -llama_print_timings: total time = 25431.49 ms +jarvis_print_timings: load time = 576.45 ms +jarvis_print_timings: sample time = 283.10 ms / 400 runs ( 0.71 ms per token, 1412.91 tokens per second) +jarvis_print_timings: prompt eval time = 599.83 ms / 19 tokens ( 31.57 ms per token, 31.68 tokens per second) +jarvis_print_timings: eval time = 24513.59 ms / 399 runs ( 61.44 ms per token, 16.28 tokens per second) +jarvis_print_timings: total time = 25431.49 ms ``` @@ -297,14 +297,14 @@ Here are the end-to-end binary build and model conversion steps for most support Firstly, you need to get the binary. 
There are different methods that you can follow: - Method 1: Clone this repository and build locally, see [how to build](./docs/build.md) -- Method 2: If you are using MacOS or Linux, you can install llama.cpp via [brew, flox or nix](./docs/install.md) +- Method 2: If you are using MacOS or Linux, you can install jarvis.cpp via [brew, flox or nix](./docs/install.md) - Method 3: Use a Docker image, see [documentation for Docker](./docs/docker.md) -- Method 4: Download pre-built binary from [releases](https://github.com/ggerganov/llama.cpp/releases) +- Method 4: Download pre-built binary from [releases](https://github.com/ggerganov/jarvis.cpp/releases) You can run a basic completion using this command: ```bash -llama-cli -m your_model.gguf -p "I believe the meaning of life is" -n 128 +jarvis-cli -m your_model.gguf -p "I believe the meaning of life is" -n 128 # Output: # I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey. @@ -317,7 +317,7 @@ See [this page](./examples/main/README.md) for a full list of parameters. If you want a more ChatGPT-like experience, you can run in conversation mode by passing `-cnv` as a parameter: ```bash -llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv +jarvis-cli -m your_model.gguf -p "You are a helpful assistant" -cnv # Output: # > hi, who are you? @@ -327,26 +327,26 @@ llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv # Easy peasy! The answer to 1+1 is... 2! ``` -By default, the chat template will be taken from the input model. If you want to use another chat template, pass `--chat-template NAME` as a parameter. 
See the list of [supported templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) +By default, the chat template will be taken from the input model. If you want to use another chat template, pass `--chat-template NAME` as a parameter. See the list of [supported templates](https://github.com/ggerganov/jarvis.cpp/wiki/Templates-supported-by-jarvis_chat_apply_template) ```bash -./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml +./jarvis-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml ``` You can also use your own template via in-prefix, in-suffix and reverse-prompt parameters: ```bash -./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:' +./jarvis-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:' ``` ### Web server -[llama.cpp web server](./examples/server/README.md) is a lightweight [OpenAI API](https://github.com/openai/openai-openapi) compatible HTTP server that can be used to serve local models and easily connect them to existing clients. +[jarvis.cpp web server](./examples/server/README.md) is a lightweight [OpenAI API](https://github.com/openai/openai-openapi) compatible HTTP server that can be used to serve local models and easily connect them to existing clients. 
Example usage: ```bash -./llama-server -m your_model.gguf --port 8080 +./jarvis-server -m your_model.gguf --port 8080 # Basic web UI can be accessed via browser: http://localhost:8080 # Chat completion endpoint: http://localhost:8080/v1/chat/completions @@ -369,16 +369,16 @@ Here is an example of a few-shot interaction, invoked with the command ./examples/chat-13B.sh # custom arguments using a 13B model -./llama-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt +./jarvis-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt ``` -Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `llama-cli` example program. +Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `jarvis-cli` example program. ![image](https://user-images.githubusercontent.com/1991296/224575029-2af3c7dc-5a65-4f64-a6bb-517a532aea38.png) ### Persistent Interaction -The prompt, user inputs, and model generations can be saved and resumed across calls to `./llama-cli` by leveraging `--prompt-cache` and `--prompt-cache-all`. The `./examples/chat-persistent.sh` script demonstrates this with support for long-running, resumable chat sessions. To use this example, you must provide a file to cache the initial chat prompt and a directory to save the chat session, and may optionally provide the same variables as `chat-13B.sh`. The same prompt cache can be reused for new chat sessions. Note that both prompt cache and chat directory are tied to the initial prompt (`PROMPT_TEMPLATE`) and the model file. 
+The prompt, user inputs, and model generations can be saved and resumed across calls to `./jarvis-cli` by leveraging `--prompt-cache` and `--prompt-cache-all`. The `./examples/chat-persistent.sh` script demonstrates this with support for long-running, resumable chat sessions. To use this example, you must provide a file to cache the initial chat prompt and a directory to save the chat session, and may optionally provide the same variables as `chat-13B.sh`. The same prompt cache can be reused for new chat sessions. Note that both prompt cache and chat directory are tied to the initial prompt (`PROMPT_TEMPLATE`) and the model file. ```bash # Start a new chat @@ -397,10 +397,10 @@ PROMPT_TEMPLATE=./prompts/chat-with-bob.txt PROMPT_CACHE_FILE=bob.prompt.bin \ ### Constrained output with grammars -`llama.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only: +`jarvis.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only: ```bash -./llama-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:' +./jarvis-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:' ``` The `grammars/` folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](./grammars/README.md). @@ -409,7 +409,7 @@ For authoring more complex JSON grammars, you can also check out https://grammar ## Build -Please refer to [Build llama.cpp locally](./docs/build.md) +Please refer to [Build jarvis.cpp locally](./docs/build.md) ## Supported backends @@ -430,11 +430,11 @@ Please refer to [Build llama.cpp locally](./docs/build.md) ### Prepare and Quantize > [!NOTE] -> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. 
It is synced from `llama.cpp` main every 6 hours. +> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `jarvis.cpp` main every 6 hours. -To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face. +To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face. -Note: `convert.py` has been moved to `examples/convert_legacy_llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derivatives. +Note: `convert.py` has been moved to `examples/convert_legacy_jarvis.py` and shouldn't be used for anything other than `Jarvis/Jarvis2/Mistral` models and their derivatives. It does not support LLaMA 3, you can use `convert_hf_to_gguf.py` with LLaMA 3 downloaded from Hugging Face. To learn more about quantizing model, [read this documentation](./examples/quantize/README.md) @@ -444,17 +444,17 @@ To learn more about quantizing model, [read this documentation](./examples/quant You can use the `perplexity` example to measure perplexity over a given prompt (lower perplexity is better). For more information, see [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity). 
-To learn more how to measure perplexity using llama.cpp, [read this documentation](./examples/perplexity/README.md) +To learn more how to measure perplexity using jarvis.cpp, [read this documentation](./examples/perplexity/README.md) ## Contributing - Contributors can open PRs -- Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch +- Collaborators can push to branches in the `jarvis.cpp` repo and merge PRs into the `master` branch - Collaborators will be invited based on contributions - Any help with managing issues, PRs and projects is very appreciated! -- See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions +- See [good first issues](https://github.com/ggerganov/jarvis.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions - Read the [CONTRIBUTING.md](CONTRIBUTING.md) for more information -- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/llama.cpp/discussions/205) +- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/jarvis.cpp/discussions/205) - A bit of backstory for those who are interested: [Changelog podcast](https://changelog.com/podcast/532) ## Other documentations @@ -470,13 +470,13 @@ To learn more how to measure perplexity using llama.cpp, [read this documentatio - [Running on Docker](./docs/docker.md) - [Build on Android](./docs/android.md) - [Performance troubleshooting](./docs/development/token_generation_performance_tips.md) -- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks) +- [GGML tips & tricks](https://github.com/ggerganov/jarvis.cpp/wiki/GGML-Tips-&-Tricks) **Seminal papers and background on the models** If your issue is with model generation quality, then please at least scan the following links and papers to understand the limitations of 
LLaMA models. This is especially important when choosing an appropriate model size and appreciating both the significant and subtle differences between LLaMA models and ChatGPT: - LLaMA: - - [Introducing LLaMA: A foundational, 65-billion-parameter large language model](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) + - [Introducing LLaMA: A foundational, 65-billion-parameter large language model](https://ai.facebook.com/blog/large-language-model-jarvis-meta-ai/) - [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) - GPT-3 - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) diff --git a/SECURITY.md b/SECURITY.md index f4322c6ee4d18..191bede7d6323 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,6 +1,6 @@ # Security Policy - - [**Using llama.cpp securely**](#using-llamacpp-securely) + - [**Using jarvis.cpp securely**](#using-jarviscpp-securely) - [Untrusted models](#untrusted-models) - [Untrusted inputs](#untrusted-inputs) - [Data privacy](#data-privacy) @@ -8,7 +8,7 @@ - [Multi-Tenant environments](#multi-tenant-environments) - [**Reporting a vulnerability**](#reporting-a-vulnerability) -## Using llama.cpp securely +## Using jarvis.cpp securely ### Untrusted models Be careful when running untrusted models. This classification includes models created by unknown developers or utilizing data obtained from unknown sources. @@ -57,11 +57,11 @@ If you intend to run multiple models in parallel with shared memory, it is your ## Reporting a vulnerability -Beware that none of the topics under [Using llama.cpp securely](#using-llamacpp-securely) are considered vulnerabilities of LLaMA C++. +Beware that none of the topics under [Using jarvis.cpp securely](#using-jarviscpp-securely) are considered vulnerabilities of LLaMA C++. However, If you have discovered a security vulnerability in this project, please report it privately. 
**Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. -Please disclose it as a private [security advisory](https://github.com/ggerganov/llama.cpp/security/advisories/new). +Please disclose it as a private [security advisory](https://github.com/ggerganov/jarvis.cpp/security/advisories/new). A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/ci/README.md b/ci/README.md index 4064705190697..a6a39b7901f18 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,11 +1,11 @@ # CI -In addition to [Github Actions](https://github.com/ggerganov/llama.cpp/actions) `llama.cpp` uses a custom CI framework: +In addition to [Github Actions](https://github.com/ggerganov/jarvis.cpp/actions) `jarvis.cpp` uses a custom CI framework: https://github.com/ggml-org/ci It monitors the `master` branch for new commits and runs the -[ci/run.sh](https://github.com/ggerganov/llama.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us +[ci/run.sh](https://github.com/ggerganov/jarvis.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us to execute heavier workloads compared to just using Github Actions. Also with time, the cloud instances will be scaled to cover various hardware architectures, including GPU and Apple Silicon instances. diff --git a/ci/run.sh b/ci/run.sh index dc26d94eed1fd..6551fa3315363 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -36,7 +36,7 @@ sd=`dirname $0` cd $sd/../ SRC=`pwd` -CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON" +CMAKE_EXTRA="-DJARVIS_FATAL_WARNINGS=ON" if [ ! 
-z ${GG_BUILD_METAL} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON" @@ -217,7 +217,7 @@ function gg_sum_test_scripts_release { function gg_get_model { local gguf_0="$MNT/models/pythia/1.4B/ggml-model-f16.gguf" local gguf_1="$MNT/models/pythia/2.8B/ggml-model-f16.gguf" - local gguf_2="$MNT/models/open-llama/7B-v2/ggml-model-f16.gguf" + local gguf_2="$MNT/models/open-jarvis/7B-v2/ggml-model-f16.gguf" if [[ -s $gguf_0 ]]; then echo -n "$gguf_0" elif [[ -s $gguf_1 ]]; then @@ -236,7 +236,7 @@ function gg_run_ctest_with_model_debug { local model; model=$(gg_get_model) cd build-ci-debug set -e - (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log + (JARVISCPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log set +e cd .. } @@ -247,7 +247,7 @@ function gg_run_ctest_with_model_release { local model; model=$(gg_get_model) cd build-ci-release set -e - (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log + (JARVISCPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log set +e cd .. 
} @@ -272,24 +272,24 @@ function gg_sum_ctest_with_model_release { gg_printf '```\n' } -# open_llama_7b_v2 +# open_jarvis_7b_v2 -function gg_run_open_llama_7b_v2 { +function gg_run_open_jarvis_7b_v2 { cd ${SRC} - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/config.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/tokenizer.model - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/tokenizer_config.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/special_tokens_map.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/pytorch_model.bin.index.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/pytorch_model-00001-of-00002.bin - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/pytorch_model-00002-of-00002.bin - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/generation_config.json + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/raw/main/config.json + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/resolve/main/tokenizer.model + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/raw/main/tokenizer_config.json + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/raw/main/special_tokens_map.json + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/raw/main/pytorch_model.bin.index.json + gg_wget models-mnt/open-jarvis/7B-v2/ 
https://huggingface.co/openlm-research/open_jarvis_7b_v2/resolve/main/pytorch_model-00001-of-00002.bin + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/resolve/main/pytorch_model-00002-of-00002.bin + gg_wget models-mnt/open-jarvis/7B-v2/ https://huggingface.co/openlm-research/open_jarvis_7b_v2/raw/main/generation_config.json gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/ - path_models="../models-mnt/open-llama/7B-v2" + path_models="../models-mnt/open-jarvis/7B-v2" path_wiki="../models-mnt/wikitext/wikitext-2-raw" rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release @@ -299,7 +299,7 @@ function gg_run_open_llama_7b_v2 { (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log - python3 ../examples/convert_legacy_llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf + python3 ../examples/convert_legacy_jarvis.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf model_f16="${path_models}/ggml-model-f16.gguf" model_q8_0="${path_models}/ggml-model-q8_0.gguf" @@ -315,47 +315,47 @@ function gg_run_open_llama_7b_v2 { wiki_test="${path_wiki}/wiki.test.raw" - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli --model ${model_f16} -t 1 -ngl 999 
-s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-cli --model ${model_q8_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli --model ${model_q4_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli --model ${model_q4_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli --model ${model_q5_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli --model ${model_q5_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli --model ${model_q2_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli --model ${model_q3_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli --model ${model_q4_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli --model ${model_q5_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli --model ${model_q6_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 
--chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - - (time ./bin/llama-save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + 
./bin/jarvis-quantize ${model_f16} ${model_q8_0} q8_0 + ./bin/jarvis-quantize ${model_f16} ${model_q4_0} q4_0 + ./bin/jarvis-quantize ${model_f16} ${model_q4_1} q4_1 + ./bin/jarvis-quantize ${model_f16} ${model_q5_0} q5_0 + ./bin/jarvis-quantize ${model_f16} ${model_q5_1} q5_1 + ./bin/jarvis-quantize ${model_f16} ${model_q2_k} q2_k + ./bin/jarvis-quantize ${model_f16} ${model_q3_k} q3_k + ./bin/jarvis-quantize ${model_f16} ${model_q4_k} q4_k + ./bin/jarvis-quantize ${model_f16} ${model_q5_k} q5_k + ./bin/jarvis-quantize ${model_f16} ${model_q6_k} q6_k + + (time ./bin/jarvis-cli --model ${model_f16} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-cli --model ${model_q8_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-cli --model ${model_q4_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-cli --model ${model_q4_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-cli --model ${model_q5_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-cli --model ${model_q5_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-cli --model ${model_q2_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-cli --model ${model_q3_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-cli --model ${model_q4_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the 
meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-cli --model ${model_q5_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-cli --model ${model_q6_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | 
tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log + + (time ./bin/jarvis-save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { qnt="$1" @@ -387,7 +387,7 @@ function gg_run_open_llama_7b_v2 { set +e } -function gg_sum_open_llama_7b_v2 { +function gg_sum_open_jarvis_7b_v2 { gg_printf '### %s\n\n' "${ci}" gg_printf 'OpenLLaMA 7B-v2:\n' @@ -449,45 +449,45 @@ function gg_run_pythia_1_4b { wiki_test_60="${path_wiki}/wiki.test-60.raw" - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli --model ${model_f16} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-cli --model ${model_q8_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli --model ${model_q4_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli 
--model ${model_q4_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli --model ${model_q5_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli --model ${model_q5_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli --model ${model_q2_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli --model ${model_q3_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli --model ${model_q4_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli --model ${model_q5_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli --model ${model_q6_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time 
./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - - (time ./bin/llama-save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -fa --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + ./bin/jarvis-quantize ${model_f16} ${model_q8_0} q8_0 + ./bin/jarvis-quantize ${model_f16} ${model_q4_0} q4_0 + ./bin/jarvis-quantize ${model_f16} ${model_q4_1} q4_1 + ./bin/jarvis-quantize ${model_f16} ${model_q5_0} q5_0 + ./bin/jarvis-quantize ${model_f16} ${model_q5_1} q5_1 + ./bin/jarvis-quantize ${model_f16} ${model_q2_k} q2_k + ./bin/jarvis-quantize ${model_f16} ${model_q3_k} q3_k + ./bin/jarvis-quantize ${model_f16} ${model_q4_k} q4_k + ./bin/jarvis-quantize ${model_f16} ${model_q5_k} q5_k + ./bin/jarvis-quantize ${model_f16} ${model_q6_k} q6_k + + (time ./bin/jarvis-cli --model ${model_f16} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-cli --model ${model_q8_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-cli --model ${model_q4_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 
2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-cli --model ${model_q4_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-cli --model ${model_q5_0} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-cli --model ${model_q5_1} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-cli --model ${model_q2_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-cli --model ${model_q3_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-cli --model ${model_q4_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-cli --model ${model_q5_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-cli --model ${model_q6_k} -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-perplexity --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-perplexity --model ${model_q8_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_1} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-perplexity --model ${model_q5_0} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-perplexity --model ${model_q5_1} -f 
${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-perplexity --model ${model_q2_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-perplexity --model ${model_q3_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-perplexity --model ${model_q4_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-perplexity --model ${model_q5_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-perplexity --model ${model_q6_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log + + (time ./bin/jarvis-save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -fa --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { qnt="$1" @@ -580,47 +580,47 @@ function gg_run_pythia_2_8b { wiki_test="${path_wiki}/wiki.test.raw" - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli --model ${model_f16} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-cli --model ${model_q8_0} 
-t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli --model ${model_q4_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli --model ${model_q4_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli --model ${model_q5_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli --model ${model_q5_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli --model ${model_q2_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli --model ${model_q3_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli --model ${model_q4_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli --model ${model_q5_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli --model ${model_q6_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 
--chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - - (time ./bin/llama-save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + ./bin/jarvis-quantize ${model_f16} ${model_q8_0} q8_0 + ./bin/jarvis-quantize ${model_f16} ${model_q4_0} q4_0 + ./bin/jarvis-quantize ${model_f16} 
${model_q4_1} q4_1 + ./bin/jarvis-quantize ${model_f16} ${model_q5_0} q5_0 + ./bin/jarvis-quantize ${model_f16} ${model_q5_1} q5_1 + ./bin/jarvis-quantize ${model_f16} ${model_q2_k} q2_k + ./bin/jarvis-quantize ${model_f16} ${model_q3_k} q3_k + ./bin/jarvis-quantize ${model_f16} ${model_q4_k} q4_k + ./bin/jarvis-quantize ${model_f16} ${model_q5_k} q5_k + ./bin/jarvis-quantize ${model_f16} ${model_q6_k} q6_k + + (time ./bin/jarvis-cli --model ${model_f16} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-cli --model ${model_q8_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-cli --model ${model_q4_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-cli --model ${model_q4_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-cli --model ${model_q5_0} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-cli --model ${model_q5_1} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-cli --model ${model_q2_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-cli --model ${model_q3_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-cli --model ${model_q4_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-cli --model ${model_q5_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I 
believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-cli --model ${model_q6_k} -t 1 -ngl 999 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/jarvis-perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/jarvis-perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/jarvis-perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/jarvis-perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/jarvis-perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/jarvis-perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/jarvis-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/jarvis-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + + (time ./bin/jarvis-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a 
$OUT/${ci}-imatrix.log + + (time ./bin/jarvis-save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/jarvis-save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { qnt="$1" @@ -704,10 +704,10 @@ function gg_run_embd_bge_small { model_f16="${path_models}/ggml-model-f16.gguf" model_q8_0="${path_models}/ggml-model-q8_0.gguf" - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 + ./bin/jarvis-quantize ${model_f16} ${model_q8_0} q8_0 - (time ./bin/llama-embedding --model ${model_f16} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-embedding --model ${model_q8_0} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/jarvis-embedding --model ${model_f16} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/jarvis-embedding --model ${model_q8_0} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log set +e } @@ -752,7 +752,7 @@ function gg_run_rerank_tiny { model_f16="${path_models}/ggml-model-f16.gguf" # for this model, the SEP token is "" - (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?hi\nwhat is panda?it's a bear\nwhat is panda?
The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log + (time ./bin/jarvis-embedding --model ${model_f16} -p "what is panda?hi\nwhat is panda?it's a bear\nwhat is panda?The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log # sample output # rerank score 0: 0.029 @@ -804,11 +804,11 @@ function gg_check_build_requirements { ## main -export LLAMA_LOG_PREFIX=1 -export LLAMA_LOG_TIMESTAMPS=1 +export JARVIS_LOG_PREFIX=1 +export JARVIS_LOG_TIMESTAMPS=1 if [ -z ${GG_BUILD_LOW_PERF} ]; then - # Create symlink: ./llama.cpp/models-mnt -> $MNT/models/models-mnt + # Create symlink: ./jarvis.cpp/models-mnt -> $MNT/models/models-mnt rm -rf ${SRC}/models-mnt mnt_models=${MNT}/models mkdir -p ${mnt_models} @@ -841,7 +841,7 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then test $ret -eq 0 && gg_run pythia_1_4b else test $ret -eq 0 && gg_run pythia_2_8b - #test $ret -eq 0 && gg_run open_llama_7b_v2 + #test $ret -eq 0 && gg_run open_jarvis_7b_v2 fi test $ret -eq 0 && gg_run ctest_with_model_debug test $ret -eq 0 && gg_run ctest_with_model_release diff --git a/cmake/llama-config.cmake.in b/cmake/llama-config.cmake.in index f072b76a39d2e..a64ac57a49a54 100644 --- a/cmake/llama-config.cmake.in +++ b/cmake/llama-config.cmake.in @@ -1,7 +1,7 @@ -set(LLAMA_VERSION @LLAMA_INSTALL_VERSION@) -set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@) -set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@) -set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@) +set(JARVIS_VERSION @JARVIS_INSTALL_VERSION@) +set(JARVIS_BUILD_COMMIT @JARVIS_BUILD_COMMIT@) +set(JARVIS_BUILD_NUMBER @JARVIS_BUILD_NUMBER@) +set(JARVIS_SHARED_LIB @BUILD_SHARED_LIBS@) set(GGML_BLAS @GGML_BLAS@) set(GGML_CUDA @GGML_CUDA@) @@ -18,9 +18,9 @@ 
set(GGML_OPENMP @GGML_OPENMP@) @PACKAGE_INIT@ -set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@") -set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@") -set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@") +set_and_check(JARVIS_INCLUDE_DIR "@PACKAGE_JARVIS_INCLUDE_INSTALL_DIR@") +set_and_check(JARVIS_LIB_DIR "@PACKAGE_JARVIS_LIB_INSTALL_DIR@") +set_and_check(JARVIS_BIN_DIR "@PACKAGE_JARVIS_BIN_INSTALL_DIR@") # Ensure transient dependencies satisfied @@ -66,25 +66,25 @@ endif() find_library(ggml_LIBRARY ggml REQUIRED - HINTS ${LLAMA_LIB_DIR}) + HINTS ${JARVIS_LIB_DIR}) -find_library(llama_LIBRARY llama +find_library(jarvis_LIBRARY jarvis REQUIRED - HINTS ${LLAMA_LIB_DIR}) + HINTS ${JARVIS_LIB_DIR}) -set(_llama_link_deps "${ggml_LIBRARY}" "@GGML_LINK_LIBRARIES@") -set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@") +set(_jarvis_link_deps "${ggml_LIBRARY}" "@GGML_LINK_LIBRARIES@") +set(_jarvis_transient_defines "@GGML_TRANSIENT_DEFINES@") -add_library(llama UNKNOWN IMPORTED) +add_library(jarvis UNKNOWN IMPORTED) -set_target_properties(llama +set_target_properties(jarvis PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}" - INTERFACE_LINK_LIBRARIES "${_llama_link_deps}" - INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}" + INTERFACE_INCLUDE_DIRECTORIES "${JARVIS_INCLUDE_DIR}" + INTERFACE_LINK_LIBRARIES "${_jarvis_link_deps}" + INTERFACE_COMPILE_DEFINITIONS "${_jarvis_transient_defines}" IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" - IMPORTED_LOCATION "${llama_LIBRARY}" + IMPORTED_LOCATION "${jarvis_LIBRARY}" INTERFACE_COMPILE_FEATURES cxx_std_11 POSITION_INDEPENDENT_CODE ON ) -check_required_components(Llama) +check_required_components(Jarvis) diff --git a/cmake/llama.pc.in b/cmake/llama.pc.in index 326acbb6108fd..eb622d2a886e7 100644 --- a/cmake/llama.pc.in +++ b/cmake/llama.pc.in @@ -3,8 +3,8 @@ exec_prefix=${prefix} libdir=${exec_prefix}/lib includedir=${prefix}/include -Name: llama +Name: jarvis 
Description: Port of Facebook's LLaMA model in C/C++ Version: @PROJECT_VERSION@ -Libs: -L${libdir} -lllama +Libs: -L${libdir} -ljarvis Cflags: -I${includedir} diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 042e895add5e2..cfaa05b33ab72 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -74,17 +74,17 @@ if (BUILD_SHARED_LIBS) set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) endif() -set(LLAMA_COMMON_EXTRA_LIBS build_info) +set(JARVIS_COMMON_EXTRA_LIBS build_info) # Use curl to download model url -if (LLAMA_CURL) +if (JARVIS_CURL) find_package(CURL REQUIRED) - add_definitions(-DLLAMA_USE_CURL) + add_definitions(-DJARVIS_USE_CURL) include_directories(${CURL_INCLUDE_DIRS}) find_library(CURL_LIBRARY curl REQUIRED) - set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY}) + set(JARVIS_COMMON_EXTRA_LIBS ${JARVIS_COMMON_EXTRA_LIBS} ${CURL_LIBRARY}) endif () target_include_directories(${TARGET} PUBLIC .) target_compile_features (${TARGET} PUBLIC cxx_std_11) -target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads) +target_link_libraries (${TARGET} PRIVATE ${JARVIS_COMMON_EXTRA_LIBS} PUBLIC jarvis Threads::Threads) diff --git a/common/arg.cpp b/common/arg.cpp index e1e933934f0ef..73a3542593ca2 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -17,7 +17,7 @@ using json = nlohmann::ordered_json; -common_arg & common_arg::set_examples(std::initializer_list examples) { +common_arg & common_arg::set_examples(std::initializer_list examples) { this->examples = std::move(examples); return *this; } @@ -33,7 +33,7 @@ common_arg & common_arg::set_sparam() { return *this; } -bool common_arg::in_example(enum llama_example ex) { +bool common_arg::in_example(enum jarvis_example ex) { return examples.find(ex) != examples.end(); } @@ -279,7 +279,7 @@ static void common_params_print_usage(common_params_context & ctx_arg) { std::vector sparam_options; std::vector 
specific_options; for (auto & opt : ctx_arg.options) { - // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example + // in case multiple JARVIS_EXAMPLE_* are set, we prioritize the JARVIS_EXAMPLE_* matching current example if (opt.is_sparam) { sparam_options.push_back(&opt); } else if (opt.in_example(ctx_arg.ex)) { @@ -292,12 +292,12 @@ static void common_params_print_usage(common_params_context & ctx_arg) { print_options(common_options); printf("\n\n----- sampling params -----\n\n"); print_options(sparam_options); - // TODO: maybe convert enum llama_example to string + // TODO: maybe convert enum jarvis_example to string printf("\n\n----- example-specific params -----\n\n"); print_options(specific_options); } -bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) { +bool common_params_parse(int argc, char ** argv, common_params & params, jarvis_example ex, void(*print_usage)(int, char **)) { auto ctx_arg = common_params_parser_init(params, ex, print_usage); const common_params params_org = ctx_arg.params; // the example can modify the default params @@ -322,7 +322,7 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e return true; } -common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) { +common_params_context common_params_parser_init(common_params & params, jarvis_example ex, void(*print_usage)(int, char **)) { common_params_context ctx_arg(params); ctx_arg.print_usage = print_usage; ctx_arg.ex = ex; @@ -339,12 +339,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex /** * filter options by example * rules: - * - all examples inherit options from LLAMA_EXAMPLE_COMMON - * - if LLAMA_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example - * - if both {LLAMA_EXAMPLE_COMMON, 
LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example + * - all examples inherit options from JARVIS_EXAMPLE_COMMON + * - if JARVIS_EXAMPLE_* is set (other than COMMON), we only show the option in the corresponding example + * - if both {JARVIS_EXAMPLE_COMMON, JARVIS_EXAMPLE_*,} are set, we will prioritize the JARVIS_EXAMPLE_* matching current example */ auto add_opt = [&](common_arg arg) { - if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) { + if (arg.in_example(ex) || arg.in_example(JARVIS_EXAMPLE_COMMON)) { ctx_arg.options.push_back(std::move(arg)); } }; @@ -361,8 +361,8 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--version"}, "show version and build info", [](common_params &) { - fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT); - fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET); + fprintf(stderr, "version: %d (%s)\n", JARVIS_BUILD_NUMBER, JARVIS_COMMIT); + fprintf(stderr, "built with %s for %s\n", JARVIS_COMPILER, JARVIS_BUILD_TARGET); exit(0); } )); @@ -379,14 +379,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.display_prompt = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-co", "--color"}, string_format("colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? 
"true" : "false"), [](common_params & params) { params.use_color = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP})); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_INFILL, JARVIS_EXAMPLE_SPECULATIVE, JARVIS_EXAMPLE_LOOKUP})); add_opt(common_arg( {"-t", "--threads"}, "N", string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads), @@ -396,7 +396,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.cpuparams.n_threads = std::thread::hardware_concurrency(); } } - ).set_env("LLAMA_ARG_THREADS")); + ).set_env("JARVIS_ARG_THREADS")); add_opt(common_arg( {"-tb", "--threads-batch"}, "N", "number of threads to use during batch and prompt processing (default: same as --threads)", @@ -416,7 +416,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.draft_cpuparams.n_threads = std::thread::hardware_concurrency(); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-tbd", "--threads-batch-draft"}, "N", "number of threads to use during batch and prompt processing (default: same as --threads-draft)", @@ -426,7 +426,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.draft_cpuparams_batch.n_threads = std::thread::hardware_concurrency(); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-C", "--cpu-mask"}, "M", "CPU affinity mask: arbitrarily long hex. 
Complements cpu-range (default: \"\")", @@ -524,7 +524,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex throw std::invalid_argument("invalid cpumask"); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-Crd", "--cpu-range-draft"}, "lo-hi", "Ranges of CPUs for affinity. Complements --cpu-mask-draft", @@ -534,14 +534,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex throw std::invalid_argument("invalid range"); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--cpu-strict-draft"}, "<0|1>", "Use strict CPU placement for draft model (default: same as --cpu-strict)", [](common_params & params, int value) { params.draft_cpuparams.strict_cpu = value; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--prio-draft"}, "N", string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams.priority), @@ -551,14 +551,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } params.draft_cpuparams.priority = (enum ggml_sched_priority) prio; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--poll-draft"}, "<0|1>", "Use polling to wait for draft model work (default: same as --poll])", [](common_params & params, int value) { params.draft_cpuparams.poll = value; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-Cbd", "--cpu-mask-batch-draft"}, "M", "Draft model CPU affinity mask. 
Complements cpu-range-draft (default: same as --cpu-mask)", @@ -568,7 +568,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex throw std::invalid_argument("invalid cpumask"); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi", "Ranges of CPUs for affinity. Complements --cpu-mask-draft-batch)", @@ -578,14 +578,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex throw std::invalid_argument("invalid cpumask"); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--cpu-strict-batch-draft"}, "<0|1>", "Use strict CPU placement for draft model (default: --cpu-strict-draft)", [](common_params & params, int value) { params.draft_cpuparams_batch.strict_cpu = value; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--prio-batch-draft"}, "N", string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams_batch.priority), @@ -595,70 +595,70 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } params.draft_cpuparams_batch.priority = (enum ggml_sched_priority) prio; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--poll-batch-draft"}, "<0|1>", "Use polling to wait for draft model work (default: --poll-draft)", [](common_params & params, int value) { params.draft_cpuparams_batch.poll = value; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"--draft"}, "N", string_format("number of tokens to draft for speculative decoding (default: %d)", params.n_draft), [](common_params & params, int value) { params.n_draft = value; } - 
).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE, JARVIS_EXAMPLE_LOOKUP})); add_opt(common_arg( {"-ps", "--p-split"}, "N", string_format("speculative decoding split probability (default: %.1f)", (double)params.p_split), [](common_params & params, const std::string & value) { params.p_split = std::stof(value); } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-lcs", "--lookup-cache-static"}, "FNAME", "path to static lookup cache to use for lookup decoding (not updated by generation)", [](common_params & params, const std::string & value) { params.lookup_cache_static = value; } - ).set_examples({LLAMA_EXAMPLE_LOOKUP})); + ).set_examples({JARVIS_EXAMPLE_LOOKUP})); add_opt(common_arg( {"-lcd", "--lookup-cache-dynamic"}, "FNAME", "path to dynamic lookup cache to use for lookup decoding (updated by generation)", [](common_params & params, const std::string & value) { params.lookup_cache_dynamic = value; } - ).set_examples({LLAMA_EXAMPLE_LOOKUP})); + ).set_examples({JARVIS_EXAMPLE_LOOKUP})); add_opt(common_arg( {"-c", "--ctx-size"}, "N", string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx), [](common_params & params, int value) { params.n_ctx = value; } - ).set_env("LLAMA_ARG_CTX_SIZE")); + ).set_env("JARVIS_ARG_CTX_SIZE")); add_opt(common_arg( {"-n", "--predict", "--n-predict"}, "N", string_format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict), [](common_params & params, int value) { params.n_predict = value; } - ).set_env("LLAMA_ARG_N_PREDICT")); + ).set_env("JARVIS_ARG_N_PREDICT")); add_opt(common_arg( {"-b", "--batch-size"}, "N", string_format("logical maximum batch size (default: %d)", params.n_batch), [](common_params & params, int value) { params.n_batch = value; } - ).set_env("LLAMA_ARG_BATCH")); + ).set_env("JARVIS_ARG_BATCH")); 
add_opt(common_arg( {"-ub", "--ubatch-size"}, "N", string_format("physical maximum batch size (default: %d)", params.n_ubatch), [](common_params & params, int value) { params.n_ubatch = value; } - ).set_env("LLAMA_ARG_UBATCH")); + ).set_env("JARVIS_ARG_UBATCH")); add_opt(common_arg( {"--keep"}, "N", string_format("number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep), @@ -672,24 +672,24 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.ctx_shift = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT")); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_NO_CONTEXT_SHIFT")); add_opt(common_arg( {"--chunks"}, "N", string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks), [](common_params & params, int value) { params.n_chunks = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX, JARVIS_EXAMPLE_PERPLEXITY, JARVIS_EXAMPLE_RETRIEVAL})); add_opt(common_arg( {"-fa", "--flash-attn"}, string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"), [](common_params & params) { params.flash_attn = true; } - ).set_env("LLAMA_ARG_FLASH_ATTN")); + ).set_env("JARVIS_ARG_FLASH_ATTN")); add_opt(common_arg( {"-p", "--prompt"}, "PROMPT", - ex == LLAMA_EXAMPLE_MAIN + ex == JARVIS_EXAMPLE_MAIN ? "prompt to start generation with\nif -cnv is set, this will be used as system prompt" : "prompt to start generation with", [](common_params & params, const std::string & value) { @@ -698,12 +698,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex )); add_opt(common_arg( {"--no-perf"}, - string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? 
"true" : "false"), + string_format("disable internal libjarvis performance timings (default: %s)", params.no_perf ? "true" : "false"), [](common_params & params) { params.no_perf = true; params.sparams.no_perf = true; } - ).set_env("LLAMA_ARG_NO_PERF")); + ).set_env("JARVIS_ARG_NO_PERF")); add_opt(common_arg( {"-f", "--file"}, "FNAME", "a file containing the prompt (default: none)", @@ -730,7 +730,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } params.in_files.push_back(value); } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"-bf", "--binary-file"}, "FNAME", "binary file containing the prompt (default: none)", @@ -767,42 +767,42 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, int value) { params.n_print = value; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--prompt-cache"}, "FNAME", "file to cache prompt state for faster startup (default: none)", [](common_params & params, const std::string & value) { params.path_prompt_cache = value; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--prompt-cache-all"}, "if specified, saves user input and generations to cache as well\n", [](common_params & params) { params.prompt_cache_all = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--prompt-cache-ro"}, "if specified, uses the prompt cache but does not update it", [](common_params & params) { params.prompt_cache_ro = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-r", "--reverse-prompt"}, "PROMPT", "halt generation at PROMPT, return control in interactive mode\n", [](common_params & params, const std::string & value) { params.antiprompt.emplace_back(value); } - 
).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-sp", "--special"}, string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"), [](common_params & params) { params.special = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER})); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_SERVER})); add_opt(common_arg( {"-cnv", "--conversation"}, string_format( @@ -815,28 +815,28 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.conversation = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-i", "--interactive"}, string_format("run in interactive mode (default: %s)", params.interactive ? "true" : "false"), [](common_params & params) { params.interactive = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-if", "--interactive-first"}, string_format("run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? 
"true" : "false"), [](common_params & params) { params.interactive_first = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-mli", "--multiline-input"}, "allows you to write or paste multiple lines without ending each in '\\'", [](common_params & params) { params.multiline_input = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--in-prefix-bos"}, "prefix BOS to user inputs, preceding the `--in-prefix` string", @@ -844,7 +844,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.input_prefix_bos = true; params.enable_chat_template = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--in-prefix"}, "STRING", "string to prefix user inputs with (default: empty)", @@ -852,7 +852,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.input_prefix = value; params.enable_chat_template = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL})); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_INFILL})); add_opt(common_arg( {"--in-suffix"}, "STRING", "string to suffix after user inputs with (default: empty)", @@ -860,14 +860,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.input_suffix = value; params.enable_chat_template = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL})); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_INFILL})); add_opt(common_arg( {"--no-warmup"}, "skip warming up the model with an empty run", [](common_params & params) { params.warmup = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"--spm-infill"}, string_format( @@ -877,7 +877,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & 
params) { params.spm_infill = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL})); + ).set_examples({JARVIS_EXAMPLE_SERVER, JARVIS_EXAMPLE_INFILL})); add_opt(common_arg( {"--samplers"}, "SAMPLERS", string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()), @@ -888,7 +888,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_sparam()); add_opt(common_arg( {"-s", "--seed"}, "SEED", - string_format("RNG seed (default: %d, use random seed for %d)", params.sparams.seed, LLAMA_DEFAULT_SEED), + string_format("RNG seed (default: %d, use random seed for %d)", params.sparams.seed, JARVIS_DEFAULT_SEED), [](common_params & params, const std::string & value) { params.sparams.seed = std::stoul(value); } @@ -1101,7 +1101,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'", [](common_params & params, const std::string & value) { std::stringstream ss(value); - llama_token key; + jarvis_token key; char sign; std::string value_str; try { @@ -1149,103 +1149,103 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--pooling"}, "{none,mean,cls,last,rank}", "pooling type for embeddings, use model default if unspecified", [](common_params & params, const std::string & value) { - /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; } - else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; } - else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; } - else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; } - else if (value == "rank") { params.pooling_type = LLAMA_POOLING_TYPE_RANK; } + /**/ if (value == "none") { params.pooling_type = JARVIS_POOLING_TYPE_NONE; } + else if (value == "mean") { params.pooling_type = JARVIS_POOLING_TYPE_MEAN; } + 
else if (value == "cls") { params.pooling_type = JARVIS_POOLING_TYPE_CLS; } + else if (value == "last") { params.pooling_type = JARVIS_POOLING_TYPE_LAST; } + else if (value == "rank") { params.pooling_type = JARVIS_POOLING_TYPE_RANK; } else { throw std::invalid_argument("invalid value"); } } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_POOLING")); + ).set_examples({JARVIS_EXAMPLE_EMBEDDING, JARVIS_EXAMPLE_RETRIEVAL, JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_POOLING")); add_opt(common_arg( {"--attention"}, "{causal,non-causal}", "attention type for embeddings, use model default if unspecified", [](common_params & params, const std::string & value) { - /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; } - else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; } + /**/ if (value == "causal") { params.attention_type = JARVIS_ATTENTION_TYPE_CAUSAL; } + else if (value == "non-causal") { params.attention_type = JARVIS_ATTENTION_TYPE_NON_CAUSAL; } else { throw std::invalid_argument("invalid value"); } } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); + ).set_examples({JARVIS_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--rope-scaling"}, "{none,linear,yarn}", "RoPE frequency scaling method, defaults to linear unless specified by the model", [](common_params & params, const std::string & value) { - /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; } - else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; } - else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; } + /**/ if (value == "none") { params.rope_scaling_type = JARVIS_ROPE_SCALING_TYPE_NONE; } + else if (value == "linear") { params.rope_scaling_type = JARVIS_ROPE_SCALING_TYPE_LINEAR; } + else if (value == "yarn") { params.rope_scaling_type = JARVIS_ROPE_SCALING_TYPE_YARN; } else { throw 
std::invalid_argument("invalid value"); } } - ).set_env("LLAMA_ARG_ROPE_SCALING_TYPE")); + ).set_env("JARVIS_ARG_ROPE_SCALING_TYPE")); add_opt(common_arg( {"--rope-scale"}, "N", "RoPE context scaling factor, expands context by a factor of N", [](common_params & params, const std::string & value) { params.rope_freq_scale = 1.0f / std::stof(value); } - ).set_env("LLAMA_ARG_ROPE_SCALE")); + ).set_env("JARVIS_ARG_ROPE_SCALE")); add_opt(common_arg( {"--rope-freq-base"}, "N", "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)", [](common_params & params, const std::string & value) { params.rope_freq_base = std::stof(value); } - ).set_env("LLAMA_ARG_ROPE_FREQ_BASE")); + ).set_env("JARVIS_ARG_ROPE_FREQ_BASE")); add_opt(common_arg( {"--rope-freq-scale"}, "N", "RoPE frequency scaling factor, expands context by a factor of 1/N", [](common_params & params, const std::string & value) { params.rope_freq_scale = std::stof(value); } - ).set_env("LLAMA_ARG_ROPE_FREQ_SCALE")); + ).set_env("JARVIS_ARG_ROPE_FREQ_SCALE")); add_opt(common_arg( {"--yarn-orig-ctx"}, "N", string_format("YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx), [](common_params & params, int value) { params.yarn_orig_ctx = value; } - ).set_env("LLAMA_ARG_YARN_ORIG_CTX")); + ).set_env("JARVIS_ARG_YARN_ORIG_CTX")); add_opt(common_arg( {"--yarn-ext-factor"}, "N", string_format("YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor), [](common_params & params, const std::string & value) { params.yarn_ext_factor = std::stof(value); } - ).set_env("LLAMA_ARG_YARN_EXT_FACTOR")); + ).set_env("JARVIS_ARG_YARN_EXT_FACTOR")); add_opt(common_arg( {"--yarn-attn-factor"}, "N", string_format("YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor), [](common_params & params, const std::string & value) { params.yarn_attn_factor = std::stof(value); } - 
).set_env("LLAMA_ARG_YARN_ATTN_FACTOR")); + ).set_env("JARVIS_ARG_YARN_ATTN_FACTOR")); add_opt(common_arg( {"--yarn-beta-slow"}, "N", string_format("YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow), [](common_params & params, const std::string & value) { params.yarn_beta_slow = std::stof(value); } - ).set_env("LLAMA_ARG_YARN_BETA_SLOW")); + ).set_env("JARVIS_ARG_YARN_BETA_SLOW")); add_opt(common_arg( {"--yarn-beta-fast"}, "N", string_format("YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast), [](common_params & params, const std::string & value) { params.yarn_beta_fast = std::stof(value); } - ).set_env("LLAMA_ARG_YARN_BETA_FAST")); + ).set_env("JARVIS_ARG_YARN_BETA_FAST")); add_opt(common_arg( {"-gan", "--grp-attn-n"}, "N", string_format("group-attention factor (default: %d)", params.grp_attn_n), [](common_params & params, int value) { params.grp_attn_n = value; } - ).set_env("LLAMA_ARG_GRP_ATTN_N").set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_PASSKEY})); + ).set_env("JARVIS_ARG_GRP_ATTN_N").set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_PASSKEY})); add_opt(common_arg( {"-gaw", "--grp-attn-w"}, "N", string_format("group-attention width (default: %d)", params.grp_attn_w), [](common_params & params, int value) { params.grp_attn_w = value; } - ).set_env("LLAMA_ARG_GRP_ATTN_W").set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_env("JARVIS_ARG_GRP_ATTN_W").set_examples({JARVIS_EXAMPLE_MAIN})); add_opt(common_arg( {"-dkvc", "--dump-kv-cache"}, "verbose print of the KV cache", @@ -1259,7 +1259,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.no_kv_offload = true; } - ).set_env("LLAMA_ARG_NO_KV_OFFLOAD")); + ).set_env("JARVIS_ARG_NO_KV_OFFLOAD")); add_opt(common_arg( {"-ctk", "--cache-type-k"}, "TYPE", string_format("KV cache data type for K (default: %s)", params.cache_type_k.c_str()), @@ -1267,7 +1267,7 @@ common_params_context 
common_params_parser_init(common_params & params, llama_ex // TODO: get the type right here params.cache_type_k = value; } - ).set_env("LLAMA_ARG_CACHE_TYPE_K")); + ).set_env("JARVIS_ARG_CACHE_TYPE_K")); add_opt(common_arg( {"-ctv", "--cache-type-v"}, "TYPE", string_format("KV cache data type for V (default: %s)", params.cache_type_v.c_str()), @@ -1275,141 +1275,141 @@ common_params_context common_params_parser_init(common_params & params, llama_ex // TODO: get the type right here params.cache_type_v = value; } - ).set_env("LLAMA_ARG_CACHE_TYPE_V")); + ).set_env("JARVIS_ARG_CACHE_TYPE_V")); add_opt(common_arg( {"--perplexity", "--all-logits"}, string_format("return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false"), [](common_params & params) { params.logits_all = true; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--hellaswag"}, "compute HellaSwag score over random tasks from datafile supplied with -f", [](common_params & params) { params.hellaswag = true; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--hellaswag-tasks"}, "N", string_format("number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks), [](common_params & params, int value) { params.hellaswag_tasks = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--winogrande"}, "compute Winogrande score over random tasks from datafile supplied with -f", [](common_params & params) { params.winogrande = true; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--winogrande-tasks"}, "N", string_format("number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks), [](common_params & params, int value) { 
params.winogrande_tasks = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--multiple-choice"}, "compute multiple choice score over random tasks from datafile supplied with -f", [](common_params & params) { params.multiple_choice = true; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--multiple-choice-tasks"}, "N", string_format("number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks), [](common_params & params, int value) { params.multiple_choice_tasks = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--kl-divergence"}, "computes KL-divergence to logits provided via --kl-divergence-base", [](common_params & params) { params.kl_divergence = true; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--save-all-logits", "--kl-divergence-base"}, "FNAME", "set logits file", [](common_params & params, const std::string & value) { params.logits_file = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--ppl-stride"}, "N", string_format("stride for perplexity calculation (default: %d)", params.ppl_stride), [](common_params & params, int value) { params.ppl_stride = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"--ppl-output-type"}, "<0|1>", string_format("output type for perplexity calculation (default: %d)", params.ppl_output_type), [](common_params & params, int value) { params.ppl_output_type = value; } - ).set_examples({LLAMA_EXAMPLE_PERPLEXITY})); + ).set_examples({JARVIS_EXAMPLE_PERPLEXITY})); add_opt(common_arg( {"-dt", "--defrag-thold"}, "N", string_format("KV cache 
defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold), [](common_params & params, const std::string & value) { params.defrag_thold = std::stof(value); } - ).set_env("LLAMA_ARG_DEFRAG_THOLD")); + ).set_env("JARVIS_ARG_DEFRAG_THOLD")); add_opt(common_arg( {"-np", "--parallel"}, "N", string_format("number of parallel sequences to decode (default: %d)", params.n_parallel), [](common_params & params, int value) { params.n_parallel = value; } - ).set_env("LLAMA_ARG_N_PARALLEL")); + ).set_env("JARVIS_ARG_N_PARALLEL")); add_opt(common_arg( {"-ns", "--sequences"}, "N", string_format("number of sequences to decode (default: %d)", params.n_sequences), [](common_params & params, int value) { params.n_sequences = value; } - ).set_examples({LLAMA_EXAMPLE_PARALLEL})); + ).set_examples({JARVIS_EXAMPLE_PARALLEL})); add_opt(common_arg( {"-cb", "--cont-batching"}, string_format("enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled"), [](common_params & params) { params.cont_batching = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CONT_BATCHING")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_CONT_BATCHING")); add_opt(common_arg( {"-nocb", "--no-cont-batching"}, "disable continuous batching", [](common_params & params) { params.cont_batching = false; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_NO_CONT_BATCHING")); add_opt(common_arg( {"--mmproj"}, "FILE", "path to a multimodal projector file for LLaVA. see examples/llava/README.md", [](common_params & params, const std::string & value) { params.mmproj = value; } - ).set_examples({LLAMA_EXAMPLE_LLAVA})); + ).set_examples({JARVIS_EXAMPLE_LLAVA})); add_opt(common_arg( {"--image"}, "FILE", "path to an image file. use with multimodal models. 
Specify multiple times for batching", [](common_params & params, const std::string & value) { params.image.emplace_back(value); } - ).set_examples({LLAMA_EXAMPLE_LLAVA})); - if (llama_supports_rpc()) { + ).set_examples({JARVIS_EXAMPLE_LLAVA})); + if (jarvis_supports_rpc()) { add_opt(common_arg( {"--rpc"}, "SERVERS", "comma separated list of RPC servers", [](common_params & params, const std::string & value) { params.rpc_servers = value; } - ).set_env("LLAMA_ARG_RPC")); + ).set_env("JARVIS_ARG_RPC")); } add_opt(common_arg( {"--mlock"}, @@ -1417,14 +1417,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.use_mlock = true; } - ).set_env("LLAMA_ARG_MLOCK")); + ).set_env("JARVIS_ARG_MLOCK")); add_opt(common_arg( {"--no-mmap"}, "do not memory-map model (slower load but may reduce pageouts if not using mlock)", [](common_params & params) { params.use_mmap = false; } - ).set_env("LLAMA_ARG_NO_MMAP")); + ).set_env("JARVIS_ARG_NO_MMAP")); add_opt(common_arg( {"--numa"}, "TYPE", "attempt optimizations that help on some NUMA systems\n" @@ -1432,36 +1432,36 @@ common_params_context common_params_parser_init(common_params & params, llama_ex "- isolate: only spawn threads on CPUs on the node that execution started on\n" "- numactl: use the CPU map provided by numactl\n" "if run without this previously, it is recommended to drop the system page cache before using this\n" - "see https://github.com/ggerganov/llama.cpp/issues/1437", + "see https://github.com/ggerganov/llama.cpp/issues/1437", [](common_params & params, const std::string & value) { /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { throw std::invalid_argument("invalid value"); } } - ).set_env("LLAMA_ARG_NUMA")); + ).set_env("JARVIS_ARG_NUMA"));
add_opt(common_arg( {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N", "number of layers to store in VRAM", [](common_params & params, int value) { params.n_gpu_layers = value; - if (!llama_supports_gpu_offload()) { + if (!jarvis_supports_gpu_offload()) { fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n"); fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); } } - ).set_env("LLAMA_ARG_N_GPU_LAYERS")); + ).set_env("JARVIS_ARG_N_GPU_LAYERS")); add_opt(common_arg( {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N", "number of layers to store in VRAM for the draft model", [](common_params & params, int value) { params.n_gpu_layers_draft = value; - if (!llama_supports_gpu_offload()) { + if (!jarvis_supports_gpu_offload()) { fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n"); fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); } } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-sm", "--split-mode"}, "{none,layer,row}", "how to split the model across multiple GPUs, one of:\n" @@ -1471,23 +1471,23 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { std::string arg_next = value; if (arg_next == "none") { - params.split_mode = LLAMA_SPLIT_MODE_NONE; + params.split_mode = JARVIS_SPLIT_MODE_NONE; } else if (arg_next == "layer") { - params.split_mode = LLAMA_SPLIT_MODE_LAYER; + params.split_mode = JARVIS_SPLIT_MODE_LAYER; } else if (arg_next == "row") { #ifdef GGML_USE_SYCL - fprintf(stderr, "warning: The split mode value:[row] is not supported by llama.cpp with SYCL. It's developing.\nExit!\n"); + fprintf(stderr, "warning: The split mode value:[row] is not supported by jarvis.cpp with SYCL. 
It's developing.\nExit!\n"); exit(1); #endif // GGML_USE_SYCL - params.split_mode = LLAMA_SPLIT_MODE_ROW; + params.split_mode = JARVIS_SPLIT_MODE_ROW; } else { throw std::invalid_argument("invalid value"); } - if (!llama_supports_gpu_offload()) { - fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n"); + if (!jarvis_supports_gpu_offload()) { + fprintf(stderr, "warning: jarvis.cpp was compiled without support for GPU offload. Setting the split mode has no effect.\n"); } } - ).set_env("LLAMA_ARG_SPLIT_MODE")); + ).set_env("JARVIS_ARG_SPLIT_MODE")); add_opt(common_arg( {"-ts", "--tensor-split"}, "N0,N1,N2,...", "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1", @@ -1498,33 +1498,33 @@ common_params_context common_params_parser_init(common_params & params, llama_ex const std::regex regex{ R"([,/]+)" }; std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 }; std::vector split_arg{ it, {} }; - if (split_arg.size() >= llama_max_devices()) { + if (split_arg.size() >= jarvis_max_devices()) { throw std::invalid_argument( - string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)llama_max_devices()) + string_format("got %d input configs, but system only has %d devices", (int)split_arg.size(), (int)jarvis_max_devices()) ); } - for (size_t i = 0; i < llama_max_devices(); ++i) { + for (size_t i = 0; i < jarvis_max_devices(); ++i) { if (i < split_arg.size()) { params.tensor_split[i] = std::stof(split_arg[i]); } else { params.tensor_split[i] = 0.0f; } } - if (!llama_supports_gpu_offload()) { - fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting a tensor split has no effect.\n"); + if (!jarvis_supports_gpu_offload()) { + fprintf(stderr, "warning: jarvis.cpp was compiled without support for GPU offload. 
Setting a tensor split has no effect.\n"); } } - ).set_env("LLAMA_ARG_TENSOR_SPLIT")); + ).set_env("JARVIS_ARG_TENSOR_SPLIT")); add_opt(common_arg( {"-mg", "--main-gpu"}, "INDEX", string_format("the GPU to use for the model (with split-mode = none), or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu), [](common_params & params, int value) { params.main_gpu = value; - if (!llama_supports_gpu_offload()) { - fprintf(stderr, "warning: llama.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n"); + if (!jarvis_supports_gpu_offload()) { + fprintf(stderr, "warning: jarvis.cpp was compiled without support for GPU offload. Setting the main GPU has no effect.\n"); } } - ).set_env("LLAMA_ARG_MAIN_GPU")); + ).set_env("JARVIS_ARG_MAIN_GPU")); add_opt(common_arg( {"--check-tensors"}, string_format("check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false"), @@ -1549,7 +1549,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.lora_adapters.push_back({ std::string(value), 1.0 }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg - ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); + ).set_examples({JARVIS_EXAMPLE_COMMON, JARVIS_EXAMPLE_EXPORT_LORA})); add_opt(common_arg( {"--lora-scaled"}, "FNAME", "SCALE", "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)", @@ -1557,7 +1557,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.lora_adapters.push_back({ fname, std::stof(scale) }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg - ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); + 
).set_examples({JARVIS_EXAMPLE_COMMON, JARVIS_EXAMPLE_EXPORT_LORA})); add_opt(common_arg( {"--control-vector"}, "FNAME", "add a control vector\nnote: this argument can be repeated to add multiple control vectors", @@ -1587,10 +1587,10 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { params.model_alias = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ALIAS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_ALIAS")); add_opt(common_arg( {"-m", "--model"}, "FNAME", - ex == LLAMA_EXAMPLE_EXPORT_LORA + ex == JARVIS_EXAMPLE_EXPORT_LORA ? std::string("model path from which to load base model") : string_format( "model path (default: `models/$filename` with filename from `--hf-file` " @@ -1599,35 +1599,35 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { params.model = value; } - ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL")); + ).set_examples({JARVIS_EXAMPLE_COMMON, JARVIS_EXAMPLE_EXPORT_LORA}).set_env("JARVIS_ARG_MODEL")); add_opt(common_arg( {"-md", "--model-draft"}, "FNAME", "draft model for speculative decoding (default: unused)", [](common_params & params, const std::string & value) { params.model_draft = value; } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + ).set_examples({JARVIS_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-mu", "--model-url"}, "MODEL_URL", "model download url (default: unused)", [](common_params & params, const std::string & value) { params.model_url = value; } - ).set_env("LLAMA_ARG_MODEL_URL")); + ).set_env("JARVIS_ARG_MODEL_URL")); add_opt(common_arg( {"-hfr", "--hf-repo"}, "REPO", "Hugging Face model repository (default: unused)", [](common_params & params, const std::string & value) { params.hf_repo = value; } - ).set_env("LLAMA_ARG_HF_REPO")); + 
).set_env("JARVIS_ARG_HF_REPO")); add_opt(common_arg( {"-hff", "--hf-file"}, "FILE", "Hugging Face model file (default: unused)", [](common_params & params, const std::string & value) { params.hf_file = value; } - ).set_env("LLAMA_ARG_HF_FILE")); + ).set_env("JARVIS_ARG_HF_FILE")); add_opt(common_arg( {"-hft", "--hf-token"}, "TOKEN", "Hugging Face access token (default: value from HF_TOKEN environment variable)", @@ -1645,41 +1645,41 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } params.context_files.push_back(value); } - ).set_examples({LLAMA_EXAMPLE_RETRIEVAL})); + ).set_examples({JARVIS_EXAMPLE_RETRIEVAL})); add_opt(common_arg( {"--chunk-size"}, "N", string_format("minimum length of embedded text chunks (default: %d)", params.chunk_size), [](common_params & params, int value) { params.chunk_size = value; } - ).set_examples({LLAMA_EXAMPLE_RETRIEVAL})); + ).set_examples({JARVIS_EXAMPLE_RETRIEVAL})); add_opt(common_arg( {"--chunk-separator"}, "STRING", string_format("separator between chunks (default: '%s')", params.chunk_separator.c_str()), [](common_params & params, const std::string & value) { params.chunk_separator = value; } - ).set_examples({LLAMA_EXAMPLE_RETRIEVAL})); + ).set_examples({JARVIS_EXAMPLE_RETRIEVAL})); add_opt(common_arg( {"--junk"}, "N", string_format("number of times to repeat the junk text (default: %d)", params.n_junk), [](common_params & params, int value) { params.n_junk = value; } - ).set_examples({LLAMA_EXAMPLE_PASSKEY})); + ).set_examples({JARVIS_EXAMPLE_PASSKEY})); add_opt(common_arg( {"--pos"}, "N", string_format("position of the passkey in the junk text (default: %d)", params.i_pos), [](common_params & params, int value) { params.i_pos = value; } - ).set_examples({LLAMA_EXAMPLE_PASSKEY})); + ).set_examples({JARVIS_EXAMPLE_PASSKEY})); add_opt(common_arg( {"-o", "--output", "--output-file"}, "FNAME", string_format("output file (default: '%s')", - ex == LLAMA_EXAMPLE_EXPORT_LORA + ex == 
JARVIS_EXAMPLE_EXPORT_LORA ? params.lora_outfile.c_str() - : ex == LLAMA_EXAMPLE_CVECTOR_GENERATOR + : ex == JARVIS_EXAMPLE_CVECTOR_GENERATOR ? params.cvector_outfile.c_str() : params.out_file.c_str()), [](common_params & params, const std::string & value) { @@ -1687,49 +1687,49 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.cvector_outfile = value; params.lora_outfile = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX, JARVIS_EXAMPLE_CVECTOR_GENERATOR, JARVIS_EXAMPLE_EXPORT_LORA})); add_opt(common_arg( {"-ofreq", "--output-frequency"}, "N", string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq), [](common_params & params, int value) { params.n_out_freq = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"--save-frequency"}, "N", string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq), [](common_params & params, int value) { params.n_save_freq = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"--process-output"}, string_format("collect data for the output tensor (default: %s)", params.process_output ? "true" : "false"), [](common_params & params) { params.process_output = true; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"--no-ppl"}, string_format("do not compute perplexity (default: %s)", params.compute_ppl ? 
"true" : "false"), [](common_params & params) { params.compute_ppl = false; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"--chunk", "--from-chunk"}, "N", string_format("start processing the input from chunk N (default: %d)", params.i_chunk), [](common_params & params, int value) { params.i_chunk = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX})); + ).set_examples({JARVIS_EXAMPLE_IMATRIX})); add_opt(common_arg( {"-pps"}, string_format("is the prompt shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false"), [](common_params & params) { params.is_pp_shared = true; } - ).set_examples({LLAMA_EXAMPLE_BENCH})); + ).set_examples({JARVIS_EXAMPLE_BENCH})); add_opt(common_arg( {"-npp"}, "n0,n1,...", "number of prompt tokens", @@ -1737,7 +1737,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex auto p = string_split(value, ','); params.n_pp.insert(params.n_pp.end(), p.begin(), p.end()); } - ).set_examples({LLAMA_EXAMPLE_BENCH})); + ).set_examples({JARVIS_EXAMPLE_BENCH})); add_opt(common_arg( {"-ntg"}, "n0,n1,...", "number of text generation tokens", @@ -1745,7 +1745,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex auto p = string_split(value, ','); params.n_tg.insert(params.n_tg.end(), p.begin(), p.end()); } - ).set_examples({LLAMA_EXAMPLE_BENCH})); + ).set_examples({JARVIS_EXAMPLE_BENCH})); add_opt(common_arg( {"-npl"}, "n0,n1,...", "number of parallel prompts", @@ -1753,70 +1753,70 @@ common_params_context common_params_parser_init(common_params & params, llama_ex auto p = string_split(value, ','); params.n_pl.insert(params.n_pl.end(), p.begin(), p.end()); } - ).set_examples({LLAMA_EXAMPLE_BENCH})); + ).set_examples({JARVIS_EXAMPLE_BENCH})); add_opt(common_arg( {"--embd-normalize"}, "N", string_format("normalisation for embeddings (default: %d) (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, 
>2=p-norm)", params.embd_normalize), [](common_params & params, int value) { params.embd_normalize = value; } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); + ).set_examples({JARVIS_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--embd-output-format"}, "FORMAT", "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix", [](common_params & params, const std::string & value) { params.embd_out = value; } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); + ).set_examples({JARVIS_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--embd-separator"}, "STRING", "separator of embeddings (default \\n) for example \"<#sep#>\"", [](common_params & params, const std::string & value) { params.embd_sep = value; } - ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); + ).set_examples({JARVIS_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--host"}, "HOST", string_format("ip address to listen (default: %s)", params.hostname.c_str()), [](common_params & params, const std::string & value) { params.hostname = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_HOST")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_HOST")); add_opt(common_arg( {"--port"}, "PORT", string_format("port to listen (default: %d)", params.port), [](common_params & params, int value) { params.port = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_PORT")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_PORT")); add_opt(common_arg( {"--path"}, "PATH", string_format("path to serve static files from (default: %s)", params.public_path.c_str()), [](common_params & params, const std::string & value) { params.public_path = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_STATIC_PATH")); add_opt(common_arg( {"--embedding", "--embeddings"}, string_format("restrict to only support embedding use case; use only with 
dedicated embedding models (default: %s)", params.embedding ? "enabled" : "disabled"), [](common_params & params) { params.embedding = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_EMBEDDINGS")); add_opt(common_arg( {"--reranking", "--rerank"}, string_format("enable reranking endpoint on server (default: %s)", params.reranking ? "enabled" : "disabled"), [](common_params & params) { params.reranking = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_RERANKING")); add_opt(common_arg( {"--api-key"}, "KEY", "API key to use for authentication (default: none)", [](common_params & params, const std::string & value) { params.api_keys.push_back(value); } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_API_KEY")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_API_KEY")); add_opt(common_arg( {"--api-key-file"}, "FNAME", "path to file containing API keys (default: none)", @@ -1833,21 +1833,21 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } key_file.close(); } - ).set_examples({LLAMA_EXAMPLE_SERVER})); + ).set_examples({JARVIS_EXAMPLE_SERVER})); add_opt(common_arg( {"--ssl-key-file"}, "FNAME", "path to file a PEM-encoded SSL private key", [](common_params & params, const std::string & value) { params.ssl_file_key = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_KEY_FILE")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_SSL_KEY_FILE")); add_opt(common_arg( {"--ssl-cert-file"}, "FNAME", "path to file a PEM-encoded SSL certificate", [](common_params & params, const std::string & value) { params.ssl_file_cert = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_SSL_CERT_FILE")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_SSL_CERT_FILE")); 
add_opt(common_arg( {"-to", "--timeout"}, "N", string_format("server read/write timeout in seconds (default: %d)", params.timeout_read), @@ -1855,49 +1855,49 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.timeout_read = value; params.timeout_write = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_TIMEOUT")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_TIMEOUT")); add_opt(common_arg( {"--threads-http"}, "N", string_format("number of threads used to process HTTP requests (default: %d)", params.n_threads_http), [](common_params & params, int value) { params.n_threads_http = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_THREADS_HTTP")); add_opt(common_arg( {"--cache-reuse"}, "N", string_format("min chunk size to attempt reusing from the cache via KV shifting (default: %d)", params.n_cache_reuse), [](common_params & params, int value) { params.n_cache_reuse = value; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_CACHE_REUSE")); add_opt(common_arg( {"--metrics"}, string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"), [](common_params & params) { params.endpoint_metrics = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_ENDPOINT_METRICS")); add_opt(common_arg( {"--slots"}, string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? 
"enabled" : "disabled"), [](common_params & params) { params.endpoint_slots = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_ENDPOINT_SLOTS")); add_opt(common_arg( {"--props"}, string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"), [](common_params & params) { params.endpoint_props = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_ENDPOINT_PROPS")); add_opt(common_arg( {"--no-slots"}, "disables slots monitoring endpoint", [](common_params & params) { params.endpoint_slots = false; } - ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_ENDPOINT_SLOTS")); + ).set_examples({JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_NO_ENDPOINT_SLOTS")); add_opt(common_arg( {"--slot-save-path"}, "PATH", "path to save slot kv cache (default: disabled)", @@ -1908,44 +1908,44 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.slot_save_path += DIRECTORY_SEPARATOR; } } - ).set_examples({LLAMA_EXAMPLE_SERVER})); + ).set_examples({JARVIS_EXAMPLE_SERVER})); add_opt(common_arg( {"--chat-template"}, "JINJA_TEMPLATE", "set custom jinja chat template (default: template taken from model's metadata)\n" "if suffix/prefix are specified, template will be disabled\n" - "only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template", + "only commonly used templates are accepted:\nhttps://github.com/ggerganov/jarvis.cpp/wiki/Templates-supported-by-jarvis_chat_apply_template", [](common_params & params, const std::string & value) { if (!common_chat_verify_template(value)) { throw std::runtime_error(string_format( "error: the supplied chat template is not supported: %s\n" - "note: llama.cpp does not use jinja 
parser, we only support commonly used templates\n", + "note: jarvis.cpp does not use jinja parser, we only support commonly used templates\n", value.c_str() )); } params.chat_template = value; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE")); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_SERVER}).set_env("JARVIS_ARG_CHAT_TEMPLATE")); add_opt(common_arg( {"-sps", "--slot-prompt-similarity"}, "SIMILARITY", string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity), [](common_params & params, const std::string & value) { params.slot_prompt_similarity = std::stof(value); } - ).set_examples({LLAMA_EXAMPLE_SERVER})); + ).set_examples({JARVIS_EXAMPLE_SERVER})); add_opt(common_arg( {"--lora-init-without-apply"}, string_format("load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? 
"enabled" : "disabled"), [](common_params & params) { params.lora_init_without_apply = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER})); + ).set_examples({JARVIS_EXAMPLE_SERVER})); add_opt(common_arg( {"--simple-io"}, "use basic IO for better compatibility in subprocesses and limited consoles", [](common_params & params) { params.simple_io = true; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL})); + ).set_examples({JARVIS_EXAMPLE_MAIN, JARVIS_EXAMPLE_INFILL})); add_opt(common_arg( {"-ld", "--logdir"}, "LOGDIR", "path under which to save YAML logs (no logging if unset)", @@ -1963,28 +1963,28 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { params.cvector_positive_file = value; } - ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR})); + ).set_examples({JARVIS_EXAMPLE_CVECTOR_GENERATOR})); add_opt(common_arg( {"--negative-file"}, "FNAME", string_format("negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str()), [](common_params & params, const std::string & value) { params.cvector_negative_file = value; } - ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR})); + ).set_examples({JARVIS_EXAMPLE_CVECTOR_GENERATOR})); add_opt(common_arg( {"--pca-batch"}, "N", string_format("batch size used for PCA. 
Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch), [](common_params & params, int value) { params.n_pca_batch = value; } - ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR})); + ).set_examples({JARVIS_EXAMPLE_CVECTOR_GENERATOR})); add_opt(common_arg( {"--pca-iter"}, "N", string_format("number of iterations used for PCA (default: %d)", params.n_pca_iterations), [](common_params & params, int value) { params.n_pca_iterations = value; } - ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR})); + ).set_examples({JARVIS_EXAMPLE_CVECTOR_GENERATOR})); add_opt(common_arg( {"--method"}, "{pca, mean}", "dimensionality reduction method to be used (default: pca)", @@ -1993,7 +1993,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; } else { throw std::invalid_argument("invalid value"); } } - ).set_examples({LLAMA_EXAMPLE_CVECTOR_GENERATOR})); + ).set_examples({JARVIS_EXAMPLE_CVECTOR_GENERATOR})); add_opt(common_arg( {"--output-format"}, "{md,jsonl}", "output format for batched-bench results (default: md)", @@ -2002,7 +2002,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex else if (value == "md") { params.batched_bench_output_jsonl = false; } else { std::invalid_argument("invalid value"); } } - ).set_examples({LLAMA_EXAMPLE_BENCH})); + ).set_examples({JARVIS_EXAMPLE_BENCH})); add_opt(common_arg( {"--log-disable"}, "Log disable", @@ -2023,7 +2023,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params &) { common_log_set_colors(common_log_main(), true); } - ).set_env("LLAMA_LOG_COLORS")); + ).set_env("JARVIS_LOG_COLORS")); add_opt(common_arg( {"-v", "--verbose", "--log-verbose"}, "Set verbosity level to infinity (i.e. 
log all messages, useful for debugging)", @@ -2039,21 +2039,21 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.verbosity = value; common_log_set_verbosity_thold(value); } - ).set_env("LLAMA_LOG_VERBOSITY")); + ).set_env("JARVIS_LOG_VERBOSITY")); add_opt(common_arg( {"--log-prefix"}, "Enable prefx in log messages", [](common_params &) { common_log_set_prefix(common_log_main(), true); } - ).set_env("LLAMA_LOG_PREFIX")); + ).set_env("JARVIS_LOG_PREFIX")); add_opt(common_arg( {"--log-timestamps"}, "Enable timestamps in log messages", [](common_params &) { common_log_set_timestamps(common_log_main(), true); } - ).set_env("LLAMA_LOG_TIMESTAMPS")); + ).set_env("JARVIS_LOG_TIMESTAMPS")); return ctx_arg; } diff --git a/common/arg.h b/common/arg.h index a6700d323cc14..7c6f1eeea3308 100644 --- a/common/arg.h +++ b/common/arg.h @@ -11,7 +11,7 @@ // struct common_arg { - std::set examples = {LLAMA_EXAMPLE_COMMON}; + std::set examples = {JARVIS_EXAMPLE_COMMON}; std::vector args; const char * value_hint = nullptr; // help text or example for arg value const char * value_hint_2 = nullptr; // for second arg value @@ -52,17 +52,17 @@ struct common_arg { void (*handler)(common_params & params, const std::string &, const std::string &) ) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {} - common_arg & set_examples(std::initializer_list examples); + common_arg & set_examples(std::initializer_list examples); common_arg & set_env(const char * env); common_arg & set_sparam(); - bool in_example(enum llama_example ex); + bool in_example(enum jarvis_example ex); bool get_value_from_env(std::string & output); bool has_value_from_env(); std::string to_string(); }; struct common_params_context { - enum llama_example ex = LLAMA_EXAMPLE_COMMON; + enum jarvis_example ex = JARVIS_EXAMPLE_COMMON; common_params & params; std::vector options; void(*print_usage)(int, char **) = nullptr; @@ -71,7 +71,7 @@ 
struct common_params_context { // parse input arguments from CLI // if one argument has invalid value, it will automatically display usage of the specific argument (and not the full usage message) -bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr); +bool common_params_parse(int argc, char ** argv, common_params & params, jarvis_example ex, void(*print_usage)(int, char **) = nullptr); // function to be used by test-arg-parser -common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr); +common_params_context common_params_parser_init(common_params & params, jarvis_example ex, void(*print_usage)(int, char **) = nullptr); diff --git a/common/build-info.cpp.in b/common/build-info.cpp.in index 0b945aa68fff3..aac4ba7e9e33a 100644 --- a/common/build-info.cpp.in +++ b/common/build-info.cpp.in @@ -1,4 +1,4 @@ -int LLAMA_BUILD_NUMBER = @BUILD_NUMBER@; -char const *LLAMA_COMMIT = "@BUILD_COMMIT@"; -char const *LLAMA_COMPILER = "@BUILD_COMPILER@"; -char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@"; +int JARVIS_BUILD_NUMBER = @BUILD_NUMBER@; +char const *JARVIS_COMMIT = "@BUILD_COMMIT@"; +char const *JARVIS_COMPILER = "@BUILD_COMPILER@"; +char const *JARVIS_BUILD_TARGET = "@BUILD_TARGET@"; diff --git a/common/common.cpp b/common/common.cpp index ff8cc4076e95d..fa32f671eb6f5 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -8,7 +8,7 @@ #define JSON_ASSERT GGML_ASSERT #include "json.hpp" #include "json-schema-to-grammar.h" -#include "llama.h" +#include "jarvis.h" #include #include @@ -48,7 +48,7 @@ #include #include #endif -#if defined(LLAMA_USE_CURL) +#if defined(JARVIS_USE_CURL) #include #include #include @@ -58,7 +58,7 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -#if defined(LLAMA_USE_CURL) +#if defined(JARVIS_USE_CURL) #ifdef __linux__ #include #elif defined(_WIN32) @@ -66,8 +66,8 @@ 
#else #include #endif -#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083 -#endif // LLAMA_USE_CURL +#define JARVIS_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083 +#endif // JARVIS_USE_CURL using json = nlohmann::ordered_json; @@ -364,8 +364,8 @@ bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREAD } void common_init() { - llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) { - if (LOG_DEFAULT_LLAMA <= common_log_verbosity_thold) { + jarvis_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) { + if (LOG_DEFAULT_JARVIS <= common_log_verbosity_thold) { common_log_add(common_log_main(), level, "%s", text); } }, NULL); @@ -376,7 +376,7 @@ void common_init() { const char * build_type = " (debug)"; #endif - LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type); + LOG_INF("build: %d (%s) with %s for %s%s\n", JARVIS_BUILD_NUMBER, JARVIS_COMMIT, JARVIS_COMPILER, JARVIS_BUILD_TARGET, build_type); } std::string common_params_get_system_info(const common_params & params) { @@ -389,9 +389,9 @@ std::string common_params_get_system_info(const common_params & params) { #if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later // TODO: windows + arm64 + mingw64 DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); - os << " / " << logicalProcessorCount << " | " << llama_print_system_info(); + os << " / " << logicalProcessorCount << " | " << jarvis_print_system_info(); #else - os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info(); + os << " / " << std::thread::hardware_concurrency() << " | " << jarvis_print_system_info(); #endif return os.str(); @@ -483,7 +483,7 @@ std::string string_from(const std::vector & values) { return buf.str(); } -std::string string_from(const struct llama_context * ctx, 
const std::vector & tokens) { +std::string string_from(const struct jarvis_context * ctx, const std::vector & tokens) { std::stringstream buf; buf << "[ "; @@ -514,7 +514,7 @@ std::string string_from(const struct llama_context * ctx, const std::vector & overrides) { +bool string_parse_kv_override(const char * data, std::vector & overrides) { const char * sep = strchr(data, '='); if (sep == nullptr || sep - data >= 128) { LOG_ERR("%s: malformed KV override '%s'\n", __func__, data); return false; } - llama_model_kv_override kvo; + jarvis_model_kv_override kvo; std::strncpy(kvo.key, data, sep - data); kvo.key[sep - data] = 0; sep++; if (strncmp(sep, "int:", 4) == 0) { sep += 4; - kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT; + kvo.tag = JARVIS_KV_OVERRIDE_TYPE_INT; kvo.val_i64 = std::atol(sep); } else if (strncmp(sep, "float:", 6) == 0) { sep += 6; - kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT; + kvo.tag = JARVIS_KV_OVERRIDE_TYPE_FLOAT; kvo.val_f64 = std::atof(sep); } else if (strncmp(sep, "bool:", 5) == 0) { sep += 5; - kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL; + kvo.tag = JARVIS_KV_OVERRIDE_TYPE_BOOL; if (std::strcmp(sep, "true") == 0) { kvo.val_bool = true; } else if (std::strcmp(sep, "false") == 0) { @@ -617,7 +617,7 @@ bool string_parse_kv_override(const char * data, std::vector 127) { LOG_ERR("%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data); return false; @@ -788,8 +788,8 @@ std::string fs_get_cache_directory() { } return p; }; - if (getenv("LLAMA_CACHE")) { - cache_directory = std::getenv("LLAMA_CACHE"); + if (getenv("JARVIS_CACHE")) { + cache_directory = std::getenv("JARVIS_CACHE"); } else { #ifdef __linux__ if (std::getenv("XDG_CACHE_HOME")) { @@ -803,7 +803,7 @@ std::string fs_get_cache_directory() { cache_directory = std::getenv("LOCALAPPDATA"); #endif // __linux__ cache_directory = ensure_trailing_slash(cache_directory); - cache_directory += "llama.cpp"; + cache_directory += "jarvis.cpp"; } return ensure_trailing_slash(cache_directory); } 
@@ -824,16 +824,16 @@ std::string fs_get_cache_file(const std::string & filename) { // struct common_init_result common_init_from_params(common_params & params) { common_init_result iparams; - auto mparams = common_model_params_to_llama(params); + auto mparams = common_model_params_to_jarvis(params); - llama_model * model = nullptr; + jarvis_model * model = nullptr; if (!params.hf_repo.empty() && !params.hf_file.empty()) { model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams); } else if (!params.model_url.empty()) { model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams); } else { - model = llama_load_model_from_file(params.model.c_str(), mparams); + model = jarvis_load_model_from_file(params.model.c_str(), mparams); } if (model == NULL) { @@ -844,58 +844,58 @@ struct common_init_result common_init_from_params(common_params & params) { if (params.reranking) { bool ok = true; - if (llama_token_bos(model) == LLAMA_TOKEN_NULL) { + if (jarvis_token_bos(model) == JARVIS_TOKEN_NULL) { LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__); ok = false; } - if (llama_token_eos(model) == LLAMA_TOKEN_NULL) { + if (jarvis_token_eos(model) == JARVIS_TOKEN_NULL) { LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__); ok = false; } - if (llama_token_sep(model) == LLAMA_TOKEN_NULL) { + if (jarvis_token_sep(model) == JARVIS_TOKEN_NULL) { LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__); ok = false; } if (!ok) { - llama_free_model(model); + jarvis_free_model(model); return iparams; } } - auto cparams = common_context_params_to_llama(params); + auto cparams = common_context_params_to_jarvis(params); - llama_context * lctx = llama_new_context_with_model(model, cparams); + jarvis_context * lctx = 
jarvis_new_context_with_model(model, cparams); if (lctx == NULL) { LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str()); - llama_free_model(model); + jarvis_free_model(model); return iparams; } if (!params.control_vectors.empty()) { if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1; - if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model); + if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = jarvis_n_layer(model); const auto cvec = common_control_vector_load(params.control_vectors); if (cvec.n_embd == -1) { - llama_free(lctx); - llama_free_model(model); + jarvis_free(lctx); + jarvis_free_model(model); return iparams; } - int err = llama_control_vector_apply(lctx, + int err = jarvis_control_vector_apply(lctx, cvec.data.data(), cvec.data.size(), cvec.n_embd, params.control_vector_layer_start, params.control_vector_layer_end); if (err) { - llama_free(lctx); - llama_free_model(model); + jarvis_free(lctx); + jarvis_free_model(model); return iparams; } @@ -906,11 +906,11 @@ struct common_init_result common_init_from_params(common_params & params) { common_lora_adapter_container loaded_la; loaded_la.path = la.path; loaded_la.scale = la.scale; - loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str()); + loaded_la.adapter = jarvis_lora_adapter_init(model, la.path.c_str()); if (loaded_la.adapter == nullptr) { LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); - llama_free(lctx); - llama_free_model(model); + jarvis_free(lctx); + jarvis_free_model(model); return iparams; } iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters @@ -919,7 +919,7 @@ struct common_init_result common_init_from_params(common_params & params) { common_lora_adapters_apply(lctx, iparams.lora_adapters); } - if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { + if 
(params.sparams.ignore_eos && jarvis_token_eos(model) == JARVIS_TOKEN_NULL) { LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__); params.sparams.ignore_eos = false; } @@ -927,35 +927,35 @@ struct common_init_result common_init_from_params(common_params & params) { if (params.warmup) { LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__); - std::vector tmp; - llama_token bos = llama_token_bos(model); - llama_token eos = llama_token_eos(model); + std::vector tmp; + jarvis_token bos = jarvis_token_bos(model); + jarvis_token eos = jarvis_token_eos(model); // some models (e.g. T5) don't have a BOS token - if (bos != LLAMA_TOKEN_NULL) { + if (bos != JARVIS_TOKEN_NULL) { tmp.push_back(bos); } - if (eos != LLAMA_TOKEN_NULL) { + if (eos != JARVIS_TOKEN_NULL) { tmp.push_back(eos); } if (tmp.empty()) { tmp.push_back(0); } - if (llama_model_has_encoder(model)) { - llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size())); - llama_token decoder_start_token_id = llama_model_decoder_start_token(model); + if (jarvis_model_has_encoder(model)) { + jarvis_encode(lctx, jarvis_batch_get_one(tmp.data(), tmp.size())); + jarvis_token decoder_start_token_id = jarvis_model_decoder_start_token(model); if (decoder_start_token_id == -1) { decoder_start_token_id = bos; } tmp.clear(); tmp.push_back(decoder_start_token_id); } - if (llama_model_has_decoder(model)) { - llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch))); + if (jarvis_model_has_decoder(model)) { + jarvis_decode(lctx, jarvis_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch))); } - llama_kv_cache_clear(lctx); - llama_synchronize(lctx); - llama_perf_context_reset(lctx); + jarvis_kv_cache_clear(lctx); + jarvis_synchronize(lctx); + jarvis_perf_context_reset(lctx); } iparams.model = model; @@ -964,17 +964,17 @@ struct common_init_result 
common_init_from_params(common_params & params) { return iparams; } -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters) { - llama_lora_adapter_clear(ctx); +void common_lora_adapters_apply(struct jarvis_context * ctx, std::vector & lora_adapters) { + jarvis_lora_adapter_clear(ctx); for (auto & la : lora_adapters) { if (la.scale != 0.0f) { - llama_lora_adapter_set(ctx, la.adapter, la.scale); + jarvis_lora_adapter_set(ctx, la.adapter, la.scale); } } } -struct llama_model_params common_model_params_to_llama(const common_params & params) { - auto mparams = llama_model_default_params(); +struct jarvis_model_params common_model_params_to_jarvis(const common_params & params) { + auto mparams = jarvis_model_default_params(); if (params.n_gpu_layers != -1) { mparams.n_gpu_layers = params.n_gpu_layers; @@ -1025,8 +1025,8 @@ static ggml_type kv_cache_type_from_str(const std::string & s) { throw std::runtime_error("Unsupported cache type: " + s); } -struct llama_context_params common_context_params_to_llama(const common_params & params) { - auto cparams = llama_context_default_params(); +struct jarvis_context_params common_context_params_to_jarvis(const common_params & params) { + auto cparams = jarvis_context_default_params(); cparams.n_ctx = params.n_ctx; cparams.n_seq_max = params.n_parallel; @@ -1056,7 +1056,7 @@ struct llama_context_params common_context_params_to_llama(const common_params & if (params.reranking) { cparams.embeddings = true; - cparams.pooling_type = LLAMA_POOLING_TYPE_RANK; + cparams.pooling_type = JARVIS_POOLING_TYPE_RANK; } cparams.type_k = kv_cache_type_from_str(params.cache_type_k); @@ -1081,7 +1081,7 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p return tpp; } -#ifdef LLAMA_USE_CURL +#ifdef JARVIS_USE_CURL #define CURL_MAX_RETRY 3 #define CURL_RETRY_DELAY_SECONDS 2 @@ -1279,7 +1279,7 @@ static bool common_download_file(const std::string & url, const std::string & pa 
curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 0L); // helper function to hide password in URL - auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string { + auto jarvis_download_hide_password_in_url = [](const std::string & url) -> std::string { std::size_t protocol_pos = url.find("://"); if (protocol_pos == std::string::npos) { return url; // Malformed URL @@ -1295,7 +1295,7 @@ static bool common_download_file(const std::string & url, const std::string & pa // start the download LOG_INF("%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__, - llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str()); + jarvis_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str()); bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS); if (!was_perform_successful) { return false; @@ -1329,11 +1329,11 @@ static bool common_download_file(const std::string & url, const std::string & pa return true; } -struct llama_model * common_load_model_from_url( +struct jarvis_model * common_load_model_from_url( const char * model_url, const char * path_model, const char * hf_token, - const struct llama_model_params & params) { + const struct jarvis_model_params & params) { // Basic validation of the model_url if (!model_url || strlen(model_url) == 0) { LOG_ERR("%s: invalid model_url\n", __func__); @@ -1367,17 +1367,17 @@ struct llama_model * common_load_model_from_url( if (n_split > 1) { char split_prefix[PATH_MAX] = {0}; - char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0}; + char split_url_prefix[JARVIS_CURL_MAX_URL_LENGTH] = {0}; // Verify the first split file format // and extract split URL and PATH prefixes { - if (!llama_split_prefix(split_prefix, sizeof(split_prefix), path_model, 0, n_split)) { + if (!jarvis_split_prefix(split_prefix, 
sizeof(split_prefix), path_model, 0, n_split)) { LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, path_model, n_split); return NULL; } - if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) { + if (!jarvis_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) { LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url, n_split); return NULL; } @@ -1388,10 +1388,10 @@ struct llama_model * common_load_model_from_url( for (int idx = 1; idx < n_split; idx++) { futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split, hf_token](int download_idx) -> bool { char split_path[PATH_MAX] = {0}; - llama_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split); + jarvis_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split); - char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0}; - llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split); + char split_url[JARVIS_CURL_MAX_URL_LENGTH] = {0}; + jarvis_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split); return common_download_file(split_url, split_path, hf_token); }, idx)); @@ -1405,19 +1405,19 @@ struct llama_model * common_load_model_from_url( } } - return llama_load_model_from_file(path_model, params); + return jarvis_load_model_from_file(path_model, params); } -struct llama_model * common_load_model_from_hf( +struct jarvis_model * common_load_model_from_hf( const char * repo, const char * model, const char * path_model, const char * hf_token, - const struct llama_model_params & params) { + const struct jarvis_model_params & params) { // construct hugging face model url: // - // --repo ggml-org/models --file tinyllama-1.1b/ggml-model-f16.gguf - // https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf + // --repo ggml-org/models --file 
tinyjarvis-1.1b/ggml-model-f16.gguf + // https://huggingface.co/ggml-org/models/resolve/main/tinyjarvis-1.1b/ggml-model-f16.gguf // // --repo TheBloke/Mixtral-8x7B-v0.1-GGUF --file mixtral-8x7b-v0.1.Q4_K_M.gguf // https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/resolve/main/mixtral-8x7b-v0.1.Q4_K_M.gguf @@ -1433,42 +1433,42 @@ struct llama_model * common_load_model_from_hf( #else -struct llama_model * common_load_model_from_url( +struct jarvis_model * common_load_model_from_url( const char * /*model_url*/, const char * /*path_model*/, const char * /*hf_token*/, - const struct llama_model_params & /*params*/) { - LOG_WRN("%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__); + const struct jarvis_model_params & /*params*/) { + LOG_WRN("%s: jarvis.cpp built without libcurl, downloading from an url not supported.\n", __func__); return nullptr; } -struct llama_model * common_load_model_from_hf( +struct jarvis_model * common_load_model_from_hf( const char * /*repo*/, const char * /*model*/, const char * /*path_model*/, const char * /*hf_token*/, - const struct llama_model_params & /*params*/) { - LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__); + const struct jarvis_model_params & /*params*/) { + LOG_WRN("%s: jarvis.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__); return nullptr; } -#endif // LLAMA_USE_CURL +#endif // JARVIS_USE_CURL // // Batch utils // -void common_batch_clear(struct llama_batch & batch) { +void common_batch_clear(struct jarvis_batch & batch) { batch.n_tokens = 0; } void common_batch_add( - struct llama_batch & batch, - llama_token id, - llama_pos pos, - const std::vector & seq_ids, + struct jarvis_batch & batch, + jarvis_token id, + jarvis_pos pos, + const std::vector & seq_ids, bool logits) { - GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded"); + GGML_ASSERT(batch.seq_id[batch.n_tokens] && 
"jarvis_batch size exceeded"); batch.token [batch.n_tokens] = id; batch.pos [batch.n_tokens] = pos; @@ -1485,26 +1485,26 @@ void common_batch_add( // Vocab utils // -std::vector common_tokenize( - const struct llama_context * ctx, +std::vector common_tokenize( + const struct jarvis_context * ctx, const std::string & text, bool add_special, bool parse_special) { - return common_tokenize(llama_get_model(ctx), text, add_special, parse_special); + return common_tokenize(jarvis_get_model(ctx), text, add_special, parse_special); } -std::vector common_tokenize( - const struct llama_model * model, +std::vector common_tokenize( + const struct jarvis_model * model, const std::string & text, bool add_special, bool parse_special) { // upper limit for the number of tokens int n_tokens = text.length() + 2 * add_special; - std::vector result(n_tokens); - n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + std::vector result(n_tokens); + n_tokens = jarvis_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); if (n_tokens < 0) { result.resize(-n_tokens); - int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + int check = jarvis_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); GGML_ASSERT(check == -n_tokens); } else { result.resize(n_tokens); @@ -1512,13 +1512,13 @@ std::vector common_tokenize( return result; } -std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) { +std::string common_token_to_piece(const struct jarvis_context * ctx, jarvis_token token, bool special) { std::string piece; piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n' - const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special); + const int n_chars = 
jarvis_token_to_piece(jarvis_get_model(ctx), token, &piece[0], piece.size(), 0, special); if (n_chars < 0) { piece.resize(-n_chars); - int check = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special); + int check = jarvis_token_to_piece(jarvis_get_model(ctx), token, &piece[0], piece.size(), 0, special); GGML_ASSERT(check == -n_chars); } else { @@ -1528,13 +1528,13 @@ std::string common_token_to_piece(const struct llama_context * ctx, llama_token return piece; } -std::string common_detokenize(llama_context * ctx, const std::vector & tokens, bool special) { +std::string common_detokenize(jarvis_context * ctx, const std::vector & tokens, bool special) { std::string text; text.resize(std::max(text.capacity(), tokens.size())); - int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); + int32_t n_chars = jarvis_detokenize(jarvis_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); if (n_chars < 0) { text.resize(-n_chars); - n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); + n_chars = jarvis_detokenize(jarvis_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization } @@ -1549,18 +1549,18 @@ std::string common_detokenize(llama_context * ctx, const std::vector= 0; } -std::string common_chat_apply_template(const struct llama_model * model, +std::string common_chat_apply_template(const struct jarvis_model * model, const std::string & tmpl, const std::vector & msgs, bool add_ass) { int alloc_size = 0; bool fallback = false; // indicate if we must fallback to default chatml - std::vector chat; + std::vector chat; for (auto & msg : msgs) { 
chat.push_back({msg.role.c_str(), msg.content.c_str()}); alloc_size += (msg.role.size() + msg.content.size()) * 1.25; @@ -1570,17 +1570,17 @@ std::string common_chat_apply_template(const struct llama_model * model, std::vector buf(alloc_size); // run the first time to get the total output length - int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); + int32_t res = jarvis_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); // error: chat template is not supported if (res < 0) { if (ptr_tmpl != nullptr) { // if the custom "tmpl" is not supported, we throw an error - // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template() + // this is a bit redundant (for good), since we're not sure if user validated the custom template with jarvis_chat_verify_template() throw std::runtime_error("this custom template is not supported"); } else { // If the built-in template is not supported, we default to chatml - res = llama_chat_apply_template(nullptr, "chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size()); + res = jarvis_chat_apply_template(nullptr, "chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size()); fallback = true; } } @@ -1588,7 +1588,7 @@ std::string common_chat_apply_template(const struct llama_model * model, // if it turns out that our buffer is too small, we resize it if ((size_t) res > buf.size()) { buf.resize(res); - res = llama_chat_apply_template( + res = jarvis_chat_apply_template( fallback ? nullptr : model, fallback ? 
"chatml" : ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); @@ -1598,7 +1598,7 @@ std::string common_chat_apply_template(const struct llama_model * model, return formatted_chat; } -std::string common_chat_format_single(const struct llama_model * model, +std::string common_chat_format_single(const struct jarvis_model * model, const std::string & tmpl, const std::vector & past_msg, const common_chat_msg & new_msg, @@ -1618,7 +1618,7 @@ std::string common_chat_format_single(const struct llama_model * model, return ss.str(); } -std::string common_chat_format_example(const struct llama_model * model, +std::string common_chat_format_example(const struct jarvis_model * model, const std::string & tmpl) { std::vector msgs = { {"system", "You are a helpful assistant"}, @@ -1633,14 +1633,14 @@ std::string common_chat_format_example(const struct llama_model * model, // KV cache utils // -void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) { +void common_kv_cache_dump_view(const jarvis_kv_cache_view & view, int row_size) { static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+"; printf("=== Dumping KV cache. 
total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d", view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx); - llama_kv_cache_view_cell * c_curr = view.cells; - llama_seq_id * cs_curr = view.cells_sequences; + jarvis_kv_cache_view_cell * c_curr = view.cells; + jarvis_seq_id * cs_curr = view.cells_sequences; for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) { if (i % row_size == 0) { @@ -1656,15 +1656,15 @@ void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) { printf("\n=== Done dumping\n"); } -void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) { +void common_kv_cache_dump_view_seqs(const jarvis_kv_cache_view & view, int row_size) { static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n", view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx); - std::unordered_map seqs; - llama_kv_cache_view_cell * c_curr = view.cells; - llama_seq_id * cs_curr = view.cells_sequences; + std::unordered_map seqs; + jarvis_kv_cache_view_cell * c_curr = view.cells; + jarvis_seq_id * cs_curr = view.cells_sequences; for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) { for (int j = 0; j < view.n_seq_max; j++) { @@ -1949,12 +1949,12 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha } } -void yaml_dump_non_result_info(FILE * stream, const common_params & params, const llama_context * lctx, +void yaml_dump_non_result_info(FILE * stream, const common_params & params, const jarvis_context * lctx, const std::string & timestamp, const std::vector & prompt_tokens, const char * model_desc) { 
const auto & sparams = params.sparams; - fprintf(stream, "build_commit: %s\n", LLAMA_COMMIT); - fprintf(stream, "build_number: %d\n", LLAMA_BUILD_NUMBER); + fprintf(stream, "build_commit: %s\n", JARVIS_COMMIT); + fprintf(stream, "build_number: %d\n", JARVIS_BUILD_NUMBER); fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false"); fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false"); fprintf(stream, "cpu_has_avx_vnni: %s\n", ggml_cpu_has_avx_vnni() ? "true" : "false"); @@ -1985,7 +1985,7 @@ void yaml_dump_non_result_info(FILE * stream, const common_params & params, cons #endif // NDEBUG fprintf(stream, "model_desc: %s\n", model_desc); - fprintf(stream, "n_vocab: %d # output size of the final layer, 32001 for some models\n", llama_n_vocab(llama_get_model(lctx))); + fprintf(stream, "n_vocab: %d # output size of the final layer, 32001 for some models\n", jarvis_n_vocab(jarvis_get_model(lctx))); #ifdef __OPTIMIZE__ fprintf(stream, "optimize: true\n"); @@ -2087,7 +2087,7 @@ void yaml_dump_non_result_info(FILE * stream, const common_params & params, cons fprintf(stream, "flash_attn: %s # default: false\n", params.flash_attn ? "true" : "false"); fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp); - const std::vector tensor_split_vector(params.tensor_split, params.tensor_split + llama_max_devices()); + const std::vector tensor_split_vector(params.tensor_split, params.tensor_split + jarvis_max_devices()); yaml_dump_vector_float(stream, "tensor_split", tensor_split_vector); fprintf(stream, "tfs: %f # default: 1.0\n", sparams.tfs_z); diff --git a/common/common.h b/common/common.h index 18b2121ed89b0..e3e41053ff3d6 100644 --- a/common/common.h +++ b/common/common.h @@ -2,7 +2,7 @@ #pragma once -#include "llama.h" +#include "jarvis.h" #include #include @@ -18,8 +18,8 @@ #define die_fmt(fmt, ...) 
do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0) #define print_build_info() do { \ - fprintf(stderr, "%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); \ - fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \ + fprintf(stderr, "%s: build = %d (%s)\n", __func__, JARVIS_BUILD_NUMBER, JARVIS_COMMIT); \ + fprintf(stderr, "%s: built with %s for %s\n", __func__, JARVIS_COMPILER, JARVIS_BUILD_TARGET); \ } while(0) #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf" @@ -30,14 +30,14 @@ struct common_lora_adapter_info { }; struct common_lora_adapter_container : common_lora_adapter_info { - struct llama_lora_adapter * adapter; + struct jarvis_lora_adapter * adapter; }; // build info -extern int LLAMA_BUILD_NUMBER; -extern char const * LLAMA_COMMIT; -extern char const * LLAMA_COMPILER; -extern char const * LLAMA_BUILD_TARGET; +extern int JARVIS_BUILD_NUMBER; +extern char const * JARVIS_COMMIT; +extern char const * JARVIS_COMPILER; +extern char const * JARVIS_BUILD_TARGET; struct common_control_vector_load_info; @@ -61,25 +61,25 @@ int32_t cpu_get_num_math(); // Common params // -enum llama_example { - LLAMA_EXAMPLE_COMMON, - LLAMA_EXAMPLE_SPECULATIVE, - LLAMA_EXAMPLE_MAIN, - LLAMA_EXAMPLE_INFILL, - LLAMA_EXAMPLE_EMBEDDING, - LLAMA_EXAMPLE_PERPLEXITY, - LLAMA_EXAMPLE_RETRIEVAL, - LLAMA_EXAMPLE_PASSKEY, - LLAMA_EXAMPLE_IMATRIX, - LLAMA_EXAMPLE_BENCH, - LLAMA_EXAMPLE_SERVER, - LLAMA_EXAMPLE_CVECTOR_GENERATOR, - LLAMA_EXAMPLE_EXPORT_LORA, - LLAMA_EXAMPLE_LLAVA, - LLAMA_EXAMPLE_LOOKUP, - LLAMA_EXAMPLE_PARALLEL, - - LLAMA_EXAMPLE_COUNT, +enum jarvis_example { + JARVIS_EXAMPLE_COMMON, + JARVIS_EXAMPLE_SPECULATIVE, + JARVIS_EXAMPLE_MAIN, + JARVIS_EXAMPLE_INFILL, + JARVIS_EXAMPLE_EMBEDDING, + JARVIS_EXAMPLE_PERPLEXITY, + JARVIS_EXAMPLE_RETRIEVAL, + JARVIS_EXAMPLE_PASSKEY, + JARVIS_EXAMPLE_IMATRIX, + JARVIS_EXAMPLE_BENCH, + JARVIS_EXAMPLE_SERVER, + JARVIS_EXAMPLE_CVECTOR_GENERATOR, + 
JARVIS_EXAMPLE_EXPORT_LORA, + JARVIS_EXAMPLE_LLAVA, + JARVIS_EXAMPLE_LOOKUP, + JARVIS_EXAMPLE_PARALLEL, + + JARVIS_EXAMPLE_COUNT, }; enum common_sampler_type { @@ -103,7 +103,7 @@ enum dimre_method { // sampler parameters struct common_sampler_params { - uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler + uint32_t seed = JARVIS_DEFAULT_SEED; // the seed used to initialize jarvis_sampler int32_t n_prev = 64; // number of previous tokens to remember int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens. @@ -149,7 +149,7 @@ struct common_sampler_params { std::string grammar; // optional BNF-like grammar to constrain sampling - std::vector logit_bias; // logit biases to apply + std::vector logit_bias; // logit biases to apply // print the parameters into a string std::string print() const; @@ -192,10 +192,10 @@ struct common_params { ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED; - enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs - enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED; - enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings - enum llama_attention_type attention_type = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings + enum jarvis_split_mode split_mode = JARVIS_SPLIT_MODE_LAYER; // how to split the model across GPUs + enum jarvis_rope_scaling_type rope_scaling_type = JARVIS_ROPE_SCALING_TYPE_UNSPECIFIED; + enum jarvis_pooling_type pooling_type = JARVIS_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings + enum jarvis_attention_type attention_type = JARVIS_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings struct common_sampler_params sparams; @@ -219,9 +219,9 @@ struct common_params { std::vector in_files; // all input files std::vector antiprompt; // strings upon which more user input is prompted (a.k.a. 
reverse prompts) - std::vector kv_overrides; + std::vector kv_overrides; - bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply) + bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using jarvis_lora_adapter_apply) std::vector lora_adapters; // lora adapter path with user defined scale std::vector control_vectors; // control vector with user defined scale @@ -377,15 +377,15 @@ bool set_process_priority(enum ggml_sched_priority prio); #ifdef __GNUC__ #ifdef __MINGW32__ -#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +#define JARVIS_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) #else -#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) +#define JARVIS_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) #endif #else -#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) +#define JARVIS_COMMON_ATTRIBUTE_FORMAT(...) 
#endif -LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2) +JARVIS_COMMON_ATTRIBUTE_FORMAT(1, 2) std::string string_format(const char * fmt, ...); std::string string_strip(const std::string & str); @@ -424,13 +424,13 @@ std::vector string_split(const std::string & input, ch return parts; } -bool string_parse_kv_override(const char * data, std::vector & overrides); +bool string_parse_kv_override(const char * data, std::vector & overrides); void string_process_escapes(std::string & input); std::string string_from(bool value); std::string string_from(const std::vector & values); -std::string string_from(const struct llama_context * ctx, const std::vector & tokens); -std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch); +std::string string_from(const struct jarvis_context * ctx, const std::vector & tokens); +std::string string_from(const struct jarvis_context * ctx, const struct jarvis_batch & batch); // // Filesystem utils @@ -447,32 +447,32 @@ std::string fs_get_cache_file(const std::string & filename); // struct common_init_result { - struct llama_model * model = nullptr; - struct llama_context * context = nullptr; + struct jarvis_model * model = nullptr; + struct jarvis_context * context = nullptr; std::vector lora_adapters; }; struct common_init_result common_init_from_params(common_params & params); -struct llama_model_params common_model_params_to_llama (const common_params & params); -struct llama_context_params common_context_params_to_llama(const common_params & params); +struct jarvis_model_params common_model_params_to_jarvis (const common_params & params); +struct jarvis_context_params common_context_params_to_jarvis(const common_params & params); struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params); -struct llama_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params); -struct llama_model * 
common_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params); +struct jarvis_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct jarvis_model_params & params); +struct jarvis_model * common_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct jarvis_model_params & params); // clear LoRA adapters from context, then apply new list of adapters -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters); +void common_lora_adapters_apply(struct jarvis_context * ctx, std::vector & lora_adapters); // Batch utils -void common_batch_clear(struct llama_batch & batch); +void common_batch_clear(struct jarvis_batch & batch); void common_batch_add( - struct llama_batch & batch, - llama_token id, - llama_pos pos, - const std::vector & seq_ids, + struct jarvis_batch & batch, + jarvis_token id, + jarvis_pos pos, + const std::vector & seq_ids, bool logits); // @@ -481,14 +481,14 @@ void common_batch_add( // tokenizes a string into a vector of tokens // should work similar to Python's `tokenizer.encode` -std::vector common_tokenize( - const struct llama_context * ctx, +std::vector common_tokenize( + const struct jarvis_context * ctx, const std::string & text, bool add_special, bool parse_special = false); -std::vector common_tokenize( - const struct llama_model * model, +std::vector common_tokenize( + const struct jarvis_model * model, const std::string & text, bool add_special, bool parse_special = false); @@ -496,23 +496,23 @@ std::vector common_tokenize( // tokenizes a token into a piece, optionally renders special/control tokens // should work similar to Python's `tokenizer.id_to_piece` std::string common_token_to_piece( - const struct llama_context * ctx, - llama_token token, + const struct jarvis_context * ctx, + jarvis_token 
token, bool special = true); // detokenizes a vector of tokens into a string // should work similar to Python's `tokenizer.decode` // optionally renders special/control tokens std::string common_detokenize( - llama_context * ctx, - const std::vector & tokens, + jarvis_context * ctx, + const std::vector & tokens, bool special = true); // // Chat template utils // -// same with llama_chat_message, but uses std::string +// same with jarvis_chat_message, but uses std::string struct common_chat_msg { std::string role; std::string content; @@ -521,23 +521,23 @@ struct common_chat_msg { // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid bool common_chat_verify_template(const std::string & tmpl); -// CPP wrapper for llama_chat_apply_template +// CPP wrapper for jarvis_chat_apply_template // If the built-in template is not supported, we default to chatml // If the custom "tmpl" is not supported, we throw an error -std::string common_chat_apply_template(const struct llama_model * model, +std::string common_chat_apply_template(const struct jarvis_model * model, const std::string & tmpl, const std::vector & chat, bool add_ass); // Format single message, while taking into account the position of that message in chat history -std::string common_chat_format_single(const struct llama_model * model, +std::string common_chat_format_single(const struct jarvis_model * model, const std::string & tmpl, const std::vector & past_msg, const common_chat_msg & new_msg, bool add_ass); // Returns an example of formatted chat -std::string common_chat_format_example(const struct llama_model * model, +std::string common_chat_format_example(const struct jarvis_model * model, const std::string & tmpl); // @@ -545,10 +545,10 @@ std::string common_chat_format_example(const struct llama_model * model, // // Dump the KV cache view with the number of sequences per cell. 
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80); +void common_kv_cache_dump_view(const jarvis_kv_cache_view & view, int row_size = 80); // Dump the KV cache view showing individual sequences in each cell (long output). -void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40); +void common_kv_cache_dump_view_seqs(const jarvis_kv_cache_view & view, int row_size = 40); // // Embedding utils @@ -596,5 +596,5 @@ void yaml_dump_vector_int (FILE * stream, const char * prop_name, const std void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const char * data); void yaml_dump_non_result_info( - FILE * stream, const common_params & params, const llama_context * lctx, + FILE * stream, const common_params & params, const jarvis_context * lctx, const std::string & timestamp, const std::vector & prompt_tokens, const char * model_desc); diff --git a/common/console.cpp b/common/console.cpp index 078a8d678d933..d7c1d46d8dd09 100644 --- a/common/console.cpp +++ b/common/console.cpp @@ -435,7 +435,7 @@ namespace console { fputc('\n', out); has_more = !has_more; } else { - // llama will just eat the single space, it won't act as a space + // jarvis will just eat the single space, it won't act as a space if (line.length() == 1 && line.back() == ' ') { line.clear(); pop_cursor(); diff --git a/common/json.hpp b/common/json.hpp index a858728c4ceb8..a6f53f0b45aca 100644 --- a/common/json.hpp +++ b/common/json.hpp @@ -5336,7 +5336,7 @@ template class iteration_proxy }; // Structured Bindings Support -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// For further reference see https://blog.tartanjarvis.xyz/structured-bindings/ // And see https://github.com/nlohmann/json/pull/1391 template = 0> auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.key()) @@ -5344,7 +5344,7 @@ auto get(const nlohmann::detail::iteration_proxy_value& i) -> decl return 
i.key(); } // Structured Bindings Support -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// For further reference see https://blog.tartanjarvis.xyz/structured-bindings/ // And see https://github.com/nlohmann/json/pull/1391 template = 0> auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.value()) @@ -5357,7 +5357,7 @@ NLOHMANN_JSON_NAMESPACE_END // The Addition to the STD Namespace is required to add // Structured Bindings Support to the iteration_proxy_value class -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// For further reference see https://blog.tartanjarvis.xyz/structured-bindings/ // And see https://github.com/nlohmann/json/pull/1391 namespace std { diff --git a/common/log.cpp b/common/log.cpp index 04c7c0ed10595..3b022ad9ff3c6 100644 --- a/common/log.cpp +++ b/common/log.cpp @@ -8,7 +8,7 @@ #include #include -int common_log_verbosity_thold = LOG_DEFAULT_LLAMA; +int common_log_verbosity_thold = LOG_DEFAULT_JARVIS; void common_log_set_verbosity_thold(int verbosity) { common_log_verbosity_thold = verbosity; diff --git a/common/log.h b/common/log.h index 66605cc69a314..37d7a0146f5d1 100644 --- a/common/log.h +++ b/common/log.h @@ -11,7 +11,7 @@ #endif #define LOG_DEFAULT_DEBUG 1 -#define LOG_DEFAULT_LLAMA 0 +#define LOG_DEFAULT_JARVIS 0 // needed by the LOG_TMPL macro to avoid computing log arguments if the verbosity lower // set via common_log_set_verbosity() diff --git a/common/ngram-cache.cpp b/common/ngram-cache.cpp index a9dfb67142528..c1576b136fccd 100644 --- a/common/ngram-cache.cpp +++ b/common/ngram-cache.cpp @@ -9,7 +9,7 @@ #include void common_ngram_cache_update(common_ngram_cache & ngram_cache, int ngram_min, int ngram_max, - std::vector & inp, int nnew, bool print_progress) { + std::vector & inp, int nnew, bool print_progress) { const int64_t t_start_ms = ggml_time_ms(); const int64_t inp_size = inp.size(); @@ -21,7 +21,7 @@ void 
common_ngram_cache_update(common_ngram_cache & ngram_cache, int ngram_min, for (int64_t i = i_start; i < inp_size; ++i) { const int64_t ngram_start = i - ngram_size; common_ngram ngram(&inp[ngram_start], ngram_size); - const llama_token token = inp[i]; + const jarvis_token token = inp[i]; common_ngram_cache::iterator part_it = ngram_cache.find(ngram); if (part_it == ngram_cache.end()) { @@ -51,18 +51,18 @@ void common_ngram_cache_update(common_ngram_cache & ngram_cache, int ngram_min, } // Helper function to get a token from the combined, speculative sequence of inp and draft. -static llama_token get_token(const std::vector & inp, const std::vector & draft, const size_t i) { +static jarvis_token get_token(const std::vector & inp, const std::vector & draft, const size_t i) { return i < inp.size() ? inp[i] : draft[1 + i - inp.size()]; } // If sample size or percentage are below these thresholds the draft is aborted early: -constexpr int draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2, 2, 1, 1}; -constexpr int draft_min_percent_lax[LLAMA_NGRAM_MAX] = {66, 50, 50, 50}; -constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4, 3, 2, 2}; -constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66}; +constexpr int draft_min_sample_size_lax[JARVIS_NGRAM_MAX] = { 2, 2, 1, 1}; +constexpr int draft_min_percent_lax[JARVIS_NGRAM_MAX] = {66, 50, 50, 50}; +constexpr int draft_min_sample_size_strict[JARVIS_NGRAM_MAX] = { 4, 3, 2, 2}; +constexpr int draft_min_percent_strict[JARVIS_NGRAM_MAX] = {75, 66, 66, 66}; // Helper function that tries to draft a token from only the static ngram cache: -static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) { +static jarvis_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) { common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static); if (part_static_it == nc_static.end()) { return -1; @@ -71,10 +71,10 @@ static llama_token 
try_draft(common_ngram_cache & nc_static, const common_ngram int max_count_static = 0; int sum_count_static = 0; - llama_token max_token = -1; + jarvis_token max_token = -1; - for (std::pair token_count_static : part_static) { - const llama_token token = token_count_static.first; + for (std::pair token_count_static : part_static) { + const jarvis_token token = token_count_static.first; const int32_t count_static = token_count_static.second; if (count_static > max_count_static) { @@ -84,21 +84,21 @@ static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram sum_count_static += count_static; } - if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) { + if (sum_count_static < draft_min_sample_size_lax[JARVIS_NGRAM_STATIC-1]) { return -1; } - if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) { + if (100*max_count_static < draft_min_percent_lax[JARVIS_NGRAM_STATIC-1]*sum_count_static) { return -1; } return max_token; } // Try to draft a token from primary cache (context/dynamic), validate with static cache: -static llama_token try_draft( +static jarvis_token try_draft( common_ngram_cache & nc_primary, const std::vector & ngrams_primary, common_ngram_cache_part & part_static, const int * min_sample_size, const int * min_percent) { - llama_token drafted_token = -1; + jarvis_token drafted_token = -1; for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) { const common_ngram ngram_primary = ngrams_primary[i]; @@ -112,10 +112,10 @@ static llama_token try_draft( int max_count_primary = 0; int max_count_static = 0; int sum_count_primary = 0; - llama_token max_token = -1; + jarvis_token max_token = -1; - for (std::pair token_count_primary : part_primary) { - const llama_token token = token_count_primary.first; + for (std::pair token_count_primary : part_primary) { + const jarvis_token token = token_count_primary.first; common_ngram_cache_part::iterator token_count_static_it = 
part_static.find(token); @@ -143,22 +143,22 @@ static llama_token try_draft( } void common_ngram_cache_draft( - std::vector & inp, std::vector & draft, int n_draft, int ngram_min, int ngram_max, + std::vector & inp, std::vector & draft, int n_draft, int ngram_min, int ngram_max, common_ngram_cache & nc_context, common_ngram_cache & nc_dynamic, common_ngram_cache & nc_static ) { GGML_ASSERT(draft.size() == 1); const int inp_size = inp.size(); - if (inp_size < LLAMA_NGRAM_STATIC) { + if (inp_size < JARVIS_NGRAM_STATIC) { return; } while ((int) draft.size()-1 < n_draft) { - llama_token drafted_token = -1; + jarvis_token drafted_token = -1; - const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1; + const int ngram_start_static = inp_size-JARVIS_NGRAM_STATIC + draft.size()-1; common_ngram ngram_static; - for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) { + for (int j = ngram_start_static; j < ngram_start_static + JARVIS_NGRAM_STATIC; ++j) { ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j); } common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static); @@ -207,12 +207,12 @@ void common_ngram_cache_save(common_ngram_cache & ngram_cache, std::string & fil file_out.write(reinterpret_cast(&ngram), sizeof(common_ngram)); file_out.write(reinterpret_cast(&ntokens), sizeof(int32_t)); - for (std::pair item2 : token_counts) { - const llama_token token = item2.first; + for (std::pair item2 : token_counts) { + const jarvis_token token = item2.first; const int32_t count = item2.second; GGML_ASSERT(count > 0); - file_out.write(reinterpret_cast(&token), sizeof(llama_token)); + file_out.write(reinterpret_cast(&token), sizeof(jarvis_token)); file_out.write(reinterpret_cast(&count), sizeof(int32_t)); } } @@ -228,7 +228,7 @@ common_ngram_cache common_ngram_cache_load(std::string & filename) { common_ngram ngram; int32_t ntokens; - llama_token token; + jarvis_token token; int32_t count; char * ngramc 
= reinterpret_cast(&ngram); @@ -243,7 +243,7 @@ common_ngram_cache common_ngram_cache_load(std::string & filename) { for (int i = 0; i < ntokens; ++i) { GGML_ASSERT(!hashmap_file.eof()); - GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token))); + GGML_ASSERT(hashmap_file.read(tokenc, sizeof(jarvis_token))); GGML_ASSERT(!hashmap_file.eof()); GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t))); GGML_ASSERT(count > 0); @@ -268,8 +268,8 @@ void common_ngram_cache_merge(common_ngram_cache & ngram_cache_target, common_ng continue; } - for (std::pair token_count : part) { - const llama_token token = token_count.first; + for (std::pair token_count : part) { + const jarvis_token token = token_count.first; const int32_t count = token_count.second; GGML_ASSERT(count > 0); diff --git a/common/ngram-cache.h b/common/ngram-cache.h index 09c2b0319f2c0..c3fb21c6ace95 100644 --- a/common/ngram-cache.h +++ b/common/ngram-cache.h @@ -1,34 +1,34 @@ #pragma once -#include "llama.h" +#include "jarvis.h" #include #include #include -#define LLAMA_NGRAM_MIN 1 -#define LLAMA_NGRAM_MAX 4 -#define LLAMA_NGRAM_STATIC 2 +#define JARVIS_NGRAM_MIN 1 +#define JARVIS_NGRAM_MAX 4 +#define JARVIS_NGRAM_STATIC 2 // Data structures to map n-grams to empirical token probabilities: struct common_ngram { - llama_token tokens[LLAMA_NGRAM_MAX]; + jarvis_token tokens[JARVIS_NGRAM_MAX]; common_ngram() { - for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) { + for (int i = 0; i < JARVIS_NGRAM_MAX; ++i) { tokens[i] = -1; } } - common_ngram(const llama_token * input, const int ngram_size) { - for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) { + common_ngram(const jarvis_token * input, const int ngram_size) { + for (int i = 0; i < JARVIS_NGRAM_MAX; ++i) { tokens[i] = i < ngram_size ? 
input[i] : -1; } } bool operator==(const common_ngram & other) const { - for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) { + for (int i = 0; i < JARVIS_NGRAM_MAX; ++i) { if (tokens[i] != other.tokens[i]) { return false; } @@ -38,7 +38,7 @@ struct common_ngram { }; struct common_token_hash_function { - size_t operator()(const llama_token token) const { + size_t operator()(const jarvis_token token) const { // see https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/ return token * 11400714819323198485llu; } @@ -47,7 +47,7 @@ struct common_token_hash_function { struct common_ngram_hash_function { size_t operator()(const common_ngram & ngram) const { size_t hash = common_token_hash_function{}(ngram.tokens[0]); - for (int i = 1; i < LLAMA_NGRAM_MAX; ++i) { + for (int i = 1; i < JARVIS_NGRAM_MAX; ++i) { hash ^= common_token_hash_function{}(ngram.tokens[i]); } return hash; @@ -55,7 +55,7 @@ struct common_ngram_hash_function { }; // token -> number of times token has been seen -typedef std::unordered_map common_ngram_cache_part; +typedef std::unordered_map common_ngram_cache_part; // n-gram -> empirical distribution of following tokens typedef std::unordered_map common_ngram_cache; @@ -71,7 +71,7 @@ typedef std::unordered_map & inp_data, int nnew, bool print_progress); + common_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector & inp_data, int nnew, bool print_progress); // Try to draft tokens from ngram caches. // inp: the tokens generated so far. @@ -82,7 +82,7 @@ void common_ngram_cache_update( // nc_dynamic: ngram cache based on previous user generations. // nc_static: ngram cache generated from a large text corpus, used for validation. 
void common_ngram_cache_draft( - std::vector & inp, std::vector & draft, int n_draft, int ngram_min, int ngram_max, + std::vector & inp, std::vector & draft, int n_draft, int ngram_min, int ngram_max, common_ngram_cache & nc_context, common_ngram_cache & nc_dynamic, common_ngram_cache & nc_static); // Save an ngram cache to a file. diff --git a/common/sampling.cpp b/common/sampling.cpp index 48a9df8ba5b88..b6cad63334e7b 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -6,7 +6,7 @@ #include // the ring buffer works similarly to std::deque, but with a fixed capacity -// TODO: deduplicate with llama-impl.h +// TODO: deduplicate with jarvis-impl.h template struct ring_buffer { ring_buffer(size_t cap) : capacity(cap), data(cap) {} @@ -101,24 +101,24 @@ struct ring_buffer { struct common_sampler { common_sampler_params params; - struct llama_sampler * grmr; - struct llama_sampler * chain; + struct jarvis_sampler * grmr; + struct jarvis_sampler * chain; - ring_buffer prev; + ring_buffer prev; - std::vector cur; + std::vector cur; - llama_token_data_array cur_p; + jarvis_token_data_array cur_p; - void set_logits(struct llama_context * ctx, int idx) { - const auto * logits = llama_get_logits_ith(ctx, idx); + void set_logits(struct jarvis_context * ctx, int idx) { + const auto * logits = jarvis_get_logits_ith(ctx, idx); - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = jarvis_n_vocab(jarvis_get_model(ctx)); cur.resize(n_vocab); - for (llama_token token_id = 0; token_id < n_vocab; token_id++) { - cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f}; + for (jarvis_token token_id = 0; token_id < n_vocab; token_id++) { + cur[token_id] = jarvis_token_data{token_id, logits[token_id], 0.0f}; } cur_p = { cur.data(), cur.size(), -1, false }; @@ -141,31 +141,31 @@ std::string common_sampler_params::print() const { return std::string(result); } -struct common_sampler * common_sampler_init(const struct llama_model * model, const 
struct common_sampler_params & params) { - llama_sampler_chain_params lparams = llama_sampler_chain_default_params(); +struct common_sampler * common_sampler_init(const struct jarvis_model * model, const struct common_sampler_params & params) { + jarvis_sampler_chain_params lparams = jarvis_sampler_chain_default_params(); lparams.no_perf = params.no_perf; auto * result = new common_sampler { /* .params = */ params, - /* .grmr = */ llama_sampler_init_grammar(model, params.grammar.c_str(), "root"), - /* .chain = */ llama_sampler_chain_init(lparams), - /* .prev = */ ring_buffer(std::max(32, params.n_prev)), + /* .grmr = */ jarvis_sampler_init_grammar(model, params.grammar.c_str(), "root"), + /* .chain = */ jarvis_sampler_chain_init(lparams), + /* .prev = */ ring_buffer(std::max(32, params.n_prev)), /* .cur = */ {}, /* .cur_p = */ {}, }; - llama_sampler_chain_add(result->chain, - llama_sampler_init_logit_bias( - llama_n_vocab(model), + jarvis_sampler_chain_add(result->chain, + jarvis_sampler_init_logit_bias( + jarvis_n_vocab(model), params.logit_bias.size(), params.logit_bias.data())); - llama_sampler_chain_add(result->chain, - llama_sampler_init_penalties( - llama_n_vocab (model), - llama_token_eos(model), - llama_token_nl (model), + jarvis_sampler_chain_add(result->chain, + jarvis_sampler_init_penalties( + jarvis_n_vocab (model), + jarvis_token_eos(model), + jarvis_token_nl (model), params.penalty_last_n, params.penalty_repeat, params.penalty_freq, @@ -184,44 +184,44 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co c_breakers.push_back(str.c_str()); } - llama_sampler_chain_add(result->chain, llama_sampler_init_dry (model, params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_dry (model, params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, 
c_breakers.data(), c_breakers.size())); } break; case COMMON_SAMPLER_TYPE_TOP_K: - llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_top_k (params.top_k)); break; case COMMON_SAMPLER_TYPE_TOP_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_top_p (params.top_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_MIN_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_min_p (params.min_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_XTC: - llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed)); break; case COMMON_SAMPLER_TYPE_TFS_Z: - llama_sampler_chain_add(result->chain, llama_sampler_init_tail_free(params.tfs_z, params.min_keep)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_tail_free(params.tfs_z, params.min_keep)); break; case COMMON_SAMPLER_TYPE_TYPICAL_P: - llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_typical (params.typ_p, params.min_keep)); break; case COMMON_SAMPLER_TYPE_TEMPERATURE: - llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent)); break; case COMMON_SAMPLER_TYPE_INFILL: - llama_sampler_chain_add(result->chain, llama_sampler_init_infill (model)); + 
jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_infill (model)); break; default: GGML_ASSERT(false && "unknown sampler type"); } } - llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_dist(params.seed)); } else if (params.mirostat == 1) { - llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); - llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_n_vocab(model), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_temp(params.temp)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_mirostat(jarvis_n_vocab(model), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); } else if (params.mirostat == 2) { - llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); - llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_temp(params.temp)); + jarvis_sampler_chain_add(result->chain, jarvis_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); } else { GGML_ASSERT(false && "unknown mirostat version"); } @@ -231,53 +231,53 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co void common_sampler_free(struct common_sampler * gsmpl) { if (gsmpl) { - llama_sampler_free(gsmpl->grmr); + jarvis_sampler_free(gsmpl->grmr); - llama_sampler_free(gsmpl->chain); + jarvis_sampler_free(gsmpl->chain); delete gsmpl; } } -void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) { +void common_sampler_accept(struct common_sampler * gsmpl, jarvis_token token, bool accept_grammar) { if (accept_grammar) { - llama_sampler_accept(gsmpl->grmr, token); + jarvis_sampler_accept(gsmpl->grmr, token); } - 
llama_sampler_accept(gsmpl->chain, token); + jarvis_sampler_accept(gsmpl->chain, token); gsmpl->prev.push_back(token); } void common_sampler_reset(struct common_sampler * gsmpl) { - llama_sampler_reset(gsmpl->grmr); + jarvis_sampler_reset(gsmpl->grmr); - llama_sampler_reset(gsmpl->chain); + jarvis_sampler_reset(gsmpl->chain); } struct common_sampler * common_sampler_clone(common_sampler * gsmpl) { return new common_sampler { /* .params = */ gsmpl->params, - /* .grmr = */ llama_sampler_clone(gsmpl->grmr), - /* .chain = */ llama_sampler_clone(gsmpl->chain), + /* .grmr = */ jarvis_sampler_clone(gsmpl->grmr), + /* .chain = */ jarvis_sampler_clone(gsmpl->chain), /* .prev = */ gsmpl->prev, /* .cur = */ gsmpl->cur, /* .cur_p = */ gsmpl->cur_p, }; } -void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) { +void common_perf_print(const struct jarvis_context * ctx, const struct common_sampler * gsmpl) { // TODO: measure grammar performance if (gsmpl) { - llama_perf_sampler_print(gsmpl->chain); + jarvis_perf_sampler_print(gsmpl->chain); } if (ctx) { - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); } } -llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) { +jarvis_token common_sampler_sample(struct common_sampler * gsmpl, struct jarvis_context * ctx, int idx, bool grammar_first) { gsmpl->set_logits(ctx, idx); auto & grmr = gsmpl->grmr; @@ -285,14 +285,14 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co auto & cur_p = gsmpl->cur_p; // initialized by set_logits if (grammar_first) { - llama_sampler_apply(grmr, &cur_p); + jarvis_sampler_apply(grmr, &cur_p); } - llama_sampler_apply(chain, &cur_p); + jarvis_sampler_apply(chain, &cur_p); GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration"); - const llama_token id = cur_p.data[cur_p.selected].id; + const jarvis_token id 
= cur_p.data[cur_p.selected].id; if (grammar_first) { return id; @@ -300,10 +300,10 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co // check if it the sampled token fits the grammar { - llama_token_data single_token_data = { id, 1.0f, 0.0f }; - llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false }; + jarvis_token_data single_token_data = { id, 1.0f, 0.0f }; + jarvis_token_data_array single_token_data_array = { &single_token_data, 1, -1, false }; - llama_sampler_apply(grmr, &single_token_data_array); + jarvis_sampler_apply(grmr, &single_token_data_array); const bool is_valid = single_token_data_array.data[0].logit != -INFINITY; if (is_valid) { @@ -315,8 +315,8 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain gsmpl->set_logits(ctx, idx); - llama_sampler_apply(grmr, &cur_p); - llama_sampler_apply(chain, &cur_p); + jarvis_sampler_apply(grmr, &cur_p); + jarvis_sampler_apply(chain, &cur_p); GGML_ASSERT(cur_p.selected != -1 && "no selected token during re-sampling - check your sampling configuration"); @@ -324,31 +324,31 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co } uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) { - return llama_sampler_get_seed(gsmpl->chain); + return jarvis_sampler_get_seed(gsmpl->chain); } // helpers -llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl) { +jarvis_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl) { return &gsmpl->cur_p; } -llama_token common_sampler_last(const struct common_sampler * gsmpl) { +jarvis_token common_sampler_last(const struct common_sampler * gsmpl) { return gsmpl->prev.rat(0); } std::string common_sampler_print(const struct common_sampler * gsmpl) { std::string result = "logits "; - for (int 
i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) { - const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i); - result += std::string("-> ") + llama_sampler_name(smpl) + " "; + for (int i = 0; i < jarvis_sampler_chain_n(gsmpl->chain); i++) { + const auto * smpl = jarvis_sampler_chain_get(gsmpl->chain, i); + result += std::string("-> ") + jarvis_sampler_name(smpl) + " "; } return result; } -std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) { +std::string common_sampler_prev_str(common_sampler * gsmpl, jarvis_context * ctx_main, int n) { n = std::min(n, (int) gsmpl->prev.size()); if (n <= 0) { @@ -359,9 +359,9 @@ std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_ result.reserve(8*n); // 8 is the average length of a token [citation needed], TODO: compute this from the vocab for (int i = n - 1; i >= 0; i--) { - const llama_token id = gsmpl->prev.rat(i); + const jarvis_token id = gsmpl->prev.rat(i); - GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen"); + GGML_ASSERT(id != JARVIS_TOKEN_NULL && "null token in the sampling history - should not happen"); result += common_token_to_piece(ctx_main, id); } diff --git a/common/sampling.h b/common/sampling.h index d37f25ad37c4a..9dc17ed24b69f 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -1,13 +1,13 @@ #pragma once -#include "llama.h" +#include "jarvis.h" #include "common.h" #include #include -// common_sampler extends llama_sampler with additional functionality: +// common_sampler extends jarvis_sampler with additional functionality: // // - grammar support // - custom sampler logic based on the parameters @@ -24,7 +24,7 @@ // grammar constraints are applied to the full vocabulary and the token is resampled. // // The common_sampler also maintains a container with the last accepted tokens. In the future, this can -// be moved into the core llama library. 
+// be moved into the core jarvis library. // // For convenience, the common_sampler also maintains a container with the current candidate tokens. // This can be used to access the probabilities of the rest of the non-sampled tokens. @@ -34,19 +34,19 @@ struct common_sampler; -// llama_sampler API overloads +// jarvis_sampler API overloads -struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params); +struct common_sampler * common_sampler_init(const struct jarvis_model * model, const struct common_sampler_params & params); void common_sampler_free(struct common_sampler * gsmpl); // if accept_grammar is true, the token is accepted both by the sampling chain and the grammar -void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar); +void common_sampler_accept(struct common_sampler * gsmpl, jarvis_token token, bool accept_grammar); void common_sampler_reset (struct common_sampler * gsmpl); struct common_sampler * common_sampler_clone (struct common_sampler * gsmpl); // arguments can be nullptr to skip printing -void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl); +void common_perf_print(const struct jarvis_context * ctx, const struct common_sampler * gsmpl); // extended sampling implementation: // @@ -58,23 +58,23 @@ void common_perf_print(const struct llama_context * ctx, const struct common_sam // if grammar_first is true, the grammar is applied before the samplers (slower) // useful in cases where all the resulting candidates (not just the sampled one) must fit the grammar // -llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false); +jarvis_token common_sampler_sample(struct common_sampler * gsmpl, struct jarvis_context * ctx, int idx, bool grammar_first = false); uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl); // helpers // 
access the internal list of current candidate tokens -llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl); +jarvis_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl); // get the last accepted token -llama_token common_sampler_last(const struct common_sampler * gsmpl); +jarvis_token common_sampler_last(const struct common_sampler * gsmpl); // print the sampler chain into a string std::string common_sampler_print(const struct common_sampler * gsmpl); // get a string representation of the last accepted tokens -std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx, int n); +std::string common_sampler_prev_str(common_sampler * gsmpl, jarvis_context * ctx, int n); char common_sampler_type_to_chr(enum common_sampler_type cnstr); std::string common_sampler_type_to_str(enum common_sampler_type cnstr); diff --git a/common/train.cpp b/common/train.cpp index 661ad8382eab6..c913f6dbd8521 100644 --- a/common/train.cpp +++ b/common/train.cpp @@ -34,7 +34,7 @@ struct train_state * init_train_state() { state->opt = new struct ggml_opt_context; state->opt->ctx = NULL; state->opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM); - state->opt->params.graph_size = LLAMA_TRAIN_MAX_NODES; + state->opt->params.graph_size = JARVIS_TRAIN_MAX_NODES; state->opt->loss_after = 0.0f; return state; @@ -213,7 +213,7 @@ void assert_shape_4d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int6 } int64_t get_example_targets_batch( - struct llama_context * lctx, + struct jarvis_context * lctx, struct ggml_tensor * tokens_input, struct ggml_tensor * target_probs, int64_t example_id, @@ -221,7 +221,7 @@ int64_t get_example_targets_batch( const size_t * samples_begin, const size_t * samples_size, size_t samples_count, - const llama_token * train_data, + const jarvis_token * train_data, size_t n_train_data, bool separate_with_eos, bool separate_with_bos, @@ -241,8 +241,8 @@ int64_t 
get_example_targets_batch( int64_t used_samples = 0; ggml_set_f32(target_probs, 0.0f); - llama_token bos = llama_token_bos(llama_get_model(lctx)); - llama_token eos = llama_token_eos(llama_get_model(lctx)); + jarvis_token bos = jarvis_token_bos(jarvis_get_model(lctx)); + jarvis_token eos = jarvis_token_eos(jarvis_get_model(lctx)); // printf("%s: example_id=%d n_batch=%d n_train_samples=%zu\n", __func__, example_id, n_batch, n_train_samples); for (int k=0; k= sample_size && fill_with_next_samples) { if (!sample_separation_eos) { // insert eos token to separate samples @@ -281,7 +281,7 @@ int64_t get_example_targets_batch( } // note: no else-if here if (sample_offs < sample_size) { - token = clamp(train_data[sample_begin+sample_offs], 0, (llama_token) (n_vocab - 1)); + token = clamp(train_data[sample_begin+sample_offs], 0, (jarvis_token) (n_vocab - 1)); ++sample_offs; } ggml_set_f32_nd(target_probs, token, (int) i, (int) k, 0, +1.0f); @@ -712,12 +712,12 @@ void save_train_state_gguf(struct gguf_context * fctx, struct train_state * trai } -struct llama_file { +struct jarvis_file { // use FILE * so we don't have to re-open the file to mmap FILE * fp; size_t size; - llama_file(const char * fname, const char * mode) { + jarvis_file(const char * fname, const char * mode) { fp = std::fopen(fname, mode); if (fp == NULL) { size = 0; @@ -788,7 +788,7 @@ struct llama_file { write_raw(&val, sizeof(val)); } - ~llama_file() { + ~jarvis_file() { if (fp) { std::fclose(fp); } @@ -823,16 +823,16 @@ static size_t mark_utf8_units(const char* bytes, int * utf8_units, int * utf8_nu } size_t tokenize_file( - struct llama_context * lctx, + struct jarvis_context * lctx, const char * filename, const std::string & sample_start, bool include_sample_start, bool overlapping_samples, unsigned context_length, - std::vector & out_tokens, + std::vector & out_tokens, std::vector & out_samples_begin, std::vector & out_samples_size) { - struct llama_file f(filename, "rb"); + struct jarvis_file 
f(filename, "rb"); if (f.size == 0) { out_tokens.clear(); @@ -844,7 +844,7 @@ size_t tokenize_file( } // account for possible leading whitespace that will be added by tokenizer - // e.g. '\t' will be tokenized by llama spm tokenizer to [29871, 12] + // e.g. '\t' will be tokenized by jarvis spm tokenizer to [29871, 12] const int n_max_tokens_overhead = 1; std::vector buf; @@ -862,8 +862,8 @@ size_t tokenize_file( // tokenize all data at once out_tokens.resize(buf.size() + n_max_tokens_overhead); - int n_tokens = llama_tokenize( - llama_get_model(lctx), + int n_tokens = jarvis_tokenize( + jarvis_get_model(lctx), buf.data(), (int) buf.size(), out_tokens.data(), @@ -871,8 +871,8 @@ size_t tokenize_file( false, false); if (n_tokens < 0) { out_tokens.resize(-n_tokens); - n_tokens = llama_tokenize( - llama_get_model(lctx), + n_tokens = jarvis_tokenize( + jarvis_get_model(lctx), buf.data(), (int) buf.size(), out_tokens.data(), @@ -915,7 +915,7 @@ size_t tokenize_file( out_samples_size.resize(out_samples_begin.size(), 0); std::vector buf_sample; - std::vector tok_sample; + std::vector tok_sample; const size_t sample_begin_offset = (include_sample_start ? 0 : sample_start.size()); size_t found_too_big_sample = 0; @@ -925,11 +925,11 @@ size_t tokenize_file( size_t found_max_sample_size = 0; size_t max_token_text_size = 0; - int n_vocab = llama_n_vocab(llama_get_model(lctx)); - for (llama_token token=0; token < n_vocab; ++token) { + int n_vocab = jarvis_n_vocab(jarvis_get_model(lctx)); + for (jarvis_token token=0; token < n_vocab; ++token) { max_token_text_size = std::max( max_token_text_size, - strlen(llama_token_get_text(llama_get_model(lctx), token))); + strlen(jarvis_token_get_text(jarvis_get_model(lctx), token))); } // upper bound of context byte length. @@ -957,7 +957,7 @@ size_t tokenize_file( } if (sample_size > 0) { - // llama_tokenize expects zero terminated string, + // jarvis_tokenize expects zero terminated string, // copy sample into buffer and zero terminate it. 
buf_sample.resize(sample_size); memcpy(buf_sample.data(), data_str.data() + sample_begin, sample_size); @@ -966,7 +966,7 @@ size_t tokenize_file( // tokenize the sample tok_sample.resize(buf_sample.size() + n_max_tokens_overhead); - int n_tokens = llama_tokenize(llama_get_model(lctx), + int n_tokens = jarvis_tokenize(jarvis_get_model(lctx), buf_sample.data(), (int) buf_sample.size(), tok_sample.data(), @@ -974,7 +974,7 @@ size_t tokenize_file( false, false); if (n_tokens < 0) { tok_sample.resize(-n_tokens); - n_tokens = llama_tokenize(llama_get_model(lctx), + n_tokens = jarvis_tokenize(jarvis_get_model(lctx), buf_sample.data(), (int) buf_sample.size(), tok_sample.data(), @@ -1365,7 +1365,7 @@ bool consume_common_train_arg( *invalid_param = true; return true; } - if (llama_supports_gpu_offload()) { + if (jarvis_supports_gpu_offload()) { params->n_gpu_layers = std::stoi(argv[i]); } else { fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); diff --git a/common/train.h b/common/train.h index 263d940c04298..82c4a24c5d3ee 100644 --- a/common/train.h +++ b/common/train.h @@ -7,9 +7,9 @@ #include #include "ggml.h" -#include "llama.h" +#include "jarvis.h" -#define LLAMA_TRAIN_MAX_NODES 16384 +#define JARVIS_TRAIN_MAX_NODES 16384 typedef std::string mt19937_state; @@ -92,9 +92,9 @@ struct train_opt_callback_data { struct train_state * train; save_train_files_callback save_cb; void * save_data; - struct llama_context * lctx; + struct jarvis_context * lctx; int last_save_iter; - llama_token * tokens_data; + jarvis_token * tokens_data; size_t tokens_size; size_t * samples_begin; size_t * samples_size; @@ -146,18 +146,18 @@ void assert_shape_3d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int6 void assert_shape_4d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); size_t tokenize_file( - struct llama_context * lctx, + struct jarvis_context * lctx, const char * filename, const 
std::string & sample_start, bool include_sample_start, bool overlapping_samples, unsigned context_length, - std::vector & out_tokens, + std::vector & out_tokens, std::vector & out_samples_begin, std::vector & out_samples_size); int64_t get_example_targets_batch( - struct llama_context * lctx, + struct jarvis_context * lctx, struct ggml_tensor * tokens_input, struct ggml_tensor * target_probs, int64_t example_id, @@ -165,7 +165,7 @@ int64_t get_example_targets_batch( const size_t * samples_begin, const size_t * samples_size, size_t samples_count, - const llama_token * train_data, + const jarvis_token * train_data, size_t n_train_data, bool separate_with_eos, bool separate_with_bos, diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index a34dabe235a34..ebd619b9c91ce 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -49,7 +49,7 @@ class Model: _model_classes: dict[str, type[Model]] = {} dir_model: Path - ftype: gguf.LlamaFileType + ftype: gguf.JarvisFileType fname_out: Path is_big_endian: bool endianess: gguf.GGUFEndian @@ -69,7 +69,7 @@ class Model: # subclasses should define this! 
model_arch: gguf.MODEL_ARCH - def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool = False, + def __init__(self, dir_model: Path, ftype: gguf.JarvisFileType, fname_out: Path, is_big_endian: bool = False, use_temp_file: bool = False, eager: bool = False, metadata_override: Path | None = None, model_name: str | None = None, split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False): @@ -96,15 +96,15 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type - if self.ftype == gguf.LlamaFileType.GUESSED: + if self.ftype == gguf.JarvisFileType.GUESSED: # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. _, first_tensor = next(self.get_tensors()) if first_tensor.dtype == torch.float16: logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_F16 + self.ftype = gguf.JarvisFileType.MOSTLY_F16 else: logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_BF16 + self.ftype = gguf.JarvisFileType.MOSTLY_BF16 # Configure GGUF Writer self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, @@ -308,7 +308,7 @@ def prepare_tensors(self): if n_dims <= 1 or new_name.endswith("_norm.weight"): data_qtype = gguf.GGMLQuantizationType.F32 - # Conditions should closely match those in llama_model_quantize_internal in llama.cpp + # Conditions should closely match those in jarvis_model_quantize_internal in jarvis.cpp # Some tensor types are always in float32 if data_qtype is False and ( any( @@ -337,25 +337,25 @@ def prepare_tensors(self): ) ): 
if self.ftype in ( - gguf.LlamaFileType.MOSTLY_TQ1_0, - gguf.LlamaFileType.MOSTLY_TQ2_0, + gguf.JarvisFileType.MOSTLY_TQ1_0, + gguf.JarvisFileType.MOSTLY_TQ2_0, ): # TODO: use Q4_K and Q6_K data_qtype = gguf.GGMLQuantizationType.F16 # No override (data_qtype is False), or wants to be quantized (data_qtype is True) if isinstance(data_qtype, bool): - if self.ftype == gguf.LlamaFileType.ALL_F32: + if self.ftype == gguf.JarvisFileType.ALL_F32: data_qtype = gguf.GGMLQuantizationType.F32 - elif self.ftype == gguf.LlamaFileType.MOSTLY_F16: + elif self.ftype == gguf.JarvisFileType.MOSTLY_F16: data_qtype = gguf.GGMLQuantizationType.F16 - elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16: + elif self.ftype == gguf.JarvisFileType.MOSTLY_BF16: data_qtype = gguf.GGMLQuantizationType.BF16 - elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0: + elif self.ftype == gguf.JarvisFileType.MOSTLY_Q8_0: data_qtype = gguf.GGMLQuantizationType.Q8_0 - elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ1_0: + elif self.ftype == gguf.JarvisFileType.MOSTLY_TQ1_0: data_qtype = gguf.GGMLQuantizationType.TQ1_0 - elif self.ftype == gguf.LlamaFileType.MOSTLY_TQ2_0: + elif self.ftype == gguf.JarvisFileType.MOSTLY_TQ2_0: data_qtype = gguf.GGMLQuantizationType.TQ2_0 else: raise ValueError(f"Unknown file type: {self.ftype.name}") @@ -394,7 +394,7 @@ def prepare_metadata(self, vocab_only: bool): if self.metadata.size_label is None and total_params > 0: self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count) - # Extract the encoding scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' + # Extract the encoding scheme from the file type name. e.g. 'gguf.JarvisFileType.MOSTLY_Q8_0' --> 'Q8_0' output_type: str = self.ftype.name.partition("_")[2] # Filename Output @@ -537,13 +537,13 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]: # NOTE: this function is generated by convert_hf_to_gguf_update.py # do not modify it manually! 
- # ref: https://github.com/ggerganov/llama.cpp/pull/6920 + # ref: https://github.com/ggerganov/jarvis.cpp/pull/6920 # Marker: Start get_vocab_base_pre def get_vocab_base_pre(self, tokenizer) -> str: # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that # is specific for the BPE pre-tokenizer used by the model # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer + # use in jarvis.cpp to implement the same pre-tokenizer chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL' @@ -559,8 +559,8 @@ def get_vocab_base_pre(self, tokenizer) -> str: # or pull the latest version of the model from Huggingface # don't edit the hashes manually! 
if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5": - # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B - res = "llama-bpe" + # ref: https://huggingface.co/meta-jarvis/Meta-Jarvis-3-8B + res = "jarvis-bpe" if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754": # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base res = "deepseek-llm" @@ -616,7 +616,7 @@ def get_vocab_base_pre(self, tokenizer) -> str: # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de res = "jina-v2-de" if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d": - # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct + # ref: https://huggingface.co/abacusai/Smaug-Jarvis-3-70B-Instruct res = "smaug-bpe" if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360": # ref: https://huggingface.co/LumiOpen/Poro-34B-chat @@ -666,7 +666,7 @@ def get_vocab_base_pre(self, tokenizer) -> str: logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") logger.warning("** - the pre-tokenization config has changed upstream") logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") + logger.warning("** ref: https://github.com/ggerganov/jarvis.cpp/pull/6920") logger.warning("**") logger.warning(f"** chkhsh: {chkhsh}") logger.warning("**************************************************************************************") @@ -746,7 +746,7 @@ def _set_vocab_qwen(self): def _set_vocab_sentencepiece(self, add_to_gguf=True): tokens, scores, toktypes = self._create_vocab_sentencepiece() - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_scores(scores) @@ -835,8 +835,8 @@ def 
_create_vocab_sentencepiece(self): return tokens, scores, toktypes - def _set_vocab_llama_hf(self): - vocab = gguf.LlamaHfVocab(self.dir_model) + def _set_vocab_jarvis_hf(self): + vocab = gguf.JarvisHfVocab(self.dir_model) tokens = [] scores = [] toktypes = [] @@ -848,7 +848,7 @@ def _set_vocab_llama_hf(self): assert len(tokens) == vocab.vocab_size - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_scores(scores) @@ -857,7 +857,7 @@ def _set_vocab_llama_hf(self): special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): + def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "jarvis-spm"], vocab_size: int): tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf" logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'") vocab_reader = gguf.GGUFReader(tokenizer_path, "r") @@ -875,7 +875,7 @@ def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab assert field # token list self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size]) - if model_name == "llama-spm": + if model_name == "jarvis-spm": field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES) assert field # token scores self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) @@ -884,7 +884,7 @@ def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab assert field # token types self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) - if model_name != "llama-spm": + if model_name != "jarvis-spm": field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES) assert field # token merges 
self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data]) @@ -1226,7 +1226,7 @@ def set_vocab(self): tokens.append(token_text) toktypes.append(toktype) - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_types(toktypes) @@ -1515,21 +1515,21 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed norms: {norms}") -@Model.register("LLaMAForCausalLM", "LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") -class LlamaModel(Model): - model_arch = gguf.MODEL_ARCH.LLAMA +@Model.register("LLaMAForCausalLM", "JarvisForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") +class JarvisModel(Model): + model_arch = gguf.MODEL_ARCH.JARVIS def set_vocab(self): try: self._set_vocab_sentencepiece() except FileNotFoundError: try: - self._set_vocab_llama_hf() + self._set_vocab_jarvis_hf() except (FileNotFoundError, TypeError): - # Llama 3 + # Jarvis 3 self._set_vocab_gpt2() - # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256) + # Apply to CodeJarvis only (and ignore for Jarvis 3 with a vocab size of 128256) if self.hparams.get("vocab_size", 32000) == 32016: special_vocab = gguf.SpecialVocab( self.dir_model, load_merges=False, @@ -1583,9 +1583,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter n_kv_head = self.hparams.get("num_key_value_heads") if name.endswith(("q_proj.weight", "q_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_head) if name.endswith(("k_proj.weight", "k_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_kv_head) # process the experts separately if name.find("block_sparse_moe.experts") != -1: @@ -1625,7 +1625,7 @@ def modify_tensors(self, 
data_torch: Tensor, name: str, bid: int | None) -> Iter def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == "llama3": + if rope_scaling.get("rope_type", '').lower() == "jarvis3": base = self.hparams.get("rope_theta", 10000.0) dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) @@ -1793,7 +1793,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose # original implementation expects (n_expert, n_ff, n_embd) for all experts weights - # But llama.cpp moe graph works differently + # But jarvis.cpp moe graph works differently # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} @@ -1842,7 +1842,7 @@ def set_gguf_parameters(self): self.gguf_writer.add_file_type(self.ftype) def set_vocab(self): - self._set_vocab_llama_hf() + self._set_vocab_jarvis_hf() def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -2188,7 +2188,7 @@ def set_vocab(self): if foken_data.get("special"): toktypes[token_id] = SentencePieceTokenTypes.CONTROL - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_scores(scores) @@ -2456,7 +2456,7 @@ def set_vocab(self): if foken_data.get("special"): toktypes[token_id] = SentencePieceTokenTypes.CONTROL - 
self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_scores(scores) @@ -2468,7 +2468,7 @@ def set_vocab(self): if chat_eos_token_id is not None: # For the chat model, we replace the eos with '<|im_end|>'. # TODO: this is a hack, should be fixed - # https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048 + # https://github.com/ggerganov/jarvis.cpp/pull/6745#issuecomment-2067687048 special_vocab.special_token_ids["eos"] = chat_eos_token_id logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}" " in chat mode so that the conversation can end normally.") @@ -2505,8 +2505,8 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1] # The model weights of q and k equire additional reshape. - q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads) - k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads) + q = JarvisModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads) + k = JarvisModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads) v = v.reshape((-1, v.shape[-1])) return [ @@ -2769,7 +2769,7 @@ def set_gguf_parameters(self): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model + # lm_head is not used in jarvis.cpp, while autoawq will include this tensor in model # To prevent errors, skip loading lm_head.weight. 
if name == "lm_head.weight": logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") @@ -2816,7 +2816,7 @@ def set_gguf_parameters(self): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model + # lm_head is not used in jarvis.cpp, while autoawq will include this tensor in model # To prevent errors, skip loading lm_head.weight. if name == "lm_head.weight": logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") @@ -2894,7 +2894,7 @@ def set_gguf_parameters(self): self.gguf_writer.add_feed_forward_length(intermediate_size) self.gguf_writer.add_file_type(self.ftype) - # required by llama.cpp, unused + # required by jarvis.cpp, unused self.gguf_writer.add_head_count(0) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: @@ -3024,7 +3024,7 @@ def set_gguf_parameters(self): self.gguf_writer.add_clamp_kqv(clip_qkv) # Same as super class, but permuting q_proj, k_proj - # Copied from: LlamaModel + # Copied from: JarvisModel def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused @@ -3032,9 +3032,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter n_kv_head = self.hparams.get("num_key_value_heads") if name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_head) if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_kv_head) return [(self.map_tensor_name(name), data_torch)] @@ -3174,12 +3174,12 @@ def __init__(self, *args, **kwargs): assert isinstance(self._num_kv_heads, list) and 
isinstance(self._num_kv_heads[0], int) assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int) - # Uses the tokenizer from meta-llama/Llama-2-7b-hf + # Uses the tokenizer from meta-jarvis/Jarvis-2-7b-hf def set_vocab(self): try: self._set_vocab_sentencepiece() except FileNotFoundError: - self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"]) + self._set_vocab_builtin("jarvis-spm", self.hparams["vocab_size"]) def set_gguf_parameters(self): n_embd = self._n_embd @@ -3300,7 +3300,7 @@ def set_vocab(self): toktypes[token_id] = token_type scores[token_id] = token_score - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") self.gguf_writer.add_tokenizer_pre("default") self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_scores(scores) @@ -3322,9 +3322,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter n_kv_head = self.hparams.get("num_key_value_heads") if name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_head) if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_kv_head) # process the experts separately if name.find("block_sparse_moe.experts") != -1: @@ -3882,7 +3882,7 @@ def set_vocab_chatglm3(self): scores.append(score) toktypes.append(toktype) - self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_tokenizer_model("jarvis") # glm3 needs prefix and suffix formatted as: # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>" self.gguf_writer.add_tokenizer_pre("chatglm-spm") @@ -4087,7 +4087,7 @@ def set_gguf_parameters(self): def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): - if rope_scaling.get("rope_type", '').lower() == 
"llama3": + if rope_scaling.get("rope_type", '').lower() == "jarvis3": base = self.hparams.get("rope_theta", 10000.0) dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) @@ -4116,12 +4116,12 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: @Model.register("GraniteForCausalLM") -class GraniteModel(LlamaModel): +class GraniteModel(JarvisModel): """Conversion for IBM's GraniteForCausalLM""" model_arch = gguf.MODEL_ARCH.GRANITE def set_gguf_parameters(self): - """Granite uses standard llama parameters with the following differences: + """Granite uses standard jarvis parameters with the following differences: - No head_dim support - New multiplier params: @@ -4196,9 +4196,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter hidden_dim = self.hparams.get("hidden_size") if name.endswith(("q_proj.weight", "q_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_head) if name.endswith(("k_proj.weight", "k_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) + data_torch = JarvisModel.permute(data_torch, n_head, n_kv_head) if name.endswith(("q_norm.weight", "q_norm.bias")): data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_head, hidden_dim) if name.endswith(("k_norm.weight", "k_norm.bias")): @@ -4379,14 +4379,14 @@ def main() -> None: logger.error(f'Error: {args.model} is not a directory') sys.exit(1) - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "tq1_0": gguf.LlamaFileType.MOSTLY_TQ1_0, - "tq2_0": gguf.LlamaFileType.MOSTLY_TQ2_0, - "auto": gguf.LlamaFileType.GUESSED, + ftype_map: dict[str, gguf.JarvisFileType] = { + 
"f32": gguf.JarvisFileType.ALL_F32, + "f16": gguf.JarvisFileType.MOSTLY_F16, + "bf16": gguf.JarvisFileType.MOSTLY_BF16, + "q8_0": gguf.JarvisFileType.MOSTLY_Q8_0, + "tq1_0": gguf.JarvisFileType.MOSTLY_TQ1_0, + "tq2_0": gguf.JarvisFileType.MOSTLY_TQ2_0, + "auto": gguf.JarvisFileType.GUESSED, } is_split = args.split_max_tensors > 0 or args.split_max_size != "0" diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 28cd02e5a7f66..b4324a3cd1922 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -5,10 +5,10 @@ # generates the get_vocab_base_pre() function for convert_hf_to_gguf.py # # This is necessary in order to analyze the type of pre-tokenizer used by the model and -# provide the necessary information to llama.cpp via the GGUF header in order to implement +# provide the necessary information to jarvis.cpp via the GGUF header in order to implement # the same pre-tokenizer. # -# ref: https://github.com/ggerganov/llama.cpp/pull/6920 +# ref: https://github.com/ggerganov/jarvis.cpp/pull/6920 # # Instructions: # @@ -18,9 +18,9 @@ # python3 convert_hf_to_gguf_update.py # # - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py -# - Update llama.cpp with the new pre-tokenizer if necessary +# - Update jarvis.cpp with the new pre-tokenizer if necessary # -# TODO: generate tokenizer tests for llama.cpp +# TODO: generate tokenizer tests for jarvis.cpp # import logging @@ -65,8 +65,8 @@ class TOKENIZER_TYPE(IntEnum): # TODO: add models here, base models preferred models = [ - {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", }, - {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", }, + {"name": "jarvis-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-jarvis/Jarvis-2-7b-hf", }, + {"name": "jarvis-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": 
"https://huggingface.co/meta-jarvis/Meta-Jarvis-3-8B", }, {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", }, {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", }, {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", }, @@ -86,7 +86,7 @@ class TOKENIZER_TYPE(IntEnum): {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM! {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", }, {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, - {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", }, + {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Jarvis-3-70B-Instruct", }, {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", }, {"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", }, {"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Also used for Viking 13B and 33B @@ -215,7 +215,7 @@ def get_vocab_base_pre(self, tokenizer) -> str: # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that # is specific for the BPE pre-tokenizer used by the model # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer + # use in jarvis.cpp to implement the same pre-tokenizer chktxt = {repr(CHK_TXT)} @@ -239,7 +239,7 @@ def get_vocab_base_pre(self, 
tokenizer) -> str: logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") logger.warning("** - the pre-tokenization config has changed upstream") logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") + logger.warning("** ref: https://github.com/ggerganov/jarvis.cpp/pull/6920") logger.warning("**") logger.warning(f"** chkhsh: {{chkhsh}}") logger.warning("**************************************************************************************") @@ -311,7 +311,7 @@ def get_vocab_base_pre(self, tokenizer) -> str: "3333333", "33333333", "333333333", - "Cửa Việt", # llama-bpe fails on this + "Cửa Việt", # jarvis-bpe fails on this " discards", CHK_TXT, ] diff --git a/convert_llama_ggml_to_gguf.py b/convert_llama_ggml_to_gguf.py index 29b14e98dd237..b741215345c9a 100755 --- a/convert_llama_ggml_to_gguf.py +++ b/convert_llama_ggml_to_gguf.py @@ -223,13 +223,13 @@ def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" logger.info(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') self.n_kv_head = n_kv_head - self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer) + self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.JARVIS, ggml_model.hyperparameters.n_layer) def save(self): logger.info('* Preparing to save GGUF file') gguf_writer = gguf.GGUFWriter( self.cfg.output, - gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], + gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.JARVIS], use_temp_file = False) self.add_params(gguf_writer) self.add_vocab(gguf_writer) @@ -286,7 +286,7 @@ def add_params(self, gguf_writer): def add_vocab(self, gguf_writer): hp = self.model.hyperparameters - gguf_writer.add_tokenizer_model('llama') + gguf_writer.add_tokenizer_model('jarvis') 
gguf_writer.add_tokenizer_pre('default') tokens = [] scores = [] @@ -358,7 +358,7 @@ def add_tensors(self, gguf_writer): def handle_metadata(cfg, hp): - import examples.convert_legacy_llama as convert + import examples.convert_legacy_jarvis as convert assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' hf_config_path = cfg.model_metadata_dir / "config.json" diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py index bc68f68afb768..f0eabf62bf2a3 100755 --- a/convert_lora_to_gguf.py +++ b/convert_lora_to_gguf.py @@ -271,12 +271,12 @@ def parse_args() -> argparse.Namespace: args = parse_args() logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "auto": gguf.LlamaFileType.GUESSED, + ftype_map: dict[str, gguf.JarvisFileType] = { + "f32": gguf.JarvisFileType.ALL_F32, + "f16": gguf.JarvisFileType.MOSTLY_F16, + "bf16": gguf.JarvisFileType.MOSTLY_BF16, + "q8_0": gguf.JarvisFileType.MOSTLY_Q8_0, + "auto": gguf.JarvisFileType.GUESSED, } ftype = ftype_map[args.outtype] @@ -372,9 +372,9 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]: def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: dest = list(super().modify_tensors(data_torch, name, bid)) # some archs may have the same tensor for lm_head and output (tie word embeddings) - # in this case, adapters targeting lm_head will fail when using llama-export-lora + # in this case, adapters targeting lm_head will fail when using jarvis-export-lora # therefore, we ignore them for now - # see: https://github.com/ggerganov/llama.cpp/issues/9065 + # see: https://github.com/ggerganov/jarvis.cpp/issues/9065 if name == "lm_head.weight" and len(dest) == 0: raise ValueError("lm_head is present in adapter, but is ignored in 
base model") for dest_name, dest_data in dest: diff --git a/docs/android.md b/docs/android.md index 320b62240382f..e4a071396921d 100644 --- a/docs/android.md +++ b/docs/android.md @@ -5,14 +5,14 @@ [Termux](https://termux.dev/en/) is an Android terminal emulator and Linux environment app (no root required). As of writing, Termux is available experimentally in the Google Play Store; otherwise, it may be obtained directly from the project repo or on F-Droid. -With Termux, you can install and run `llama.cpp` as if the environment were Linux. Once in the Termux shell: +With Termux, you can install and run `jarvis.cpp` as if the environment were Linux. Once in the Termux shell: ``` $ apt update && apt upgrade -y $ apt install git cmake ``` -Then, follow the [build instructions](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md), specifically for CMake. +Then, follow the [build instructions](https://github.com/ggerganov/jarvis.cpp/blob/master/docs/build.md), specifically for CMake. Once the binaries are built, download your model of choice (e.g., from Hugging Face). It's recommended to place it in the `~/` directory for best performance: @@ -20,22 +20,22 @@ Once the binaries are built, download your model of choice (e.g., from Hugging F $ curl -L {model-url} -o ~/{model}.gguf ``` -Then, if you are not already in the repo directory, `cd` into `llama.cpp` and: +Then, if you are not already in the repo directory, `cd` into `jarvis.cpp` and: ``` -$ ./build/bin/llama-simple -m ~/{model}.gguf -c {context-size} -p "{your-prompt}" +$ ./build/bin/jarvis-simple -m ~/{model}.gguf -c {context-size} -p "{your-prompt}" ``` -Here, we show `llama-simple`, but any of the executables under `examples` should work, in theory. Be sure to set `context-size` to a reasonable number (say, 4096) to start with; otherwise, memory could spike and kill your terminal. +Here, we show `jarvis-simple`, but any of the executables under `examples` should work, in theory. 
Be sure to set `context-size` to a reasonable number (say, 4096) to start with; otherwise, memory could spike and kill your terminal. To see what it might look like visually, here's an old demo of an interactive session running on a Pixel 5 phone: https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4 ## Cross-compile using Android NDK -It's possible to build `llama.cpp` for Android on your host system via CMake and the Android NDK. If you are interested in this path, ensure you already have an environment prepared to cross-compile programs for Android (i.e., install the Android SDK). Note that, unlike desktop environments, the Android environment ships with a limited set of native libraries, and so only those libraries are available to CMake when building with the Android NDK (see: https://developer.android.com/ndk/guides/stable_apis.) +It's possible to build `jarvis.cpp` for Android on your host system via CMake and the Android NDK. If you are interested in this path, ensure you already have an environment prepared to cross-compile programs for Android (i.e., install the Android SDK). Note that, unlike desktop environments, the Android environment ships with a limited set of native libraries, and so only those libraries are available to CMake when building with the Android NDK (see: https://developer.android.com/ndk/guides/stable_apis.) 
-Once you're ready and have cloned `llama.cpp`, invoke the following in the project directory: +Once you're ready and have cloned `jarvis.cpp`, invoke the following in the project directory: ``` $ cmake \ @@ -45,15 +45,15 @@ $ cmake \ -DCMAKE_C_FLAGS="-march=armv8.7a" \ -DCMAKE_CXX_FLAGS="-march=armv8.7a" \ -DGGML_OPENMP=OFF \ - -DGGML_LLAMAFILE=OFF \ + -DGGML_JARVISFILE=OFF \ -B build-android ``` Notes: - While later versions of Android NDK ship with OpenMP, it must still be installed by CMake as a dependency, which is not supported at this time - - `llamafile` does not appear to support Android devices (see: https://github.com/Mozilla-Ocho/llamafile/issues/325) + - `jarvisfile` does not appear to support Android devices (see: https://github.com/Mozilla-Ocho/jarvisfile/issues/325) -The above command should configure `llama.cpp` with the most performant options for modern devices. Even if your device is not running `armv8.7a`, `llama.cpp` includes runtime checks for available CPU features it can use. +The above command should configure `jarvis.cpp` with the most performant options for modern devices. Even if your device is not running `armv8.7a`, `jarvis.cpp` includes runtime checks for available CPU features it can use. Feel free to adjust the Android ABI for your target. Once the project is configured: @@ -65,17 +65,17 @@ $ cmake --install build-android --prefix {install-dir} --config Release After installing, go ahead and download the model of your choice to your host system. 
Then: ``` -$ adb shell "mkdir /data/local/tmp/llama.cpp" -$ adb push {install-dir} /data/local/tmp/llama.cpp/ -$ adb push {model}.gguf /data/local/tmp/llama.cpp/ +$ adb shell "mkdir /data/local/tmp/jarvis.cpp" +$ adb push {install-dir} /data/local/tmp/jarvis.cpp/ +$ adb push {model}.gguf /data/local/tmp/jarvis.cpp/ $ adb shell ``` In the `adb shell`: ``` -$ cd /data/local/tmp/llama.cpp -$ LD_LIBRARY_PATH=lib ./bin/llama-simple -m {model}.gguf -c {context-size} -p "{your-prompt}" +$ cd /data/local/tmp/jarvis.cpp +$ LD_LIBRARY_PATH=lib ./bin/jarvis-simple -m {model}.gguf -c {context-size} -p "{your-prompt}" ``` That's it! diff --git a/docs/backend/BLIS.md b/docs/backend/BLIS.md index 35d06bd0f303d..7e9048135a2de 100644 --- a/docs/backend/BLIS.md +++ b/docs/backend/BLIS.md @@ -25,13 +25,13 @@ sudo make install We recommend using openmp since it's easier to modify the cores being used. -### llama.cpp compilation +### jarvis.cpp compilation Makefile: ```bash make GGML_BLIS=1 -j -# make GGML_BLIS=1 llama-benchmark-matmult +# make GGML_BLIS=1 jarvis-benchmark-matmult ``` CMake: @@ -43,7 +43,7 @@ cmake -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME .. make -j ``` -### llama.cpp execution +### jarvis.cpp execution According to the BLIS documentation, we could set the following environment variables to modify the behavior of openmp: diff --git a/docs/backend/CANN.md b/docs/backend/CANN.md index 6bdd9d2daab90..ee92299473de0 100644 --- a/docs/backend/CANN.md +++ b/docs/backend/CANN.md @@ -1,4 +1,4 @@ -# llama.cpp for CANN +# jarvis.cpp for CANN - [Background](#background) - [News](#news) @@ -17,9 +17,9 @@ **CANN** (Compute Architecture for Neural Networks) is a heterogeneous computing architecture for AI scenarios, providing support for multiple AI frameworks on the top and serving AI processors and programming at the bottom. 
It plays a crucial role in bridging the gap between upper and lower layers, and is a key platform for improving the computing efficiency of Ascend AI processors. Meanwhile, it offers a highly efficient and easy-to-use programming interface for diverse application scenarios, allowing users to rapidly build AI applications and services based on the Ascend platform. -**Llama.cpp + CANN** +**Jarvis.cpp + CANN** -The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the ability of AscendC and ACLNN which are intergrated to CANN Toolkit and kernels to using Ascend NPU directly. +The jarvis.cpp CANN backend is designed to support Ascend NPU. It utilize the ability of AscendC and ACLNN which are intergrated to CANN Toolkit and kernels to using Ascend NPU directly. ## News @@ -78,11 +78,11 @@ The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the abi | GritLM-7B | √ | √ | √ | | internlm2_5-7b-chat | √ | √ | √ | | koala-7B-HF | √ | √ | √ | -| Llama-2-7b-chat-hf | √ | √ | √ | -| Llama-3-Smaug-8B | √ | √ | √ | -| Llama2-Chinese-7b-Chat | √ | √ | √ | -| Llama3-8B | √ | √ | √ | -| Llama3-8b-chinese | √ | √ | √ | +| Jarvis-2-7b-chat-hf | √ | √ | √ | +| Jarvis-3-Smaug-8B | √ | √ | √ | +| Jarvis2-Chinese-7b-Chat | √ | √ | √ | +| Jarvis3-8B | √ | √ | √ | +| Jarvis3-8b-chinese | √ | √ | √ | | mamba-130m-hf | √ | √ | √ | | Mistral-7B-Instruct-v0.2 | √ | √ | √ | | Mixtral-8x7B-Instruct-v0.1 | x | √ | √ | @@ -120,9 +120,9 @@ The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the abi ## Docker ### Build Images -You can get a image with llama.cpp in one command. +You can get a image with jarvis.cpp in one command. ```sh -docker build -t llama-cpp-cann -f .devops/llama-cli-cann.Dockerfile . +docker build -t jarvis-cpp-cann -f .devops/jarvis-cli-cann.Dockerfile . ``` ### Run container @@ -133,7 +133,7 @@ npu-smi info # Select the cards that you want to use, make sure these cards are not used by someone. 
# Following using cards of device0. -docker run --name llamacpp --device /dev/davinci0 --device /dev/davinci_manager --device /dev/devmm_svm --device /dev/hisi_hdc -v /usr/local/dcmi:/usr/local/dcmi -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info -v /PATH_TO_YOUR_MODELS/:/app/models -it llama-cpp-cann -m /app/models/MODEL_PATH -ngl 32 -p "Building a website can be done in 10 simple steps:" +docker run --name jarviscpp --device /dev/davinci0 --device /dev/davinci_manager --device /dev/devmm_svm --device /dev/hisi_hdc -v /usr/local/dcmi:/usr/local/dcmi -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info -v /PATH_TO_YOUR_MODELS/:/app/models -it jarvis-cpp-cann -m /app/models/MODEL_PATH -ngl 32 -p "Building a website can be done in 10 simple steps:" ``` *Notes:* @@ -208,7 +208,7 @@ docker run --name llamacpp --device /dev/davinci0 --device /dev/davinci_manager Upon a successful installation, CANN is enabled for the available ascend devices. -### II. Build llama.cpp +### II. 
Build jarvis.cpp ```sh cmake -B build -DGGML_CANN=on -DCMAKE_BUILD_TYPE=release @@ -242,13 +242,13 @@ cmake --build build --config release - Use device 0: ```sh - ./build/bin/llama-cli -m path_to_model -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 + ./build/bin/jarvis-cli -m path_to_model -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 ``` - Use multiple devices: ```sh - ./build/bin/llama-cli -m path_to_model -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer + ./build/bin/jarvis-cli -m path_to_model -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer ``` ### **GitHub contribution**: diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md index ea34182e41a4c..541fe043b23cb 100644 --- a/docs/backend/SYCL.md +++ b/docs/backend/SYCL.md @@ -1,4 +1,4 @@ -# llama.cpp for SYCL +# jarvis.cpp for SYCL - [Background](#background) - [Recommended Release](#recommended-release) @@ -24,9 +24,9 @@ - **oneAPI LevelZero**: A high performance low level interface for fine-grained control over intel iGPUs and dGPUs. - **Nvidia & AMD Plugins**: These are plugins extending oneAPI's DPCPP support to SYCL on Nvidia and AMD GPU targets. -### Llama.cpp + SYCL +### Jarvis.cpp + SYCL -The llama.cpp SYCL backend is designed to support **Intel GPU** firstly. Based on the cross-platform feature of SYCL, it also supports other vendor GPUs: Nvidia and AMD. +The jarvis.cpp SYCL backend is designed to support **Intel GPU** firstly. Based on the cross-platform feature of SYCL, it also supports other vendor GPUs: Nvidia and AMD. 
## Recommended Release @@ -36,7 +36,7 @@ The following release is verified with good quality: |Commit ID|Tag|Release|Verified Platform| |-|-|-|-| -|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1| +|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[jarvis-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/jarvis.cpp/releases/download/b3038/jarvis-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1| ## News @@ -46,7 +46,7 @@ The following release is verified with good quality: - Use oneDNN as the default GEMM library, improve the compatibility for new Intel GPUs. - 2024.5 - - Performance is increased: 34 -> 37 tokens/s of llama-2-7b.Q4_0 on Arc770. + - Performance is increased: 34 -> 37 tokens/s of jarvis-2-7b.Q4_0 on Arc770. - Arch Linux is verified successfully. - 2024.4 @@ -54,8 +54,8 @@ The following release is verified with good quality: - 2024.3 - Release binary files of Windows. - - A blog is published: **Run LLM on all Intel GPUs Using llama.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-llama-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-llama-cpp-fd2e2dcbd9bd). - - New base line is ready: [tag b2437](https://github.com/ggerganov/llama.cpp/tree/b2437). + - A blog is published: **Run LLM on all Intel GPUs Using jarvis.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-jarvis-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-jarvis-cpp-fd2e2dcbd9bd). + - New base line is ready: [tag b2437](https://github.com/ggerganov/jarvis.cpp/tree/b2437). - Support multiple cards: **--split-mode**: [none|layer]; not support [row], it's on developing. - Support to assign main GPU by **--main-gpu**, replace $GGML_SYCL_DEVICE. - Support detecting all GPUs with level-zero and same top **Max compute units**. @@ -100,9 +100,9 @@ SYCL backend supports Intel GPU Family: *Notes:* - **Memory** - - The device memory is a limitation when running a large model. The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/llama-cli`. + - The device memory is a limitation when running a large model. 
The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/jarvis-cli`. - - Please make sure the GPU shared memory from the host is large enough to account for the model's size. For e.g. the *llama-2-7b.Q4_0* requires at least 8.0GB for integrated GPU and 4.0GB for discrete GPU. + - Please make sure the GPU shared memory from the host is large enough to account for the model's size. For e.g. the *jarvis-2-7b.Q4_0* requires at least 8.0GB for integrated GPU and 4.0GB for discrete GPU. - **Execution Unit (EU)** - If the iGPU has less than 80 EUs, the inference speed will likely be too slow for practical use. @@ -130,14 +130,14 @@ The docker build option is currently limited to *intel GPU* targets. ### Build image ```sh # Using FP16 -docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" -f .devops/llama-cli-intel.Dockerfile . +docker build -t jarvis-cpp-sycl --build-arg="GGML_SYCL_F16=ON" -f .devops/jarvis-cli-intel.Dockerfile . ``` *Notes*: To build in default FP32 *(Slower than FP16 alternative)*, you can remove the `--build-arg="GGML_SYCL_F16=ON"` argument from the previous command. -You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative. +You can also use the `.devops/jarvis-server-intel.Dockerfile`, which builds the *"server"* alternative. ### Run container @@ -145,7 +145,7 @@ You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the * # First, find all the DRI cards ls -la /dev/dri # Then, pick the card that you want to use (here for e.g. /dev/dri/card1). 
-docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 +docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 jarvis-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 ``` *Notes:* @@ -276,7 +276,7 @@ For AMD GPUs we should expect at least one SYCL-HIP device [`hip:gpu`]: [hip:gpu][hip:0] AMD HIP BACKEND, AMD Radeon PRO W6800 gfx1030 [HIP 60140.9] ``` -### II. Build llama.cpp +### II. Build jarvis.cpp #### Intel GPU @@ -309,7 +309,7 @@ export LIBRARY_PATH=/path/to/oneMKL/buildWithCublas/lib:$LIBRARY_PATH export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithCublas/include:$CPLUS_INCLUDE_DIR export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR -# Build LLAMA with Nvidia BLAS acceleration through SYCL +# Build JARVIS with Nvidia BLAS acceleration through SYCL # Option 1: Use FP32 (recommended for better performance in most cases) cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx @@ -329,7 +329,7 @@ export LD_LIBRARY_PATH=/path/to/oneMKL/buildWithrocBLAS/lib:$LD_LIBRARY_PATH export LIBRARY_PATH=/path/to/oneMKL/buildWithrocBLAS/lib:$LIBRARY_PATH export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithrocBLAS/include:$CPLUS_INCLUDE_DIR -# Build LLAMA with rocBLAS acceleration through SYCL +# Build JARVIS with rocBLAS acceleration through SYCL ## AMD # Use FP32, FP16 is not supported @@ -344,7 +344,7 @@ cmake --build build --config Release -j -v #### Retrieve and prepare model -You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download 
[llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example. +You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example. ##### Check device @@ -359,7 +359,7 @@ source /opt/intel/oneapi/setvars.sh Similar to the native `sycl-ls`, available SYCL devices can be queried as follow: ```sh -./build/bin/llama-ls-sycl-device +./build/bin/jarvis-ls-sycl-device ``` This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *intel GPU* it would look like the following: @@ -390,12 +390,12 @@ Choose one of following methods to run. - Use device 0: ```sh -./examples/sycl/run-llama2.sh 0 +./examples/sycl/run-jarvis2.sh 0 ``` - Use multiple devices: ```sh -./examples/sycl/run-llama2.sh +./examples/sycl/run-jarvis2.sh ``` 2. Command line @@ -418,13 +418,13 @@ Examples: - Use device 0: ```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 +ZES_ENABLE_SYSMAN=1 ./build/bin/jarvis-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 ``` - Use multiple devices: ```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer +ZES_ENABLE_SYSMAN=1 ./build/bin/jarvis-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer ``` *Notes:* @@ -492,7 +492,7 @@ a. Download & install cmake for Windows: https://cmake.org/download/ (CMake can b. The new Visual Studio will install Ninja as default.
(If not, please install it manually: https://ninja-build.org/) -### II. Build llama.cpp +### II. Build jarvis.cpp You could download the release package for Windows directly, which including binary files and depended oneAPI dll files. @@ -506,7 +506,7 @@ Choose one of following methods to build from source code. 2. CMake -On the oneAPI command line window, step into the llama.cpp main directory and run the following: +On the oneAPI command line window, step into the jarvis.cpp main directory and run the following: ``` @call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force @@ -524,34 +524,34 @@ Or, use CMake presets to build: ```sh cmake --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli +cmake --build build-x64-windows-sycl-release -j --target jarvis-cli cmake -DGGML_SYCL_F16=ON --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli +cmake --build build-x64-windows-sycl-release -j --target jarvis-cli cmake --preset x64-windows-sycl-debug -cmake --build build-x64-windows-sycl-debug -j --target llama-cli +cmake --build build-x64-windows-sycl-debug -j --target jarvis-cli ``` 3. Visual Studio -You can use Visual Studio to open llama.cpp folder as a CMake project. Choose the sycl CMake presets (`x64-windows-sycl-release` or `x64-windows-sycl-debug`) before you compile the project. +You can use Visual Studio to open jarvis.cpp folder as a CMake project. Choose the sycl CMake presets (`x64-windows-sycl-release` or `x64-windows-sycl-debug`) before you compile the project. *Notes:* -- In case of a minimal experimental setup, the user can build the inference executable only through `cmake --build build --config Release -j --target llama-cli`. +- In case of a minimal experimental setup, the user can build the inference executable only through `cmake --build build --config Release -j --target jarvis-cli`. ### III. 
Run the inference #### Retrieve and prepare model -You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example. +You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download [jarvis-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Jarvis-2-7B-GGUF/blob/main/jarvis-2-7b.Q4_0.gguf) model as example. ##### Check device 1. Enable oneAPI running environment -On the oneAPI command line window, run the following and step into the llama.cpp directory: +On the oneAPI command line window, run the following and step into the jarvis.cpp directory: ``` "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 ``` @@ -561,7 +561,7 @@ On the oneAPI command line window, run the following and step into the llama.cpp Similar to the native `sycl-ls`, available SYCL devices can be queried as follow: ``` -build\bin\llama-ls-sycl-device.exe +build\bin\jarvis-ls-sycl-device.exe ``` This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *intel GPU* it would look like the following: @@ -589,7 +589,7 @@ Choose one of following methods to run. 1. Script ``` -examples\sycl\win-run-llama2.bat +examples\sycl\win-run-jarvis2.bat ``` 2. 
Command line @@ -613,13 +613,13 @@ Examples: - Use device 0: ``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0 +build\bin\jarvis-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0 ``` - Use multiple devices: ``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer +build\bin\jarvis-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer ``` @@ -682,13 +682,13 @@ use 1 SYCL GPUs: [0] with Max compute units:512 ``` Otherwise, please double-check the GPU driver installation steps. -- Can I report Ollama issue on Intel GPU to llama.cpp SYCL backend? +- Can I report Ollama issue on Intel GPU to jarvis.cpp SYCL backend? - No. We can't support Ollama issue directly, because we aren't familiar with Ollama. + No. We can't support Ollama issue directly, because we aren't familiar with Ollama. - Sugguest reproducing on llama.cpp and report similar issue to llama.cpp. We will surpport it. + Suggest reproducing on jarvis.cpp and report a similar issue to jarvis.cpp. We will support it. - It's same for other projects including llama.cpp SYCL backend. + It's the same for other projects including jarvis.cpp SYCL backend. - Meet issue: `Native API failed.
Native API returns: -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -999 (UNKNOWN PI error)` or `failed to allocate SYCL0 buffer` diff --git a/docs/build.md b/docs/build.md index 4e362ebc78fa3..634e94b6b3586 100644 --- a/docs/build.md +++ b/docs/build.md @@ -1,13 +1,13 @@ -# Build llama.cpp locally +# Build jarvis.cpp locally **To get the Code:** ```bash -git clone https://github.com/ggerganov/llama.cpp -cd llama.cpp +git clone https://github.com/ggerganov/jarvis.cpp +cd jarvis.cpp ``` -In order to build llama.cpp you have four different options. +In order to build jarvis.cpp you have four different options. - Using `make`: - On Linux or MacOS: @@ -21,17 +21,17 @@ In order to build llama.cpp you have four different options. 1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). 2. Extract `w64devkit` on your pc. 3. Run `w64devkit.exe`. - 4. Use the `cd` command to reach the `llama.cpp` folder. + 4. Use the `cd` command to reach the `jarvis.cpp` folder. 5. From here you can run: ```bash make ``` - Notes: - - For `Q4_0_4_4` quantization type build, add the `GGML_NO_LLAMAFILE=1` flag. For example, use `make GGML_NO_LLAMAFILE=1`. + - For `Q4_0_4_4` quantization type build, add the `GGML_NO_JARVISFILE=1` flag. For example, use `make GGML_NO_JARVISFILE=1`. - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel. - For faster repeated compilation, install [ccache](https://ccache.dev/). - - For debug builds, run `make LLAMA_DEBUG=1` + - For debug builds, run `make JARVIS_DEBUG=1` - Using `CMake`: @@ -42,7 +42,7 @@ In order to build llama.cpp you have four different options. **Notes**: - - For `Q4_0_4_4` quantization type build, add the `-DGGML_LLAMAFILE=OFF` cmake option. For example, use `cmake -B build -DGGML_LLAMAFILE=OFF`. + - For `Q4_0_4_4` quantization type build, add the `-DGGML_JARVISFILE=OFF` cmake option. 
For example, use `cmake -B build -DGGML_JARVISFILE=OFF`. - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel. - For faster repeated compilation, install [ccache](https://ccache.dev/). - For debug builds, there are two cases: @@ -118,7 +118,7 @@ This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS i 4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, inside `w64devkit\x86_64-w64-mingw32\lib`. 5. From the same OpenBLAS zip copy the content of the `include` folder inside `w64devkit\x86_64-w64-mingw32\include`. 6. Run `w64devkit.exe`. - 7. Use the `cd` command to reach the `llama.cpp` folder. + 7. Use the `cd` command to reach the `jarvis.cpp` folder. 8. From here you can run: ```bash @@ -140,13 +140,13 @@ Check [BLIS.md](./backend/BLIS.md) for more information. SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators. -llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU). +jarvis.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU). -For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). +For detailed info, please refer to [jarvis.cpp for SYCL](./backend/SYCL.md). ### Intel oneMKL -Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./backend/SYCL.md). +Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. 
Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [jarvis.cpp for SYCL](./backend/SYCL.md). - Using manual oneAPI installation: By default, `GGML_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DGGML_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. Otherwise please install oneAPI and follow the below steps: @@ -159,7 +159,7 @@ Building through oneAPI compilers will make avx_vnni instruction set available f - Using oneAPI docker image: If you do not want to source the environment vars and install oneAPI manually, you can also build the code using intel docker container: [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above. -Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. +Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. ### CUDA @@ -300,7 +300,7 @@ Libs: -lvulkan-1 EOF ``` -Switch into the `llama.cpp` directory and run `make GGML_VULKAN=1`. +Switch into the `jarvis.cpp` directory and run `make GGML_VULKAN=1`. #### MSYS2 Install [MSYS2](https://www.msys2.org/) and then run the following commands in a UCRT terminal to install dependencies. @@ -311,7 +311,7 @@ Install [MSYS2](https://www.msys2.org/) and then run the following commands in a mingw-w64-ucrt-x86_64-vulkan-devel \ mingw-w64-ucrt-x86_64-shaderc ``` -Switch into `llama.cpp` directory and build using CMake. +Switch into `jarvis.cpp` directory and build using CMake. ```sh cmake -B build -DGGML_VULKAN=ON cmake --build build --config Release @@ -323,10 +323,10 @@ You don't need to install Vulkan SDK. It will be installed inside the container.
```sh # Build the image -docker build -t llama-cpp-vulkan -f .devops/llama-cli-vulkan.Dockerfile . +docker build -t jarvis-cpp-vulkan -f .devops/jarvis-cli-vulkan.Dockerfile . # Then, use it: -docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 +docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 jarvis-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 ``` **Without docker**: @@ -348,13 +348,13 @@ Alternatively your package manager might be able to provide the appropriate libr For example for Ubuntu 22.04 you can install `libvulkan-dev` instead. For Fedora 40, you can install `vulkan-devel`, `glslc` and `glslang` packages. -Then, build llama.cpp using the cmake command below: +Then, build jarvis.cpp using the cmake command below: ```bash cmake -B build -DGGML_VULKAN=1 cmake --build build --config Release # Test the output binary (with "-ngl 33" to offload all layers to GPU) -./bin/llama-cli -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4 +./bin/jarvis-cli -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4 # You should see in the output, ggml_vulkan detected your GPU. For example: # ggml_vulkan: Using Intel(R) Graphics (ADL GT2) | uma: 1 | fp16: 1 | warp size: 32 @@ -367,7 +367,7 @@ For more information about Ascend NPU in [Ascend Community](https://www.hiascend Make sure to have the CANN toolkit installed. You can download it from here: [CANN Toolkit](https://www.hiascend.com/developer/download/community/result?module=cann) -Go to `llama.cpp` directory and build using CMake. +Go to `jarvis.cpp` directory and build using CMake. 
```bash cmake -B build -DGGML_CANN=on -DCMAKE_BUILD_TYPE=release cmake --build build --config release @@ -375,15 +375,15 @@ cmake --build build --config release You can test with: -`./build/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32` +`./build/jarvis-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32` -If the fllowing info is output on screen, you are using `llama.cpp by CANN backend`: +If the following info is output on screen, you are using `jarvis.cpp by CANN backend`: ```bash llm_load_tensors: CANN buffer size = 13313.00 MiB -llama_new_context_with_model: CANN compute buffer size = 1260.81 MiB +jarvis_new_context_with_model: CANN compute buffer size = 1260.81 MiB ``` -For detailed info, such as model/device supports, CANN install, please refer to [llama.cpp for CANN](./backend/CANN.md). +For detailed info, such as model/device supports, CANN install, please refer to [jarvis.cpp for CANN](./backend/CANN.md). ### Android @@ -391,6 +391,6 @@ To read documentation for how to build on Android, [click here](./android.md) ### Arm CPU optimized mulmat kernels
+Jarvis.cpp includes a set of optimized mulmat kernels for the Arm architecture, leveraging Arm® Neon™, int8mm and SVE instructions. These kernels are enabled at build time through the appropriate compiler cpu-type flags, such as `-DCMAKE_C_FLAGS=-march=armv8.2a+i8mm+sve`. Note that these optimized kernels require the model to be quantized into one of the formats: `Q4_0_4_4` (Arm Neon), `Q4_0_4_8` (int8mm) or `Q4_0_8_8` (SVE). The SVE mulmat kernel specifically requires a vector width of 256 bits. When running on devices with a different vector width, it is recommended to use the `Q4_0_4_8` (int8mm) or `Q4_0_4_4` (Arm Neon) formats for better performance. Refer to [examples/quantize/README.md](../examples/quantize/README.md) for more information on the quantization formats. -To support `Q4_0_4_4`, you must build with `GGML_NO_LLAMAFILE=1` (`make`) or `-DGGML_LLAMAFILE=OFF` (`cmake`). +To support `Q4_0_4_4`, you must build with `GGML_NO_JARVISFILE=1` (`make`) or `-DGGML_JARVISFILE=OFF` (`cmake`). diff --git a/docs/development/HOWTO-add-model.md b/docs/development/HOWTO-add-model.md index 04c5ccbbe60c3..d72c70b30e5e0 100644 --- a/docs/development/HOWTO-add-model.md +++ b/docs/development/HOWTO-add-model.md @@ -1,9 +1,9 @@ -# Add a new model architecture to `llama.cpp` +# Add a new model architecture to `jarvis.cpp` Adding a model requires few steps: 1. Convert the model to GGUF -2. Define the model architecture in `llama.cpp` +2. Define the model architecture in `jarvis.cpp` 3. Build the GGML graph implementation After following these steps, you can open PR. @@ -17,7 +17,7 @@ Also, it is important to check that the examples and main ggml backends (CUDA, M ### 1. Convert the model to GGUF This step is done in python with a `convert` script using the [gguf](https://pypi.org/project/gguf/) library. 
-Depending on the model architecture, you can use either [convert_hf_to_gguf.py](/convert_hf_to_gguf.py) or [examples/convert_legacy_llama.py](/examples/convert_legacy_llama.py) (for `llama/llama2` models in `.pth` format). +Depending on the model architecture, you can use either [convert_hf_to_gguf.py](/convert_hf_to_gguf.py) or [examples/convert_legacy_jarvis.py](/examples/convert_legacy_jarvis.py) (for `jarvis/jarvis2` models in `.pth` format). The convert script reads the model configuration, tokenizer, tensor names+data and converts them to GGUF metadata and tensors. @@ -81,26 +81,26 @@ Depending on the model configuration, tokenizer, code and tensors layout, you wi NOTE: Tensor names must end with `.weight` suffix, that is the convention and several tools like `quantize` expect this to proceed the weights. -### 2. Define the model architecture in `llama.cpp` +### 2. Define the model architecture in `jarvis.cpp` -The model params and tensors layout must be defined in `llama.cpp`: +The model params and tensors layout must be defined in `jarvis.cpp`: 1. Define a new `llm_arch` 2. Define the tensors layout in `LLM_TENSOR_NAMES` 3. Add any non standard metadata in `llm_load_hparams` 4. Create the tensors for inference in `llm_load_tensors` -5. If the model has a RoPE operation, add the rope type in `llama_rope_type` +5. If the model has a RoPE operation, add the rope type in `jarvis_rope_type` NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorch` dimensions. ### 3. Build the GGML graph implementation -This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`. +This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `jarvis_build_graph`. -Have a look at existing implementation like `build_llama`, `build_dbrx` or `build_bert`. 
+Have a look at existing implementation like `build_jarvis`, `build_dbrx` or `build_bert`. When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support for missing backend operations can be added in another PR. -Note: to debug the inference graph: you can use [llama-eval-callback](/examples/eval-callback/). +Note: to debug the inference graph: you can use [jarvis-eval-callback](/examples/eval-callback/). ## GGUF specification @@ -108,12 +108,12 @@ https://github.com/ggerganov/ggml/blob/master/docs/gguf.md ## Resources -- YaRN RoPE scaling https://github.com/ggerganov/llama.cpp/pull/2268 -- support Baichuan serial models https://github.com/ggerganov/llama.cpp/pull/3009 -- support attention bias https://github.com/ggerganov/llama.cpp/pull/4283 -- Mixtral support https://github.com/ggerganov/llama.cpp/pull/4406 -- BERT embeddings https://github.com/ggerganov/llama.cpp/pull/5423 -- Grok-1 support https://github.com/ggerganov/llama.cpp/pull/6204 -- Command R Plus support https://github.com/ggerganov/llama.cpp/pull/6491 -- support arch DBRX https://github.com/ggerganov/llama.cpp/pull/6515 -- How to convert HuggingFace model to GGUF format https://github.com/ggerganov/llama.cpp/discussions/2948 +- YaRN RoPE scaling https://github.com/ggerganov/jarvis.cpp/pull/2268 +- support Baichuan serial models https://github.com/ggerganov/jarvis.cpp/pull/3009 +- support attention bias https://github.com/ggerganov/jarvis.cpp/pull/4283 +- Mixtral support https://github.com/ggerganov/jarvis.cpp/pull/4406 +- BERT embeddings https://github.com/ggerganov/jarvis.cpp/pull/5423 +- Grok-1 support https://github.com/ggerganov/jarvis.cpp/pull/6204 +- Command R Plus support https://github.com/ggerganov/jarvis.cpp/pull/6491 +- support arch DBRX https://github.com/ggerganov/jarvis.cpp/pull/6515 +- How to convert HuggingFace model to GGUF format https://github.com/ggerganov/jarvis.cpp/discussions/2948 diff --git 
a/docs/development/debugging-tests.md b/docs/development/debugging-tests.md index 18407f688f9db..38b6767622c85 100644 --- a/docs/development/debugging-tests.md +++ b/docs/development/debugging-tests.md @@ -51,7 +51,7 @@ rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug Setup and trigger a build under debug mode. You may adapt the arguments as needed, but in this case these are sane defaults. ```bash -cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON .. +cmake -DCMAKE_BUILD_TYPE=Debug -DJARVIS_CUDA=1 -DJARVIS_FATAL_WARNINGS=ON .. make -j ``` @@ -71,12 +71,12 @@ This may return output similar to below (focusing on key lines to pay attention ```bash ... -1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf" +1: Test command: ~/jarvis.cpp/build-ci-debug/bin/test-tokenizer-0 "~/jarvis.cpp/tests/../models/ggml-vocab-jarvis-spm.gguf" 1: Working Directory: . Labels: main - Test #1: test-tokenizer-0-llama-spm + Test #1: test-tokenizer-0-jarvis-spm ... -4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf" +4: Test command: ~/jarvis.cpp/build-ci-debug/bin/test-tokenizer-0 "~/jarvis.cpp/tests/../models/ggml-vocab-falcon.gguf" 4: Working Directory: . 
Labels: main Test #4: test-tokenizer-0-falcon @@ -86,8 +86,8 @@ Labels: main #### Step 4: Identify Test Command for Debugging So for test #1 above we can tell these two pieces of relevant information: -* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0` -* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf` +* Test Binary: `~/jarvis.cpp/build-ci-debug/bin/test-tokenizer-0` +* Test GGUF Model: `~/jarvis.cpp/tests/../models/ggml-vocab-jarvis-spm.gguf` #### Step 5: Run GDB on test command @@ -100,5 +100,5 @@ gdb --args ${Test Binary} ${Test GGUF Model} Example: ```bash -gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf" +gdb --args ~/jarvis.cpp/build-ci-debug/bin/test-tokenizer-0 "~/jarvis.cpp/tests/../models/ggml-vocab-jarvis-spm.gguf" ``` diff --git a/docs/development/token_generation_performance_tips.md b/docs/development/token_generation_performance_tips.md index 41b7232c976b3..62aeb11789fdb 100644 --- a/docs/development/token_generation_performance_tips.md +++ b/docs/development/token_generation_performance_tips.md @@ -1,23 +1,23 @@ # Token generation performance troubleshooting ## Verifying that the model is running on the GPU with CUDA -Make sure you compiled llama with the correct env variables according to [this guide](/docs/build.md#cuda), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example: +Make sure you compiled jarvis with the correct env variables according to [this guide](/docs/build.md#cuda), so that jarvis accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running jarvis, you may configure `N` to be very large, and jarvis will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. 
For example: ```shell -./llama-cli -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some " +./jarvis-cli -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some " ``` -When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines: +When running jarvis, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines: ```shell -llama_model_load_internal: [cublas] offloading 60 layers to GPU -llama_model_load_internal: [cublas] offloading output layer to GPU -llama_model_load_internal: [cublas] total VRAM used: 17223 MB +jarvis_model_load_internal: [cublas] offloading 60 layers to GPU +jarvis_model_load_internal: [cublas] offloading output layer to GPU +jarvis_model_load_internal: [cublas] total VRAM used: 17223 MB ... rest of inference ``` If you see these lines, then the GPU is being used. ## Verifying that the CPU is not oversaturated -llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of the physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down. +jarvis accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. 
If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of the physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down. # Example of runtime flags effect on inference speed benchmark These runs were tested on the following machine: @@ -27,7 +27,7 @@ RAM: 32GB Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4bit quantization, GGML) -Run command: `./llama-cli -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]` +Run command: `./jarvis-cli -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]` Result: diff --git a/docs/docker.md b/docs/docker.md index 8d90e6ded5738..464dbaa770f1b 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -2,26 +2,26 @@ ## Prerequisites * Docker must be installed and running on your system. -* Create a folder to store big models & intermediate files (ex. /llama/models) +* Create a folder to store big models & intermediate files (ex. /jarvis/models) ## Images We have three Docker images available for this project: -1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`) -2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`) -3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`) +1. 
`ghcr.io/ggerganov/jarvis.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`) +2. `ghcr.io/ggerganov/jarvis.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`) +3. `ghcr.io/ggerganov/jarvis.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`) Additionally, there the following images, similar to the above: -- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`) -- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`) -- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`) -- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) -- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) -- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) -- `ghcr.io/ggerganov/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`) -- `ghcr.io/ggerganov/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`) -- `ghcr.io/ggerganov/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:light-cuda`: Same as `light` but compiled with CUDA support. 
(platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) +- `ghcr.io/ggerganov/jarvis.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) +- `ghcr.io/ggerganov/jarvis.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) +- `ghcr.io/ggerganov/jarvis.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/jarvis.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`) The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA, ROCm or MUSA library, you'll need to build the images locally for now). @@ -32,25 +32,25 @@ The easiest way to download the models, convert them to ggml and optimize them i Replace `/path/to/models` below with the actual path where you downloaded the models. ```bash -docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B +docker run -v /path/to/models:/models ghcr.io/ggerganov/jarvis.cpp:full --all-in-one "/models/" 7B ``` On completion, you are ready to play! 
```bash -docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 +docker run -v /path/to/models:/models ghcr.io/ggerganov/jarvis.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 ``` or with a light image: ```bash -docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 +docker run -v /path/to/models:/models ghcr.io/ggerganov/jarvis.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 ``` or with a server image: ```bash -docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 +docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/jarvis.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 ``` ## Docker With CUDA @@ -60,9 +60,9 @@ Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia ## Building Docker locally ```bash -docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile . -docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile . -docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile . +docker build -t local/jarvis.cpp:full-cuda -f .devops/full-cuda.Dockerfile . +docker build -t local/jarvis.cpp:light-cuda -f .devops/jarvis-cli-cuda.Dockerfile . +docker build -t local/jarvis.cpp:server-cuda -f .devops/jarvis-server-cuda.Dockerfile . ``` You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture. 
@@ -74,18 +74,18 @@ The defaults are: The resulting images, are essentially the same as the non-CUDA images: -1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. -2. `local/llama.cpp:light-cuda`: This image only includes the main executable file. -3. `local/llama.cpp:server-cuda`: This image only includes the server executable file. +1. `local/jarvis.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. +2. `local/jarvis.cpp:light-cuda`: This image only includes the main executable file. +3. `local/jarvis.cpp:server-cuda`: This image only includes the server executable file. ## Usage After building locally, Usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag. ```bash -docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 -docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 -docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/jarvis.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/jarvis.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/jarvis.cpp:server-cuda -m 
/models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1 ``` ## Docker With MUSA @@ -95,9 +95,9 @@ Assuming one has the [mt-container-toolkit](https://developer.mthreads.com/musa/ ## Building Docker locally ```bash -docker build -t local/llama.cpp:full-musa -f .devops/full-musa.Dockerfile . -docker build -t local/llama.cpp:light-musa -f .devops/llama-cli-musa.Dockerfile . -docker build -t local/llama.cpp:server-musa -f .devops/llama-server-musa.Dockerfile . +docker build -t local/jarvis.cpp:full-musa -f .devops/full-musa.Dockerfile . +docker build -t local/jarvis.cpp:light-musa -f .devops/jarvis-cli-musa.Dockerfile . +docker build -t local/jarvis.cpp:server-musa -f .devops/jarvis-server-musa.Dockerfile . ``` You may want to pass in some different `ARGS`, depending on the MUSA environment supported by your container host, as well as the GPU architecture. @@ -108,16 +108,16 @@ The defaults are: The resulting images, are essentially the same as the non-MUSA images: -1. `local/llama.cpp:full-musa`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. -2. `local/llama.cpp:light-musa`: This image only includes the main executable file. -3. `local/llama.cpp:server-musa`: This image only includes the server executable file. +1. `local/jarvis.cpp:full-musa`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. +2. `local/jarvis.cpp:light-musa`: This image only includes the main executable file. +3. `local/jarvis.cpp:server-musa`: This image only includes the server executable file. ## Usage After building locally, Usage is similar to the non-MUSA examples, but you'll need to set `mthreads` as default Docker runtime. This can be done by executing `(cd /usr/bin/musa && sudo ./docker setup $PWD)` and verifying the changes by executing `docker info | grep mthreads` on the host machine. 
You will also want to use the `--n-gpu-layers` flag. ```bash -docker run -v /path/to/models:/models local/llama.cpp:full-musa --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 -docker run -v /path/to/models:/models local/llama.cpp:light-musa -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 -docker run -v /path/to/models:/models local/llama.cpp:server-musa -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1 +docker run -v /path/to/models:/models local/jarvis.cpp:full-musa --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run -v /path/to/models:/models local/jarvis.cpp:light-musa -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run -v /path/to/models:/models local/jarvis.cpp:server-musa -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1 ``` diff --git a/docs/install.md b/docs/install.md index 10a568506835b..e5baee4a7f495 100644 --- a/docs/install.md +++ b/docs/install.md @@ -1,39 +1,39 @@ -# Install pre-built version of llama.cpp +# Install pre-built version of jarvis.cpp ## Homebrew On Mac and Linux, the homebrew package manager can be used via ```sh -brew install llama.cpp +brew install jarvis.cpp ``` -The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668 +The formula is automatically updated with new `jarvis.cpp` releases. More info: https://github.com/ggerganov/jarvis.cpp/discussions/7668 ## Nix On Mac and Linux, the Nix package manager can be used via ```sh -nix profile install nixpkgs#llama-cpp +nix profile install nixpkgs#jarvis-cpp ``` For flake enabled installs. 
Or ```sh -nix-env --file '' --install --attr llama-cpp +nix-env --file '' --install --attr jarvis-cpp ``` For non-flake enabled installs. -This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164). +This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/jarvis-cpp/package.nix#L164). ## Flox -On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via +On Mac and Linux, Flox can be used to install jarvis.cpp within a Flox environment via ```sh -flox install llama-cpp +flox install jarvis-cpp ``` -Flox follows the nixpkgs build of llama.cpp. +Flox follows the nixpkgs build of jarvis.cpp. diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index ead630661c8e2..5755f879a45d0 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -13,10 +13,10 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}) if (EMSCRIPTEN) else() add_subdirectory(cvector-generator) - add_subdirectory(baby-llama) + add_subdirectory(baby-jarvis) add_subdirectory(batched-bench) add_subdirectory(batched) - add_subdirectory(convert-llama2c-to-ggml) + add_subdirectory(convert-jarvis2c-to-ggml) add_subdirectory(embedding) add_subdirectory(eval-callback) add_subdirectory(export-lora) @@ -27,7 +27,7 @@ else() add_subdirectory(gritlm) add_subdirectory(imatrix) add_subdirectory(infill) - add_subdirectory(llama-bench) + add_subdirectory(jarvis-bench) add_subdirectory(llava) add_subdirectory(lookahead) add_subdirectory(lookup) @@ -41,7 +41,7 @@ else() if (GGML_RPC) add_subdirectory(rpc) endif() - if (LLAMA_BUILD_SERVER) + if (JARVIS_BUILD_SERVER) add_subdirectory(server) endif() if (GGML_SYCL) diff --git a/examples/Miku.sh b/examples/Miku.sh index 0f6c8c8787107..1725dbf0099aa 100755 --- a/examples/Miku.sh +++ b/examples/Miku.sh @@ -2,7 +2,7 @@ set -e AI_NAME="${AI_NAME:-Miku}" 
-MODEL="${MODEL:-./models/llama-2-7b-chat.ggmlv3.q4_K_M.bin}" +MODEL="${MODEL:-./models/jarvis-2-7b-chat.ggmlv3.q4_K_M.bin}" USER_NAME="${USER_NAME:-Anon}" # Uncomment and adjust to the number of CPU cores you want to use. @@ -22,7 +22,7 @@ if [ -n "$N_THREAD" ]; then GEN_OPTIONS+=(--threads "$N_THREAD") fi -./llama-cli "${GEN_OPTIONS[@]}" \ +./jarvis-cli "${GEN_OPTIONS[@]}" \ --model "$MODEL" \ --in-prefix " " \ --in-suffix "${AI_NAME}:" \ diff --git a/examples/baby-llama/CMakeLists.txt b/examples/baby-llama/CMakeLists.txt index 71b82105c8863..a0703600b3d7a 100644 --- a/examples/baby-llama/CMakeLists.txt +++ b/examples/baby-llama/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-baby-llama) -add_executable(${TARGET} baby-llama.cpp) +set(TARGET jarvis-baby-jarvis) +add_executable(${TARGET} baby-jarvis.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp index 3ce91070b4ed7..03f22bac8461c 100644 --- a/examples/baby-llama/baby-llama.cpp +++ b/examples/baby-llama/baby-llama.cpp @@ -11,8 +11,8 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -#ifdef LLAMA_DEFAULT_RMS_EPS -constexpr float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; +#ifdef JARVIS_DEFAULT_RMS_EPS +constexpr float rms_norm_eps = JARVIS_DEFAULT_RMS_EPS; #else constexpr float rms_norm_eps = 5e-6f; #endif @@ -71,7 +71,7 @@ static struct ggml_tensor * randomize_tensor( return tensor; } -struct llama_hparams { +struct jarvis_hparams { uint32_t n_vocab = 32000; uint32_t n_ctx = 512; // this is provided as user input? 
uint32_t n_embd = 4096; @@ -80,17 +80,17 @@ struct llama_hparams { uint32_t n_layer = 32; uint32_t n_rot = 64; - bool operator!=(const llama_hparams & other) const { - return memcmp(this, &other, sizeof(llama_hparams)); + bool operator!=(const jarvis_hparams & other) const { + return memcmp(this, &other, sizeof(jarvis_hparams)); } }; -static uint32_t get_n_ff(const struct llama_hparams* hparams) { +static uint32_t get_n_ff(const struct jarvis_hparams* hparams) { const uint32_t n_ff = ((2*(4*hparams->n_embd)/3 + hparams->n_mult - 1)/hparams->n_mult)*hparams->n_mult; return n_ff; } -struct llama_hparams_lora { +struct jarvis_hparams_lora { uint32_t n_vocab = 32000; uint32_t n_ctx = 512; // this is provided as user input? uint32_t n_embd = 4096; @@ -100,12 +100,12 @@ struct llama_hparams_lora { uint32_t n_rot = 64; uint32_t n_lora = 64; - bool operator!=(const llama_hparams_lora & other) const { - return memcmp(this, &other, sizeof(llama_hparams_lora)) != 0; + bool operator!=(const jarvis_hparams_lora & other) const { + return memcmp(this, &other, sizeof(jarvis_hparams_lora)) != 0; } }; -struct llama_layer { +struct jarvis_layer { // normalization struct ggml_tensor * attention_norm; @@ -124,7 +124,7 @@ struct llama_layer { struct ggml_tensor * w3; }; -struct llama_layer_lora { +struct jarvis_layer_lora { // normalization struct ggml_tensor * attention_norm; @@ -148,34 +148,34 @@ struct llama_layer_lora { }; -struct llama_kv_cache { +struct jarvis_kv_cache { struct ggml_context * ctx = NULL; struct ggml_tensor * k; struct ggml_tensor * v; - // llama_ctx_buffer buf; + // jarvis_ctx_buffer buf; int n; // number of tokens currently in the cache }; -struct llama_model { +struct jarvis_model { struct ggml_context * ctx = NULL; - llama_hparams hparams; + jarvis_hparams hparams; struct ggml_tensor * tok_embeddings; struct ggml_tensor * norm; struct ggml_tensor * output; - std::vector layers; + std::vector layers; }; -struct llama_model_lora { +struct jarvis_model_lora { 
struct ggml_context * ctx = NULL; - llama_hparams_lora hparams; + jarvis_hparams_lora hparams; struct ggml_tensor * tok_embeddings; @@ -183,10 +183,10 @@ struct llama_model_lora { struct ggml_tensor * outputa; struct ggml_tensor * outputb; - std::vector layers; + std::vector layers; }; -static void init_model(struct llama_model * model) { +static void init_model(struct jarvis_model * model) { const auto & hparams = model->hparams; const uint32_t n_embd = hparams.n_embd; @@ -223,7 +223,7 @@ static void init_model(struct llama_model * model) { } -static void init_model_lora(struct llama_model_lora * model) { +static void init_model_lora(struct jarvis_model_lora * model) { const auto & hparams = model->hparams; const uint32_t n_embd = hparams.n_embd; @@ -266,7 +266,7 @@ static void init_model_lora(struct llama_model_lora * model) { } } -static void set_param_model(struct llama_model * model) { +static void set_param_model(struct jarvis_model * model) { const auto& hparams = model->hparams; const uint32_t n_layer = hparams.n_layer; @@ -292,7 +292,7 @@ static void set_param_model(struct llama_model * model) { } } -static void set_param_model_lora(struct llama_model_lora * model) { +static void set_param_model_lora(struct jarvis_model_lora * model) { const auto& hparams = model->hparams; const uint32_t n_layer = hparams.n_layer; @@ -323,7 +323,7 @@ static void set_param_model_lora(struct llama_model_lora * model) { } } -static void randomize_model(struct llama_model * model, int seed, float mean, float std, float min, float max) { +static void randomize_model(struct jarvis_model * model, int seed, float mean, float std, float min, float max) { const auto & hparams = model->hparams; const uint32_t n_layer = hparams.n_layer; @@ -355,7 +355,7 @@ static void randomize_model(struct llama_model * model, int seed, float mean, fl static void randomize_model_lora( - struct llama_model_lora * model, int seed, float mean, float std, float min, float max + struct jarvis_model_lora * 
model, int seed, float mean, float std, float min, float max ) { const auto & hparams = model->hparams; @@ -391,7 +391,7 @@ static void randomize_model_lora( free_random_normal_distribution(rnd); } -static void init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) { +static void init_kv_cache(struct jarvis_kv_cache* cache, struct jarvis_model * model, int n_batch) { const auto & hparams = model->hparams; const uint32_t n_ctx = hparams.n_ctx; @@ -425,7 +425,7 @@ static void init_kv_cache(struct llama_kv_cache* cache, struct llama_model * mod cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements); } -static bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) { +static bool init_kv_cache_lora(struct jarvis_kv_cache* cache, struct jarvis_model_lora * model, int n_batch) { const auto & hparams = model->hparams; const uint32_t n_ctx = hparams.n_ctx; @@ -462,8 +462,8 @@ static bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_ } static struct ggml_tensor * forward( - struct llama_model * model, - struct llama_kv_cache * cache, + struct jarvis_model * model, + struct jarvis_kv_cache * cache, struct ggml_context * ctx0, struct ggml_cgraph * gf, struct ggml_tensor * tokens_input, @@ -472,7 +472,7 @@ static struct ggml_tensor * forward( ) { const int N = n_tokens; - struct llama_kv_cache& kv_self = *cache; + struct jarvis_kv_cache& kv_self = *cache; const auto & hparams = model->hparams; const int n_ctx = hparams.n_ctx; const int n_embd = hparams.n_embd; @@ -692,8 +692,8 @@ static struct ggml_tensor * forward( } static struct ggml_tensor * forward_batch( - struct llama_model * model, - struct llama_kv_cache * cache, + struct jarvis_model * model, + struct jarvis_kv_cache * cache, struct ggml_context * ctx0, struct ggml_cgraph * gf, struct ggml_tensor * tokens_input, @@ -703,7 +703,7 @@ static struct ggml_tensor * forward_batch( ) { const int N = n_tokens; - 
struct llama_kv_cache& kv_self = *cache; + struct jarvis_kv_cache& kv_self = *cache; const auto & hparams = model->hparams; const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; @@ -989,8 +989,8 @@ static struct ggml_tensor * forward_batch( } static struct ggml_tensor * forward_lora( - struct llama_model_lora * model, - struct llama_kv_cache * cache, + struct jarvis_model_lora * model, + struct jarvis_kv_cache * cache, struct ggml_context * ctx0, struct ggml_cgraph * gf, struct ggml_tensor * tokens_input, @@ -999,7 +999,7 @@ static struct ggml_tensor * forward_lora( ) { const int N = n_tokens; - struct llama_kv_cache& kv_self = *cache; + struct jarvis_kv_cache& kv_self = *cache; const auto & hparams = model->hparams; const int n_ctx = hparams.n_ctx; @@ -1444,7 +1444,7 @@ int main(int argc, char ** argv) { lcparams.mem_buffer = NULL; lcparams.no_alloc = false; - struct llama_model model; + struct jarvis_model model; model.hparams.n_vocab = 8; model.hparams.n_ctx = 8; model.hparams.n_embd = 32; @@ -1467,7 +1467,7 @@ int main(int argc, char ** argv) { randomize_model(&model, 1337, 0.0f, 1.0f, -1.0f, +1.0f); /* - struct llama_model_lora model_lora; + struct jarvis_model_lora model_lora; // model.hparams.n_vocab = 6; // model.hparams.n_ctx = 64; // model.hparams.n_embd = 128; @@ -1501,7 +1501,7 @@ int main(int argc, char ** argv) { */ int n_batch = 8; // key + value cache for the self attention - struct llama_kv_cache kv_self; + struct jarvis_kv_cache kv_self; printf("init_kv_cache\n"); kv_self.ctx = model.ctx; init_kv_cache(&kv_self, &model, n_batch); @@ -1533,7 +1533,7 @@ int main(int argc, char ** argv) { int n_past = 0; struct ggml_cgraph * gf = NULL; - gf = ggml_new_graph_custom(ctx0, LLAMA_TRAIN_MAX_NODES, true); + gf = ggml_new_graph_custom(ctx0, JARVIS_TRAIN_MAX_NODES, true); get_example_targets_batch(ctx0, 64*ex+0, tokens_input, targets); @@ -1601,7 +1601,7 @@ int main(int argc, char ** argv) { struct ggml_context * ctx0 = ggml_init(params); 
struct ggml_cgraph * gf = NULL; - gf = ggml_new_graph_custom(ctx0, LLAMA_TRAIN_MAX_NODES, true); + gf = ggml_new_graph_custom(ctx0, JARVIS_TRAIN_MAX_NODES, true); int n_past = 0; struct ggml_tensor * logits = forward(&model, &kv_self, ctx0, gf, tokens_input, sample_ctx, n_past); diff --git a/examples/base-translate.sh b/examples/base-translate.sh index 103a52f55e6db..1db10dfd59036 100755 --- a/examples/base-translate.sh +++ b/examples/base-translate.sh @@ -5,7 +5,7 @@ # # Usage: # -# cd llama.cpp +# cd jarvis.cpp # make -j # # ./examples/base-translate.sh "" [extra-main-args] @@ -21,7 +21,7 @@ if [ $# -gt 2 ]; then eargs="${@:3}" fi -ftmp="__llama.cpp_example_tmp__.txt" +ftmp="__jarvis.cpp_example_tmp__.txt" trap "rm -f $ftmp" EXIT echo "Translate from English to French: @@ -58,4 +58,4 @@ echo "$2 model=$1 # generate the most likely continuation until the string "===" is found -./llama-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs +./jarvis-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs diff --git a/examples/batched-bench/CMakeLists.txt b/examples/batched-bench/CMakeLists.txt index 959acaeeebc38..f84e368f22422 100644 --- a/examples/batched-bench/CMakeLists.txt +++ b/examples/batched-bench/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-batched-bench) +set(TARGET jarvis-batched-bench) add_executable(${TARGET} batched-bench.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/batched-bench/README.md b/examples/batched-bench/README.md index df67c47e378cf..ec94a0a173229 100644 --- a/examples/batched-bench/README.md +++ b/examples/batched-bench/README.md @@ -1,6 +1,6 @@ -# llama.cpp/example/batched-bench +# jarvis.cpp/example/batched-bench -Benchmark the 
batched decoding performance of `llama.cpp` +Benchmark the batched decoding performance of `jarvis.cpp` ## Usage @@ -10,16 +10,16 @@ There are 2 modes of operation: - `prompt is shared` - there is a common prompt of size `PP` used by all batches (i.e. `N_KV = PP + B*TG`) ```bash -./llama-batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps] +./jarvis-batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps] # LLaMA 7B, F16, N_KV_MAX = 16384 (8GB), prompt not shared -./llama-batched-bench -m ./models/llama-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99 +./jarvis-batched-bench -m ./models/jarvis-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99 # LLaMA 7B, Q8_0, N_KV_MAX = 16384 (8GB), prompt is shared -./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps +./jarvis-batched-bench -m ./models/jarvis-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps # custom set of batches -./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 +./jarvis-batched-bench -m ./models/jarvis-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 ``` ## Sample results diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index a3b21ad6bce44..349f16aade71e 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -1,7 +1,7 @@ #include "arg.h" #include "common.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include #include @@ -17,7 +17,7 @@ static void print_usage(int, char ** argv) { int main(int argc, char ** argv) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) { + if (!common_params_parse(argc, argv, params, 
JARVIS_EXAMPLE_BENCH, print_usage)) { return 1; } @@ -31,42 +31,42 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // initialize the model - llama_model_params model_params = common_model_params_to_llama(params); + jarvis_model_params model_params = common_model_params_to_jarvis(params); - llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + jarvis_model * model = jarvis_load_model_from_file(params.model.c_str(), model_params); if (model == NULL) { fprintf(stderr , "%s: error: unable to load model\n" , __func__); return 1; } - llama_context_params ctx_params = common_context_params_to_llama(params); + jarvis_context_params ctx_params = common_context_params_to_jarvis(params); // ensure enough sequences are available ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end()); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + jarvis_context * ctx = jarvis_new_context_with_model(model, ctx_params); if (ctx == NULL) { - fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + fprintf(stderr , "%s: error: failed to create the jarvis_context\n" , __func__); return 1; } - const int32_t n_kv_max = llama_n_ctx(ctx); + const int32_t n_kv_max = jarvis_n_ctx(ctx); - llama_batch batch = llama_batch_init(n_kv_max, 0, 1); + jarvis_batch batch = jarvis_batch_init(n_kv_max, 0, 1); // decode in batches of ctx_params.n_batch tokens - auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) { + auto decode_helper = [](jarvis_context * ctx, jarvis_batch & batch, int32_t n_batch) { for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); - llama_batch batch_view = { + jarvis_batch batch_view = { n_tokens, batch.token + i, nullptr, @@ -76,13 +76,13 @@ int 
main(int argc, char ** argv) { batch.logits + i, }; - const int ret = llama_decode(ctx, batch_view); + const int ret = jarvis_decode(ctx, batch_view); if (ret != 0) { LOG_ERR("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret); return false; } - llama_synchronize(ctx); + jarvis_synchronize(ctx); } return true; @@ -95,7 +95,7 @@ int main(int argc, char ** argv) { } if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_ERR("%s: llama_decode() failed\n", __func__); + LOG_ERR("%s: jarvis_decode() failed\n", __func__); return 1; } } @@ -132,16 +132,16 @@ int main(int argc, char ** argv) { const auto t_pp_start = ggml_time_us(); - llama_kv_cache_clear(ctx); + jarvis_kv_cache_clear(ctx); if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_ERR("%s: llama_decode() failed\n", __func__); + LOG_ERR("%s: jarvis_decode() failed\n", __func__); return 1; } if (is_pp_shared) { for (int32_t i = 1; i < pl; ++i) { - llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); + jarvis_kv_cache_seq_cp(ctx, 0, i, -1, -1); } } @@ -157,7 +157,7 @@ int main(int argc, char ** argv) { } if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_ERR("%s: llama_decode() failed\n", __func__); + LOG_ERR("%s: jarvis_decode() failed\n", __func__); return 1; } } @@ -189,14 +189,14 @@ int main(int argc, char ** argv) { } LOG("\n"); - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); - llama_batch_free(batch); + jarvis_batch_free(batch); - llama_free(ctx); - llama_free_model(model); + jarvis_free(ctx); + jarvis_free_model(model); - llama_backend_free(); + jarvis_backend_free(); LOG("\n\n"); diff --git a/examples/batched.swift/Makefile b/examples/batched.swift/Makefile index 1f9156e583fdd..f6efa6ed62536 100755 --- a/examples/batched.swift/Makefile +++ b/examples/batched.swift/Makefile @@ -1,6 +1,6 @@ .PHONY: build build: - xcodebuild -scheme llama-batched-swift -destination "generic/platform=macOS" -derivedDataPath build - rm -f ./llama-batched-swift - ln -s 
./build/Build/Products/Debug/llama-batched-swift ./llama-batched-swift + xcodebuild -scheme jarvis-batched-swift -destination "generic/platform=macOS" -derivedDataPath build + rm -f ./jarvis-batched-swift + ln -s ./build/Build/Products/Debug/jarvis-batched-swift ./jarvis-batched-swift diff --git a/examples/batched.swift/Package.swift b/examples/batched.swift/Package.swift index 7e8afd0843c5b..8130a77e66ebd 100644 --- a/examples/batched.swift/Package.swift +++ b/examples/batched.swift/Package.swift @@ -4,17 +4,17 @@ import PackageDescription let package = Package( - name: "llama-batched-swift", + name: "jarvis-batched-swift", platforms: [.macOS(.v12)], dependencies: [ - .package(name: "llama", path: "../../"), + .package(name: "jarvis", path: "../../"), ], targets: [ // Targets are the basic building blocks of a package, defining a module or a test suite. // Targets can depend on other targets in this package and products from dependencies. .executableTarget( - name: "llama-batched-swift", - dependencies: ["llama"], + name: "jarvis-batched-swift", + dependencies: ["jarvis"], path: "Sources", linkerSettings: [.linkedFramework("Foundation"), .linkedFramework("AppKit")] ), diff --git a/examples/batched.swift/README.md b/examples/batched.swift/README.md index 7f2e2fcdcf4a7..03ec340ab0522 100644 --- a/examples/batched.swift/README.md +++ b/examples/batched.swift/README.md @@ -1,4 +1,4 @@ This is a swift clone of `examples/batched`. 
$ `make` -$ `./llama-batched-swift MODEL_PATH [PROMPT] [PARALLEL]` +$ `./jarvis-batched-swift MODEL_PATH [PROMPT] [PARALLEL]` diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index 10f2e7fd117a1..92eedbac7f6e8 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -1,5 +1,5 @@ import Foundation -import llama +import jarvis let arguments = CommandLine.arguments @@ -17,56 +17,56 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(argu let n_len: Int = 32 // init LLM -llama_backend_init() +jarvis_backend_init() defer { - llama_backend_free() + jarvis_backend_free() } -let model_params = llama_model_default_params() -guard let model = llama_load_model_from_file(modelPath.cString(using: .utf8), model_params) else { +let model_params = jarvis_model_default_params() +guard let model = jarvis_load_model_from_file(modelPath.cString(using: .utf8), model_params) else { print("Failed to load model") exit(1) } defer { - llama_free_model(model) + jarvis_free_model(model) } var tokens = tokenize(text: prompt, add_bos: true) let n_kv_req = UInt32(tokens.count) + UInt32((n_len - Int(tokens.count)) * n_parallel) -var context_params = llama_context_default_params() +var context_params = jarvis_context_default_params() context_params.n_ctx = n_kv_req context_params.n_batch = UInt32(max(n_len, n_parallel)) context_params.n_threads = 8 context_params.n_threads_batch = 8 -let context = llama_new_context_with_model(model, context_params) +let context = jarvis_new_context_with_model(model, context_params) guard context != nil else { print("Failed to initialize context") exit(1) } defer { - llama_free(context) + jarvis_free(context) } -var sparams = llama_sampler_chain_default_params() +var sparams = jarvis_sampler_chain_default_params() -let smpl = llama_sampler_chain_init(sparams) +let smpl = jarvis_sampler_chain_init(sparams) guard smpl != nil else { 
print("Failed to initialize sampling") exit(1) } defer { - llama_sampler_free(smpl) + jarvis_sampler_free(smpl) } -llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40)); -llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1)); -llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.4)); -llama_sampler_chain_add(smpl, llama_sampler_init_dist (1234)); +jarvis_sampler_chain_add(smpl, jarvis_sampler_init_top_k(40)); +jarvis_sampler_chain_add(smpl, jarvis_sampler_init_top_p(0.9, 1)); +jarvis_sampler_chain_add(smpl, jarvis_sampler_init_temp (0.4)); +jarvis_sampler_chain_add(smpl, jarvis_sampler_init_dist (1234)); -let n_ctx = llama_n_ctx(context) +let n_ctx = jarvis_n_ctx(context) print("\nn_len = \(n_len), n_ctx = \(n_ctx), n_batch = \(context_params.n_batch), n_parallel = \(n_parallel), n_kv_req = \(n_kv_req)\n") @@ -76,15 +76,15 @@ if n_kv_req > n_ctx { } var buffer: [CChar] = [] -for id: llama_token in tokens { +for id: jarvis_token in tokens { print(token_to_piece(token: id, buffer: &buffer) ?? 
"", terminator: "") } print("\n") -var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1) +var batch = jarvis_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1) defer { - llama_batch_free(batch) + jarvis_batch_free(batch) } // evaluate the initial prompt @@ -102,16 +102,16 @@ for (i, token) in tokens.enumerated() { batch.logits[i] = 0 } -// llama_decode will output logits only for the last token of the prompt +// jarvis_decode will output logits only for the last token of the prompt batch.logits[Int(batch.n_tokens) - 1] = 1 -if llama_decode(context, batch) != 0 { - print("llama_decode() failed") +if jarvis_decode(context, batch) != 0 { + print("jarvis_decode() failed") exit(1) } for i in 1 ..< n_parallel { - llama_kv_cache_seq_cp(context, 0, Int32(i), 0, batch.n_tokens) + jarvis_kv_cache_seq_cp(context, 0, Int32(i), 0, batch.n_tokens) } if n_parallel > 1 { @@ -138,10 +138,10 @@ while n_cur <= n_len { continue } - let new_token_id = llama_sampler_sample(smpl, context, i_batch[i]) + let new_token_id = jarvis_sampler_sample(smpl, context, i_batch[i]) // is it an end of stream? 
-> mark the stream as finished - if llama_token_is_eog(model, new_token_id) || n_cur == n_len { + if jarvis_token_is_eog(model, new_token_id) || n_cur == n_len { i_batch[i] = -1 // print("") if n_parallel > 1 { @@ -183,8 +183,8 @@ while n_cur <= n_len { n_cur += 1 // evaluate the current batch with the transformer model - if llama_decode(context, batch) != 0 { - print("llama_decode() failed") + if jarvis_decode(context, batch) != 0 { + print("jarvis_decode() failed") exit(1) } } @@ -200,15 +200,15 @@ let t_main_end = ggml_time_us() print("decoded \(n_decode) tokens in \(String(format: "%.2f", Double(t_main_end - t_main_start) / 1_000_000.0)) s, speed: \(String(format: "%.2f", Double(n_decode) / (Double(t_main_end - t_main_start) / 1_000_000.0))) t/s\n\n") -llama_perf_sampler_print(smpl) -llama_perf_context_print(context) +jarvis_perf_sampler_print(smpl) +jarvis_perf_context_print(context) -private func tokenize(text: String, add_bos: Bool) -> [llama_token] { +private func tokenize(text: String, add_bos: Bool) -> [jarvis_token] { let utf8Count = text.utf8.count let n_tokens = utf8Count + (add_bos ? 1 : 0) - let tokens = UnsafeMutablePointer.allocate(capacity: n_tokens) - let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false) - var swiftTokens: [llama_token] = [] + let tokens = UnsafeMutablePointer.allocate(capacity: n_tokens) + let tokenCount = jarvis_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false) + var swiftTokens: [jarvis_token] = [] for i in 0 ..< tokenCount { swiftTokens.append(tokens[Int(i)]) } @@ -216,13 +216,13 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] { return swiftTokens } -private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? { +private func token_to_piece(token: jarvis_token, buffer: inout [CChar]) -> String? 
{ var result = [CChar](repeating: 0, count: 8) - let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), 0, false) + let nTokens = jarvis_token_to_piece(model, token, &result, Int32(result.count), 0, false) if nTokens < 0 { let actualTokensCount = -Int(nTokens) result = .init(repeating: 0, count: actualTokensCount) - let check = llama_token_to_piece( + let check = jarvis_token_to_piece( model, token, &result, diff --git a/examples/batched/CMakeLists.txt b/examples/batched/CMakeLists.txt index 77e33343b6673..9c78d7f13544d 100644 --- a/examples/batched/CMakeLists.txt +++ b/examples/batched/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-batched) +set(TARGET jarvis-batched) add_executable(${TARGET} batched.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/batched/README.md b/examples/batched/README.md index 6013aab01fddc..ebc3ebdab319d 100644 --- a/examples/batched/README.md +++ b/examples/batched/README.md @@ -1,9 +1,9 @@ -# llama.cpp/example/batched +# jarvis.cpp/example/batched The example demonstrates batched generation from a given prompt ```bash -./llama-batched -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4 +./jarvis-batched -m ./models/jarvis-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4 ... @@ -36,9 +36,9 @@ Hello my name is Cody. I am a 3 year old neutered male. 
I am a very friendly cat main: decoded 108 tokens in 3.57 s, speed: 30.26 t/s -llama_print_timings: load time = 587.00 ms -llama_print_timings: sample time = 2.56 ms / 112 runs ( 0.02 ms per token, 43664.72 tokens per second) -llama_print_timings: prompt eval time = 4089.11 ms / 118 tokens ( 34.65 ms per token, 28.86 tokens per second) -llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) -llama_print_timings: total time = 4156.04 ms +jarvis_print_timings: load time = 587.00 ms +jarvis_print_timings: sample time = 2.56 ms / 112 runs ( 0.02 ms per token, 43664.72 tokens per second) +jarvis_print_timings: prompt eval time = 4089.11 ms / 118 tokens ( 34.65 ms per token, 28.86 tokens per second) +jarvis_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) +jarvis_print_timings: total time = 4156.04 ms ``` diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp index 3b554033e7ee4..d651730b2c582 100644 --- a/examples/batched/batched.cpp +++ b/examples/batched/batched.cpp @@ -1,7 +1,7 @@ #include "arg.h" #include "common.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include #include @@ -20,7 +20,7 @@ int main(int argc, char ** argv) { params.prompt = "Hello my name is"; params.n_predict = 32; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_COMMON, print_usage)) { return 1; } @@ -34,14 +34,14 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // initialize the model - llama_model_params model_params = common_model_params_to_llama(params); + jarvis_model_params model_params = common_model_params_to_jarvis(params); - llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + jarvis_model * model = 
jarvis_load_model_from_file(params.model.c_str(), model_params); if (model == NULL) { LOG_ERR("%s: error: unable to load model\n" , __func__); @@ -50,35 +50,35 @@ int main(int argc, char ** argv) { // tokenize the prompt - std::vector tokens_list; + std::vector tokens_list; tokens_list = common_tokenize(model, params.prompt, true); const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel; // initialize the context - llama_context_params ctx_params = common_context_params_to_llama(params); + jarvis_context_params ctx_params = common_context_params_to_jarvis(params); ctx_params.n_ctx = n_kv_req; ctx_params.n_batch = std::max(n_predict, n_parallel); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + jarvis_context * ctx = jarvis_new_context_with_model(model, ctx_params); - auto sparams = llama_sampler_chain_default_params(); + auto sparams = jarvis_sampler_chain_default_params(); - llama_sampler * smpl = llama_sampler_chain_init(sparams); + jarvis_sampler * smpl = jarvis_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sparams.top_k)); - llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sparams.top_p, params.sparams.min_keep)); - llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sparams.temp)); - llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sparams.seed)); + jarvis_sampler_chain_add(smpl, jarvis_sampler_init_top_k(params.sparams.top_k)); + jarvis_sampler_chain_add(smpl, jarvis_sampler_init_top_p(params.sparams.top_p, params.sparams.min_keep)); + jarvis_sampler_chain_add(smpl, jarvis_sampler_init_temp (params.sparams.temp)); + jarvis_sampler_chain_add(smpl, jarvis_sampler_init_dist (params.sparams.seed)); if (ctx == NULL) { - LOG_ERR("%s: error: failed to create the llama_context\n" , __func__); + LOG_ERR("%s: error: failed to create the jarvis_context\n" , __func__); return 1; } - const int n_ctx = llama_n_ctx(ctx); + const int 
n_ctx = jarvis_n_ctx(ctx); LOG_INF("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req); @@ -97,11 +97,11 @@ int main(int argc, char ** argv) { LOG("%s", common_token_to_piece(ctx, id).c_str()); } - // create a llama_batch + // create a jarvis_batch // we use this object to submit token data for decoding - llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel); + jarvis_batch batch = jarvis_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel); - std::vector seq_ids(n_parallel, 0); + std::vector seq_ids(n_parallel, 0); for (int32_t i = 0; i < n_parallel; ++i) { seq_ids[i] = i; } @@ -112,33 +112,33 @@ int main(int argc, char ** argv) { } GGML_ASSERT(batch.n_tokens == (int) tokens_list.size()); - if (llama_model_has_encoder(model)) { - if (llama_encode(ctx, batch)) { + if (jarvis_model_has_encoder(model)) { + if (jarvis_encode(ctx, batch)) { LOG_ERR("%s : failed to eval\n", __func__); return 1; } - llama_token decoder_start_token_id = llama_model_decoder_start_token(model); + jarvis_token decoder_start_token_id = jarvis_model_decoder_start_token(model); if (decoder_start_token_id == -1) { - decoder_start_token_id = llama_token_bos(model); + decoder_start_token_id = jarvis_token_bos(model); } common_batch_clear(batch); common_batch_add(batch, decoder_start_token_id, 0, seq_ids, false); } - // llama_decode will output logits only for the last token of the prompt + // jarvis_decode will output logits only for the last token of the prompt batch.logits[batch.n_tokens - 1] = true; - if (llama_decode(ctx, batch) != 0) { - LOG_ERR("%s: llama_decode() failed\n", __func__); + if (jarvis_decode(ctx, batch) != 0) { + LOG_ERR("%s: jarvis_decode() failed\n", __func__); return 1; } //// assign the system KV cache to all parallel sequences //// this way, the parallel sequences will "reuse" the prompt tokens 
without having to copy them //for (int32_t i = 1; i < n_parallel; ++i) { - // llama_kv_cache_seq_cp(ctx, 0, i, -1, -1); + // jarvis_kv_cache_seq_cp(ctx, 0, i, -1, -1); //} if (n_parallel > 1) { @@ -170,10 +170,10 @@ int main(int argc, char ** argv) { continue; } - const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]); + const jarvis_token new_token_id = jarvis_sampler_sample(smpl, ctx, i_batch[i]); // is it an end of generation? -> mark the stream as finished - if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) { + if (jarvis_token_is_eog(model, new_token_id) || n_cur == n_predict) { i_batch[i] = -1; LOG("\n"); if (n_parallel > 1) { @@ -206,7 +206,7 @@ int main(int argc, char ** argv) { n_cur += 1; // evaluate the current batch with the transformer model - if (llama_decode(ctx, batch)) { + if (jarvis_decode(ctx, batch)) { LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1); return 1; } @@ -226,18 +226,18 @@ int main(int argc, char ** argv) { __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f)); LOG("\n"); - llama_perf_sampler_print(smpl); - llama_perf_context_print(ctx); + jarvis_perf_sampler_print(smpl); + jarvis_perf_context_print(ctx); fprintf(stderr, "\n"); - llama_batch_free(batch); + jarvis_batch_free(batch); - llama_sampler_free(smpl); - llama_free(ctx); - llama_free_model(model); + jarvis_sampler_free(smpl); + jarvis_free(ctx); + jarvis_free_model(model); - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git a/examples/chat-13B.bat b/examples/chat-13B.bat index c5c8ac6efa81a..e398912f0f69b 100644 --- a/examples/chat-13B.bat +++ b/examples/chat-13B.bat @@ -10,7 +10,7 @@ if not "%errorlevel%"=="0" ( if not defined MODEL set "MODEL=models\13B\ggml-model-q4_0.bin" if not defined USER_NAME set "USER_NAME=User" -if not defined AI_NAME set "AI_NAME=ChatLLaMa" +if not defined AI_NAME set "AI_NAME=ChatJarvis" rem Adjust to the number of 
CPU cores you want to use. rem if not defined N_THREAD set "N_THREAD=8" rem Number of tokens to predict (made it larger than default because we want a long interaction) diff --git a/examples/chat-13B.sh b/examples/chat-13B.sh index 1828903c31670..96785bd4b2ccc 100755 --- a/examples/chat-13B.sh +++ b/examples/chat-13B.sh @@ -7,7 +7,7 @@ cd "$(dirname "$0")/.." || exit MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}" PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt} USER_NAME="${USER_NAME:-USER}" -AI_NAME="${AI_NAME:-ChatLLaMa}" +AI_NAME="${AI_NAME:-ChatJarvis}" # Adjust to the number of CPU cores you want to use. N_THREAD="${N_THREAD:-8}" @@ -15,13 +15,13 @@ N_THREAD="${N_THREAD:-8}" N_PREDICTS="${N_PREDICTS:-2048}" # Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 +# For example, override the context size by doing: ./chatJarvis --ctx_size 1024 GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" DATE_TIME=$(date +%H:%M) DATE_YEAR=$(date +%Y) -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) +PROMPT_FILE=$(mktemp -t jarviscpp_prompt.XXXXXXX.txt) sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ @@ -30,7 +30,7 @@ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ $PROMPT_TEMPLATE > $PROMPT_FILE # shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./llama-cli $GEN_OPTIONS \ +./jarvis-cli $GEN_OPTIONS \ --model "$MODEL" \ --threads "$N_THREAD" \ --n_predict "$N_PREDICTS" \ diff --git a/examples/chat-persistent.sh b/examples/chat-persistent.sh index d9cab9836482e..016e6d06f58e0 100755 --- a/examples/chat-persistent.sh +++ b/examples/chat-persistent.sh @@ -9,10 +9,10 @@ if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then exit 1 fi -MODEL="${MODEL:-./models/llama-13b/ggml-model-q4_0.gguf}" 
+MODEL="${MODEL:-./models/jarvis-13b/ggml-model-q4_0.gguf}" PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}" USER_NAME="${USER_NAME:-User}" -AI_NAME="${AI_NAME:-ChatLLaMa}" +AI_NAME="${AI_NAME:-ChatJarvis}" DATE_TIME="$(date +%H:%M)" DATE_YEAR="$(date +%Y)" @@ -62,7 +62,7 @@ fi if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then echo 'Prompt cache does not exist, building...' # Default batch_size to 64 here for better user feedback during initial prompt processing - ./llama-cli 2>>"$LOG" \ + ./jarvis-cli 2>>"$LOG" \ --batch_size 64 \ "${OPTS[@]}" \ --prompt-cache "$PROMPT_CACHE_FILE" \ @@ -109,13 +109,13 @@ while read -e line; do printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE" - ./llama-cli 2>>"$LOG" "${OPTS[@]}" \ + ./jarvis-cli 2>>"$LOG" "${OPTS[@]}" \ --prompt-cache "$CUR_PROMPT_CACHE" \ --prompt-cache-all \ --file "$CUR_PROMPT_FILE" \ --reverse-prompt "${USER_NAME}:" \ --n_predict "$n_predict" | - skip_bytes 1 | # skip BOS token added by ./llama-cli + skip_bytes 1 | # skip BOS token added by ./jarvis-cli tee "$CUR_PROMPT_FILE.tmp" | # save prompt + generation to tmp file skip_bytes "$n_prompt_len_pre" # print generation @@ -133,7 +133,7 @@ while read -e line; do # TODO get both messages in one go if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" || ! sample_time_msg="$(tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then - echo >&2 "Couldn't get number of tokens from ./llama-cli output!" + echo >&2 "Couldn't get number of tokens from ./jarvis-cli output!" 
exit 1 fi @@ -144,7 +144,7 @@ while read -e line; do fi # Update cache for next prompt in background, ideally during user input - ./llama-cli >>"$LOG_BG" 2>&1 "${OPTS[@]}" \ + ./jarvis-cli >>"$LOG_BG" 2>&1 "${OPTS[@]}" \ --prompt-cache "$NEXT_PROMPT_CACHE" \ --file "$NEXT_PROMPT_FILE" \ --n_predict 1 & diff --git a/examples/chat-vicuna.sh b/examples/chat-vicuna.sh index ffdd200849503..2d059adac0338 100755 --- a/examples/chat-vicuna.sh +++ b/examples/chat-vicuna.sh @@ -15,13 +15,13 @@ N_THREAD="${N_THREAD:-8}" N_PREDICTS="${N_PREDICTS:-2048}" # Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 +# For example, override the context size by doing: ./chatJarvis --ctx_size 1024 GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" DATE_TIME=$(date +%H:%M) DATE_YEAR=$(date +%Y) -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) +PROMPT_FILE=$(mktemp -t jarviscpp_prompt.XXXXXXX.txt) sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ @@ -30,7 +30,7 @@ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ $PROMPT_TEMPLATE > $PROMPT_FILE # shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./bin/llama-cli $GEN_OPTIONS \ +./bin/jarvis-cli $GEN_OPTIONS \ --model "$MODEL" \ --threads "$N_THREAD" \ --n_predict "$N_PREDICTS" \ diff --git a/examples/chat.sh b/examples/chat.sh index 9f85d1e265d00..0eb4b2e21bbce 100755 --- a/examples/chat.sh +++ b/examples/chat.sh @@ -11,6 +11,6 @@ cd .. 
# # "--keep 48" is based on the contents of prompts/chat-with-bob.txt # -./llama-cli -m ./models/llama-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \ +./jarvis-cli -m ./models/jarvis-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \ --repeat_penalty 1.0 --color -i \ -r "User:" -f prompts/chat-with-bob.txt diff --git a/examples/convert-llama2c-to-ggml/CMakeLists.txt b/examples/convert-llama2c-to-ggml/CMakeLists.txt index a6790e617217e..f88ca32c7d617 100644 --- a/examples/convert-llama2c-to-ggml/CMakeLists.txt +++ b/examples/convert-llama2c-to-ggml/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-convert-llama2c-to-ggml) -add_executable(${TARGET} convert-llama2c-to-ggml.cpp) +set(TARGET jarvis-convert-jarvis2c-to-ggml) +add_executable(${TARGET} convert-jarvis2c-to-ggml.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/convert-llama2c-to-ggml/README.md b/examples/convert-llama2c-to-ggml/README.md index 5774ac83c32c8..0cb1cbbe7cebb 100644 --- a/examples/convert-llama2c-to-ggml/README.md +++ b/examples/convert-llama2c-to-ggml/README.md @@ -1,28 +1,28 @@ -## Convert llama2.c model to ggml +## Convert jarvis2.c model to ggml -This example reads weights from project [llama2.c](https://github.com/karpathy/llama2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default. +This example reads weights from project [jarvis2.c](https://github.com/karpathy/jarvis2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default. 
-To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository: +To convert the model first download the models from the [jarvis2.c](https://github.com/karpathy/jarvis2.c) repository: `$ make -j` After successful compilation, following usage options are available: ``` -usage: ./llama-convert-llama2c-to-ggml [options] +usage: ./jarvis-convert-jarvis2c-to-ggml [options] options: -h, --help show this help message and exit - --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default 'models/7B/ggml-model-f16.gguf') - --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model - --llama2c-output-model FNAME model path to save the converted llama2.c model (default ak_llama_model.bin') + --copy-vocab-from-model FNAME path of gguf jarvis model or jarvis2.c vocabulary from which to copy vocab (default 'models/7B/ggml-model-f16.gguf') + --jarvis2c-model FNAME [REQUIRED] model path from which to load Karpathy's jarvis2.c model + --jarvis2c-output-model FNAME model path to save the converted jarvis2.c model (default ak_jarvis_model.bin') ``` -An example command using a model from [karpathy/tinyllamas](https://huggingface.co/karpathy/tinyllamas) is as follows: +An example command using a model from [karpathy/tinyjarviss](https://huggingface.co/karpathy/tinyjarviss) is as follows: -`$ ./llama-convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin` +`$ ./jarvis-convert-jarvis2c-to-ggml --copy-vocab-from-model jarvis-2-7b-chat.gguf.q2_K.bin --jarvis2c-model stories42M.bin --jarvis2c-output-model stories42M.gguf.bin` -Note: The vocabulary for `stories260K.bin` should be its own tokenizer `tok512.bin` found in [karpathy/tinyllamas/stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K). 
+Note: The vocabulary for `stories260K.bin` should be its own tokenizer `tok512.bin` found in [karpathy/tinyjarviss/stories260K](https://huggingface.co/karpathy/tinyjarviss/tree/main/stories260K). Now you can use the model with a command like: -`$ ./llama-cli -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256` +`$ ./jarvis-cli -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256` diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp index 988a584c99a25..6eb760a0939e3 100644 --- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp @@ -1,5 +1,5 @@ #include "ggml.h" -#include "llama.h" +#include "jarvis.h" #include "common.h" #include "log.h" @@ -33,14 +33,14 @@ #define KV_TOKENIZER_PAD_ID "tokenizer.ggml.padding_token_id" #define KV_TOKENIZER_HF_JSON "tokenizer.huggingface.json" -#define KV_CONTEXT_LENGTH "llama.context_length" -#define KV_EMBEDDING_LENGTH "llama.embedding_length" -#define KV_BLOCK_COUNT "llama.block_count" -#define KV_FEED_FORWARD_LENGTH "llama.feed_forward_length" -#define KV_ATTENTION_HEAD_COUNT "llama.attention.head_count" -#define KV_ATTENTION_HEAD_COUNT_KV "llama.attention.head_count_kv" -#define KV_ATTENTION_LAYERNORM_RMS_EPS "llama.attention.layer_norm_rms_epsilon" -#define KV_ROPE_DIMENSION_COUNT "llama.rope.dimension_count" +#define KV_CONTEXT_LENGTH "jarvis.context_length" +#define KV_EMBEDDING_LENGTH "jarvis.embedding_length" +#define KV_BLOCK_COUNT "jarvis.block_count" +#define KV_FEED_FORWARD_LENGTH "jarvis.feed_forward_length" +#define KV_ATTENTION_HEAD_COUNT "jarvis.attention.head_count" +#define KV_ATTENTION_HEAD_COUNT_KV "jarvis.attention.head_count_kv" +#define KV_ATTENTION_LAYERNORM_RMS_EPS "jarvis.attention.layer_norm_rms_epsilon" +#define KV_ROPE_DIMENSION_COUNT "jarvis.rope.dimension_count" #define TN_TOKEN_EMBD "token_embd.weight" 
#define TN_OUTPUT_NORM "output_norm.weight" @@ -59,15 +59,15 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' -#define LLAMA_FILE_VERSION_GGJT_V3 3 +#define JARVIS_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' +#define JARVIS_FILE_VERSION_GGJT_V3 3 -#define TOKENIZER_NAME "llama" +#define TOKENIZER_NAME "jarvis" #define UNKNOWN_TOKEN_ID 0 #define BOS_TOKEN_ID 1 #define EOS_TOKEN_ID 2 -//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc. +//////////////////////////////////////// jarvis2.c model structs and functions to load models, alloc memory etc. typedef struct { int dim; // transformer dimension int hidden_dim; // for ffn layers @@ -201,10 +201,10 @@ static void print_sample_weights(TransformerWeights *w){ //////////////////////////////////////// ggml structs and functions required to load models, configs and save the model. -struct my_llama_vocab { +struct my_jarvis_vocab { using id = int32_t; using token = std::string; - using ttype = llama_token_type; + using ttype = jarvis_token_type; struct token_data { token text; @@ -216,7 +216,7 @@ struct my_llama_vocab { std::vector id_to_token; }; -struct my_llama_hparams { +struct my_jarvis_hparams { uint32_t n_vocab = 32000; uint32_t n_ctx = 512; // this is provided as user input? 
uint32_t n_embd = 4096; @@ -227,12 +227,12 @@ struct my_llama_hparams { uint32_t n_layer = 32; uint32_t n_rot = 64; - bool operator!=(const my_llama_hparams& other) const { - return memcmp(this, &other, sizeof(my_llama_hparams)); + bool operator!=(const my_jarvis_hparams& other) const { + return memcmp(this, &other, sizeof(my_jarvis_hparams)); } }; -struct my_llama_layer { +struct my_jarvis_layer { // normalization struct ggml_tensor * attention_norm; @@ -251,19 +251,19 @@ struct my_llama_layer { struct ggml_tensor * w3; }; -struct my_llama_model { +struct my_jarvis_model { struct ggml_context * ctx = NULL; std::string name; - my_llama_hparams hparams; + my_jarvis_hparams hparams; struct ggml_tensor * tok_embeddings; struct ggml_tensor * norm; struct ggml_tensor * output; - std::vector layers; + std::vector layers; uint32_t train_its = 0; uint32_t train_samples = 0; @@ -272,8 +272,8 @@ struct my_llama_model { struct train_params { const char * fn_vocab_model; - const char * fn_llama2c_model; - const char * fn_llama2c_output_model; + const char * fn_jarvis2c_model; + const char * fn_jarvis2c_output_model; const char * fn_train_data; const char * fn_checkpoint_in; const char * fn_checkpoint_out; @@ -318,7 +318,7 @@ struct train_params { int mem_compute1_gb; }; -static void print_params(struct my_llama_hparams * params) { +static void print_params(struct my_jarvis_hparams * params) { LOG_INF("%s: n_vocab: %u\n", __func__, params->n_vocab); LOG_INF("%s: n_ctx: %u\n", __func__, params->n_ctx); LOG_INF("%s: n_embd: %u\n", __func__, params->n_embd); @@ -345,7 +345,7 @@ static void print_tensor_info(const struct ggml_context * ctx) { } } -static void init_model(struct my_llama_model * model) { +static void init_model(struct my_jarvis_model * model) { const auto & hparams = model->hparams; const uint32_t n_embd = hparams.n_embd; @@ -434,12 +434,12 @@ static void print_matrix(struct ggml_tensor * probs) { } } -struct llama_file { +struct jarvis_file { // use FILE * so we 
don't have to re-open the file to mmap FILE * fp; size_t size; - llama_file(const char * fname, const char * mode) { + jarvis_file(const char * fname, const char * mode) { fp = std::fopen(fname, mode); if (fp == NULL) { size = 0; @@ -500,7 +500,7 @@ struct llama_file { return std::string(chars.data(), len); } - ~llama_file() { + ~jarvis_file() { if (fp) { std::fclose(fp); } @@ -508,7 +508,7 @@ struct llama_file { }; static bool is_ggml_file(const char * filename) { - llama_file file(filename, "rb"); + jarvis_file file(filename, "rb"); if (file.size < 4) { return false; } @@ -516,7 +516,7 @@ static bool is_ggml_file(const char * filename) { return magic == GGUF_MAGIC; } -static std::string llama_escape_whitespaces(const std::string & text) { +static std::string jarvis_escape_whitespaces(const std::string & text) { std::ostringstream out; for (char c : text) { if (c == ' ') out << "\xe2\x96\x81"; @@ -525,7 +525,7 @@ static std::string llama_escape_whitespaces(const std::string & text) { return out.str(); } -static void load_vocab(const char * filename, const Config * config, struct my_llama_vocab * vocab) { +static void load_vocab(const char * filename, const Config * config, struct my_jarvis_vocab * vocab) { if (is_ggml_file(filename)) { LOG_INF("%s: Loading vocabulary from gguf file %s\n", __func__, filename); struct ggml_context * ctx_data = NULL; @@ -556,7 +556,7 @@ static void load_vocab(const char * filename, const Config * config, struct my_l const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx); if (n_vocab != static_cast(config->vocab_size)) { - die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size); + die_fmt("vocab size mismatch: (gguf) %u != (jarvis2c) %d", n_vocab, config->vocab_size); } vocab->id_to_token.resize(n_vocab); @@ -569,45 +569,45 @@ static void load_vocab(const char * filename, const Config * config, struct my_l auto & token_data = vocab->id_to_token[i]; token_data.text = std::move(word); token_data.score = 
scores[i]; - token_data.type = (llama_token_type) toktypes[i]; + token_data.type = (jarvis_token_type) toktypes[i]; } ggml_free(ctx_data); gguf_free(ctx); } else { - // assume llama2.c vocabulary - LOG_INF("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename); - llama_file file(filename, "rb"); + // assume jarvis2.c vocabulary + LOG_INF("%s: Assuming jarvis2.c vocabulary since %s is not a gguf file\n", __func__, filename); + jarvis_file file(filename, "rb"); if (!file.fp) { die_fmt("%s: %s", strerror(errno), filename); } const int n_vocab = config->vocab_size; /* uint32_t max_token_length = */ file.read_u32(); // unused vocab->id_to_token.resize(n_vocab); - for (my_llama_vocab::id id=0; id", &byte_val) == 1) { // Text of byte tokens is already in the expected format. - type = LLAMA_TOKEN_TYPE_BYTE; + type = JARVIS_TOKEN_TYPE_BYTE; } else { - type = LLAMA_TOKEN_TYPE_NORMAL; + type = JARVIS_TOKEN_TYPE_NORMAL; } - text = llama_escape_whitespaces(text); + text = jarvis_escape_whitespaces(text); vocab->id_to_token[id].text = text; vocab->id_to_token[id].score = score; @@ -630,8 +630,8 @@ static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const floa } } -static void save_as_llama_model( - struct my_llama_vocab * vocab, struct my_llama_model * model, TransformerWeights* w, const char * filename +static void save_as_jarvis_model( + struct my_jarvis_vocab * vocab, struct my_jarvis_model * model, TransformerWeights* w, const char * filename ) { // convert AK weights into GG weights one by one. 
// w->token_embedding_table -> model->tok_embeddings @@ -670,8 +670,8 @@ static void save_as_llama_model( std::vector tokens; std::vector scores; - std::vector token_types; - for (const my_llama_vocab::token_data & token_data : vocab->id_to_token) { + std::vector token_types; + for (const my_jarvis_vocab::token_data & token_data : vocab->id_to_token) { tokens.push_back(token_data.text.c_str()); scores.push_back(token_data.score); token_types.push_back(token_data.type); @@ -682,8 +682,8 @@ static void save_as_llama_model( gguf_set_val_str(ctx, KV_TOKENIZER_MODEL, TOKENIZER_NAME); - gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "llama"); - gguf_set_val_str(ctx, KV_GENERAL_NAME, "llama"); + gguf_set_val_str(ctx, KV_GENERAL_ARCHITECTURE, "jarvis"); + gguf_set_val_str(ctx, KV_GENERAL_NAME, "jarvis"); // special tokens gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID); @@ -750,7 +750,7 @@ static void save_as_llama_model( static struct train_params get_default_train_params() { struct train_params params; params.fn_vocab_model = "models/7B/ggml-model-f16.gguf"; - params.fn_llama2c_output_model = "ak_llama_model.bin"; + params.fn_jarvis2c_output_model = "ak_jarvis_model.bin"; params.fn_train_data = "shakespeare.txt"; params.fn_checkpoint_in = "checkpoint.bin"; params.fn_checkpoint_out = "checkpoint.bin"; @@ -802,9 +802,9 @@ static void print_usage(int /*argc*/, char ** argv, const struct train_params * fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); - fprintf(stderr, " --copy-vocab-from-model FNAME path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model); - fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n"); - fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model); + fprintf(stderr, " 
--copy-vocab-from-model FNAME path of gguf jarvis model or jarvis2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model); + fprintf(stderr, " --jarvis2c-model FNAME [REQUIRED] model path from which to load Karpathy's jarvis2.c model\n"); + fprintf(stderr, " --jarvis2c-output-model FNAME model path to save the converted jarvis2.c model (default %s')\n", params->fn_jarvis2c_output_model); fprintf(stderr, "\n"); } @@ -827,19 +827,19 @@ static bool params_parse(int argc, char ** argv, struct train_params * params) { break; } params->fn_vocab_model = argv[i]; - } else if (arg == "--llama2c-model") { + } else if (arg == "--jarvis2c-model") { if (++i >= argc) { invalid_param = true; break; } reqd_param_found = true; - params->fn_llama2c_model = argv[i]; - } else if (arg == "--llama2c-output-model") { + params->fn_jarvis2c_model = argv[i]; + } else if (arg == "--jarvis2c-output-model") { if (++i >= argc) { invalid_param = true; break; } - params->fn_llama2c_output_model = argv[i]; + params->fn_jarvis2c_output_model = argv[i]; } else if (arg == "-h" || arg == "--help") { print_usage(argc, argv, &default_params); exit(0); @@ -855,7 +855,7 @@ static bool params_parse(int argc, char ** argv, struct train_params * params) { exit(1); } if (!reqd_param_found){ - fprintf(stderr, "error: please specify a llama2.c .bin file to be converted with argument --llama2c-model\n"); + fprintf(stderr, "error: please specify a jarvis2.c .bin file to be converted with argument --jarvis2c-model\n"); print_usage(argc, argv, &default_params); exit(1); } @@ -882,15 +882,15 @@ int main(int argc, char ** argv) { Config config; TransformerWeights weights = {}; { - LOG_INF("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model); - FILE * file = fopen(params.fn_llama2c_model, "rb"); + LOG_INF("%s: Loading jarvis2c model from %s\n", __func__, params.fn_jarvis2c_model); + FILE * file = fopen(params.fn_jarvis2c_model, "rb"); if (!file) { - LOG_ERR("%s: Unable to 
open the checkpoint file %s!\n", __func__, params.fn_llama2c_model); + LOG_ERR("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_jarvis2c_model); return 1; } // read in the config header if (fread(&config, sizeof(Config), 1, file) != 1) { - LOG_ERR("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model); + LOG_ERR("%s: Unable to read jarvis2c config from %s!\n",__func__,params.fn_jarvis2c_model); return 1; } auto shared_weights = config.vocab_size > 0; @@ -899,17 +899,17 @@ int main(int argc, char ** argv) { // read in the Transformer weights alloc_weights(&weights, &config, shared_weights); if (checkpoint_init_weights(&weights, &config, file, shared_weights)) { - LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model); + LOG_ERR("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_jarvis2c_model); return 1; } fclose(file); } - struct my_llama_vocab vocab; + struct my_jarvis_vocab vocab; load_vocab(params.fn_vocab_model, &config, &vocab); - struct my_llama_model model; - model.hparams.n_vocab = config.vocab_size; //llama_n_vocab(lctx); + struct my_jarvis_model model; + model.hparams.n_vocab = config.vocab_size; //jarvis_n_vocab(lctx); model.hparams.n_ctx = params.n_ctx; model.hparams.n_embd = config.dim; //params.n_embd; model.hparams.n_ff = config.hidden_dim; @@ -929,10 +929,10 @@ int main(int argc, char ** argv) { model.ctx = ggml_init(lcparams); init_model(&model); - model.name = basename(params.fn_llama2c_model); - save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model); + model.name = basename(params.fn_jarvis2c_model); + save_as_jarvis_model(&vocab, &model, &weights, params.fn_jarvis2c_output_model); - LOG_INF("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model); + LOG_INF("%s: Saving jarvis.c model file %s in ggml format at %s\n", __func__, 
params.fn_jarvis2c_model, params.fn_jarvis2c_output_model); ggml_free(model.ctx); return 0; diff --git a/examples/convert_legacy_llama.py b/examples/convert_legacy_llama.py index 9ab9ab06edf8f..0e3d9363f1dc0 100755 --- a/examples/convert_legacy_llama.py +++ b/examples/convert_legacy_llama.py @@ -33,7 +33,7 @@ sys.path.insert(1, str(Path(__file__).parent.parent / 'gguf-py')) import gguf -from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, LlamaHfVocab +from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, JarvisHfVocab if TYPE_CHECKING: from typing_extensions import Self, TypeAlias @@ -45,7 +45,7 @@ NDArray: TypeAlias = 'np.ndarray[Any, Any]' -ARCH = gguf.MODEL_ARCH.LLAMA +ARCH = gguf.MODEL_ARCH.JARVIS DEFAULT_CONCURRENCY = 8 @@ -130,8 +130,8 @@ def quantize_blocks_q8_0(blocks: NDArray) -> Iterable[tuple[Any, Any]]: 'I32': DT_I32, } -# TODO: match this with `llama_ftype` -# TODO: rename to LLAMAFileType +# TODO: match this with `jarvis_ftype` +# TODO: rename to JARVISFileType # TODO: move to `gguf.py` @@ -288,12 +288,12 @@ def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: f_rope_freq_base = None n_ff = None - # hack to determine LLaMA v1 vs v2 vs CodeLlama + # hack to determine LLaMA v1 vs v2 vs CodeJarvis if config.get("moe"): # Mixtral n_ctx = 32768 elif config.get("rope_theta") == 1000000: - # CodeLlama + # CodeJarvis n_ctx = 16384 elif config["norm_eps"] == 1e-05: # LLaMA v2 @@ -1199,7 +1199,7 @@ def load_some_model(path: Path) -> ModelPlus: class VocabFactory: - _VOCAB_CLASSES: list[type[Vocab]] = [SentencePieceVocab, BpeVocab, LlamaHfVocab] + _VOCAB_CLASSES: list[type[Vocab]] = [SentencePieceVocab, BpeVocab, JarvisHfVocab] def __init__(self, path: Path): self.path = path diff --git a/examples/cvector-generator/CMakeLists.txt b/examples/cvector-generator/CMakeLists.txt index 0a559d60c2a6d..ed3bb6abba599 100644 --- a/examples/cvector-generator/CMakeLists.txt +++ 
b/examples/cvector-generator/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-cvector-generator) +set(TARGET jarvis-cvector-generator) add_executable(${TARGET} cvector-generator.cpp pca.hpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/cvector-generator/README.md b/examples/cvector-generator/README.md index be4dd5250f15f..e7a4f734761e6 100644 --- a/examples/cvector-generator/README.md +++ b/examples/cvector-generator/README.md @@ -3,24 +3,24 @@ This example demonstrates how to generate a control vector using gguf models. Related PRs: -- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970) -- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880) -- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514) +- [Add support for control vectors](https://github.com/ggerganov/jarvis.cpp/pull/5970) +- (Issue) [Generate control vector using jarvis.cpp](https://github.com/ggerganov/jarvis.cpp/issues/6880) +- [Add cvector-generator example](https://github.com/ggerganov/jarvis.cpp/pull/7514) ## Examples ```sh # CPU only -./cvector-generator -m ./llama-3.Q4_K_M.gguf +./cvector-generator -m ./jarvis-3.Q4_K_M.gguf # With GPU -./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99 +./cvector-generator -m ./jarvis-3.Q4_K_M.gguf -ngl 99 # With advanced options -./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100 +./cvector-generator -m ./jarvis-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100 # Using mean value instead of PCA -./cvector-generator -m ./llama-3.Q4_K_M.gguf --method mean +./cvector-generator -m ./jarvis-3.Q4_K_M.gguf --method mean # To see help message ./cvector-generator -h @@ -36,10 +36,10 @@ If you have 
multiple lines per prompt, you can escape the newline character (cha <|im_start|>system\nYou are in a very good mood today<|im_end|> ``` -Example to use output file with `llama-cli`: +Example to use output file with `jarvis-cli`: (Tips: The control vector works better when apply to layers higher than 10) ```sh -./llama-cli -m ./llama-3.Q4_K_M.gguf -p "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSing a song<|im_end|><|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" --special --control-vector-scaled ./control_vector.gguf 0.8 --control-vector-layer-range 10 31 +./jarvis-cli -m ./jarvis-3.Q4_K_M.gguf -p "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSing a song<|im_end|><|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" --special --control-vector-scaled ./control_vector.gguf 0.8 --control-vector-layer-range 10 31 ``` diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp index d1731bba64e1b..e09304aed1058 100644 --- a/examples/cvector-generator/cvector-generator.cpp +++ b/examples/cvector-generator/cvector-generator.cpp @@ -1,6 +1,6 @@ #include "arg.h" #include "common.h" -#include "llama.h" +#include "jarvis.h" #include "ggml.h" #include "pca.hpp" #include "mean.hpp" @@ -28,7 +28,7 @@ // utils template -static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) { +static std::string tokens_to_str(jarvis_context * ctx, Iter begin, Iter end) { std::string ret; for (; begin != end; ++begin) { ret += common_token_to_piece(ctx, *begin); @@ -39,10 +39,10 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) { static void print_usage(int, char ** argv) { printf("\nexample usage:\n"); - printf("\n CPU only: %s -m ./llama-3.Q4_K_M.gguf\n", argv[0]); - printf("\n with GPU: %s -m 
./llama-3.Q4_K_M.gguf -ngl 99\n", argv[0]); - printf("\n advanced: %s -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100\n", argv[0]); - printf("\n using mean: %s -m ./llama-3.Q4_K_M.gguf --method mean\n", argv[0]); + printf("\n CPU only: %s -m ./jarvis-3.Q4_K_M.gguf\n", argv[0]); + printf("\n with GPU: %s -m ./jarvis-3.Q4_K_M.gguf -ngl 99\n", argv[0]); + printf("\n advanced: %s -m ./jarvis-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100\n", argv[0]); + printf("\n using mean: %s -m ./jarvis-3.Q4_K_M.gguf --method mean\n", argv[0]); printf("\n"); } @@ -266,12 +266,12 @@ struct train_context { }; struct tokenized_prompt { - std::vector tokens_pos; - std::vector tokens_neg; + std::vector tokens_pos; + std::vector tokens_neg; size_t max_seq_len; - tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); + tokenized_prompt(jarvis_context * ctx, std::string pos, std::string neg) { + const bool add_bos = jarvis_add_bos_token(jarvis_get_model(ctx)); tokens_pos = common_tokenize(ctx, pos, add_bos, true); tokens_neg = common_tokenize(ctx, neg, add_bos, true); max_seq_len = std::max(tokens_pos.size(), tokens_neg.size()); @@ -279,10 +279,10 @@ struct tokenized_prompt { padding_seq(ctx, tokens_neg, max_seq_len); } - void padding_seq(llama_context * ctx, std::vector & tokens, size_t len) { + void padding_seq(jarvis_context * ctx, std::vector & tokens, size_t len) { // TODO: customize padding token - std::vector pad_tokens = common_tokenize(ctx, " ", false); - llama_token pad_tok = pad_tokens.back(); + std::vector pad_tokens = common_tokenize(ctx, " ", false); + jarvis_token pad_tok = pad_tokens.back(); while (tokens.size() < len) { tokens.push_back(pad_tok); } @@ -337,9 +337,9 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) { return true; } -static bool get_hidden_layers(llama_context * ctx, std::vector & tokens) { - llama_kv_cache_clear(ctx); - if 
(llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) { +static bool get_hidden_layers(jarvis_context * ctx, std::vector & tokens) { + jarvis_kv_cache_clear(ctx); + if (jarvis_decode(ctx, jarvis_batch_get_one(tokens.data(), tokens.size()))) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; } @@ -390,7 +390,7 @@ static int prepare_entries(common_params & params, train_context & ctx_train) { int main(int argc, char ** argv) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_CVECTOR_GENERATOR, print_usage)) { return 1; } @@ -409,21 +409,21 @@ int main(int argc, char ** argv) { params.warmup = false; print_build_info(); - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // load the model to get hparams - common_init_result llama_init = common_init_from_params(params); + common_init_result jarvis_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + jarvis_model * model = jarvis_init.model; + jarvis_context * ctx = jarvis_init.context; - // int n_ctx = llama_n_ctx(ctx); - int n_layers = llama_n_layer(model); - int n_embd = llama_n_embd(model); + // int n_ctx = jarvis_n_ctx(ctx); + int n_layers = jarvis_n_layer(model); + int n_embd = jarvis_n_embd(model); // get model hint param (a.k.a model arch name) char model_hint[128]; - llama_model_meta_val_str(model, "general.architecture", model_hint, 128); + jarvis_model_meta_val_str(model, "general.architecture", model_hint, 128); // init train_context train_context ctx_train(n_embd, n_layers); @@ -474,8 +474,8 @@ int main(int argc, char ** argv) { // done with the model, we can now free it to make gain some memory printf("Done evaluate prompts, unload model...\n"); - llama_free(ctx); - llama_free_model(model); + jarvis_free(ctx); 
+ jarvis_free_model(model); bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA; @@ -497,7 +497,7 @@ int main(int argc, char ** argv) { // write output vectors to gguf export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint); - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git a/examples/cvector-generator/mean.hpp b/examples/cvector-generator/mean.hpp index 16be5ce3eecf1..f95fb2dcce6cf 100644 --- a/examples/cvector-generator/mean.hpp +++ b/examples/cvector-generator/mean.hpp @@ -1,5 +1,5 @@ #include "common.h" -#include "llama.h" +#include "jarvis.h" #include "ggml.h" #include diff --git a/examples/cvector-generator/pca.hpp b/examples/cvector-generator/pca.hpp index f6e307fbc4970..3ea5dc4738570 100644 --- a/examples/cvector-generator/pca.hpp +++ b/examples/cvector-generator/pca.hpp @@ -1,5 +1,5 @@ #include "common.h" -#include "llama.h" +#include "jarvis.h" #include "ggml.h" #ifdef GGML_USE_CUDA @@ -290,7 +290,7 @@ static void power_iteration( ggml_gallocr_free(allocr); // TODO @ngxson : The output vector is randomly inverted - // Solution: https://github.com/ggerganov/llama.cpp/pull/8069#issuecomment-2185328171 + // Solution: https://github.com/ggerganov/jarvis.cpp/pull/8069#issuecomment-2185328171 } static void run_pca( diff --git a/examples/deprecation-warning/README.md b/examples/deprecation-warning/README.md index 59918ec2bbf72..2790c72fb7052 100644 --- a/examples/deprecation-warning/README.md +++ b/examples/deprecation-warning/README.md @@ -1,7 +1,7 @@ # Migration notice for binary filenames > [!IMPORTANT] -[2024 Jun 12] Binaries have been renamed w/ a `llama-` prefix. `main` is now `llama-cli`, `server` is `llama-server`, etc (https://github.com/ggerganov/llama.cpp/pull/7809) +[2024 Jun 12] Binaries have been renamed w/ a `jarvis-` prefix. 
`main` is now `jarvis-cli`, `server` is `jarvis-server`, etc (https://github.com/ggerganov/jarvis.cpp/pull/7809) This migration was important, but it is a breaking change that may not always be immediately obvious to users. @@ -9,41 +9,41 @@ Please update all scripts and workflows to use the new binary names. | Old Filename | New Filename | | ---- | ---- | -| main | llama-cli | -| server | llama-server | -| llama-bench | llama-bench | -| embedding | llama-embedding | -| quantize | llama-quantize | -| tokenize | llama-tokenize | -| export-lora | llama-export-lora | +| main | jarvis-cli | +| server | jarvis-server | +| jarvis-bench | jarvis-bench | +| embedding | jarvis-embedding | +| quantize | jarvis-quantize | +| tokenize | jarvis-tokenize | +| export-lora | jarvis-export-lora | | libllava.a | libllava.a | -| baby-llama | llama-baby-llama | -| batched | llama-batched | -| batched-bench | llama-batched-bench | -| benchmark-matmult | llama-benchmark-matmult | -| convert-llama2c-to-ggml | llama-convert-llama2c-to-ggml | -| eval-callback | llama-eval-callback | -| gbnf-validator | llama-gbnf-validator | -| gguf | llama-gguf | -| gguf-split | llama-gguf-split | -| gritlm | llama-gritlm | -| imatrix | llama-imatrix | -| infill | llama-infill | -| llava-cli | llama-llava-cli | -| lookahead | llama-lookahead | -| lookup | llama-lookup | -| lookup-create | llama-lookup-create | -| lookup-merge | llama-lookup-merge | -| lookup-stats | llama-lookup-stats | -| parallel | llama-parallel | -| passkey | llama-passkey | -| perplexity | llama-perplexity | -| q8dot | llama-q8dot | -| quantize-stats | llama-quantize-stats | -| retrieval | llama-retrieval | -| save-load-state | llama-save-load-state | -| simple | llama-simple | -| speculative | llama-speculative | -| vdot | llama-vdot | +| baby-jarvis | jarvis-baby-jarvis | +| batched | jarvis-batched | +| batched-bench | jarvis-batched-bench | +| benchmark-matmult | jarvis-benchmark-matmult | +| convert-jarvis2c-to-ggml | 
jarvis-convert-jarvis2c-to-ggml | +| eval-callback | jarvis-eval-callback | +| gbnf-validator | jarvis-gbnf-validator | +| gguf | jarvis-gguf | +| gguf-split | jarvis-gguf-split | +| gritlm | jarvis-gritlm | +| imatrix | jarvis-imatrix | +| infill | jarvis-infill | +| llava-cli | jarvis-llava-cli | +| lookahead | jarvis-lookahead | +| lookup | jarvis-lookup | +| lookup-create | jarvis-lookup-create | +| lookup-merge | jarvis-lookup-merge | +| lookup-stats | jarvis-lookup-stats | +| parallel | jarvis-parallel | +| passkey | jarvis-passkey | +| perplexity | jarvis-perplexity | +| q8dot | jarvis-q8dot | +| quantize-stats | jarvis-quantize-stats | +| retrieval | jarvis-retrieval | +| save-load-state | jarvis-save-load-state | +| simple | jarvis-simple | +| speculative | jarvis-speculative | +| vdot | jarvis-vdot | | tests/test-c.o | tests/test-c.o | diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/examples/deprecation-warning/deprecation-warning.cpp index 11b35d2c22500..088364cd4105c 100644 --- a/examples/deprecation-warning/deprecation-warning.cpp +++ b/examples/deprecation-warning/deprecation-warning.cpp @@ -17,18 +17,18 @@ int main(int argc, char** argv) { filename = filename.substr(pos+1); } - // Append "llama-" to the beginning of filename to get the replacemnt filename - auto replacement_filename = "llama-" + filename; + // Append "jarvis-" to the beginning of filename to get the replacemnt filename + auto replacement_filename = "jarvis-" + filename; - // The exception is if the filename is "main", then our replacement filename is "llama-cli" + // The exception is if the filename is "main", then our replacement filename is "jarvis-cli" if (filename == "main") { - replacement_filename = "llama-cli"; + replacement_filename = "jarvis-cli"; } fprintf(stdout, "\n"); fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str()); fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str()); - fprintf(stdout, " See 
https://github.com/ggerganov/llama.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n"); + fprintf(stdout, " See https://github.com/ggerganov/jarvis.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n"); fprintf(stdout, "\n"); return EXIT_FAILURE; diff --git a/examples/embedding/CMakeLists.txt b/examples/embedding/CMakeLists.txt index 8256e789ad33a..3c43d82e38f4f 100644 --- a/examples/embedding/CMakeLists.txt +++ b/examples/embedding/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-embedding) +set(TARGET jarvis-embedding) add_executable(${TARGET} embedding.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/embedding/README.md b/examples/embedding/README.md index 12b372bf1df42..40589f6ce4f81 100644 --- a/examples/embedding/README.md +++ b/examples/embedding/README.md @@ -1,6 +1,6 @@ -# llama.cpp/example/embedding +# jarvis.cpp/example/embedding -This example demonstrates generate high-dimensional embedding vector of a given text with llama.cpp. +This example demonstrates generate high-dimensional embedding vector of a given text with jarvis.cpp. ## Quick Start @@ -9,13 +9,13 @@ To get started right away, run the following command, making sure to use the cor ### Unix-based systems (Linux, macOS, etc.): ```bash -./llama-embedding -m ./path/to/model --pooling mean --log-disable -p "Hello World!" 2>/dev/null +./jarvis-embedding -m ./path/to/model --pooling mean --log-disable -p "Hello World!" 2>/dev/null ``` ### Windows: ```powershell -llama-embedding.exe -m ./path/to/model --pooling mean --log-disable -p "Hello World!" 2>$null +jarvis-embedding.exe -m ./path/to/model --pooling mean --log-disable -p "Hello World!" 2>$null ``` The above command will output space-separated float values. 
@@ -50,11 +50,11 @@ The above command will output space-separated float values. ### Unix-based systems (Linux, macOS, etc.): ```bash -./llama-embedding -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --pooling mean --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null +./jarvis-embedding -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --pooling mean --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null ``` ### Windows: ```powershell -llama-embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --pooling mean --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null +jarvis-embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --pooling mean --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null ``` diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 3f18fc6a70878..77dafad011a79 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -1,7 +1,7 @@ #include "arg.h" #include "common.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include @@ -25,30 +25,30 @@ static std::vector split_lines(const std::string & s, const std::st return lines; } -static void batch_add_seq(llama_batch & batch, const std::vector & tokens, llama_seq_id seq_id) { +static void batch_add_seq(jarvis_batch & batch, const std::vector & tokens, jarvis_seq_id seq_id) { size_t n_tokens = tokens.size(); for (size_t i = 0; i < n_tokens; i++) { common_batch_add(batch, tokens[i], i, { seq_id }, true); } } -static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) { - const enum llama_pooling_type pooling_type = 
llama_pooling_type(ctx); - const struct llama_model * model = llama_get_model(ctx); +static void batch_decode(jarvis_context * ctx, jarvis_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) { + const enum jarvis_pooling_type pooling_type = jarvis_pooling_type(ctx); + const struct jarvis_model * model = jarvis_get_model(ctx); // clear previous kv_cache values (irrelevant for embeddings) - llama_kv_cache_clear(ctx); + jarvis_kv_cache_clear(ctx); // run model LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq); - if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) { + if (jarvis_model_has_encoder(model) && !jarvis_model_has_decoder(model)) { // encoder-only model - if (llama_encode(ctx, batch) < 0) { + if (jarvis_encode(ctx, batch) < 0) { LOG_ERR("%s : failed to encode\n", __func__); } - } else if (!llama_model_has_encoder(model) && llama_model_has_decoder(model)) { + } else if (!jarvis_model_has_encoder(model) && jarvis_model_has_decoder(model)) { // decoder-only model - if (llama_decode(ctx, batch) < 0) { + if (jarvis_decode(ctx, batch) < 0) { LOG_ERR("%s : failed to decode\n", __func__); } } @@ -61,14 +61,14 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu const float * embd = nullptr; int embd_pos = 0; - if (pooling_type == LLAMA_POOLING_TYPE_NONE) { + if (pooling_type == JARVIS_POOLING_TYPE_NONE) { // try to get token embeddings - embd = llama_get_embeddings_ith(ctx, i); + embd = jarvis_get_embeddings_ith(ctx, i); embd_pos = i; GGML_ASSERT(embd != NULL && "failed to get token embeddings"); } else { // try to get sequence embeddings - supported only when pooling_type is not NONE - embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]); + embd = jarvis_get_embeddings_seq(ctx, batch.seq_id[i][0]); embd_pos = batch.seq_id[i][0]; GGML_ASSERT(embd != NULL && "failed to get sequence embeddings"); } @@ -81,7 +81,7 @@ static void batch_decode(llama_context * ctx, llama_batch 
& batch, float * outpu int main(int argc, char ** argv) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EMBEDDING)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_EMBEDDING)) { return 1; } @@ -91,25 +91,25 @@ int main(int argc, char ** argv) { // For non-causal models, batch size must be equal to ubatch size params.n_ubatch = params.n_batch; - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // load the model - common_init_result llama_init = common_init_from_params(params); + common_init_result jarvis_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + jarvis_model * model = jarvis_init.model; + jarvis_context * ctx = jarvis_init.context; if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); return 1; } - const int n_ctx_train = llama_n_ctx_train(model); - const int n_ctx = llama_n_ctx(ctx); + const int n_ctx_train = jarvis_n_ctx_train(model); + const int n_ctx = jarvis_n_ctx(ctx); - const enum llama_pooling_type pooling_type = llama_pooling_type(ctx); + const enum jarvis_pooling_type pooling_type = jarvis_pooling_type(ctx); - if (llama_model_has_encoder(model) && llama_model_has_decoder(model)) { + if (jarvis_model_has_encoder(model) && jarvis_model_has_decoder(model)) { LOG_ERR("%s: computing embeddings in encoder-decoder models is not supported\n", __func__); return 1; } @@ -147,7 +147,7 @@ int main(int argc, char ** argv) { // check if the last token is SEP // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true' for (auto & inp : inputs) { - if (inp.empty() || inp.back() != llama_token_sep(model)) { + if (inp.empty() || inp.back() != jarvis_token_sep(model)) { LOG_WRN("%s: last token in the prompt is not SEP\n", __func__); LOG_WRN("%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF 
header\n", __func__); } @@ -167,11 +167,11 @@ int main(int argc, char ** argv) { // initialize batch const int n_prompts = prompts.size(); - struct llama_batch batch = llama_batch_init(n_batch, 0, 1); + struct jarvis_batch batch = jarvis_batch_init(n_batch, 0, 1); // count number of embeddings int n_embd_count = 0; - if (pooling_type == LLAMA_POOLING_TYPE_NONE) { + if (pooling_type == JARVIS_POOLING_TYPE_NONE) { for (int k = 0; k < n_prompts; k++) { n_embd_count += inputs[k].size(); } @@ -180,7 +180,7 @@ int main(int argc, char ** argv) { } // allocate output - const int n_embd = llama_n_embd(model); + const int n_embd = jarvis_n_embd(model); std::vector embeddings(n_embd_count * n_embd, 0); float * emb = embeddings.data(); @@ -197,7 +197,7 @@ int main(int argc, char ** argv) { if (batch.n_tokens + n_toks > n_batch) { float * out = emb + e * n_embd; batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize); - e += pooling_type == LLAMA_POOLING_TYPE_NONE ? batch.n_tokens : s; + e += pooling_type == JARVIS_POOLING_TYPE_NONE ? 
batch.n_tokens : s; s = 0; common_batch_clear(batch); } @@ -214,7 +214,7 @@ int main(int argc, char ** argv) { if (params.embd_out.empty()) { LOG("\n"); - if (pooling_type == LLAMA_POOLING_TYPE_NONE) { + if (pooling_type == JARVIS_POOLING_TYPE_NONE) { for (int j = 0; j < n_embd_count; j++) { LOG("embedding %d: ", j); for (int i = 0; i < std::min(3, n_embd); i++) { @@ -234,7 +234,7 @@ int main(int argc, char ** argv) { } LOG("\n"); } - } else if (pooling_type == LLAMA_POOLING_TYPE_RANK) { + } else if (pooling_type == JARVIS_POOLING_TYPE_RANK) { for (int j = 0; j < n_embd_count; j++) { // NOTE: if you change this log - update the tests in ci/run.sh LOG("rerank score %d: %8.3f\n", j, emb[j * n_embd]); @@ -312,13 +312,13 @@ int main(int argc, char ** argv) { } LOG("\n"); - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); // clean up - llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); + jarvis_batch_free(batch); + jarvis_free(ctx); + jarvis_free_model(model); + jarvis_backend_free(); return 0; } diff --git a/examples/eval-callback/CMakeLists.txt b/examples/eval-callback/CMakeLists.txt index a48753d38e16e..46b47b90b94ba 100644 --- a/examples/eval-callback/CMakeLists.txt +++ b/examples/eval-callback/CMakeLists.txt @@ -1,9 +1,9 @@ -set(TARGET llama-eval-callback) +set(TARGET jarvis-eval-callback) add_executable(${TARGET} eval-callback.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) set(TEST_TARGET test-eval-callback) -add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0) +add_test(NAME ${TEST_TARGET} COMMAND jarvis-eval-callback --hf-repo ggml-org/models --hf-file 
tinyjarviss/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0) set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl) diff --git a/examples/eval-callback/README.md b/examples/eval-callback/README.md index 63a57ad6b68e5..df7946f3abc3a 100644 --- a/examples/eval-callback/README.md +++ b/examples/eval-callback/README.md @@ -1,4 +1,4 @@ -# llama.cpp/examples/eval-callback +# jarvis.cpp/examples/eval-callback A simple example which demonstrates how to use callback during the inference. It simply prints to the console all operations and tensor data. @@ -6,7 +6,7 @@ It simply prints to the console all operations and tensor data. Usage: ```shell -llama-eval-callback \ +jarvis-eval-callback \ --hf-repo ggml-org/models \ --hf-file phi-2/ggml-model-q4_0.gguf \ --model phi-2-q4_0.gguf \ @@ -20,12 +20,12 @@ Will print: ```shell llm_load_tensors: offloaded 33/33 layers to GPU ... -llama_new_context_with_model: n_ctx = 512 +jarvis_new_context_with_model: n_ctx = 512 ... 
-llama_new_context_with_model: CUDA0 compute buffer size = 105.00 MiB -llama_new_context_with_model: CUDA_Host compute buffer size = 6.01 MiB -llama_new_context_with_model: graph nodes = 1225 -llama_new_context_with_model: graph splits = 2 +jarvis_new_context_with_model: CUDA0 compute buffer size = 105.00 MiB +jarvis_new_context_with_model: CUDA_Host compute buffer size = 6.01 MiB +jarvis_new_context_with_model: graph nodes = 1225 +jarvis_new_context_with_model: graph splits = 2 ggml_debug: inp_embd = (f32) GET_ROWS(token_embd.weight{2560, 51200, 1, 1}, inp_tokens{1, 1, 1, 1}}) = {2560, 1, 1, 1} [ [ diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp index c08e3e5f675ed..a4cb2d6131438 100644 --- a/examples/eval-callback/eval-callback.cpp +++ b/examples/eval-callback/eval-callback.cpp @@ -1,7 +1,7 @@ #include "arg.h" #include "common.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include "ggml.h" #include @@ -126,12 +126,12 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) { return true; } -static bool run(llama_context * ctx, const common_params & params) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); +static bool run(jarvis_context * ctx, const common_params & params) { + const bool add_bos = jarvis_add_bos_token(jarvis_get_model(ctx)); - std::vector tokens = common_tokenize(ctx, params.prompt, add_bos); + std::vector tokens = common_tokenize(ctx, params.prompt, add_bos); - if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) { + if (jarvis_decode(ctx, jarvis_batch_get_one(tokens.data(), tokens.size()))) { LOG_ERR("%s : failed to eval\n", __func__); return false; } @@ -144,14 +144,14 @@ int main(int argc, char ** argv) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_COMMON)) { return 1; } common_init(); - llama_backend_init(); - 
llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // pass the callback to the backend scheduler // it will be executed for each node during the graph computation @@ -160,10 +160,10 @@ int main(int argc, char ** argv) { params.warmup = false; // init - common_init_result llama_init = common_init_from_params(params); + common_init_result jarvis_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + jarvis_model * model = jarvis_init.model; + jarvis_context * ctx = jarvis_init.context; if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); return 1; @@ -182,12 +182,12 @@ int main(int argc, char ** argv) { } LOG("\n"); - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); + jarvis_free(ctx); + jarvis_free_model(model); - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git a/examples/export-lora/CMakeLists.txt b/examples/export-lora/CMakeLists.txt index 1cef6e71694e2..babb850e94ede 100644 --- a/examples/export-lora/CMakeLists.txt +++ b/examples/export-lora/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-export-lora) +set(TARGET jarvis-export-lora) add_executable(${TARGET} export-lora.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/export-lora/README.md b/examples/export-lora/README.md index 7dce99c9a9e61..7df4426e973d2 100644 --- a/examples/export-lora/README.md +++ b/examples/export-lora/README.md @@ -3,7 +3,7 @@ Apply LORA adapters to base model and export the resulting model. 
``` -usage: llama-export-lora [options] +usage: jarvis-export-lora [options] options: -m, --model model path from which to load base model (default '') @@ -16,16 +16,16 @@ options: For example: ```bash -./bin/llama-export-lora \ - -m open-llama-3b-v2.gguf \ - -o open-llama-3b-v2-english2tokipona-chat.gguf \ - --lora lora-open-llama-3b-v2-english2tokipona-chat-LATEST.gguf +./bin/jarvis-export-lora \ + -m open-jarvis-3b-v2.gguf \ + -o open-jarvis-3b-v2-english2tokipona-chat.gguf \ + --lora lora-open-jarvis-3b-v2-english2tokipona-chat-LATEST.gguf ``` Multiple LORA adapters can be applied by passing multiple `--lora FNAME` or `--lora-scaled FNAME S` command line parameters: ```bash -./bin/llama-export-lora \ +./bin/jarvis-export-lora \ -m your_base_model.gguf \ -o your_merged_model.gguf \ --lora-scaled lora_task_A.gguf 0.5 \ diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index 67662313d075c..d024a7e85d574 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -186,10 +186,10 @@ struct lora_merge_ctx { // prepare metadata gguf_set_kv(ctx_out, base_model.ctx_gguf); // output is forced to f16 for now - gguf_set_val_u32(ctx_out, "general.file_type", LLAMA_FTYPE_MOSTLY_F16); + gguf_set_val_u32(ctx_out, "general.file_type", JARVIS_FTYPE_MOSTLY_F16); // check if all lora adapters have the same tensors - // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggerganov/llama.cpp/pull/8607#discussion_r1686027777 + // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggerganov/jarvis.cpp/pull/8607#discussion_r1686027777 static const char * err_no_subset_adapter = "Input adapters do not have the same list of tensors. This is not yet supported. 
Please merge the adapter one-by-one instead of merging all at once."; if (adapters.size() > 1) { for (size_t i = 1; i < adapters.size(); ++i) { @@ -402,7 +402,7 @@ static void print_usage(int, char ** argv) { int main(int argc, char ** argv) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_EXPORT_LORA, print_usage)) { return 1; } diff --git a/examples/gbnf-validator/CMakeLists.txt b/examples/gbnf-validator/CMakeLists.txt index 4edd6ec7394c5..870d93220a544 100644 --- a/examples/gbnf-validator/CMakeLists.txt +++ b/examples/gbnf-validator/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-gbnf-validator) +set(TARGET jarvis-gbnf-validator) add_executable(${TARGET} gbnf-validator.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/gbnf-validator/gbnf-validator.cpp b/examples/gbnf-validator/gbnf-validator.cpp index 7493af9d3aec3..bc4e028e3342d 100644 --- a/examples/gbnf-validator/gbnf-validator.cpp +++ b/examples/gbnf-validator/gbnf-validator.cpp @@ -1,5 +1,5 @@ #include "unicode.h" -#include "llama-grammar.h" +#include "jarvis-grammar.h" #include #include @@ -8,17 +8,17 @@ #include #include -static bool llama_grammar_validate(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) { +static bool jarvis_grammar_validate(struct jarvis_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) { const auto cpts = unicode_cpts_from_utf8(input_str); - const llama_grammar_rules & rules = llama_grammar_get_rules (grammar); - llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar); + const jarvis_grammar_rules & rules = 
jarvis_grammar_get_rules (grammar); + jarvis_grammar_stacks & stacks_cur = jarvis_grammar_get_stacks(grammar); size_t pos = 0; for (const auto & cpt : cpts) { - const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy + const jarvis_grammar_stacks stacks_prev = jarvis_grammar_get_stacks(grammar); // copy - llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur); + jarvis_grammar_accept(rules, stacks_prev, cpt, stacks_cur); if (stacks_cur.empty()) { error_pos = pos; @@ -80,9 +80,9 @@ int main(int argc, char** argv) { grammar_str = buffer.str(); } - llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root"); + jarvis_grammar * grammar = jarvis_grammar_init_impl(nullptr, grammar_str.c_str(), "root"); if (grammar == nullptr) { - throw std::runtime_error("Failed to initialize llama_grammar"); + throw std::runtime_error("Failed to initialize jarvis_grammar"); } // Read the input file std::string input_str; @@ -97,7 +97,7 @@ int main(int argc, char** argv) { // Validate the input string against the grammar size_t error_pos; std::string error_msg; - bool is_valid = llama_grammar_validate(grammar, input_str, error_pos, error_msg); + bool is_valid = jarvis_grammar_validate(grammar, input_str, error_pos, error_msg); if (is_valid) { fprintf(stdout, "Input string is valid according to the grammar.\n"); @@ -106,7 +106,7 @@ int main(int argc, char** argv) { } // Clean up - llama_grammar_free_impl(grammar); + jarvis_grammar_free_impl(grammar); return 0; } diff --git a/examples/gen-docs/CMakeLists.txt b/examples/gen-docs/CMakeLists.txt index c94cda7764341..45c2a215c43c1 100644 --- a/examples/gen-docs/CMakeLists.txt +++ b/examples/gen-docs/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-gen-docs) +set(TARGET jarvis-gen-docs) add_executable(${TARGET} gen-docs.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common 
jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/gen-docs/gen-docs.cpp b/examples/gen-docs/gen-docs.cpp index 77c59a836e50a..b02918844f690 100644 --- a/examples/gen-docs/gen-docs.cpp +++ b/examples/gen-docs/gen-docs.cpp @@ -47,7 +47,7 @@ static void write_table(std::ofstream & file, std::vector & opts) } } -static void export_md(std::string fname, llama_example ex) { +static void export_md(std::string fname, jarvis_example ex) { std::ofstream file(fname, std::ofstream::out | std::ofstream::trunc); common_params params; @@ -57,7 +57,7 @@ static void export_md(std::string fname, llama_example ex) { std::vector sparam_options; std::vector specific_options; for (auto & opt : ctx_arg.options) { - // in case multiple LLAMA_EXAMPLE_* are set, we prioritize the LLAMA_EXAMPLE_* matching current example + // in case multiple JARVIS_EXAMPLE_* are set, we prioritize the JARVIS_EXAMPLE_* matching current example if (opt.is_sparam) { sparam_options.push_back(&opt); } else if (opt.in_example(ctx_arg.ex)) { @@ -76,8 +76,8 @@ static void export_md(std::string fname, llama_example ex) { } int main(int, char **) { - export_md("autogen-main.md", LLAMA_EXAMPLE_MAIN); - export_md("autogen-server.md", LLAMA_EXAMPLE_SERVER); + export_md("autogen-main.md", JARVIS_EXAMPLE_MAIN); + export_md("autogen-server.md", JARVIS_EXAMPLE_SERVER); return 0; } diff --git a/examples/gguf-hash/CMakeLists.txt b/examples/gguf-hash/CMakeLists.txt index 633f4553594bb..c51249495fccf 100644 --- a/examples/gguf-hash/CMakeLists.txt +++ b/examples/gguf-hash/CMakeLists.txt @@ -1,4 +1,4 @@ -set(TARGET llama-gguf-hash) +set(TARGET jarvis-gguf-hash) add_executable(${TARGET} gguf-hash.cpp) install(TARGETS ${TARGET} RUNTIME) diff --git a/examples/gguf-hash/README.md b/examples/gguf-hash/README.md index 9871651e38ba8..a9ceb24af3183 100644 --- a/examples/gguf-hash/README.md +++ b/examples/gguf-hash/README.md @@ -1,5 +1,5 @@ -# llama-gguf-hash +# jarvis-gguf-hash 
CLI to hash GGUF files to detect difference on a per model and per tensor level. @@ -38,8 +38,8 @@ For Maintainers: For Model Creators: - Optional consistent UUID generation based on model tensor content - This is served by UUIDv5 which is useful for databases keys - - llama.cpp UUIDv5 Namespace: `ef001206-dadc-5f6d-a15f-3359e577d4e5` - - Made via UUIDv5 URL namespace of `en.wikipedia.org/wiki/Llama.cpp` + - jarvis.cpp UUIDv5 Namespace: `ef001206-dadc-5f6d-a15f-3359e577d4e5` + - Made via UUIDv5 URL namespace of `en.wikipedia.org/wiki/Jarvis.cpp` For Model Users: - Assurance of tensor layer integrity even if metadata was updated @@ -57,14 +57,14 @@ For Model Users: ## Compile Example ```bash -cmake -B build -DCMAKE_BUILD_TYPE=Debug -DLLAMA_FATAL_WARNINGS=ON +cmake -B build -DCMAKE_BUILD_TYPE=Debug -DJARVIS_FATAL_WARNINGS=ON make -C build clean -make -C build llama-gguf-hash VERBOSE=1 -./build/bin/llama-gguf-hash test.gguf -./build/bin/llama-gguf-hash --xxh64 test.gguf -./build/bin/llama-gguf-hash --sha1 test.gguf -./build/bin/llama-gguf-hash --uuid test.gguf -./build/bin/llama-gguf-hash --sha256 test.gguf +make -C build jarvis-gguf-hash VERBOSE=1 +./build/bin/jarvis-gguf-hash test.gguf +./build/bin/jarvis-gguf-hash --xxh64 test.gguf +./build/bin/jarvis-gguf-hash --sha1 test.gguf +./build/bin/jarvis-gguf-hash --uuid test.gguf +./build/bin/jarvis-gguf-hash --sha256 test.gguf ``` ## Generation and Verification Example @@ -72,7 +72,7 @@ make -C build llama-gguf-hash VERBOSE=1 To generate we may use this command ```bash -./llama-gguf-hash --all test.gguf > test.gguf.manifest +./jarvis-gguf-hash --all test.gguf > test.gguf.manifest ``` Which would generate a manifest that looks like below, which contains multiple hash type and per tensor layer hashes as well @@ -117,7 +117,7 @@ sha256 7dd641b32f59b60dbd4b5420c4b0f6321ccf48f58f6ae201a3dbc4a58a27c6e4 test We can then use the normal check command which will by default check for the highest security strength hash and verify 
against that: ```bash -$ ./llama-gguf-hash --check test.gguf.manifest test.gguf +$ ./jarvis-gguf-hash --check test.gguf.manifest test.gguf manifest test.gguf.manifest sha256 sha1 xxh64 sha256 c0510d38fa060c46265e0160a85c7243096b01dd31c2f355bdbb5516b20de1bd test.gguf:tensor_0 - Ok sha256 8514cbcc73692a2c56bd7a33a022edd5ff819614bd23b19915d7224387f397a7 test.gguf:tensor_1 - Ok @@ -137,7 +137,7 @@ Verification results for test.gguf.manifest - Success Or we may explicitly ask for a faster hash like: ```bash -$ ./llama-gguf-hash --check test.gguf.manifest --xxh64 test.gguf +$ ./jarvis-gguf-hash --check test.gguf.manifest --xxh64 test.gguf manifest test.gguf.manifest sha256 sha1 xxh64 xxh64 f66e9cd66a4396a0 test.gguf:tensor_0 - Ok xxh64 7d3a1f9ac04d0537 test.gguf:tensor_1 - Ok @@ -157,7 +157,7 @@ Verification results for test.gguf.manifest - Success Or maybe we want to just check that all the hash is valid: ```bash -$./llama-gguf-hash --check test.gguf.manifest --all test.gguf.manifest +$./jarvis-gguf-hash --check test.gguf.manifest --all test.gguf.manifest manifest test.gguf.manifest sha256 sha1 xxh64 xxh64 f66e9cd66a4396a0 test.gguf:tensor_0 - Ok sha1 59f79ecefd8125a996fdf419239051a7e99e5f20 test.gguf:tensor_0 - Ok diff --git a/examples/gguf-hash/gguf-hash.cpp b/examples/gguf-hash/gguf-hash.cpp index e96c75117f533..e7e3cd576c3da 100644 --- a/examples/gguf-hash/gguf-hash.cpp +++ b/examples/gguf-hash/gguf-hash.cpp @@ -24,9 +24,9 @@ extern "C" { #endif -// uuid.uuid5(uuid.NAMESPACE_URL, 'en.wikipedia.org/wiki/Llama.cpp') -#define UUID_NAMESPACE_LLAMA_CPP "ef001206-dadc-5f6d-a15f-3359e577d4e5" -#define UUID_NAMESPACE_LLAMA_CPP_HEX 0xef, 0x00, 0x12, 0x06, 0xda, 0xdc, 0x5f, 0x6d, 0xa1, 0x5f, 0x33, 0x59, 0xe5, 0x77, 0xd4, 0xe5 +// uuid.uuid5(uuid.NAMESPACE_URL, 'en.wikipedia.org/wiki/Jarvis.cpp') +#define UUID_NAMESPACE_JARVIS_CPP "ef001206-dadc-5f6d-a15f-3359e577d4e5" +#define UUID_NAMESPACE_JARVIS_CPP_HEX 0xef, 0x00, 0x12, 0x06, 0xda, 0xdc, 0x5f, 0x6d, 0xa1, 0x5f, 0x33, 
0x59, 0xe5, 0x77, 0xd4, 0xe5 #define HASH_TYPE_SHA256_STR "sha256" @@ -320,7 +320,7 @@ static hash_exit_code_t gguf_hash(const hash_params & hash_params) { // sha1 for uuid init SHA1_CTX sha1_for_uuid_ctx; if (hash_params.uuid) { - unsigned char const uuidv5_namespace[] = {UUID_NAMESPACE_LLAMA_CPP_HEX}; + unsigned char const uuidv5_namespace[] = {UUID_NAMESPACE_JARVIS_CPP_HEX}; SHA1Init(&sha1_for_uuid_ctx); SHA1Update( &sha1_for_uuid_ctx, (unsigned char const *)uuidv5_namespace, sizeof(uuidv5_namespace)); } diff --git a/examples/gguf-split/CMakeLists.txt b/examples/gguf-split/CMakeLists.txt index f63887da7dfca..e1ed69f8df477 100644 --- a/examples/gguf-split/CMakeLists.txt +++ b/examples/gguf-split/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-gguf-split) +set(TARGET jarvis-gguf-split) add_executable(${TARGET} gguf-split.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/gguf-split/gguf-split.cpp b/examples/gguf-split/gguf-split.cpp index 7e62657e118a4..e44fc83f1bed9 100644 --- a/examples/gguf-split/gguf-split.cpp +++ b/examples/gguf-split/gguf-split.cpp @@ -1,4 +1,4 @@ -#include "llama.h" +#include "jarvis.h" #include "common.h" #include @@ -99,8 +99,8 @@ static void split_params_parse_ex(int argc, const char ** argv, split_params & p split_print_usage(argv[0]); exit(0); } else if (arg == "--version") { - fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT); - fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET); + fprintf(stderr, "version: %d (%s)\n", JARVIS_BUILD_NUMBER, JARVIS_COMMIT); + fprintf(stderr, "built with %s for %s\n", JARVIS_COMPILER, JARVIS_BUILD_TARGET); exit(0); } else if (arg == "--dry-run") { arg_found = true; @@ -308,7 +308,7 @@ struct split_strategy { for (auto & ctx_out : 
ctx_outs) { // construct file path char split_path[PATH_MAX] = {0}; - llama_split_path(split_path, sizeof(split_path), params.output.c_str(), i_split, n_split); + jarvis_split_path(split_path, sizeof(split_path), params.output.c_str(), i_split, n_split); // open the output file printf("Writing file %s ... ", split_path); @@ -430,7 +430,7 @@ static void gguf_merge(const split_params & split_params) { }; if (i_split > 0) { - llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split); + jarvis_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split); } fprintf(stderr, "%s: reading metadata %s ...", __func__, split_path); @@ -470,7 +470,7 @@ static void gguf_merge(const split_params & split_params) { } // Verify the file naming and extract split_prefix - if (!llama_split_prefix(split_prefix, sizeof (split_prefix), split_path, i_split, n_split)) { + if (!jarvis_split_prefix(split_prefix, sizeof (split_prefix), split_path, i_split, n_split)) { fprintf(stderr, "\n%s: unexpected input file name: %s" " i_split=%d" " n_split=%d\n", __func__, @@ -508,7 +508,7 @@ static void gguf_merge(const split_params & split_params) { // Write tensors data for (int i_split = 0; i_split < n_split; i_split++) { - llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split); + jarvis_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split); std::ifstream f_input(split_path, std::ios::binary); if (!f_input.is_open()) { fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_path); diff --git a/examples/gguf-split/tests.sh b/examples/gguf-split/tests.sh index d5a92d6051063..246e9a3573ec6 100755 --- a/examples/gguf-split/tests.sh +++ b/examples/gguf-split/tests.sh @@ -18,8 +18,8 @@ fi set -x -SPLIT=$1/llama-gguf-split -MAIN=$1/llama-cli +SPLIT=$1/jarvis-gguf-split +MAIN=$1/jarvis-cli WORK_PATH=$TMP_DIR/gguf-split ROOT_DIR=$(realpath $(dirname $0)/../../) diff --git a/examples/gguf/CMakeLists.txt 
b/examples/gguf/CMakeLists.txt index a9569b411956b..3cb82c8919c3b 100644 --- a/examples/gguf/CMakeLists.txt +++ b/examples/gguf/CMakeLists.txt @@ -1,4 +1,4 @@ -set(TARGET llama-gguf) +set(TARGET jarvis-gguf) add_executable(${TARGET} gguf.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT}) diff --git a/examples/gritlm/CMakeLists.txt b/examples/gritlm/CMakeLists.txt index 86dfddca346fe..0039c26030fcf 100644 --- a/examples/gritlm/CMakeLists.txt +++ b/examples/gritlm/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-gritlm) +set(TARGET jarvis-gritlm) add_executable(${TARGET} gritlm.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/gritlm/README.md b/examples/gritlm/README.md index 786ba57363def..88fde2e28aafc 100644 --- a/examples/gritlm/README.md +++ b/examples/gritlm/README.md @@ -26,7 +26,7 @@ $ scripts/hf.sh --repo cohesionet/GritLM-7B_gguf --file gritlm-7b_q4_1.gguf --ou Run the example using the downloaded model: ```console -$ ./llama-gritlm -m models/gritlm-7b_q4_1.gguf +$ ./jarvis-gritlm -m models/gritlm-7b_q4_1.gguf Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "A purely peer-to-peer version of electronic cash w" is: 0.605 Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "All text-based language problems can be reduced to" is: 0.103 diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 6e42fa0734ecb..58df109196ff9 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -1,39 +1,39 @@ #include "arg.h" #include "common.h" -#include "llama.h" +#include "jarvis.h" #include #include // #define GRIT_DEBUG -static std::vector> encode(llama_context * ctx, const std::vector & 
sentences, const std::string & instruction) { +static std::vector> encode(jarvis_context * ctx, const std::vector & sentences, const std::string & instruction) { std::vector> result; - const llama_model * model = llama_get_model(ctx); + const jarvis_model * model = jarvis_get_model(ctx); - llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1); + jarvis_batch batch = jarvis_batch_init(jarvis_n_batch(ctx), 0, 1); for (uint64_t i = 0; i < sentences.size(); i++) { common_batch_clear(batch); const std::string input_string = instruction + sentences[i]; - std::vector inputs = common_tokenize(model, input_string, true, false); + std::vector inputs = common_tokenize(model, input_string, true, false); const int32_t n_toks = inputs.size(); // GritLM seems to have EOS = "" // https://github.com/ContextualAI/gritlm/blob/92025b16534712b31b3c4aaaf069350e222bd5f8/gritlm/gritlm.py#L18 - // inputs.push_back(llama_token_eos(model)); + // inputs.push_back(jarvis_token_eos(model)); // we want to ignore instruction tokens for mean pooling const int32_t n_inst = common_tokenize(model, instruction, true, false).size(); #ifdef GRIT_DEBUG // debug tokens - should be matching as referenced in the GritLM sample - std::for_each(inputs.begin(), inputs.end(), [&ctx](llama_token t) { - std::printf("[%u:%s]", t, llama_token_to_piece(ctx, t).c_str()); + std::for_each(inputs.begin(), inputs.end(), [&ctx](jarvis_token t) { + std::printf("[%u:%s]", t, jarvis_token_to_piece(ctx, t).c_str()); }); std::printf("\n"); #endif @@ -44,22 +44,22 @@ static std::vector> encode(llama_context * ctx, const std::ve } // clear previous kv_cache values (irrelevant for embeddings) - llama_kv_cache_clear(ctx); - llama_set_embeddings(ctx, true); - llama_set_causal_attn(ctx, false); + jarvis_kv_cache_clear(ctx); + jarvis_set_embeddings(ctx, true); + jarvis_set_causal_attn(ctx, false); // run model - llama_decode(ctx, batch); + jarvis_decode(ctx, batch); // get embedding dimensions - uint64_t n_embd = 
llama_n_embd(model); + uint64_t n_embd = jarvis_n_embd(model); // allocate embedding output std::vector emb_unorm(n_embd, 0.0f); // sum up all token embeddings for (int32_t k = n_inst; k < n_toks; k++) { - float * emb = llama_get_embeddings_ith(ctx, k); + float * emb = jarvis_get_embeddings_ith(ctx, k); for (uint64_t j = 0; j < n_embd; j++) { emb_unorm[j] += emb[j]; } @@ -88,24 +88,24 @@ static std::vector> encode(llama_context * ctx, const std::ve #endif } - llama_batch_free(batch); + jarvis_batch_free(batch); return result; } -static std::string generate(llama_context * ctx, llama_sampler * smpl, const std::string & prompt, bool stream) { +static std::string generate(jarvis_context * ctx, jarvis_sampler * smpl, const std::string & prompt, bool stream) { std::string result; - const llama_model * model = llama_get_model(ctx); - llama_token eos_token = llama_token_eos(model); + const jarvis_model * model = jarvis_get_model(ctx); + jarvis_token eos_token = jarvis_token_eos(model); - llama_kv_cache_clear(ctx); - llama_set_embeddings(ctx, false); - llama_set_causal_attn(ctx, true); + jarvis_kv_cache_clear(ctx); + jarvis_set_embeddings(ctx, false); + jarvis_set_causal_attn(ctx, true); - llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1); + jarvis_batch bat = jarvis_batch_init(jarvis_n_batch(ctx), 0, 1); - std::vector inputs = common_tokenize(model, prompt, false, true); + std::vector inputs = common_tokenize(model, prompt, false, true); int32_t i_current_token = 0; while (true) { @@ -119,9 +119,9 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std } inputs.clear(); - llama_decode(ctx, bat); + jarvis_decode(ctx, bat); - llama_token token = llama_sampler_sample(smpl, ctx, bat.n_tokens - 1); + jarvis_token token = jarvis_sampler_sample(smpl, ctx, bat.n_tokens - 1); if (token == eos_token) { break; @@ -142,7 +142,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std std::printf("\n"); } - 
llama_batch_free(bat); + jarvis_batch_free(bat); return result; } @@ -154,29 +154,29 @@ static std::string gritlm_instruction(const std::string & instruction) { int main(int argc, char * argv[]) { common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_COMMON)) { return 1; } common_init(); - llama_model_params mparams = common_model_params_to_llama(params); - llama_context_params cparams = common_context_params_to_llama(params); + jarvis_model_params mparams = common_model_params_to_jarvis(params); + jarvis_context_params cparams = common_context_params_to_jarvis(params); - llama_backend_init(); + jarvis_backend_init(); - llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams); + jarvis_model * model = jarvis_load_model_from_file(params.model.c_str(), mparams); // create generation context - llama_context * ctx = llama_new_context_with_model(model, cparams); + jarvis_context * ctx = jarvis_new_context_with_model(model, cparams); - auto sparams = llama_sampler_chain_default_params(); + auto sparams = jarvis_sampler_chain_default_params(); sparams.no_perf = false; - llama_sampler * smpl = llama_sampler_chain_init(sparams); + jarvis_sampler * smpl = jarvis_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl, llama_sampler_init_greedy()); + jarvis_sampler_chain_add(smpl, jarvis_sampler_init_greedy()); // ### Embedding/Representation ### // samples taken from: https://github.com/ContextualAI/gritlm#basic @@ -197,7 +197,7 @@ int main(int argc, char * argv[]) { const std::vector> d_rep = encode(ctx, documents, gritlm_instruction("")); const std::vector> q_rep = encode(ctx, queries, gritlm_instruction(instruction)); - const int n_embd = llama_n_embd(model); + const int n_embd = jarvis_n_embd(model); const float cosine_sim_q0_d0 = common_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd); const float cosine_sim_q0_d1 = 
common_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd); @@ -217,10 +217,10 @@ int main(int argc, char * argv[]) { std::string response = generate(ctx, smpl, prompt, true); } - llama_sampler_free(smpl); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); + jarvis_sampler_free(smpl); + jarvis_free(ctx); + jarvis_free_model(model); + jarvis_backend_free(); return 0; } diff --git a/examples/imatrix/CMakeLists.txt b/examples/imatrix/CMakeLists.txt index d4c8265bdb9d2..c03c64826c129 100644 --- a/examples/imatrix/CMakeLists.txt +++ b/examples/imatrix/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-imatrix) +set(TARGET jarvis-imatrix) add_executable(${TARGET} imatrix.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/imatrix/README.md b/examples/imatrix/README.md index bb5faec94c20a..2781dce75e951 100644 --- a/examples/imatrix/README.md +++ b/examples/imatrix/README.md @@ -1,12 +1,12 @@ -# llama.cpp/examples/imatrix +# jarvis.cpp/examples/imatrix Compute an importance matrix for a model and given text dataset. Can be used during quantization to enchance the quality of the quantized models. -More information is available here: https://github.com/ggerganov/llama.cpp/pull/4861 +More information is available here: https://github.com/ggerganov/jarvis.cpp/pull/4861 ## Usage ``` -./llama-imatrix \ +./jarvis-imatrix \ -m model.gguf -f some-text.txt [-o imatrix.dat] [--process-output] [--verbosity 1] \ [--no-ppl] [--chunk 123] [--output-frequency 10] [--save-frequency 0] \ [--in-file imatrix-prev-0.dat --in-file imatrix-prev-1.dat ...] 
@@ -28,8 +28,8 @@ For faster computation, make sure to use GPU offloading via the `-ngl` argument GGML_CUDA=1 make -j # generate importance matrix (imatrix.dat) -./llama-imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99 +./jarvis-imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99 # use the imatrix to perform a Q4_K_M quantization -./llama-quantize --imatrix imatrix.dat ggml-model-f16.gguf ./ggml-model-q4_k_m.gguf q4_k_m +./jarvis-quantize --imatrix imatrix.dat ggml-model-f16.gguf ./ggml-model-q4_k_m.gguf q4_k_m ``` diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 70ff47768c02b..437651a750227 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -1,7 +1,7 @@ #include "arg.h" #include "common.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include #include @@ -100,7 +100,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * const float * data = is_host ? (const float *) src1->data : m_src1_data.data(); // this has been adapted to the new format of storing merged experts in a single 3d tensor - // ref: https://github.com/ggerganov/llama.cpp/pull/6387 + // ref: https://github.com/ggerganov/jarvis.cpp/pull/6387 if (t->op == GGML_OP_MUL_MAT_ID) { // ids -> [n_experts_used, n_tokens] // src1 -> [cols, n_expert_used, n_tokens] @@ -428,15 +428,15 @@ static void process_logits( } } -static bool compute_imatrix(llama_context * ctx, const common_params & params) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); - const int n_ctx = llama_n_ctx(ctx); +static bool compute_imatrix(jarvis_context * ctx, const common_params & params) { + const bool add_bos = jarvis_add_bos_token(jarvis_get_model(ctx)); + GGML_ASSERT(!jarvis_add_eos_token(jarvis_get_model(ctx))); + const int n_ctx = jarvis_n_ctx(ctx); auto tim1 = std::chrono::high_resolution_clock::now(); LOG_INF("%s: tokenizing the input ..\n", 
__func__); - std::vector tokens = common_tokenize(ctx, params.prompt, true); + std::vector tokens = common_tokenize(ctx, params.prompt, true); auto tim2 = std::chrono::high_resolution_clock::now(); LOG_INF("%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast(tim2-tim1).count()); @@ -467,7 +467,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { const int n_chunk_max = tokens.size() / n_ctx; const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max); - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = jarvis_n_vocab(jarvis_get_model(ctx)); const int n_batch = params.n_batch; int count = 0; @@ -494,9 +494,9 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_clear(ctx); + jarvis_kv_cache_clear(ctx); - llama_batch batch = llama_batch_init(n_batch, 0, 1); + jarvis_batch batch = jarvis_batch_init(n_batch, 0, 1); for (int j = 0; j < num_batches; ++j) { const int batch_start = start + j * n_batch; @@ -507,7 +507,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { // add BOS token for the first batch of each chunk if (add_bos && j == 0) { - tokens[batch_start] = llama_token_bos(llama_get_model(ctx)); + tokens[batch_start] = jarvis_token_bos(jarvis_get_model(ctx)); } common_batch_clear(batch); @@ -515,9 +515,9 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true); } - if (llama_decode(ctx, batch)) { + if (jarvis_decode(ctx, batch)) { LOG_ERR("%s : failed to eval\n", __func__); - llama_batch_free(batch); + jarvis_batch_free(batch); return false; } @@ -525,12 +525,12 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { tokens[batch_start] = token_org; if 
(params.compute_ppl && num_batches > 1) { - const auto * batch_logits = llama_get_logits(ctx); + const auto * batch_logits = jarvis_get_logits(ctx); logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab); } } - llama_batch_free(batch); + jarvis_batch_free(batch); const auto t_end = std::chrono::high_resolution_clock::now(); @@ -547,7 +547,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { if (params.compute_ppl) { const int first = n_ctx/2; - const auto * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx); + const auto * all_logits = num_batches > 1 ? logits.data() : jarvis_get_logits(ctx); process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first, workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first); count += n_ctx - first - 1; @@ -583,7 +583,7 @@ int main(int argc, char ** argv) { params.logits_all = true; params.escape = false; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_IMATRIX, print_usage)) { return 1; } @@ -606,8 +606,8 @@ int main(int argc, char ** argv) { g_collector.save_imatrix(); } - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); // pass the callback to the backend scheduler // it will be executed for each node during the graph computation @@ -616,16 +616,16 @@ int main(int argc, char ** argv) { params.warmup = false; // init - common_init_result llama_init = common_init_from_params(params); + common_init_result jarvis_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + jarvis_model * model = jarvis_init.model; + jarvis_context * ctx = jarvis_init.context; if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); return 1; } - 
const int n_ctx_train = llama_n_ctx_train(model); + const int n_ctx_train = jarvis_n_ctx_train(model); if (params.n_ctx > n_ctx_train) { LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, params.n_ctx); @@ -644,12 +644,12 @@ int main(int argc, char ** argv) { g_collector.save_imatrix(); LOG("\n"); - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); + jarvis_free(ctx); + jarvis_free_model(model); - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git a/examples/infill/CMakeLists.txt b/examples/infill/CMakeLists.txt index 9b1aa3b63c920..f9ad699135e60 100644 --- a/examples/infill/CMakeLists.txt +++ b/examples/infill/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-infill) +set(TARGET jarvis-infill) add_executable(${TARGET} infill.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/infill/README.md b/examples/infill/README.md index 810a0c5e76697..9d9e7f5376d19 100644 --- a/examples/infill/README.md +++ b/examples/infill/README.md @@ -1,11 +1,11 @@ -# llama.cpp/example/infill +# jarvis.cpp/example/infill -This example shows how to use the infill mode with Code Llama models supporting infill mode. +This example shows how to use the infill mode with Code Jarvis models supporting infill mode. Currently the 7B and 13B models support infill mode. Infill supports most of the options available in the main example. 
-For further information have a look at the main README.md in llama.cpp/example/main/README.md +For further information have a look at the main README.md in jarvis.cpp/example/main/README.md ## Common Options @@ -37,11 +37,11 @@ The `infill` program offers a seamless way to interact with LLaMA models, allowi ### Example -Download a model that supports infill, for example CodeLlama: +Download a model that supports infill, for example CodeJarvis: ```console -scripts/hf.sh --repo TheBloke/CodeLlama-13B-GGUF --file codellama-13b.Q5_K_S.gguf --outdir models +scripts/hf.sh --repo TheBloke/CodeJarvis-13B-GGUF --file codejarvis-13b.Q5_K_S.gguf --outdir models ``` ```bash -./llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 --in-prefix "def helloworld():\n print(\"hell" --in-suffix "\n print(\"goodbye world\")\n " +./jarvis-infill -t 10 -ngl 0 -m models/codejarvis-13b.Q5_K_S.gguf -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 --in-prefix "def helloworld():\n print(\"hell" --in-suffix "\n print(\"goodbye world\")\n " ``` diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index f18362c91c7bf..8c868e0ad0e47 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -3,7 +3,7 @@ #include "console.h" #include "sampling.h" #include "log.h" -#include "llama.h" +#include "jarvis.h" #include #include @@ -33,20 +33,20 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -static llama_context ** g_ctx; -static llama_model ** g_model; +static jarvis_context ** g_ctx; +static jarvis_model ** g_model; static common_sampler ** g_smpl; static common_params * g_params; -static std::vector * g_input_tokens; +static std::vector * g_input_tokens; static std::ostringstream * g_output_ss; -static std::vector * g_output_tokens; +static std::vector * g_output_tokens; static bool is_interacting = false; static void write_logfile( - const llama_context * ctx, const common_params & params, 
const llama_model * model, - const std::vector & input_tokens, const std::string & output, - const std::vector & output_tokens + const jarvis_context * ctx, const common_params & params, const jarvis_model * model, + const std::vector & input_tokens, const std::string & output, + const std::vector & output_tokens ) { if (params.logdir.empty()) { return; @@ -71,7 +71,7 @@ static void write_logfile( fprintf(logfile, "binary: infill\n"); char model_desc[128]; - llama_model_desc(model, model_desc, sizeof(model_desc)); + jarvis_model_desc(model, model_desc, sizeof(model_desc)); yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc); fprintf(logfile, "\n"); @@ -83,7 +83,7 @@ static void write_logfile( yaml_dump_string_multiline(logfile, "output", output.c_str()); yaml_dump_vector_int(logfile, "output_tokens", output_tokens); - llama_perf_dump_yaml(logfile, ctx); + jarvis_perf_dump_yaml(logfile, ctx); fclose(logfile); } @@ -112,7 +112,7 @@ int main(int argc, char ** argv) { common_params params; g_params = ¶ms; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) { + if (!common_params_parse(argc, argv, params, JARVIS_EXAMPLE_INFILL)) { return 1; } @@ -160,12 +160,12 @@ int main(int argc, char ** argv) { LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); } - LOG_INF("%s: llama backend init\n", __func__); - llama_backend_init(); - llama_numa_init(params.numa); + LOG_INF("%s: jarvis backend init\n", __func__); + jarvis_backend_init(); + jarvis_numa_init(params.numa); - llama_model * model = nullptr; - llama_context * ctx = nullptr; + jarvis_model * model = nullptr; + jarvis_context * ctx = nullptr; common_sampler * smpl = nullptr; g_model = &model; @@ -174,18 +174,18 @@ int main(int argc, char ** argv) { // load the model and apply lora adapter, if any LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); - common_init_result llama_init = common_init_from_params(params); + 
common_init_result jarvis_init = common_init_from_params(params); - model = llama_init.model; - ctx = llama_init.context; + model = jarvis_init.model; + ctx = jarvis_init.context; if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); return 1; } - const int n_ctx_train = llama_n_ctx_train(model); - const int n_ctx = llama_n_ctx(ctx); + const int n_ctx_train = jarvis_n_ctx_train(model); + const int n_ctx = jarvis_n_ctx(ctx); LOG_DBG("n_ctx: %d\n", n_ctx); if (n_ctx > n_ctx_train) { @@ -197,28 +197,28 @@ int main(int argc, char ** argv) { LOG_INF("\n"); LOG_INF("%s\n", common_params_get_system_info(params).c_str()); } - const bool add_bos = llama_add_bos_token(model); - GGML_ASSERT(!llama_add_eos_token(model)); + const bool add_bos = jarvis_add_bos_token(model); + GGML_ASSERT(!jarvis_add_eos_token(model)); - std::vector embd_inp; - std::vector embd_end; - std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); - std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); + std::vector embd_inp; + std::vector embd_end; + std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); + std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); - GGML_ASSERT(llama_token_fim_pre(model) >= 0); - GGML_ASSERT(llama_token_fim_suf(model) >= 0); + GGML_ASSERT(jarvis_token_fim_pre(model) >= 0); + GGML_ASSERT(jarvis_token_fim_suf(model) >= 0); - inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model)); - inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model)); + inp_pfx.insert(inp_pfx.begin(), jarvis_token_fim_pre(model)); + inp_sfx.insert(inp_sfx.begin(), jarvis_token_fim_suf(model)); embd_inp = params.spm_infill ? inp_sfx : inp_pfx; embd_end = params.spm_infill ? 
inp_pfx : inp_sfx; if (add_bos) { - embd_inp.insert(embd_inp.begin(), llama_token_bos(model)); + embd_inp.insert(embd_inp.begin(), jarvis_token_bos(model)); } embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); - const llama_token middle_token = llama_token_fim_mid(model); + const jarvis_token middle_token = jarvis_token_fim_mid(model); if (middle_token >= 0) { embd_inp.push_back(middle_token); } @@ -230,7 +230,7 @@ int main(int argc, char ** argv) { // Should not run without any tokens if (embd_inp.empty()) { - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(jarvis_token_bos(model)); LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str()); } @@ -340,7 +340,7 @@ int main(int argc, char ** argv) { // the first thing we will do is to output the prompt, so set color accordingly console::set_display(console::prompt); - std::vector embd; + std::vector embd; while (n_remain != 0 || params.interactive) { // predict @@ -375,8 +375,8 @@ int main(int argc, char ** argv) { LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n", n_past, n_left, n_ctx, params.n_keep, n_discard); - llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); - llama_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); + jarvis_kv_cache_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); + jarvis_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); n_past -= n_discard; @@ -396,7 +396,7 @@ int main(int argc, char ** argv) { LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str()); - if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) { + if (jarvis_decode(ctx, jarvis_batch_get_one(&embd[i], n_eval))) { LOG_ERR("%s : failed to eval\n", __func__); return 1; } @@ -411,7 +411,7 @@ int main(int argc, char ** argv) { embd.clear(); if ((int) embd_inp.size() <= n_consumed && !is_interacting) 
{ - const llama_token id = common_sampler_sample(smpl, ctx, -1); + const jarvis_token id = common_sampler_sample(smpl, ctx, -1); common_sampler_accept(smpl, id, true); @@ -465,10 +465,10 @@ int main(int argc, char ** argv) { // if not currently processing queued inputs; if ((int) embd_inp.size() <= n_consumed) { // deal with eot token in infill mode - if ((common_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive){ + if ((common_sampler_last(smpl) == jarvis_token_eot(model) || is_interacting) && params.interactive){ if (is_interacting && !params.interactive_first) { // print an eot token - LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str()); + LOG("%s", common_token_to_piece(ctx, jarvis_token_eot(model)).c_str()); } LOG("\n"); console::set_display(console::user_input); @@ -505,16 +505,16 @@ int main(int argc, char ** argv) { } // tokenize new prefix and suffix - std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); - std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); + std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); + std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); - inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model)); - inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model)); + inp_pfx.insert(inp_pfx.begin(), jarvis_token_fim_pre(model)); + inp_sfx.insert(inp_sfx.begin(), jarvis_token_fim_suf(model)); embd_inp = params.spm_infill ? inp_sfx : inp_pfx; embd_end = params.spm_infill ? 
inp_pfx : inp_sfx; if (add_bos) { - embd_inp.insert(embd_inp.begin(), llama_token_bos(model)); + embd_inp.insert(embd_inp.begin(), jarvis_token_bos(model)); } embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); @@ -529,7 +529,7 @@ int main(int argc, char ** argv) { is_interacting = false; } // deal with end of generation tokens in interactive mode - else if (llama_token_is_eog(model, common_sampler_last(smpl))) { + else if (jarvis_token_is_eog(model, common_sampler_last(smpl))) { LOG_DBG("found EOS token\n"); if (params.interactive) { @@ -545,7 +545,7 @@ int main(int argc, char ** argv) { if (params.input_prefix_bos) { LOG_DBG("adding input prefix BOS token\n"); - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(jarvis_token_bos(model)); } std::string buffer; @@ -585,7 +585,7 @@ int main(int argc, char ** argv) { embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); for (size_t i = original_size; i < embd_inp.size(); ++i) { - const llama_token token = embd_inp[i]; + const jarvis_token token = embd_inp[i]; output_tokens.push_back(token); output_ss << common_token_to_piece(ctx, token); } @@ -608,7 +608,7 @@ int main(int argc, char ** argv) { } // end of generation - if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !params.interactive) { + if (!embd.empty() && jarvis_token_is_eog(model, embd.back()) && !params.interactive) { break; } @@ -620,18 +620,18 @@ int main(int argc, char ** argv) { } } if (!params.interactive && n_remain <= 0) { - LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str()); + LOG("%s", common_token_to_piece(ctx, jarvis_token_eot(model)).c_str()); } LOG("\n"); common_perf_print(ctx, smpl); write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens); - llama_free(ctx); - llama_free_model(model); + jarvis_free(ctx); + jarvis_free_model(model); common_sampler_free(smpl); - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git 
a/examples/jeopardy/README.md b/examples/jeopardy/README.md index ffa13cbf349b2..1d49a6f28fc21 100644 --- a/examples/jeopardy/README.md +++ b/examples/jeopardy/README.md @@ -1,4 +1,4 @@ -# llama.cpp/example/jeopardy +# jarvis.cpp/example/jeopardy This is pretty much just a straight port of aigoopy/llm-jeopardy/ with an added graph viewer. @@ -12,7 +12,7 @@ MODEL_NAME=(name of your model) prefix=(basically, if you use vicuna it's Human: , if you use something else it might be User: , etc) opts=(add -instruct here if needed for your model, or anything else you want to test out) ``` -Step 2: Run `jeopardy.sh` from the llama.cpp folder +Step 2: Run `jeopardy.sh` from the jarvis.cpp folder Step 3: Repeat steps 1 and 2 until you have all the results you need. diff --git a/examples/jeopardy/jeopardy.sh b/examples/jeopardy/jeopardy.sh index 07bcb3b8d78ac..39ae78f5fea8f 100755 --- a/examples/jeopardy/jeopardy.sh +++ b/examples/jeopardy/jeopardy.sh @@ -21,7 +21,7 @@ counter=1 echo 'Running' while IFS= read -r question do - exe_cmd="./llama-cli -p "\"$prefix$introduction$nl$prefix$question\"" "$opts" -m ""\"$MODEL\""" >> ""\"$output_file\"" + exe_cmd="./jarvis-cli -p "\"$prefix$introduction$nl$prefix$question\"" "$opts" -m ""\"$MODEL\""" >> ""\"$output_file\"" echo $counter echo "Current Question: $question" eval "$exe_cmd" diff --git a/examples/json_schema_pydantic_example.py b/examples/json_schema_pydantic_example.py index 19c0bdb5b6770..d820415a45916 100644 --- a/examples/json_schema_pydantic_example.py +++ b/examples/json_schema_pydantic_example.py @@ -1,5 +1,5 @@ # Usage: -#! ./llama-server -m some-model.gguf & +#! ./jarvis-server -m some-model.gguf & #! pip install pydantic #! 
python json_schema_pydantic_example.py @@ -13,7 +13,7 @@ def create_completion(*, response_model=None, endpoint="http://localhost:8080/v1/chat/completions", messages, **kwargs): ''' Creates a chat completion using an OpenAI-compatible endpoint w/ JSON schema support - (llama.cpp server, llama-cpp-python, Anyscale / Together...) + (jarvis.cpp server, jarvis-cpp-python, Anyscale / Together...) The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below) ''' diff --git a/examples/json_schema_to_grammar.py b/examples/json_schema_to_grammar.py index fc9f0097f5f8f..38d10e8211025 100755 --- a/examples/json_schema_to_grammar.py +++ b/examples/json_schema_to_grammar.py @@ -390,7 +390,7 @@ def _visit_pattern(self, pattern, name): Transforms a regular expression pattern into a GBNF rule. Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions - Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + Output: https://github.com/ggerganov/jarvis.cpp/blob/master/grammars/README.md Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers. @@ -751,7 +751,7 @@ def format_grammar(self): def main(args_in = None): parser = argparse.ArgumentParser( description=''' - Generates a grammar (suitable for use in ./llama-cli) that produces JSON conforming to a + Generates a grammar (suitable for use in ./jarvis-cli) that produces JSON conforming to a given JSON schema. Only a subset of JSON schema features are supported; more may be added in the future. 
''', diff --git a/examples/llama-bench/CMakeLists.txt b/examples/llama-bench/CMakeLists.txt index 5bdbea4e28187..e081060a8a1be 100644 --- a/examples/llama-bench/CMakeLists.txt +++ b/examples/llama-bench/CMakeLists.txt @@ -1,5 +1,5 @@ -set(TARGET llama-bench) -add_executable(${TARGET} llama-bench.cpp) +set(TARGET jarvis-bench) +add_executable(${TARGET} jarvis-bench.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common jarvis ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/llama-bench/README.md b/examples/llama-bench/README.md index 6bbe4bb75fbf8..ca550fe7eff86 100644 --- a/examples/llama-bench/README.md +++ b/examples/llama-bench/README.md @@ -1,6 +1,6 @@ -# llama.cpp/examples/llama-bench +# jarvis.cpp/examples/jarvis-bench -Performance testing tool for llama.cpp. +Performance testing tool for jarvis.cpp. ## Table of contents @@ -20,7 +20,7 @@ Performance testing tool for llama.cpp. ## Syntax ``` -usage: ./llama-bench [options] +usage: ./jarvis-bench [options] options: -h, --help @@ -56,7 +56,7 @@ options: Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times. 
``` -llama-bench can perform three types of tests: +jarvis-bench can perform three types of tests: - Prompt processing (pp): processing a prompt in batches (`-p`) - Text generation (tg): generating a sequence of tokens (`-n`) @@ -77,108 +77,108 @@ Note: ### Text generation with different models ```sh -$ ./llama-bench -m models/7B/ggml-model-q4_0.gguf -m models/13B/ggml-model-q4_0.gguf -p 0 -n 128,256,512 +$ ./jarvis-bench -m models/7B/ggml-model-q4_0.gguf -m models/13B/ggml-model-q4_0.gguf -p 0 -n 128,256,512 ``` | model | size | params | backend | ngl | test | t/s | | ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 132.19 ± 0.55 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 256 | 129.37 ± 0.54 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 512 | 123.83 ± 0.25 | -| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 128 | 82.17 ± 0.31 | -| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 256 | 80.74 ± 0.23 | -| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 512 | 78.08 ± 0.07 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 132.19 ± 0.55 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 256 | 129.37 ± 0.54 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 512 | 123.83 ± 0.25 | +| jarvis 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 128 | 82.17 ± 0.31 | +| jarvis 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 256 | 80.74 ± 0.23 | +| jarvis 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 512 | 78.08 ± 0.07 | ### Prompt processing with different batch sizes ```sh -$ ./llama-bench -n 0 -p 1024 -b 128,256,512,1024 +$ ./jarvis-bench -n 0 -p 1024 -b 128,256,512,1024 ``` | model | size | params | backend | ngl | n_batch | test | t/s | | ------------------------------ | ---------: | ---------: | ---------- | --: | 
---------: | ---------- | ---------------: | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 128 | pp 1024 | 1436.51 ± 3.66 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 256 | pp 1024 | 1932.43 ± 23.48 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 512 | pp 1024 | 2254.45 ± 15.59 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 1024 | pp 1024 | 2498.61 ± 13.58 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 128 | pp 1024 | 1436.51 ± 3.66 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 256 | pp 1024 | 1932.43 ± 23.48 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 512 | pp 1024 | 2254.45 ± 15.59 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 1024 | pp 1024 | 2498.61 ± 13.58 | ### Different numbers of threads ```sh -$ ./llama-bench -n 0 -n 16 -p 64 -t 1,2,4,8,16,32 +$ ./jarvis-bench -n 0 -n 16 -p 64 -t 1,2,4,8,16,32 ``` | model | size | params | backend | threads | test | t/s | | ------------------------------ | ---------: | ---------: | ---------- | ---------: | ---------- | ---------------: | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | pp 64 | 6.17 ± 0.07 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | tg 16 | 4.05 ± 0.02 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | pp 64 | 12.31 ± 0.13 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | tg 16 | 7.80 ± 0.07 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | pp 64 | 23.18 ± 0.06 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | tg 16 | 12.22 ± 0.07 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | pp 64 | 32.29 ± 1.21 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | tg 16 | 16.71 ± 0.66 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | pp 64 | 33.52 ± 0.03 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | tg 16 | 15.32 ± 0.05 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | pp 64 | 59.00 ± 1.11 | -| llama 7B mostly 
Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | tg 16 | 16.41 ± 0.79 || +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | pp 64 | 6.17 ± 0.07 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | tg 16 | 4.05 ± 0.02 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | pp 64 | 12.31 ± 0.13 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | tg 16 | 7.80 ± 0.07 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | pp 64 | 23.18 ± 0.06 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | tg 16 | 12.22 ± 0.07 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | pp 64 | 32.29 ± 1.21 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | tg 16 | 16.71 ± 0.66 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | pp 64 | 33.52 ± 0.03 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | tg 16 | 15.32 ± 0.05 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | pp 64 | 59.00 ± 1.11 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | tg 16 | 16.41 ± 0.79 || ### Different numbers of layers offloaded to the GPU ```sh -$ ./llama-bench -ngl 10,20,30,31,32,33,34,35 +$ ./jarvis-bench -ngl 10,20,30,31,32,33,34,35 ``` | model | size | params | backend | ngl | test | t/s | | ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | pp 512 | 373.36 ± 2.25 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | tg 128 | 13.45 ± 0.93 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | pp 512 | 472.65 ± 1.25 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | tg 128 | 21.36 ± 1.94 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | pp 512 | 631.87 ± 11.25 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | tg 128 | 40.04 ± 1.82 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | pp 512 | 657.89 ± 5.08 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | tg 128 | 48.19 
± 0.81 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | pp 512 | 688.26 ± 3.29 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | tg 128 | 54.78 ± 0.65 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | pp 512 | 704.27 ± 2.24 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | tg 128 | 60.62 ± 1.76 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | pp 512 | 881.34 ± 5.40 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | tg 128 | 71.76 ± 0.23 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | pp 512 | 2400.01 ± 7.72 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | tg 128 | 131.66 ± 0.49 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | pp 512 | 373.36 ± 2.25 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | tg 128 | 13.45 ± 0.93 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | pp 512 | 472.65 ± 1.25 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | tg 128 | 21.36 ± 1.94 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | pp 512 | 631.87 ± 11.25 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | tg 128 | 40.04 ± 1.82 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | pp 512 | 657.89 ± 5.08 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | tg 128 | 48.19 ± 0.81 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | pp 512 | 688.26 ± 3.29 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | tg 128 | 54.78 ± 0.65 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | pp 512 | 704.27 ± 2.24 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | tg 128 | 60.62 ± 1.76 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | pp 512 | 881.34 ± 5.40 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | tg 128 | 71.76 ± 0.23 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | pp 512 | 2400.01 ± 7.72 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | tg 128 | 
131.66 ± 0.49 | ## Output formats -By default, llama-bench outputs the results in markdown format. The results can be output in other formats by using the `-o` option. +By default, jarvis-bench outputs the results in markdown format. The results can be output in other formats by using the `-o` option. ### Markdown ```sh -$ ./llama-bench -o md +$ ./jarvis-bench -o md ``` | model | size | params | backend | ngl | test | t/s | | ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | pp 512 | 2368.80 ± 93.24 | -| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 131.42 ± 0.59 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | pp 512 | 2368.80 ± 93.24 | +| jarvis 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 131.42 ± 0.59 | ### CSV ```sh -$ ./llama-bench -o csv +$ ./jarvis-bench -o csv ``` ```csv build_commit,build_number,cuda,metal,gpu_blas,blas,cpu_info,gpu_info,model_filename,model_type,model_size,model_n_params,n_batch,n_threads,f16_kv,n_gpu_layers,main_gpu,mul_mat_q,tensor_split,n_prompt,n_gen,test_time,avg_ns,stddev_ns,avg_ts,stddev_ts -"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","512","0","2023-09-23T12:09:01Z","212155977","732372","2413.341687","8.305961" -"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","0","128","2023-09-23T12:09:02Z","969320879","2728399","132.052051","0.371342" +"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","jarvis 7B mostly 
Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","512","0","2023-09-23T12:09:01Z","212155977","732372","2413.341687","8.305961" +"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","jarvis 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","0","128","2023-09-23T12:09:02Z","969320879","2728399","132.052051","0.371342" ``` ### JSON ```sh -$ ./llama-bench -o json +$ ./jarvis-bench -o json ``` ```json @@ -193,7 +193,7 @@ $ ./llama-bench -o json "cpu_info": "13th Gen Intel(R) Core(TM) i9-13900K", "gpu_info": "NVIDIA GeForce RTX 3090 Ti", "model_filename": "models/7B/ggml-model-q4_0.gguf", - "model_type": "llama 7B mostly Q4_0", + "model_type": "jarvis 7B mostly Q4_0", "model_size": 3825065984, "model_n_params": 6738415616, "n_batch": 512, @@ -223,7 +223,7 @@ $ ./llama-bench -o json "cpu_info": "13th Gen Intel(R) Core(TM) i9-13900K", "gpu_info": "NVIDIA GeForce RTX 3090 Ti", "model_filename": "models/7B/ggml-model-q4_0.gguf", - "model_type": "llama 7B mostly Q4_0", + "model_type": "jarvis 7B mostly Q4_0", "model_size": 3825065984, "model_n_params": 6738415616, "n_batch": 512, @@ -250,12 +250,12 @@ $ ./llama-bench -o json ### JSONL ```sh -$ ./llama-bench -o jsonl +$ ./jarvis-bench -o jsonl ``` ```json lines -{"build_commit":"3469684","build_number":1275,"cuda":true,"metal":false,"gpu_blas":true,"blas":true,"cpu_info":"13th Gen Intel(R) Core(TM) i9-13900K","gpu_info":"NVIDIA GeForce RTX 3090 Ti","model_filename":"models/7B/ggml-model-q4_0.gguf","model_type":"llama 7B mostly 
Q4_0","model_size":3825065984,"model_n_params":6738415616,"n_batch":512,"n_threads":16,"f16_kv":true,"n_gpu_layers":99,"main_gpu":0,"mul_mat_q":true,"tensor_split":"0.00","n_prompt":512,"n_gen":0,"test_time":"2023-09-23T12:09:57Z","avg_ns":212365953,"stddev_ns":985423,"avg_ts":2410.974041,"stddev_ts":11.163766,"samples_ns":[213837238,211635853,212328053,211329715,212698907],"samples_ts":[2394.34,2419.25,2411.36,2422.75,2407.16]} -{"build_commit":"3469684","build_number":1275,"cuda":true,"metal":false,"gpu_blas":true,"blas":true,"cpu_info":"13th Gen Intel(R) Core(TM) i9-13900K","gpu_info":"NVIDIA GeForce RTX 3090 Ti","model_filename":"models/7B/ggml-model-q4_0.gguf","model_type":"llama 7B mostly Q4_0","model_size":3825065984,"model_n_params":6738415616,"n_batch":512,"n_threads":16,"f16_kv":true,"n_gpu_layers":99,"main_gpu":0,"mul_mat_q":true,"tensor_split":"0.00","n_prompt":0,"n_gen":128,"test_time":"2023-09-23T12:09:59Z","avg_ns":977425219,"stddev_ns":9268593,"avg_ts":130.965708,"stddev_ts":1.238924,"samples_ns":[984472709,974901233,989474741,970729355,967548060],"samples_ts":[130.019,131.295,129.362,131.86,132.293]} +{"build_commit":"3469684","build_number":1275,"cuda":true,"metal":false,"gpu_blas":true,"blas":true,"cpu_info":"13th Gen Intel(R) Core(TM) i9-13900K","gpu_info":"NVIDIA GeForce RTX 3090 Ti","model_filename":"models/7B/ggml-model-q4_0.gguf","model_type":"jarvis 7B mostly Q4_0","model_size":3825065984,"model_n_params":6738415616,"n_batch":512,"n_threads":16,"f16_kv":true,"n_gpu_layers":99,"main_gpu":0,"mul_mat_q":true,"tensor_split":"0.00","n_prompt":512,"n_gen":0,"test_time":"2023-09-23T12:09:57Z","avg_ns":212365953,"stddev_ns":985423,"avg_ts":2410.974041,"stddev_ts":11.163766,"samples_ns":[213837238,211635853,212328053,211329715,212698907],"samples_ts":[2394.34,2419.25,2411.36,2422.75,2407.16]} +{"build_commit":"3469684","build_number":1275,"cuda":true,"metal":false,"gpu_blas":true,"blas":true,"cpu_info":"13th Gen Intel(R) Core(TM) 
i9-13900K","gpu_info":"NVIDIA GeForce RTX 3090 Ti","model_filename":"models/7B/ggml-model-q4_0.gguf","model_type":"jarvis 7B mostly Q4_0","model_size":3825065984,"model_n_params":6738415616,"n_batch":512,"n_threads":16,"f16_kv":true,"n_gpu_layers":99,"main_gpu":0,"mul_mat_q":true,"tensor_split":"0.00","n_prompt":0,"n_gen":128,"test_time":"2023-09-23T12:09:59Z","avg_ns":977425219,"stddev_ns":9268593,"avg_ts":130.965708,"stddev_ts":1.238924,"samples_ns":[984472709,974901233,989474741,970729355,967548060],"samples_ts":[130.019,131.295,129.362,131.86,132.293]} ``` @@ -264,7 +264,7 @@ $ ./llama-bench -o jsonl SQL output is suitable for importing into a SQLite database. The output can be piped into the `sqlite3` command line tool to add the results to a database. ```sh -$ ./llama-bench -o sql +$ ./jarvis-bench -o sql ``` ```sql @@ -297,6 +297,6 @@ CREATE TABLE IF NOT EXISTS test ( stddev_ts REAL ); -INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '512', '0', '2023-09-23T12:10:30Z', '212693772', '743623', '2407.240204', '8.409634'); -INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B 
mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '0', '128', '2023-09-23T12:10:31Z', '977925003', '4037361', '130.891159', '0.537692'); +INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'jarvis 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '512', '0', '2023-09-23T12:10:30Z', '212693772', '743623', '2407.240204', '8.409634'); +INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'jarvis 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '0', '128', '2023-09-23T12:10:31Z', '977925003', '4037361', '130.891159', '0.537692'); ``` diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 4a8ea96764630..c1a3368a09f96 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -19,7 +19,7 @@ #include #include "ggml.h" -#include "llama.h" +#include "jarvis.h" #include "common.h" #include "ggml-cuda.h" #include "ggml-sycl.h" @@ -207,11 +207,11 @@ static bool output_format_from_str(const std::string & s, output_formats & forma return true; } -static const char * split_mode_str(llama_split_mode mode) { +static 
const char * split_mode_str(jarvis_split_mode mode) { switch (mode) { - case LLAMA_SPLIT_MODE_NONE: return "none"; - case LLAMA_SPLIT_MODE_LAYER: return "layer"; - case LLAMA_SPLIT_MODE_ROW: return "row"; + case JARVIS_SPLIT_MODE_NONE: return "none"; + case JARVIS_SPLIT_MODE_LAYER: return "layer"; + case JARVIS_SPLIT_MODE_ROW: return "row"; default: GGML_ABORT("invalid split mode"); } } @@ -237,7 +237,7 @@ struct cmd_params { std::vector poll; std::vector n_gpu_layers; std::vector rpc_servers; - std::vector split_mode; + std::vector split_mode; std::vector main_gpu; std::vector no_kv_offload; std::vector flash_attn; @@ -269,11 +269,11 @@ static const cmd_params cmd_params_defaults = { /* poll */ {50}, /* n_gpu_layers */ {99}, /* rpc_servers */ {""}, - /* split_mode */ {LLAMA_SPLIT_MODE_LAYER}, + /* split_mode */ {JARVIS_SPLIT_MODE_LAYER}, /* main_gpu */ {0}, /* no_kv_offload */ {false}, /* flash_attn */ {false}, - /* tensor_split */ {std::vector(llama_max_devices(), 0.0f)}, + /* tensor_split */ {std::vector(jarvis_max_devices(), 0.0f)}, /* use_mmap */ {true}, /* embeddings */ {false}, /* numa */ GGML_NUMA_STRATEGY_DISABLED, @@ -304,7 +304,7 @@ static void print_usage(int /* argc */, char ** argv) { printf(" --cpu-strict <0|1> (default: %s)\n", join(cmd_params_defaults.cpu_strict, ",").c_str()); printf(" --poll <0...100> (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str()); printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); - if (llama_supports_rpc()) { + if (jarvis_supports_rpc()) { printf(" -rpc, --rpc (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str()); } printf(" -sm, --split-mode (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str()); @@ -497,7 +497,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } auto p = string_split(argv[i], split_delim); params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end()); - 
} else if (llama_supports_rpc() && (arg == "-rpc" || arg == "--rpc")) { + } else if (jarvis_supports_rpc() && (arg == "-rpc" || arg == "--rpc")) { if (++i >= argc) { invalid_param = true; break; @@ -509,15 +509,15 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { break; } auto p = string_split(argv[i], split_delim); - std::vector modes; + std::vector modes; for (const auto & m : p) { - llama_split_mode mode; + jarvis_split_mode mode; if (m == "none") { - mode = LLAMA_SPLIT_MODE_NONE; + mode = JARVIS_SPLIT_MODE_NONE; } else if (m == "layer") { - mode = LLAMA_SPLIT_MODE_LAYER; + mode = JARVIS_SPLIT_MODE_LAYER; } else if (m == "row") { - mode = LLAMA_SPLIT_MODE_ROW; + mode = JARVIS_SPLIT_MODE_ROW; } else { invalid_param = true; break; @@ -583,10 +583,10 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { const std::regex regex{R"([;/]+)"}; std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1}; std::vector split_arg{it, {}}; - GGML_ASSERT(split_arg.size() <= llama_max_devices()); + GGML_ASSERT(split_arg.size() <= jarvis_max_devices()); - std::vector tensor_split(llama_max_devices()); - for (size_t i = 0; i < llama_max_devices(); ++i) { + std::vector tensor_split(jarvis_max_devices()); + for (size_t i = 0; i < jarvis_max_devices(); ++i) { if (i < split_arg.size()) { tensor_split[i] = std::stof(split_arg[i]); } else { @@ -680,7 +680,7 @@ struct cmd_params_instance { int poll; int n_gpu_layers; std::string rpc_servers; - llama_split_mode split_mode; + jarvis_split_mode split_mode; int main_gpu; bool no_kv_offload; bool flash_attn; @@ -688,8 +688,8 @@ struct cmd_params_instance { bool use_mmap; bool embeddings; - llama_model_params to_llama_mparams() const { - llama_model_params mparams = llama_model_default_params(); + jarvis_model_params to_jarvis_mparams() const { + jarvis_model_params mparams = jarvis_model_default_params(); mparams.n_gpu_layers = n_gpu_layers; if (!rpc_servers.empty()) { @@ -713,8 +713,8 @@ struct cmd_params_instance 
{ tensor_split == other.tensor_split; } - llama_context_params to_llama_cparams() const { - llama_context_params cparams = llama_context_default_params(); + jarvis_context_params to_jarvis_cparams() const { + jarvis_context_params cparams = jarvis_context_default_params(); cparams.n_ctx = n_prompt + n_gen; cparams.n_batch = n_batch; @@ -868,7 +868,7 @@ struct test { ggml_type type_k; ggml_type type_v; int n_gpu_layers; - llama_split_mode split_mode; + jarvis_split_mode split_mode; int main_gpu; bool no_kv_offload; bool flash_attn; @@ -880,13 +880,13 @@ struct test { std::string test_time; std::vector samples_ns; - test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) { + test(const cmd_params_instance & inst, const jarvis_model * lmodel, const jarvis_context * ctx) { model_filename = inst.model; char buf[128]; - llama_model_desc(lmodel, buf, sizeof(buf)); + jarvis_model_desc(lmodel, buf, sizeof(buf)); model_type = buf; - model_size = llama_model_size(lmodel); - model_n_params = llama_model_n_params(lmodel); + model_size = jarvis_model_size(lmodel); + model_n_params = jarvis_model_n_params(lmodel); n_batch = inst.n_batch; n_ubatch = inst.n_ubatch; n_threads = inst.n_threads; @@ -1008,7 +1008,7 @@ struct test { std::vector get_values() const { std::string tensor_split_str; int max_nonzero = 0; - for (size_t i = 0; i < llama_max_devices(); i++) { + for (size_t i = 0; i < jarvis_max_devices(); i++) { if (tensor_split[i] > 0) { max_nonzero = i; } @@ -1050,8 +1050,8 @@ struct test { } }; -const std::string test::build_commit = LLAMA_COMMIT; -const int test::build_number = LLAMA_BUILD_NUMBER; +const std::string test::build_commit = JARVIS_COMMIT; +const int test::build_number = JARVIS_BUILD_NUMBER; const bool test::cuda = !!ggml_cpu_has_cuda(); const bool test::vulkan = !!ggml_cpu_has_vulkan(); const bool test::kompute = !!ggml_cpu_has_kompute(); @@ -1428,45 +1428,45 @@ struct sql_printer : public printer { } }; -static void 
test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) { - llama_set_n_threads(ctx, n_threads, n_threads); +static void test_prompt(jarvis_context * ctx, int n_prompt, int n_batch, int n_threads) { + jarvis_set_n_threads(ctx, n_threads, n_threads); - const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const jarvis_model * model = jarvis_get_model(ctx); + const int32_t n_vocab = jarvis_n_vocab(model); - std::vector tokens(n_batch); + std::vector tokens(n_batch); int n_processed = 0; while (n_processed < n_prompt) { int n_tokens = std::min(n_prompt - n_processed, n_batch); - tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; + tokens[0] = n_processed == 0 && jarvis_add_bos_token(model) ? jarvis_token_bos(model) : std::rand() % n_vocab; for (int i = 1; i < n_tokens; i++) { tokens[i] = std::rand() % n_vocab; } - llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens)); + jarvis_decode(ctx, jarvis_batch_get_one(tokens.data(), n_tokens)); n_processed += n_tokens; } - llama_synchronize(ctx); + jarvis_synchronize(ctx); } -static void test_gen(llama_context * ctx, int n_gen, int n_threads) { - llama_set_n_threads(ctx, n_threads, n_threads); +static void test_gen(jarvis_context * ctx, int n_gen, int n_threads) { + jarvis_set_n_threads(ctx, n_threads, n_threads); - const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const jarvis_model * model = jarvis_get_model(ctx); + const int32_t n_vocab = jarvis_n_vocab(model); - llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; + jarvis_token token = jarvis_add_bos_token(model) ? 
jarvis_token_bos(model) : std::rand() % n_vocab; for (int i = 0; i < n_gen; i++) { - llama_decode(ctx, llama_batch_get_one(&token, 1)); - llama_synchronize(ctx); + jarvis_decode(ctx, jarvis_batch_get_one(&token, 1)); + jarvis_synchronize(ctx); token = std::rand() % n_vocab; } } -static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) { +static void jarvis_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) { (void) level; (void) text; (void) user_data; @@ -1508,12 +1508,12 @@ int main(int argc, char ** argv) { cmd_params params = parse_cmd_params(argc, argv); - // initialize llama.cpp + // initialize jarvis.cpp if (!params.verbose) { - llama_log_set(llama_null_log_callback, NULL); + jarvis_log_set(jarvis_null_log_callback, NULL); } - llama_backend_init(); - llama_numa_init(params.numa); + jarvis_backend_init(); + jarvis_numa_init(params.numa); set_process_priority(params.prio); @@ -1533,7 +1533,7 @@ int main(int argc, char ** argv) { std::vector params_instances = get_cmd_params_instances(params); - llama_model * lmodel = nullptr; + jarvis_model * lmodel = nullptr; const cmd_params_instance * prev_inst = nullptr; int params_idx = 0; @@ -1541,15 +1541,15 @@ int main(int argc, char ** argv) { for (const auto & inst : params_instances) { params_idx ++; if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: starting\n", params_idx, params_count); + fprintf(stderr, "jarvis-bench: benchmark %d/%ld: starting\n", params_idx, params_count); } // keep the same model between tests when possible if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) { if (lmodel) { - llama_free_model(lmodel); + jarvis_free_model(lmodel); } - lmodel = llama_load_model_from_file(inst.model.c_str(), inst.to_llama_mparams()); + lmodel = jarvis_load_model_from_file(inst.model.c_str(), inst.to_jarvis_mparams()); if (lmodel == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, 
inst.model.c_str()); return 1; @@ -1557,16 +1557,16 @@ int main(int argc, char ** argv) { prev_inst = &inst; } - llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams()); + jarvis_context * ctx = jarvis_new_context_with_model(lmodel, inst.to_jarvis_cparams()); if (ctx == NULL) { fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str()); - llama_free_model(lmodel); + jarvis_free_model(lmodel); return 1; } test t(inst, lmodel, ctx); - llama_kv_cache_clear(ctx); + jarvis_kv_cache_clear(ctx); // cool off before the test if (params.delay) { @@ -1588,37 +1588,37 @@ int main(int argc, char ** argv) { exit(1); } - llama_attach_threadpool(ctx, threadpool, NULL); + jarvis_attach_threadpool(ctx, threadpool, NULL); // warmup run if (t.n_prompt > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count); + fprintf(stderr, "jarvis-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count); } //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads); test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); } if (t.n_gen > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count); + fprintf(stderr, "jarvis-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count); } test_gen(ctx, 1, t.n_threads); } for (int i = 0; i < params.reps; i++) { - llama_kv_cache_clear(ctx); + jarvis_kv_cache_clear(ctx); uint64_t t_start = get_time_ns(); if (t.n_prompt > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps); + fprintf(stderr, "jarvis-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps); } test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); } if (t.n_gen > 0) { if (params.progress) { - 
fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps); + fprintf(stderr, "jarvis-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps); } test_gen(ctx, t.n_gen, t.n_threads); } @@ -1637,14 +1637,14 @@ int main(int argc, char ** argv) { fflush(p_err->fout); } - llama_perf_context_print(ctx); + jarvis_perf_context_print(ctx); - llama_free(ctx); + jarvis_free(ctx); ggml_threadpool_free(threadpool); } - llama_free_model(lmodel); + jarvis_free_model(lmodel); if (p) { p->print_footer(); @@ -1654,7 +1654,7 @@ int main(int argc, char ** argv) { p_err->print_footer(); } - llama_backend_free(); + jarvis_backend_free(); return 0; } diff --git a/examples/llama.android/app/build.gradle.kts b/examples/llama.android/app/build.gradle.kts index 8d1b37195efd4..faf26959b44a1 100644 --- a/examples/llama.android/app/build.gradle.kts +++ b/examples/llama.android/app/build.gradle.kts @@ -4,11 +4,11 @@ plugins { } android { - namespace = "com.example.llama" + namespace = "com.example.jarvis" compileSdk = 34 defaultConfig { - applicationId = "com.example.llama" + applicationId = "com.example.jarvis" minSdk = 33 targetSdk = 34 versionCode = 1 @@ -54,7 +54,7 @@ dependencies { implementation("androidx.compose.ui:ui-graphics") implementation("androidx.compose.ui:ui-tooling-preview") implementation("androidx.compose.material3:material3") - implementation(project(":llama")) + implementation(project(":jarvis")) testImplementation("junit:junit:4.13.2") androidTestImplementation("androidx.test.ext:junit:1.1.5") androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1") diff --git a/examples/llama.android/app/src/main/AndroidManifest.xml b/examples/llama.android/app/src/main/AndroidManifest.xml index 41a358a299154..fcd605d2484b5 100644 --- a/examples/llama.android/app/src/main/AndroidManifest.xml +++ b/examples/llama.android/app/src/main/AndroidManifest.xml @@ -12,13 +12,13 @@ 
android:label="@string/app_name" android:roundIcon="@mipmap/ic_launcher_round" android:supportsRtl="true" - android:theme="@style/Theme.LlamaAndroid" + android:theme="@style/Theme.JarvisAndroid" > + android:theme="@style/Theme.JarvisAndroid"> diff --git a/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt b/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt index 78c231ae55d8c..1c8320e7a4f15 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt @@ -1,4 +1,4 @@ -package com.example.llama +package com.example.jarvis import android.app.DownloadManager import android.net.Uri diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt index 9da04f7d3c32e..00789cb3bad3e 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt @@ -1,4 +1,4 @@ -package com.example.llama +package com.example.jarvis import android.app.ActivityManager import android.app.DownloadManager @@ -30,7 +30,7 @@ import androidx.compose.runtime.Composable import androidx.compose.ui.Modifier import androidx.compose.ui.unit.dp import androidx.core.content.getSystemService -import com.example.llama.ui.theme.LlamaAndroidTheme +import com.example.jarvis.ui.theme.JarvisAndroidTheme import java.io.File class MainActivity( @@ -77,9 +77,9 @@ class MainActivity( File(extFilesDir, "phi-2-q4_0.gguf"), ), Downloadable( - "TinyLlama 1.1B (f16, 2.2 GiB)", - Uri.parse("https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true"), - File(extFilesDir, "tinyllama-1.1-f16.gguf"), + "TinyJarvis 1.1B (f16, 2.2 GiB)", + 
Uri.parse("https://huggingface.co/ggml-org/models/resolve/main/tinyjarvis-1.1b/ggml-model-f16.gguf?download=true"), + File(extFilesDir, "tinyjarvis-1.1-f16.gguf"), ), Downloadable( "Phi 2 DPO (Q3_K_M, 1.48 GiB)", @@ -89,7 +89,7 @@ class MainActivity( ) setContent { - LlamaAndroidTheme { + JarvisAndroidTheme { // A surface container using the 'background' color from the theme Surface( modifier = Modifier.fillMaxSize(), diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt index 45ac29938f441..74dba04fa668a 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt @@ -1,6 +1,6 @@ -package com.example.llama +package com.example.jarvis -import android.llama.cpp.LLamaAndroid +import android.jarvis.cpp.JarvisAndroid import android.util.Log import androidx.compose.runtime.getValue import androidx.compose.runtime.mutableStateOf @@ -10,7 +10,7 @@ import androidx.lifecycle.viewModelScope import kotlinx.coroutines.flow.catch import kotlinx.coroutines.launch -class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instance()): ViewModel() { +class MainViewModel(private val jarvisAndroid: JarvisAndroid = JarvisAndroid.instance()): ViewModel() { companion object { @JvmStatic private val NanosPerSecond = 1_000_000_000.0 @@ -29,7 +29,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan viewModelScope.launch { try { - llamaAndroid.unload() + jarvisAndroid.unload() } catch (exc: IllegalStateException) { messages += exc.message!! } @@ -45,7 +45,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan messages += "" viewModelScope.launch { - llamaAndroid.send(text) + jarvisAndroid.send(text) .catch { Log.e(tag, "send() failed", it) messages += it.message!! 
@@ -58,7 +58,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan viewModelScope.launch { try { val start = System.nanoTime() - val warmupResult = llamaAndroid.bench(pp, tg, pl, nr) + val warmupResult = jarvisAndroid.bench(pp, tg, pl, nr) val end = System.nanoTime() messages += warmupResult @@ -71,7 +71,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan return@launch } - messages += llamaAndroid.bench(512, 128, 1, 3) + messages += jarvisAndroid.bench(512, 128, 1, 3) } catch (exc: IllegalStateException) { Log.e(tag, "bench() failed", exc) messages += exc.message!! @@ -82,7 +82,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan fun load(pathToModel: String) { viewModelScope.launch { try { - llamaAndroid.load(pathToModel) + jarvisAndroid.load(pathToModel) messages += "Loaded $pathToModel" } catch (exc: IllegalStateException) { Log.e(tag, "load() failed", exc) diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt index 40c30e8d97077..84e34456c5b8b 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt @@ -1,4 +1,4 @@ -package com.example.llama.ui.theme +package com.example.jarvis.ui.theme import androidx.compose.ui.graphics.Color diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt index e742220a8d719..3298e08c63b08 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt @@ -1,4 +1,4 @@ -package com.example.llama.ui.theme +package com.example.jarvis.ui.theme import android.app.Activity import android.os.Build 
@@ -38,7 +38,7 @@ private val LightColorScheme = lightColorScheme( ) @Composable -fun LlamaAndroidTheme( +fun JarvisAndroidTheme( darkTheme: Boolean = isSystemInDarkTheme(), // Dynamic color is available on Android 12+ dynamicColor: Boolean = true, diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt index 0b87946ca3ab1..bde5dfbb78802 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt @@ -1,4 +1,4 @@ -package com.example.llama.ui.theme +package com.example.jarvis.ui.theme import androidx.compose.material3.Typography import androidx.compose.ui.text.TextStyle diff --git a/examples/llama.android/app/src/main/res/values/strings.xml b/examples/llama.android/app/src/main/res/values/strings.xml index 7a9d314e2969b..be0735465a5da 100644 --- a/examples/llama.android/app/src/main/res/values/strings.xml +++ b/examples/llama.android/app/src/main/res/values/strings.xml @@ -1,3 +1,3 @@ - LlamaAndroid + JarvisAndroid diff --git a/examples/llama.android/app/src/main/res/values/themes.xml b/examples/llama.android/app/src/main/res/values/themes.xml index 8a24fda56602c..6c7456dea61b0 100644 --- a/examples/llama.android/app/src/main/res/values/themes.xml +++ b/examples/llama.android/app/src/main/res/values/themes.xml @@ -1,5 +1,5 @@ -