From 491766fd40bbeb72fea74f74e6dbf02fbb4a07d3 Mon Sep 17 00:00:00 2001
From: Peter Jung
Date: Sat, 22 Apr 2023 11:47:49 +0200
Subject: [PATCH] 6.3: Update patchset for 6.3 Release, add surface patchset

Signed-off-by: Peter Jung
---
 6.2/all/0001-cachyos-base-all.tar.gz          |   Bin 0 -> 475593 bytes
 6.3/0001-bbr2.patch                           |     4 +-
 6.3/0002-bfq.patch                            |     4 +-
 6.3/0003-cachy.patch                          |  2942 +-
 6.3/0004-fixes.patch                          |    85 +-
 6.3/0005-fs-patches.patch                     |     6 +-
 ...6-Implement-amd-pstate-guided-driver.patch |     4 +-
 6.3/0007-ksm.patch                            |     4 +-
 6.3/0008-maple-lru.patch                      |   306 +-
 6.3/0009-Per-VMA-locks.patch                  |    59 +-
 6.3/0010-sched.patch                          |    57 +-
 6.3/0011-Surface.patch                        |  5691 +++
 6.3/{0011-zram.patch => 0012-zram.patch}      |     6 +-
 ...5.5.patch => 0013-zstd-import-1.5.5.patch} |     6 +-
 6.3/all/0001-cachyos-base-all.patch           | 30128 ++++++++++------
 Files                                         |    10 +-
 16 files changed, 28535 insertions(+), 10777 deletions(-)
 create mode 100644 6.2/all/0001-cachyos-base-all.tar.gz
 create mode 100644 6.3/0011-Surface.patch
 rename 6.3/{0011-zram.patch => 0012-zram.patch} (99%)
 rename 6.3/{0012-zstd-import-1.5.5.patch => 0013-zstd-import-1.5.5.patch} (99%)

diff --git a/6.2/all/0001-cachyos-base-all.tar.gz b/6.2/all/0001-cachyos-base-all.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..5ad23f5d372f7e9e29eae0ea0ed62db0fcd23921
GIT binary patch
literal 475593
[475593-byte base85-encoded binary literal for 0001-cachyos-base-all.tar.gz elided]
zJf>XzB@$u6L~A9Zwr}gxZvV}9N!Mxu{V|@OY13J<|V&A?rwS4#Y~a&$BLNoC`23 z#_zkuMPoh*fLoBiy+3Bxr!dfnvoG$@kuGPT1tH&LWCD&CGQ=Nh=s08jg!(Tqs(?y7 z1ZQrODi+@EkUF*)n2sYCoMXTthq{*RU<^2aY>5C`K&8Kq-El}2(b}%p&XRkPu&%`x zuV{lkky)A-!MALe3%pV8BbzH~2<~0xd8#)crtU<1r>- zTtJ{+<>dBpTf!3cOWtW9^sC7zf-{sq_B1qRf)m`dVJ01{jCBpus|D@cpPO%%r>u#1cy8HK_S zbE&+HoiYy-_Q!kV*x87bdTd_84+ijV@(ACIC9;{#xBjgP7i6BbVO;TRUJkM;1*`?*2RPf@_qA~1z`nA?rM0MRo=maJ+$J2g>x8}J{BKoIw z7S(Zuk8z5i6PNC#qZWqZ;s(#cc!}T15kFf<6{^Hexc-ir*4Yp61cIzvD1r$r*y4;d zuSKoIc_riom*A9);4I!_25>&}{N;jm>FF`-3lJRJryoXdxSDkmg@j-6<52IXZ zeITa<9$?~3>E7N1h}NZAo*c4yA+*3?&h*+ewo5pOWrwjramruhF-w^;Ggz+SoD@3M zu~ljBvw+_&W2xg*Zc-O$&C=0A%5{xMqXk5Ur~0hao;p^kV%3VpR&!b@ITgpLG;3*+ zjUm$sI93SdTx;P`aQe|wGI|Aw4T9 z83Q<&dqPJ=2k-obneB@97CLU;(7}jTU{Oxv!e4}tgxYR6X~xn|CWtD??>s3N{J>VX zFq2uiRb{Q_RN>M!cU-AHbE0yJp^oC`0j5i^p+Tm6h*DgDi%F};r8&HLDW`i4VBcUe zJQ6B?B5O$LtMG=8QQv{nA%GIpd+BW8zomcbuI_4zVn<(bmA0qNM#ZVtP0MT-i=}E6 z(nL4v)OP7rI)+A7=Gbc#XuF02r601f!g2pPmdbD2 z67Od2R1EB}H$OLTa8LclQPvS8WE%?{7N)Q2D#p9f^RnJpew<3i2OD=|XsXVRKN|kEMt{02VQl(+fS~asY zYv{$GL8J;oQ-JlYChJ?xd>PJ5le6A=f6y~}KlKKa9Iy&O44=BC@M~?|8K{rc?fTEH zzX6*(xiUJVcjKJ@nJOrzT0Gf-CC+}_bfXTu&4Okr%B#_EG7L5#JzO#Rd>p(EYCJ)& zflNklu}WAW?Ie&y6AYmLur#gDXz4=4ikT4T^Uv)o!WG0)1aY|T`1P34`E64lWUme{*0|Y4A zab~jnd7g<#0)19jcUM zWj&Vt?iBS>7e|+uM~B$dVCH)_F(HtEut@Fx`{mUkVap=C2;O+bbS0aXP(oyZm>M}l zR)arnRRyCwe|Pj&Y7d_rXr)eoF(;y7wk@lPMH6bMFok91`f%~YvhZC7L~?Kwv!caY zDG9F{eohy@BBYjrG$$a2a+y&Ib+p9&P2}C+54=i{&IldCEuQ&E2#wA#r_jpAMl~Ez zTbi^y$qGbZ!nw1!c|xF$vgJ=C=kz7(42_m$X#dxV|wqm(d= zT2*z>jSgzkT)cFi1bL-Y0K!k?m3IWS6-ifaAq+v32n?B#C#Mo_9E8(>X3X)rHV$Dj z-O;2<0uQxjsPsUzdD$)zr0_e(O9?*ACvmh$N#O>94ML`nkuf$vtBZhSoP=AA5NtCL z=>F^bGyowdT0FFIZSn}z1xrwO$dWV^X^uDtG7~-x-kV`JJ!giLk?2T%fQ~x|*Fq8N z2&>?Rt7Mk#)QdB`^QlTo%<{L6#;fPYhs0HqJ>aJ})y7|Fx=kbU1gOw9l+3G~J-(i9k7kCZh`rxR?hC*>=oi zRK6+RjAM4>vT!+3`PxL2d&b;Lb?#r&4*RT|&4V*oq^1~HfdW8d{d(1n_F1>Nx{^?w z5*#sJ;$&n5jPD7Z>OR3ien5MMtaI=nmK9Mc*nhd+JOqo%y7j>+_XW>o*-&LYzyc^9 zFtj>t;+RpZ3;Q2esy3<4&CjrHtvr)EVRsaS1K${oMxy`(%WydA4thc8_e!VW)%?62 zu0BW3=@H++UyCG3wTo=wVsd$MPNRML?qpxE&W&-Rf)5Vx@dHeqyU0iC!#?N6z)0u& zZ|Tk&@}FK^9*8hO9SB|&bHFYHO^KO<5I2tQjmeIOSY=Dn_nW`leZ!!eRU#?;fM@!+ z)03ZZ*~jQJ6C5C)ZoPqVN$8k)4;^vNC@lyZl`dG{k9w{<328;@%Jg=D)`T<-uHzWa zf72Lxf(}~r5KVAE1cU{S*zDl^oggj^yGhXUxn6JbYDS@3wM@0U$D1w9Uv$|5-Nth zmM-Fb^LK4|M?^K)@whbYxz$(C;hXlno2Bm(SE=Xy?0LWNe2_gK6rK;W@`r`;sTWy; z3=Mqj^5C1JL;IVHtK)-jD;K#Cr@5CeMEqM}xsOK1C=A`e8JL~U!0&p&z_Sc599J%G zGH+pFlh2T@V-lX>FNyhhEG?oqx$_W_n^vBu3pnRU%-5K}omNoUGXbldx2$h^sbi|= ziJf%1JHMuTp2{XH+3&OT4s%VXOIun;;alvdWps&y;BzRCG#Sn|`q;vWF+knl{6U?_ z7EYSmOnUIbmZnGzsWWPjq$07*pNhu8+cP+ao*sRV9siYoy~bbydwcaSk?W&(T z`m2kEypQG6ZBdrGBPsdLaT*ANV}+v!vmfcAQ85BkXEE-px||ZSJdO`kXkM|gS@Z!4izDhv zty+xuLPb+OR6bWa2&?CIU9Ue1yJ4pjc%d~MkG#I;RZCu_G75`S=^agCb4+s>KDws1 z56Bx8bqePr`kJ%8#3ijh^0V$CJ0@0^oge?JUT>fRFvkTIfH~GSlyO07U&&2oh9E`K z(La=t@F)qeKKG1eiFGZ6!~Mm6c9b~cu!Te$XhBlYHaw|nj;e^I2F{qOP5&mkzM-Q~ zoU&t0Q8Tdt?XvdOQP47!7Uhwl%1UBVTSigcVi|hx-j)0Qu_RyBq-2+bs#V$P=aQ`) zVKHmV1C?Y;KPXUNtn1>NzaO6?x?B1A4}Y(EO1xV&#}jY4I>EJva&|lJz%qtCbJ+8` zolaIN1)i*~M0<+_eIKvIj@jKM^ zvk-xlk~C;#fVhPQ3V)ffPKN6j(4g376&5vqT>%#tYqMkPJ!hG+88r-N{Np_RN!-fK zo_y8gSv-#^@*}3f@xhsMv&1`fmIqM1HHG4$G(Zmr0@g=`LI<@OshtV%AIP)_c&rQIZF(K7~k<1Rmo(UtPlHY_V-phi|& zh1)1^zNAeb?N|^Y5Pmmq(>Pp2$v^xSy+-*!xm3Xa4~`mNEcjtM@1!Wnf%qH6s2oHK z)fX5Tiyx=Kg1*4g1yX%XqDTXw=QB=3t0NhvW0Ncw7b*kW2@w4IA$qh(yj0Sh<@h+H z$D-giq+DeO8q~z11m8i+sJek(B*DrM^D{ZPUfAt9$!c4%AX)fWiVNhdrU}kigm=r(u(D@ zUm|%*!$iS2nlCmh3lgs{4-LjXy-CCdszDo-w5~x=8buOA8?3BUiZI0^GO}=r$t+?Uz@_Ww>KE$jIWWDE=LB#<2s$0#?~c8` 
z;RMFGT9;8Sq;is}m#pj`^vPI>KT9($A5laB!))2O>Z|5&v@}voI9eDJADOoOk}~AH z&=PXNTp4~G*H0PPPa4_jauIxhQymUq<7rJ>ZAwKN1}4*Rz+@T@KAB7$D0Swu(S)+P zWsh)$1z#!A(ZP9zV1N8XfL0bqRgx+1pBmE2WctY0komDmGM_B3qgmxNQLp4g8M;Oo zfaL8MqY;7E`{TgqcDw!2`iYViQ$1C(*Te;eF(fYF&k8QU@mB-I5$LMK3}xuRYaK4N zBAqk6H(3JLjH0x_sviE$^JzT5dPj_0TGU-;=LiZlQ^5j24$oGq^p=qD=};u!fwROW zKWpa`D)mTe%O;ev29XP{cm^wOlh^C^%R*8ccQ=fnS6H@r-s8dz!Z702)~wyXdni7B z5Gq%!d*?Q=m-9@u3iAyI1HocoH9_+d!d|$zcz1qv3@Zr+oAhqc37W?RA*h~LPBLo1 zZsk35Eo%^TN24y9KXv=#anG=&6UbP}*#wl_DPFopv`GV< z1Z}j9pg=L9SS(4$MS*x(y|7I10gVb>gcvB2k9*QKt-e>XBwH1}C)xVy9qo0O2m5c2 zPv6*Y&)&SK7;>XC>Y z+X*B!V+5^g1#KXk7S05(h6lg$__QnF^#N@kK%~R z_GpD^OhE>hwja35>$U89&Czsh55}#~IVmj<`@|8Wv34D0+i1opoW#4ao%CT!vXa_b zH92{O1$Z^chI<-$$wuQ{MWZ4&hN0B~x zes{8OzuJGhe|iAxB2GIH^^|sw0bQXF zSa*H&17Ry%{_W(|*<1U~`P+u1Cb{68?L>4WflZ{P*~Z>0${zV$^LYN+}2&BfWf^Gihjlk)DM*C(60L2uMD z`|`KzztkNPbe&pzoNcoaf*L{qzw<>kj4$o^)HaecfW0 zGR%TKgxpVRFJ+~h8Ty}N{ZDHXW`6pUV1_BB*E}?+DPI-3u=8ja*kMf(y~GX|X9su< zd~o#^V@qZPQQbjcrbXaCX;sdE(%5PZ z6j}QgeMQK(OgxhgGW>JCvi)Z;{^YQ5^6u*BhlZ?$k&AMzOkrTBQ|)pt{~?Dj4AB>C z><69!=NC8ZnnSNU4t%S0DW_%R5z`*fX~iHFD1T^Hv54wGsC?U{M?h zFHP}|PAj3EUCJ4yOSzr8H+YZ9Tb^cGMZ3%jk?`7-y>p4{OHU-ZMpkzejJn-X-x+o~ zzTuh!2Lm;R!xeHBa%VIvJSFZPnxr?OPtzLiI_W$D zG!Cp0_CBf%yh}0)LK=xmHK~>4mwB3(cb8}M!KkYeMiUlKhW&AuVHhM?tet1Dl3y=w z5MehA@%MZlgTz(lDRdoeQ&C8E#aHF0>VDiG7^5Jx9IMxbm2^1rLciY|1COon)=CK( zf33Vh9vhm-Veq%WUu5lsCd2G~i8+y?UF$_2G@4-FvUGdWs@8Y4eGCmn`@JsM%7tvB{?~Bu^Cy4jW$F>`EE4P{efWK!j$IS>EYnz-CNf_(IplA4MDMa|Cw{U;XQ9CjoDLvs+323V z#E>npCc-*_7$r0yh3i%@giP0lf;?wtZp>L%a>^OB-+x!{hQ<0SMnAE8uX5Q#8NmR zqtk|}hWskg=XOx~u9&`H^ zFwziBF?z5Rn@-O)g09;ibys+-kUQhF!c)29i=dogSxOuqqfp}>IVGY_^bpxy%qIp<|JeV*IWF~TQp@h$e=L${mRa_R;Nm4ePu zWDJcCMoHjJoM=jZevs@Q{;ZV%>QUvij*uaoJ*Ai=gVpu?BQooY`$y#VV9|*@TE52p z7jj=%WsTHxW;t8%FLlmwhwjMlg?(#0#B+|}c&_V*rn$O1XE`-?kWbjD+@Re9J`^*^ zD=%lJqhHAF;xz)O8luwyS;y!I@ePkDPZAnRoN4oiwWGvDu_ya;h%pHrJ&wsz-vx5i?Rb zFW}5slxz3IH-E2JqCH+f?ZBv9NGiIwQ>Z)Fh`>z&uRvD4hR<4J(dJc{8VHSmUxCq(ZyLSPhn{(o}v%`eBu&9fQMQ-6ou0prDv*8C{+Us zl(k6&ngNaMP#@PqLe%J&^d<2l6-`UlG$Hpvu~OkAwtm6s^V^EJmW>QHkD>q-SwnlQ zgt&e$Ff7yZ#(lHXaR>e|bOLwS9Xd5kp%+nN2mKW}d-Y5b?dWq@UPNfPlI1M0<1n@f z^Q?^BPy$zg#SBk>;u*}u^ww<_jv;w*pH|c_xyb_-#rYO!UPV7|5jjFbC529lXETBn z!=%U`oB|~|gaJ7|Js|kx%ZC2GCEgyNzWc#GIeulIU%Wd#I@o{v_UJG(;pz9RK9kHE zw7TPx3ES`OqYSov+ z9EUY9A&i+hYyTW1aSKtP@dl9ta&gkUHAGZ=6(-Qgp;&hxrBW1}!#NC50l81YPvZF;77XRMDcen^)5WH9t2>*L zg2_Mw5(W+$924HXJaBUXIG$Lt($l>~4K5szZ=Aa*CTx22q+s*zVj?-JT1qsIpWHhS zsd6o+l0Rh_^ez&@18K+tFF+V@&VvvJcur~|NNIL7sjfg-r<1V0xF+kh%ld6eS{TN2wGuMOR%-a_d;fN}&GJ>%wu z*yB(FJLR#C$C0%&ryz$kF*lvpATNlb_v^``Q}BvXr4m;piv+;3i|lVYk5DH`9s&xy zoO6@ucBGV+D8Xrio6!R%H!NJ`M(Kl$Qw47WiRIwXGigZ^a6B~x$sN)o_z)2ed&X8d zlj=wbIBZ0AI?W=ceOd&bg|(#p^6@R4LpW_cx=g+l8y2YWEO!Y9AGwVtx8fc-oDBC) zkV`e?WOzqvCt_1gKSnI8EPd`$SSUr?VsA4i!Y^P~;HNgu#)rJ0JW~9Qom=(N8o1R| zrj>}Yax3d~c`F+ZOW1RNnw_jfyywzW7MD;N7lYLuDximpGXa**02qx}ofS10asaq? 
z1_uJxlHdqulaOTnnCAd`-z2c~94}N<0m3GixKp@=2*unt@jSpOLDOyq?fDC&r!WC2 z&?538+>7{nZk-Ugay-bNDwvJ#*cp3)Gd6l~pziiue`uPH84SI3B4Fi#Kzl94RfX(Ly)KIa6#U3vu`3lEO>esgN3CG;1R>|zpBo6_wOUEpzY&z#zk(UGI8Pchs zK-bhuqWL1-J%L3pgyHT?A{RPe^;n&_oSxGU2Sd+v2A$4$+zopD;m9+>l957X5lq_3 zS2Vr)!?w_ozVZWj4%@R)VDwUJdIKhdiBHKPmV7Sg!mF~Yl|LNFfFEPk+Y;_sp~9E3mgfP1`Bip=~Ef77g>ZMiZB^!Q6rc=|dF?YU{#lZ?S)Qg6^eg zWQ{-DpYPuz%hojZ5l6dfagndo@-;I1&M4?w7WzaEoUYd$49%|5>p!x5sbXrDF!fq` zE07hQp;SdVM_m6L;qjY%qj^S&e2nDFFB{+Ddl-P3XoupZF0FA&|Hwt4z%p%+nW4J&Kd8p~wApI+n6rH|n_}%Y z7P5B_QurQh5Ea$2#mu_a|GpMAH)(@Xo_^Wz(?xUltIloQx&HaN{#(30yCCqkFB>Ru zVRKqI{!QRNv2AsFM#tFRCiQ$juncd2dQH!BJi|4HL)e6YRXxjbJl`K%J-^!@^~4FH zeFbx24uxSnr~ji`-=t!mi*pPBC;npzLf_Yp4@bSf&KHT1Tr~(U9Ado^bN7I)7GjXb>S`?^`dw;8W}ySoDDJoh4q%9 zu<0+S(}#7Pu?kt{UWEVEWvI-8&{&=`mpw|H`9y*`%TiXe;K37)*_=ZWj*6K$Q@lc# zec2Xay88hG1lKo6>wTXpDMEe!x9eM-n@74*%#wJ^BnT@rL6*@N28RBX^HEsj>bkJ1 zfmq3L??j9BjmvY;V5W7+IR`8pdUr4jo)c^HtWsd<%p=*jD74*}6fJswJxc^Fifz*I zrE3NVhh{)oWyXA#6L_SME+pcnOw(f`k(iOvM_V4XXjr<*EAKImH%xESfg&!YQn8lZ8A$BE4WGIUkC7E z&qA1?NyWmWcM!8+D&o3zbs^;k#VpAJz5*NF)?_T!LI>gq&$u#>A)Zu;alN%!5NZ37 zhDplHEQ8%7UM?#8t!2sk0{;O=zgUR!Z#W8uT^~;4 z!(ON3I_}6Bj|{|tC99^Y+*yeXe|51)FE{A^4$w2hRqoxhZ%bOBXm@u0BlCHGl}DQY9mAl-kCqJ zY6CHx*wGD=V&S$kd$8T*qA?|a1qB~MLAqqxuQ6N(mK>2A%W7pZVMUeIgQf{0Zq?J= zPulOnFpYcOsM9eW6U{vwA8~l92TUuWG+f#nvcMdZHz$9fLh2^Iw^_?>Pt)t{ntp-N zqEdu#zfQ}6LIR9M1hd*FyHpyzc>??Wt(_ThHkw6fkL~srCuuzSD*Y<`@hktQ=OPWR z`R)XcmS0Ui2sqJvHUA(|{-9d$Io(Hpa&86LhbM$!~;i)&f6U3^58bstHf?DBnBIR?Jv!h+H17@lDaO~>ty z2c@tIj}*pS`A8ztnGVPg2|o(96GTEiJw;uN7Sjd!r|B3DXh}vktf)DjB7eY4=Q|rG zr0CL%9H5+3*nUUa>xta;o6E+Hk1O!za}Osttl10^YmlyUo-=EtD`{(3|7s0iBbKL7Yp)($KtLM;a zImPL-`kpl69MP(u2ec{}9nF`RR$98jWR&;15%(rIr)D7_zFhT zFnI;-FlgwWpchs+xo+S~)CC@#zeBHM`}N!XH}?M9@Av3D{Myw z7=k4i*o$9T2Ac#`yfX0}l{RAw)#%r!LY zt)UruUVl!amA%qFE77{v8lrWrk0%=Jy>&$EuOnLjGZW1u`(*z3+(hfG z!{Pd^;PaBL$7JhmKsML^{~`y`tWWzVe|&D5eQuGXw`P9+Gq=wHZ=Zt=w$G2I+5cLZ zW60a*aD(mhqlxxskvTs5_Q{8+!6t{Ok0#omMds*zZlduiYOu*E>Z6JFr;#~^pPOoY zkQ!`qkost<{qL1IhGw5Z1p1GYExn!mf0+YmW|7w~e|&D5eQud!_~(#0hFu1$ppVZ@ zv)>_elwTdcy11;QB~g2J$`D;GcKfH6GEf%O8(T%z5&!W-{wv%4F3S<`pK+?z*=M(B5~vp=XZz zU8mpagpO2ev`E!x9#4&w6I61Ttx#RnTtBa?KjWxA^i``cHC$EQ~>ib;Om6NMCq z$Cngh2be8LlE#zQPBdX-5oz)PX#_U&KW9(aAZd=lw8Hq&q$0C#Yabn-senwZ=Vw)7 z&sa|(XZhierO^JnA0D9)qa@suB$4{kS zW!ELdLDr#`Wxi%bmUDEZu4ocDj4z_ulH9as5BJVPr_)gl)GTNj>8ZT|VL}>zMVAezqGu2#W~hO| ztMX}atDY)Ds-A1QhU4_QBd60b2CiX*LuV8?ZuOp^mr&db^fv`qydgn5LyP?&jAns2 zy!g&O-9I_ndP2#cpe?I5hO^}qpGV|GZN4Y~i5A`&66_7gbQ_52a)K~$2>c?$JfiVo z8QCmLWg}>o<{EDM!CmA9;u(C_*s0|{Kkn)?WoGOQh6B^|y?)s1bb2GV+c(0%82h7Y za_0Gq#LS=bcFMb{;&(PE_9A;e4OGK5v>|*0PpL02B&btcQUCnp$i6y0IXZiH#gXWN zfZxRTY=^8MOCb@5j+WSH6?uD`4j$)?$U*G?xvFmF^ec@?E>BNr-nqhxZNtbuPB zE{Z}fj8iwTx?_LPhwlmL5chb7O?UkY=YHXnJe)kxhA}_36s8U&#iKy2ZI0Cspha_B=PFaIHqCWBJy2=$KyU zbbHn8@My6`mUuKdP4X_8Y2!x)kf;I@`~WY%J32i)19|J@P%uCq{{8KR2994> z9`k(`SfW56!bavcNa$u3zD=(YkOEU?pJZltS53LBrN-Mh~j|ghXz`4CrDbaFOFs+)Wnlq}a{m8J; z9e3;u4Y%JNz!nKVN7mTsf*4YqYg$I0P1*xmM!Vw{`b*L$Eu-`nEzPG9L5--*r5Jib zJe$rIGKfQllo@eQJ^E;E=i!AhptiL0CuEYO^GswVwLo-)^)CsoF~E@YX+xkLKS}~` z5hoF0UCjt%hf4}RED%`BN4&Bma+ieMhRAm*Fw^aWo>P{N70358Sc4JliwenOQBae( z4#M8ni`85dHfuT%<6-K;%HaugNlZ0(v8oQO=}V!Qyp+xQxTbp&r%UUj@-boXsYNpL zrt=2&5*C8}D>(X5_>c38qu0kjR6MfZ9-XR_4Ha=|A75S^y=f35v|mQFJ5d}`dUbew zQF=ul8K0Ild-eA8+m%waGZ1;xiKpUZ|8F)$n{0}IhnaVNba5h{^Ay~_+JB+?P+wW` z5tN8Yzq|SYUt(_DvMVafUw%Oa*fdl&dRsYjJjOQk2}MbAvNS%WJ(N518~s((5Q)lI zX|KwQ)cuI|77m?ATXo}>EKuMRC0um$1B5rX67 z_{!dYb0qrtF<@P?uCW^EOQwJI?#(JTcMYUKxzm|%FNSX??{m$$S5n&0{&hP2VHmny 
z&#+*%BNFd|th75@P9~L9%_=~;8Z86%mDULTIVrl(<@w|nUT1CT>r+u!8GTY0e>go{z=IQUjMAky->@;SMPrq!=v z@753ImG5Nu!^(F+5pwyXmwEeT!%xrP@79*_K~a1q#o6gmF^Rd71b9a#8NTL!D@iU< zlH0#JI)p^~byZy)y*=8$gfx)opf1tj(LNcOo|qYI8Bax z1v_OJBnZe4QwgzJ$p;blt!oTHQ+`c_{uLfR-#U*e7KXyrtW4<57SFe|IHfDKEgt3w zaSuH97xOQeXIE2;Ot6zGfUxRDLw;YN!*SRuV(3GCuuP)ZDFF}R2+(*6>Dn*hkB0$~ zF$oBgzg18P(Gh`47x;l);&20qUV9S|Qd^S4^Yd;kb$wwmofe*7+BVL}NLA^Vt+u*?udCgKJ+ngRqlks@p`Uh<*iY+-HaWCfik z@m=K8p+uo1$B7>#aL#S1U_NQO#8?s zw?we;I!%hAMF*idSI$Vf1PsYx%<#UC3X}lHN#M$6r6ODs7BU6f2Eq6;fL%HEpnowd%hCH4KtVXgKe&w`V$Gw$PPx*Vlo1t_f!M`UM;tT;jBsg`6 z(K@s;NCqN&@a)Wj>l04DeE5PHMi2%10c&Iszv#i8`(PJlPdxGS1t1LY4DI+nECmzi zL*O^emZ)41a?or%*FYH*prwf7UEp#FpH11j<|vvE;1C3g+s;R`^wzu z(oHcQ5(3*2!xGl>)Jblu2A9WBVS#2?(fp23*zZL!3GNUn9b{W4nq^4iS~uiUpn%Y6 znuZi7CteVa=)Js@3rPK>^lWmGD7xn}T4@6Z_3PpoHg)I}C?v?Xhi@nXgItwTip7Vf z0t~ZVU|gMN^(~`v)yK6S+R@x}&%p60&N^zLyhmCilq6e3rR+mvFlB;)rm7$eU*8?9MS{YxWoGr)emu>NzoT!7Iz0mH*ws%&N zW?~QVF3emcp(4}bMTN9`e0qGWUQDmyI! z%yQVqF$H}?6=i(|hy$c4 zceX2)!ddnYASiVZ*3&4x`CLPmb%B!H$G5?(qSTdUW#ZkGfTE}bX?xPFS%z~!Nq6;` zT!8(-F!Y_#*z=9B(=o=z*y(nS!Pu~>7vMa9arMog^U}*JuT<~V=z>2x>4*s81#$V1wG8-+0YKEYE7rDVVDmN`Zr$y|a^QX;{k~u;Ll`3Zn5Q8376NrF9K2?59bX1Z*exp{2fbIvSKIAu+uz#4kZZK_ zqkuN(wcThTp|jzal)a=putDNX*uWUqTbN;qaWhH*Jh!$`T&6eeOenZtEN8P|BJX8k zm4her{Y5T0Kuc&^Y;CD;^ztT7urdNZ(;wLOoKrwEy4Ds(+JK5dH)9G3vCQravLODXe%PWzbMD(=7h2+RHswsR*Sg7#VsWU|( z9AmPR_yPwe)x4I9G^7q2jtV_6Aw^dV#}zz_r#DVQhGP^T2jQ(aYSk=1h>0PWNXgW6HVt5JCh-GHVqs?MydiBzP{n!6wkkJ+tws4N!;{ZY^9 z_O0Ha+v)gjID!L^=^0+lp+_rVb<>qEXhckoR4PAlOm2z(*p*ULAvVmyRhc4q;#Et7 z3?v{u91-z_A7o06hKwCVgNZYmSQga^m>lmbHz09SY9dsYH9U)Vn%oeyfO(;}bUDl0 zDhCWw)<{5gVAt+rDU@_%4try>=Zv~Tzte$%3M?ly4b$mXk5pZ0#bK*U#eO;LmZKym zd76K9c6vx56p!rvgRA53j^q`y0sq0~gyssQg!2{{Wr?>_HKA}u$cUN!EOHkLlp2Qg9yVGs_(@z7fps7u)u zkCYzM#2$4qFbIB_6grB83$ls90}%ac@Ji~z1`KPfgR8W}FJ2_MX~`41OHQ2W6S<3M zCQ%n?;CQirV!S9zp|9Sa9egV%e9=_p$@%`p)w_4chkH+2Vywz+XJtZd3p-3=jQO4{ zmdnVejA928zmV&}1|@RTIPNtFB4j#-=QJ@30)G+Xxrv;YzW&;jW@TtT_*msz9A6K1 zPYQLX6(%c0xetG$L}PUNXnY`p(``-J2)J(;(p6x?D!1}V+HHFBo2Gr0efupRx-wsm*!$P6NYTF1;HN5BdIe;>;KE95Y7Q z?Y1(X+Szi_2iS7(&rK4~Fu)Mr@9_3O$7v{3*_yI#BaOqFY>;v4+sSfI=XeuH-j&){n_Ke83#q|bu;7D!{5cjZth$WgI{7qvEh#S{UotZWdc z{299Ge_C?_+zmwdI27aGSZ} zv=NDT$(Lcw4(w0_6n;sb1#}@&Ors1d^s$z>mAPcwZsbmPV+jWP%Vb>UT5!iju z8;0&^Ft7r*k|7Eu6u6=AhODcG$O-V5txG?*zLlilqaV)CF0Sm$zn#1~duzWrf7__3 zfrfGCpQB?g8A9Z2%2!${UHc_F+5WmGzW+b$PJ8sj!P|G2AXw3P6j=*7)Q*yWq|RN? 
zcq$vweuwpLy30gvPzN#mgkvvP~4@I09TVb7`5-dukM_7Hchpkh}U!?cBk@3@tMl z23yMfv3j{WW^v<8(6wU)oIOl!|+ktRp;p7b*>@{P@B-D#YnwSmegJ*!b0%FY{= ziO6R_rer_Fiqx}dWY^_5Jvp|&M?aOrvp4E2Rarr zn*FX@@|!GVWxgmplKb|M_HAN`*!}PJgV@;`Sb+O^>8f1+?q%Yw{;#}N34;yXK9P+o8Hv+XT@R*Bs-z!)*`6Asb7?M9euf<<)5 zWbGbrQSke12-8*Nf&(*K@WXY|V9=w)?JW_bl}#+P#KC@@T^n+i7MmOobtA3!y=%i*8zx#k=ITfKl;#KfUMbxw~4-4~-n7jH+nLk*TWAweT6QU0@x)yn^ z>zjjeFo@OsIR_P=)9srzpg5xNVH&Hd*@DWIa%13fU=Fq;47e-YR4PFo7V!IT8Snz8lbN`u7P^6Qe<44&8y{H}D@Ya+ z&Mo}h+$Esf3!R=ZFh`;1RJsHdiq#wey5uIfy|cT!nI`}n+-ml=XlpA{;0!V`EnB%o zc+Su5LXkNx4&I5WGjq@hitW(S%sx~W8D(~jbUOvI0baDA!5!im;M4W>P=U0*_+;`c zuK>yD6$)Npog(-!pF|$4cd$8Bv`O6GMBWWLXmMeIxQj4=S0K=mK*k1IXL(B2glN1c z&GxulDh!QSVMn2Ru=w6dd<-Pc^9@>`8wT$S_O6C?44MWj`+|&CqV#!|1eN1t8osKk z$A=j97j5h2i$py_W}Vlo9!;>Zb6N1#Z3wHe4b?Up^A<9QeDPaWQ?>Cm8l5Vp$W1^&?6bY;{DXIM z4QVhAn;d5tHY)6_ar$3Y6GrgHxr^duA}>Spdr&UvRJX#+3_;UQnUj|(S$@}8**1we z3a`_pi?7ipn8}wZy+Y(Ow2y4bCgY?F5SsuwvE0nOCEwpr2>NV){C9e~SN@l!Kc1nR z;>?SHc&MP0!R%oI%;S`20NJ(P23e6}sVtatR|wQOp!yJv;1}e!l`%M3<>CV`iQE8- zp2YV!)3BLXPeR5zYz7eRQFlzB42*AZ_#BKKT^udnj= z2l>;Z{Ojw?6$%Rf8n!9ke)VwdFX=h{)9i=QYzMtwE}b|;N_8BZ;LkDDVpKc zSdB}+UNW>-kJuO=P8xVSgmKZ(2pL!RY#+#qmWC1Wd=X8fpM$h|7U#?=t?5^`v@~-b z@sV+SI+8Dx!^7Iv3P;+2-#E!MTuvm1uGEn(qR9lMzlT)Xk?>Rq(s{r)g>12n1SD>| z$CPN_g4cn_#dvWj847cChJJ=P*4U~F9?5l% z%E9F(6?C_NSD5TD2J5@FBEfRv%y}82T5|%QktNmYRt-H;n{*X%ToB!+a`+0q2fX}3 zSMa7$GeK|STERX;%9g#z6-<-t9gih>p5uLGu&8VXUe}a-A7Z^t+C(=rwB)`R@o+|UA8yLEnWHHRJ95D!6iGMXdz`SW{2XUhRfZZ z6opFVdCIFkQD{e)YU7LwehS$d(&0*- zh4L^eHaTD3Nj_cXbD-FATql4>ah0ci4ihUj^_PM2Dy#EYshG^stk^}Qn$C9F`oDE$ z;`!vtq92qPf9ImumOKEJMUEAJA9Un1WB^@2qQ4$R`Swf5tz>CUjw*UrwxK8_NHY@! zx2k=OR(*`NzHW;n_IXKj*+LQ+2QU|mQ}I_UHFn{Ee}^K(gSax0)S8oGHn8*yUNiN` zHAi#4Q)2_d2rVqEt<6R3tpgOQJ}krmZ|z$bnRDjWnJ!%*_3?jO^8S1ZlZO3PDoCuN?^OIMqJo-{8}ae>xkOuA!YVvVwD zv!&HqpCS2j9O5)eSY*VdD8ch>f@=#3>>ogM3}z0T@Y7s+Bv&s1Q+o+gBo2?1kOFZZ zCln_&lUcK!dckgK;X+NhXzHL(8Hp5esO@$R$j&)O9OlkaN?3hVc;qTcY|oc`f$mBy z&?>nV_!NHJL@wJ5I3OP48Zk$hQD3cB#UrFSUF<#)Q- z;sjar6#!~SI%sWGE7%v0E+@EzQGNN+SMXuuRu@~WN=E|9lQ5o6QRWnv`UH@r@o|;i zh4UjT{>zW8g;VPm>5Q`K@bFULfMDh$a!hu*!BYGzz3g-K!g~r-QcExP?A`urjXl{1 zGdo!!Bl0~u&mA=oFd1Q$V|l0~F$+gcI%n}0+LTZsF8J;Ox8pFnUMgV@#A9c!PCevu zt?MSlt~zlQbqOFCM+}sMrDGgs_9|qJ3-s{pyo2Xc^+rh_vRk}i7wE|w{%sJh>P#w-0ATC3%R8qyLK$hgd{w8urkc6^&Qt;aIsIM zYRDbODeE*wTomoZ>MU|PpA@kP*3!9@RwT=btTvtCT@fL4kroU%Xpy0HW!|eHP&^+^ z#?E3-cOv>dis^VlM$>*SjNSFA*@`S_Q7YGu%+L__^Ovo7f%HdSMdrtob5@<~L4JN? 
zisH12q{jocCb%OIb12B z2E0k&(=twL@JKA<)ABhGb|Xe^myzW^aB(~E=!mZB`1 zqYA;^SCSaZN;!7G<)N{8!w1ZH>ftzyC&{ag&k99koI5Luf^s)T%RszW>} z7l#NJYJry{s7_`q;7ibw$5e%C7uRJXK7}(MkUGf?a6)OQ5KtQ~U~2e336KX78;WQr zHJZ(uu3~Y1oMU0+IuYR5Xi-p8ifQB{@`R*(e$*%32+^Di!9}8obAft;RhS}a&|M1} zr77DAz@&TxlCY$Oo=4nKePEoVsPanVP66~pV^SD8AvZ`ma67kYK8OW4B9`FLQE&wW(pq_1qMOmwjxwGuc>o|B?tOWg9j!ajlp$j71_X zELI^*Y`Yf3WIc3HF01|tu!Hn#%a6wnl3dB_Cp6(YP&W?x?HT7UXqa0lFx>Dxy71## zK%18(#q`ss)PS$|)&Sk8naxNyhmp@GuS}O++Oq5pT5yguWyv$+0 zGpPrlGdQo-GJ7OJjN}s45wBvrV^7pTrqN=lj!<~|h@}@W<3HsfU(j=@dPju^11IG$ zXPNUb;p1)sX(U-dS8GX)Nyeg;lbw`B#g$%su~&H8l%70g2FVBXDsMTC2s)smAtat+ zml7#SZNzaR^Y{gYV@Kh^RS9QV=hErimhWwwPM#R$WnzOU+9+&LaTta*}&cklkqI;4`{RC@l_^P@sL2dB) zrqI@!$(?x8{LI_`Rfc(1CQ@l1&x{s9P|Wsnxq$~xK^?-Y<>pk<_urgUpS0ttURFV7 z;sCc1GE<(j3V4gSk$gVD#RDJ+p39_UP67z20{Mcb0#H$%{#H;G;XHN2=g;bcAT47) zbcSy?=~U6u)evMOQl6kVy7Hu^)qpx&dBUL!0aVPfvZ(TyP?GlXPD|e2m(F@*h%}4U zF*|#~8ftZ8AjLA=zMbOAN{x#ab?cONXwWWB3s)HxgN27|TG=3TX9OAow2W`-ir3l$%y*SyXipsuR#S4Re}b6n4(J z*h~?yw#kynxW{S&ba~;-)=D*2B-bm$O5`;j@sb2zx@nT)m9D0j)fF}MyGk{muORZW zK)K;H(8~Wm+QEr_`Jzs?}nof6bQ+BLWUzI~fx7RoPt`(Sw z8aM2YM`q9L^+#sO=03~DMzYyY@>%K)%+^@!(5Kle1+#&4b_z3x+z%X|44}m`DUCMs zAY_ovx7maaT}E@mVsa0*r|5oi0Yl1L7naHe+VXvbR`!9H5}v}=Rt{xl(~_Ai=L?+W zwnTcO63~X9d}JuTqgK@y8`dTBP4Nf1~SC^mZuYfg}6!NUkC7E&qCWny(iADnm1~)d4q9ic;mh^3OXInatGEZ=z88T zEKat%Qh4swz2$g`94@hyd#hraU=c$G0@cQCJ{7OGw?|*w_Ur@B4ia|!o5*C{p+x$Tm zPSx^#qw6~%VpI(KMz3d#jP7XET^&*_b3T&VLqZD}5Lm!K8$i|nI#eO;G2)^P#KWydszZM3Zv9y-4O*$&^;Cw|p3QkH?g4%faq5JN@Z#?-beU7bv{?%M! z);^cf)@T#rU@5kZ7lO9k62u%?t$CBa$e&S0Lggb+IS{uVWKR^x92H~JJxu&yvw23F zfHpMzAx(^_04n&vDOki}HbF`-To=V>D}$^N{#qUATh|hi-;&O6e&+k;MIG2j?UgkU2s?r2An$nO zCBA6L7osinRg;AuS;~@66!|(!gD+ot6KGBlmcv-AqPDGO&;wCoIobFTE zv!Vkw^`c^Ljplc~-3a*}PI@!85V8q^O^yGW6p9rruh$I+Lvw_nV&l=M?}l#A8avi1 zE37Lm=ZbZyXn!_%gEj`Mafc}v@I(fdBa0{l#_5o=66*(js8~18rgHLjt_^45AVIGo zRG5$vCnElk`+gqxj2uZ{yvSmH)&fS{s(7on&6aqY3FnVtJ5*&8asj&9zx>w5sEZdD z@6NA|L5xzd0eR+QpE2VLb<9hGe?Wl}fPk?UKxDgZzr??`<9Wec;uAERHB7Vc=0zT% z2MU6I9E^;f;SYLav(pL3Zhv4}U3bNayp*}b0sN30$wy`%U4Z3h3BU=`6(Y25sRtm7 zP8sY5BS!@bR425N7{P+=*HTtSvOO;udYQYXHIh1(AeAtt=&_G#S{i6JeA^SVgbp{f z_<&G9cdp@7)D&NTEgI+hZ;tHCZ;oGI!E!OArG4sruqk-{1xrfJpKYH?$XRj{L-X2{Y*{{zo4vy@DZ@%SZ zgj|f-+DE5HSMN?Qj`k0}X%UsW#*krAhxJt2te+duNEw^|51`Zj!NJj`B#}ulrTno8 ziPm;=12VajTLg>^{s)lh`1HH|x2@G_pJ_vIUZ(KJzDw;7(y3S{Z;q@{8G-Jx((*N8T`C&sE|0WAV}=cglAr+vtSok-D|BY5jircZ_*Wt; z-MY3DW>RWFbY(TGhD_w%fV9Ur3@HPE@CLqhd~SbtX&=6N(=hRHgQyEQ4{IO#ycYGJX#~H9NXeh zj}o`L|LS;`L(}boj1#79mkgJ)f4`#hxgmD8F#_T)g6$xrB_|~fV0!}fFOul`8pKdo z!%@j9^9@0`-6>Z{&1zpSBR^1gJqd7Gp+YefEBT!6wS~6$*fS zW*-~6lJmo8u)B!o@g%-}sHzidv<6rs;xbWkH;(c6KP53c@N-nEm6lp!r>$k=8^Ybh znNMJ^sQE_~8pVtXt{P)yWLkvIO&x8rd=l==n=f~rsb5p)`h0nH<=mLBJE+Td^Kc!{ zDr>B$4_IU|aqDSPOHsPZn1}X%M|j|#&g(1eQ$|05V`-A^x|3zFfMLAZbr;LDu4b)9 zmGK(7<|k7mC;oTMl!XubE^(&z^cn-!bH*-Z#0jM`_)!Yd&cm+n?YdE%(jS{q#4K)t zniusMeQ#zN{1=wTG+rhi9zV3EuE}Wf-!&Qa{>`Vm>3up~?!tejxjS?2BGa(v^98k4 z>tmTX>cTwXZ}!xQG*+m{NYhs?FT3Behg;2XjFE9F^Hom6BEwbYD=izImd~^-6Bmu* zKc*D2-hG66iZtDo>dD*P>0&s*KVzUZ9xgL(;6V*jUIbvhS+fLa4L3))YH?1ClCz6g zOB>~sT`tCCn@(*NiLX8%H?^1%bp*6&wCPG^CBMc-auK+_A#dMwgIR4Fg^YZK`dBB` ztj_Yy9dx&Ols1&FM-?Jemjso2l%j=}!g__PjlCZxLrI9ZzN;78zocWSbVb=cy8AlJN-__@q0s%Y8`LbcZ$d2jf&*6 z@kUAMe%LT6QW|`i13aP~2F#I=?>R+5Q4ufDnoq^NNCGd2NcT?YRfL^54HmoYx3D`-$kmUws9hTn7Z*K@+ja4 zsgco5@9ohcca5xb(O57|V=r2WEM^ETsPhp8QD)T<4k8x zn0CWOVPhCeYvctZZ)EmDcUTEy`N>VtATNg_LcG)mhO+TUehq(L?jkP2Em?m35p&=Q z$`uX7IFM#x80Vsy`@tOpoCoenGLVbq{M`(1MxgIf>s2C(HB(qbdWJxJzImy8lN=hS($XT*eKZJ*YOz z%GF{E10f;B(7+ZQ*yx@ayZQK#WGzd`zw`5t$g4P>pz{FnlaDLHG@6kIc7PVwB^KiV 
zPZU~a4iW)TlWuI$wMUUOrv~2fNoo`Z@+109WAUf$IJNE%>bc~{KnNiC}=4HyI3YeG*)Z1DpPp``!BPg3XQaZJ0a!%tkUIe-5M={efbh@pr(y=I# zB=@t%Ak#c!!an2N8E)#<*3@C_=P8D?sO47V=9CtO1rBMfWHL!j8rz)P;7O;s?o3g0 z%TgBJtCJ}cICtPWaO|<1p4;h!=D-NYz0uGa8|#kTHAPk(9cq%&k$XtNitxkiO7jDB zFPOpM#rAISw7Q+?ZD`E0V3FTmo(QsA;sL#hcu?QJM{^<4cggt2@87e5&~~A4e>vBR z)}mT25vrcpC#zw52Z7GnN=lWwy`%Ep#P4Ko9 z!n`12SHk6uX)ATe-HfA0GfXNlSb}=BhYp{ogD{lX!FZz1OCr}Vtor?X3@Mzq9%ykp zi(NU#p};jMTM|oWie!2JzR-Ip2TFMVew8lpCJE#@KV{mWcP5Cq=y{JZl*pYCXiLhG zeU5!iez(>2z=YQy20lE)c<>$p7b3#fwxTp+UN|*HE@xT(QkhiIx@lIrlq@rN@wGA| z?E8bRJMI}qua6-HJ*yY=1|!e))=jHYQB~8al!P@`thi#!0#xJ>Yhy}2rk8%A3LnJ7 z(!=R;vWUERLN2BBM$@yX#PkRuEpFmP;n^Z_W@$w6!L+B#)I}}Xqr|PaxvlJXg)(zv zjmJ0)`|fz`2fe9=z%k?61lJl>Gq2DwL`g6bFEF<^{-gT zvQa79JNqi?66@ODzn7tPprlLt>f-p#o1+W+)!T0mkG|V`fEj^fzBbYH{d-z9P$`GF zR?qRrGbs2|)kHAkYVxnDXQ6?O_wU#0Pv5^MCE(dzklaPVeNHBP3gZJ?bMPF63siQ< zps1+~hk(}e@85q7-G0f7133aKp;L)a4rGQFEP`p9w)7Ar1(qw0L$t$9mk~R=;b@`s znGFE}5g=8v5=epLh`T5u@aicn`q50{$H~SuRmfHicpcGU2r3eTEm62vCE4Jxlxax} zyu`oAw=jN0q3;kw@gbLj-oIDlp=)dl9e7TIz4dPh5i=K3N4X*!BKg$C0i^B#(Ub5N zj)CnNYIx-`0tGcBB8JlEP~;Wq@9!^Bo&##e%f)=TXo-h-DM+*mSZ#&q0hBwd38p=| z(YJByk9~?OtCk;Ib&Us!;FvhIa#dGdY_Z~Odq;xfECb#;>SqLPL)g5e5|@>Uou3u+ zb1|dQ*U&v9b>Q8^iqtP3BR1uL4e_G~N^r`a^6;< z4CM4I*A2bPGbL5NiJDibFFbVu3LxJSTBrLQ93mm@UAz{tI zKq^47G)-?IF%gw|0=1QCwU$EHPduZM)zpsMyP`&}^i1l4WPh<4+DlTCaCZsD3bil9 zp=n78Q>lZ&!u0M4x(e|K0TdOwLf7WvX(v$N3^TfzDRh(zD{Ex(9-|T;=LKV5_zF&C zRa*_ElAy~ii^F^{u!_bN;ik%g0ml2^1c6i?Xt9FmB^d@9Ck z3jNGVp*djcKFHo+?F)(CTFd=d?_11VQYpyY50VtuB&BJoU6ocfOq6+&u1T+}6Skmm z+9s~Dr8qw?SFRoz_kHL&+s7n^1E4Z13>=S!HjJ;d>nazkxD*;*y}$ zwJY_mK7~=Fc(ufxAP~=<6(<-kQF-cT&sdBq?|6=kDqm8M*0YrRhq9CqP(ya^Eirp` z?@VyQxa=kDSA}P(mTI-ca^{2xiNxXpm+_Z0pSUcL=Cd~IZn4EI%clM>PRsqRXbiAb zk<8JPpeRe7We}Xz)NXC+Mq!aDx0OMkgAY+kf%zmNYb~|5cgpwp%m)?AGH>NNn{9BI zQ6l^^@d}qUf@x?~-VV^-bU zG+V2}6P@3Xuq7!+Cm~TV4nv8@wL)>&JZkf5J3rQo;MIIajIL*dkA`A66M@(i=cUG% z*#@6s31R~zq97WKB0?Go!%zYPvc*}10;k-m$`E=aN5oZZq{S~t5o*oNl=Ln#I~vE! 
z?!5{)8MH(;Dv}<|z<<(Qn6&&*{mhH-X|V7*z?RX>vpeL@bP$yjEcuDJau{*Q#=VC7Wgd_*!kWCL$QAB-R~Xg%E6>98DAs=o=KW zk4??t#-Ni}t_SRT+VA%bak_u0b&4iRpPpD@=?J}2>cH1!dT)H z84xd?J&|hfRsrI_Jb(>U1>+Ha5yzy0enG;FY+t5&aF>%wuwX)*(cuY?!D(S*Vj`X3 zv9BSOF%NhN)ug2)T|H@HoFq-kmdj~N8dL)>lQYFx@0GM#I1mTbz}1oPKu_=mi`KJa)=o7@?J*LW`YgNHG~-KO-%h zARJX1wkcjqBr0t#TEG471j@Un2HeGy~!N9)&nWs}EfR<(&&$c}PX!HYk2^H;q36RY=re8)#>iEKNl zIJ8u0Hu#kiOW^+$_lFl2d4R|AipLHu5=KFBsqT9nDoKVnCbpDAgGI^FX=V{;qy&Tj zSaO?D$H^8(x}jOFkf2Mz_iR=y_wR}$P%0OK#EljSu8%xKw0tp{(xnfx^>!BfVmE|| z>xz*u)Iuf1xA~=Zsv4@g&V($n2Q9!PxfjhbrVLJ55LK#mOoeN4bw01um`W~8=`ygr zh}+Ygbi4Y3sG=8(eMKa7Tq~`ZbRjdJs8a|@X}syLWMI{mo+s1LthstwaeXV_fs_-- zdzE3>9~q_*IHQ5n>5Tor?1kNMI4on9JyKZJt;&~Q2LY}0_L^3<7~rK?m6i!8;Zedt8*e<%@23yYV2T15 zTn97M&8o`*M_pYH`YYsgEJatNBHA8>b4{k~*hb&|aJ=^RL_ zD0PkkJ!=PKB0zT4O-n7_m%O#u$cv_ptmLD*PV+q@w;lQN5zZP!LOD2phcZ?~O6c55 zBuj;v-_hpeMiha5fzfOglrh${e|~%*j#2#!Wu;kc^1C4K1qnm>wK_Ku);}te5lN8N z7BOHkYu@lq-XvKSE~_%+y(Dyj5U`xjas@ky*ePD55BmLXKQF5BYX8v2P8=ULRnXyL zt>S>WlgE`p+|!8VgKB2uD2JC~@Az?$syz)`r|Xp!kJGiY)u(&sp4Bo<pau!8BQ|IPY;C$s@nP5h+x;P>htvYKd7-N9?{VQfsPKWF>jB z@{q9~W2E9<`MA5c`*<3;WBTK9zd!DG$355Wbj-1DhVH;HhvPwU$9%jz-ZURi$Cwig zIl=qpx3AtEzdf`MUccRcb18QA#Fq^ORurDsj>-0})efhQm&9!-EQ~&Y05jkFvXLg< zA_;=#?p||ywTyN+i`!26F!OGbcm_wR_Hu^NuiJDNo>t{JwC%&=O9V`{8Sw9NTl||i zWQ=RXsE$2mP>L6q@oE;jM+v>SLY*HUb++4eJ#*5mXWu*U4DXqn?J1Sp&MQjyW-**= zHlWYrrBw7+h49iyS#+(viG0|-J2%MJ|Ax1(9gj?$P+f(w*Jqd(mjP$NWbNGKDDf;1 zAUCt`N(~Pekq6wG7m{21Rxk*Lf!_;7w1tVqF~%l7 z{zA5#@|~YE82HC`aEAfrI7$+eRvu&mi5W-TZ_ucCvqfUlcJ)*=OoWSrmNc7{U8iv# z5K38h(EI(v3`vm`^ChATs?6HJ6TPUd@Vv4`+4v)y>Hvi(M#me;J}w9y?AhFS4U3?KgZcv?XIah7lgCa%m35R&_ZUfkgj`Vz%&EpR`r# z*p0LL`k~F|3o4H4r(jbFN8Y2Twy0~8frR~d_LoK0;H-awB0pf61%4@pbXjGbr|bAe zXSP*p3iS$y7xvma#`l6>FYRY9d->9-de1YTWgugGu08>*`r=nY&=Qq-#TL#Z zdmi6|G>*y*s8Wp{Cp-K8=)!(?dVEERGnVBO=3bGHThzg7@uCi&$bB@4a{Ld*gHb>9 zh9e(D4$J8FJip%`c*d|s2!VT13J3 zK|Y29+QI(8H%Io#)y0J{J{V^C#rgh~z!w&7+ywfXwZO(>4b;g{uM^9|la1sUc^I*w z`Qn+!&~I{sD;0vn!v#z}tpMV~k1V2w16Gix-1>LQJbMsYfghOtUN7u)Mm^8!8-_J> z`bN!|uar|7?3MQx0Cm4U z!XRTu_UYN-(eWWJw-goU(IM19zp4qWrBIGUy`b+n)-VXXL8s#e{?Hu{y4_yie0(_d z(&`6QPeoYT{V@(Le)O!8dKgRa|?z`4W7FLmQ9g~f=0=s(AF<%m@X-X1s~ zS|V`qkB5JJ_=7EzT2pQzA|Vn8I0m?Ak%}>khV>U45p?h9;^ zZ$o7bnZe$@f>-oI3M&7j^*HuS(iw4Bg1sTAcZIiqp+HFb(xac8i&zzKKwX*-v24P^-(H( zE{4)m2lV)j^Z=;ZfZXEcOVMwFaBF-RW^I)T_JqYPc0rK8l?~q6iB@9{LfekX#@}-$ zXi8hqE9YuVr7NX8x{6%Wj>|@_umiSNHQ4NNi^DK%;?WQrRI}u)l*1vxSpP|j12ZqL z9w<$>Upn*117?ScB-3GMf9F=iL4bf>kFGQdCa`+O%eP7|;aKB=<@Lsv-|cif7j}!j z+jFeYe|&{eCDpGkDg}jcpoL{LUfnWy%iM6&KRxFWHCXN@jB9cy_^EXE_=&2%O7|^|?4AJLTrUpXl z?Yf@Ct}|)qgYpY=SPhXYfHB>5ZzbW~kuh?E;c(FHV{DXQFdldVcW9Ku`K>FdGz1kX zloD_(b$~``^A4t%7RO>a63?E^VJ2L+c(9%!Je&F+&70bZ>;G~0=51{p+2Ziu_)~P4 zJ4Apaw1L3HzJsxB^9F-|fV140Je`)h1wEG3idr&eX7b(Nv(?s%BpfGm@8p>=&|Ots zb?VgF>MXkFEbXt|8~26(RNm4)rfl{$F5lgpy*>LM(2w4nz3lnlom^G0Gu1gD*$;~n zh0%0==hH`n0TE)LycPMPZZ5(FHJhTJh}{B+CTjYhdWT#m-OK`c0Ipyge%g1R}p595&kBqt<>q$Y_G) z<1qe}zo*;zW(z3+|HzPdYUlql2>bzXJynxXsQld^MjsJx=MFFYTm=;`-km|8l-e31leNg3n`04F=t*U1~Ve-A! 
z$C{!ZXc4S%kJd_P+kaGZ*~z0}&$Mo^Gwm2R8SKMn0y06o%2AIZ|02{CtZsW`1R`(Yi1sy>3!5?cN z>xyK`#YnQelQK+nJ0NKU`-9H@VZ9#hH@aafYId59{%XWs@qu#mUD1&&-pvE_wAjU; zHWo)9_Iv#)qAL3E-UHD(quod~^+eh8ZkDdC0!wZ2=U& zxqNx~R4$0n4Fzx-J^;OLW4glUG>UR*QT2Xg$8ALMntYV!LTdZygCF>+D_@6s&!|?w zC~6*1%`(ta=mBRBmfq8N@>f-P6#D#B$Ml*!6h9%lTKu{6E*{S1E6WF^r>F#B4_3(6 znZIYx9R{sEFPL4~U@|+8`ei2I8NztJICEIS3P5AVqq^?bfl^OesTc5A@(IF83 z5?A=t4`&~xEX_t8qyVJObE@q9Ol}P5jTG6F7idl4=TfxWpH4?{UrO;w0EcdeRc@y7 zH1c6dD5s%Q?*kg?-<|r`@6OL|{FCo4QMPkqzbU5A=)I2gc-m#3$G?*g4P`0d9O$g}uatJyym1q+hTueHVbemZs0VAu#i zn~EAC(CIXSazs>RgvJZH+KN8Sy&d>Niv9e{Qx*xlpRK4rUcdUmzrIG3 z)ArJKC+}e6UcNd%U)}W8_x|b4D{fa?((dv-Fh6K_ash~Vd2x1gc}1P1hR`E>E$wvE zd;89R*?ZT!c-gzS@xOa}b|Ihg#LEQ4h>z&tP@hf}RMBPPoqv6L{;mJ=-CG)v7QRzi zfSPg8@d3Wu_70$PPT}XVI{+F-;;$~n@RDL1c_b=ssAy@}YK7hXcE1w=Egb@K_lME2 z+iC7E$LEqki_y5GF=Ou`WAC9O+H#rZ+1Td}@N_oMs^s_@H8?#w;4%KXElwJlF^IIT z(HAf0#|ef40j-CA=xS4;m$m0I1ONV)s<$Q2@V!3|XRPJ3wNGAAXeOcD=MPv=AlY3~ z{&IF{b(Q|o*UE2oaB$c>2-^o?1Ml?0pw&5Qz@x)fF;)}~<>EqN6SB_kcd8w47k_Xv zK$)nMeCOuoM>>O^M{)f9MLY`G6G}4saLz{hmPy+I^be~Et5jw42Xb028lRKKtaulU zIJ(*X290Clt=T6Wea|SBOv=Gg&}995`FOtrGX@0xle7BkHPFX$eqQ=s5U2%8bl5-Y z4#KcguZKWig8qJZ)E+k1K%k7%^YS)?LI>40y1@e;!JkS0@3sG`6s2&w=-t3BZ*S~A z(?~Ii<_MnT$&zmGPHt{`R~JsdDaA-kmUldP|MKk8=@u-)I9c8aP1^pr_sQ;`l;ig|?B*@= zzk8m4MM>I09NY%7uhYey$6fUZeO?0lX#e~Q573TRG)z2xrf0Z23-ZSm?^kPK0}=i5UoC5 z&<8f2&D;URn%0KjohLJiIJ{4&3k}BN2zr)!crdm^JmFpOIhh?YkxJlgzPNd@xlMx> zH5)|?KW=%xxeZ|GVX`g#2`ku0kHGjM+mwvL)XKL!;myqyW)L@K&Q**1fJyf1@b6Z3 z(shZc?$1Z5pox6n$>|J}#V|bC?6eSD@Qbkguci15ji#cL2_P9rm{0f9($Bt-&kEYv zJ6Ar3^RG?zxCKV%x;b(N!Gwp4te&P+yX49#3hFIso|{}<6Pi6Z*(x)R$}qE~FC{py zf=R~aW{ScQl-DA&58@`kO3p4okLkVI=Vo@39h46vQlZWaviiqXG7;S(zGj-k?ZTZN83kf zC>Nk_SIM4^Q6Lnz7Q~puj9mNYvj@&Gz2gP9nCZ=3uIzmIT(5dJS3hp-3wYlmTMlNo zBpH$ji*%d?Po65;2qgVLg(-`?n1Ys74&7I8FJJaH8a9h5qg%D5u}m>lI)MEXOH`uN**L%G8rQ! z$sy53{z{yrSZPY*G_y;UbcL=^RF5k2IINpN**z#>0gg}K$Gkc7U_7PZ&@E^3>Qg9x z!N<0QveVy`{6}c0qBkNq5KFR{v3zYAL6tAz{=$cDGHJpTU=8hmN^OYDyV>?kg0%ul zU$B9O2lptom$SKMh-_vuythK0hYYm7iIL<826W@2tVhf??+enysT@lfp(DPEAO=|< znmlO!Za<{~C95URSATLi!-TfKcQPT;1<-OULIA12EE$-g z-6ZaaQ3hik>@i#8tQX3Eky3pk#1LAGRLOXP?!fW3C@V9QVUUQdo%k{noQu5XnQ7>d zUQC7`=rK{yqZOdN*d)%-ZkU!ZkHPXPydTr(0}d?a2}VJ)JS#FKe#~UJn^83mYe3GL zu$^78C5^1^jnV=j#sHR{gq#*QR@;$+;u6fzZ-a-Vyl#w`9_3i0vDF%|p?4f(IH zF5kbq{;8lnP|I~%Az{PkdZKs%!pqs%!Kqp=nHZ`%XhfCJ;q`b03q#dd z8yKoWWbdkqv8$@;W~Ei>ip~ZCbp-atjdPL}i=xy%W00Sq#Y;QOpnS-cucQ}eK$Bg2 zy75Y9i7d+=T(C7VODj7~QL4s@LX9lb>}#g^qL68pe_m~%(NuV7^dQ8&qcSL(BY%y| zEmY~e#g_Utj+jy^ZKa=OId4&-w;ZWg7;f0_xnjTPp3px<=`=mE647rGobdG~=ijE! zRu(Cz|1+tbXe7&&VR9H7*0rTguqw3^D#&UsiZ4%K7QhS@1E*|3OZiOIi(*NVGD{Y> zcZGbDhSR(gPnyN|*5f}*s4^0%#}j2uJ~PkpP#=??qp~Y6NK+xa1?~yt55v%tDeFIq zz``Va%Bcp^+SnOoJ_&0_NlqQc{5$8!Eu-6g`|TZ~DIxTRgWeWt_ukDC6w^~qi^}=S zqECZpI#`8On4C1^VOJMbCfx{-*as#RfU4aog$>DB5M%v|`MLC0oO?v#CvA z=Y3x~==&)iZwG!VZ(EKe<4>OP-5u=71n+Ng2Jk!Lvh^KjXPpmDI7EOkfCfBKm4J&* zwK_W1*l*6yu5bJ|y_1&?VAM9~iN54JFSez@xy&0BU5dCRgqh~D3}&og3$*CQOn%?N zKk)l{6Hm`i@N%_xe)41QrT_iq)wj-^v3)f^wq62VIH}zB_Hv9V5mWFjCcGD#raxjU z=!5EeH_04RoN0*~HW$;%;pVadt7u$wXmyAJo;A`l)v!G(Q^mwkqdOCNjYln5Ae`cv zaaY}Sqnb^}9cSC&aq3ecERK5jF3^!xA3`hVw9LTdop!F)Y+lxM?Zeh&99J4WB@XS* zK|y`@84<}kCivTU=51Q9HYwVg)&Orx)>XA=ikmC4a7CogmvPC0^Yqvy5v+=rq;4o5 zwJK%DBIzM}Xjhz5)o;(1rXBi*O)Kay{DL|$C$Fw!)TjB_6@#-;~dh1;bxS;lp

t;Z+ADA_us&qCPi53=+g>)ZHwM>1t=eQGRRX%@v817DLtd=If|b?I~p|JvpgO5cKKOW=`3Ln7MSt$>u+qR zqEF8>Ii<&+0=)plQTn=3U#n^-k<;p-IFdMbuD*IE1P7aJl#nr`R@Cg+4E2@F&2&{$ z&5I8NU7QW~|5dFn2R1_sHZ;B|_!hxZ$17)hnBA$_e1;|CrW@-rFr;zhen$ulNw(0 z)c+vXZs566A}-|N3c)gBoP*lidCJW1`wxb|0fA8nYGn`Ma-r-u13*KaK6imBSzW4L z8jq(~I9VYB8Fn|w>%~F`yiuCTHL*55gM5~)+@>XLslOTB}6uXHjtvp4$EeOzZ7f6W;d&c(IQl)J~jl>1+DK7+x!UA2nSv*Oj*&N&Tt#&BhQBN%~!MS|>>Ye{>@9Ls= zu7fQoJDX8>^@IwB&&pz~4{3pH>F_M!BINyO26}_fa{Loc)kXJM`6_?kAS9iN)m$_V zYq=1Z=s@yJ$&QFY*i3jq@+Jkcf+UaEw^bO=HAo699<%bo`L0+wT1N9LUkO8ekG%v5`}DoOn*8=MsPL7Y zFYC%NeU<0ejZu8GI%qH!KyGuVkULE?jdFF*D9&ZBJSa*E{S+aaP;D(X)cb8Snz zBIppq!&_%Is*y1~7H>0>E2A0hu{h-9jcM%JFC{zZT2m~_$hRssifyz1Ir;>ZJIVb~ zLb`&)8ce6avBx9F9#3pr%E`xJLx}t~=W+MXsP7J{<`9%{GviinP?AMird{wJvnz)Y zOgF(Vx|G9}`>dN@8>8{ItV;j#$Vxy%Vk1JZgId003g7}(Tqut)>|Hd&avxIL2z^_q zj+$LGF$9E&rs%3ef|(J*@44Dm5g!ivNNHs534TkWk6PIIWC32NUP#9aSVSLOF_7*3+UkWYLa){Q9dx4 zkN=JbaS7h6loU5q6{*z+w^m!mNLFc?j*dA9CWC0?C@DTx9VsyW3Kx3>?OvZpu&^VP z`?H0erQC6qth27DvxXp3e(S?1f%RwE8-?zECf22`9KS+8X`&=0Cu)7#QNZq25|2NM zdF9}<`qXT!b6}R7OWQhIhJ}w&HU3Q!TE=9cqySmiDun!ebo^{I{5Kl|RwLp1D7Zf1 zeheNu204jtaG_nzd7Y#4zOvRSM@P%Di9r0?_D&+KloFWgGs?G}JvJSW{wfc|zRt7g zYa<%dMFi%JPPCSkw@y)TjFd86?bBiaW!&1A!7_|+^*^FsdV zaxeZ2_Dj)a*%8~R{|~ttUEi0n^paF#B_*k|9X`vhQVXt1kFATLrPo5a`?{>qV60C$ zEjn`)GiPLtTd7jQN0E8(7va|HC>ttAvDv}=PB038WxSz~GV+U_;~kE&f}$((C{u9Q zqawvS8yj2sV#MYQs(Q!9lO2(e+TiFsf?e%5M<#;;JRe09p0BxK8r%g`s&mG%ZS;i- zip{3dwRmQ4Y(T3qmPNydm`1={{8Md3i^ftVHz%l(&de~L#Oa;wlee}Y;h1W(r?~1- z>0~wxdDDdM6Ea7pSgkGEd%ng(z<4_dk1D0e^igx4tkRM3Sa8q&VkiuFfu2Vn#?d$g zUe5!o_TDPKFB*G3 zMRqy7_*|iV9zCZ7gxc&i2J~g)tFS6F<2T3e1uiz--2651V<|yQS zQsh+ldB@5e9Y)rr=f_)qba;f%JwF2YbNkT<#v52-|?}tqDianfh@a1Dd$B62$egWBJcB+%A?NdDo-HC zzU3Y#mtg2KtoWE%tfTT+V^+?PC0;G)$3oBe0-5b* zjf~q6sRRO$UkeQdCThX;(y&LZ^)A2bUA;QL{C?%|&T=s^)1XqeI@-@ zsWu7`o4lteF_C4iOqQp`6Lb{g{Rs=PQOR1)GM{Dup)i`b2lsGx;i;JriU{mknd3(p zYrps3n%&`dyr&)mIImWma1@)m_YR}LJg}nybcowZ-9yF2u|uV!9nf8Ry68)_dsYyX zlMYR1NkD=fPPZ9PX!m%PrgX3&R!FhMZqepGhuM@5p5 zIrjQ&8&!n8OlAuyCQ_^!EIG=>X4KzA9a%}SY2#>J8j<%BwA?(kuRngg$$}+j4JWA_ zvkovd5OWbb_vwDde;!^GCaKX)9gn8w8yg1D>v@HEK-AQaX{4dE_;(CN(`OA}8ySl;t6^%c#mtetfFb zE{STKOBnmZ1w@PmQxiR`>`Nf_6$KYBCMumfw5iDTkmQn-dl}q=AOtG%$5~IT&G>wp zCO)#a1!%Xuj=1?nttm^qpG&MM#=4%dr|?M(qfyZzv^7NS(Xg%sCg-Gv_1!rGhqgsh z0ym9~#qMcYUC^gabguGxqG$nSU&tB@O_(^pz<&H@i3$o^fY0%7Kig_sOx%lMiH?gW zVm&($tEsD$*_?--o%Ku%JF?J9RKKlzKmSaLBGWMV`Dd9eiIFQ*Us%!aCARQ2&$AuKtdcj2&8uXVud6m?j*!DXv|IrmMM z0NP!%oPWvX+T-O5SIv3qsmpsuhR7R@ZE9CwQZA-H0 zf)<(nqiJ6$#*|XOjdcj$#ve%Yepl<=5(1Vr-)RD7tao2h%e3rMzpGua*kbnn;^gM^ z4J(TTub6@_a(VZp&hHnY-JdxVow{n(R30Jcex^S7Tdi4Zk$H6IC@1sy_ID-o^no?V zJb9gb0W!}yp_xY0w0)To-T``VE*K-wKQ_JAyP;l z_F}4K9u`n59Q!i}l{ID|fimxH!gRTqPcd!$VmLGf2)yqp=OK|1rdmK9atV1Y>nPA5 zBp09KyiiDfEF!7L%@ctvXv@yNw~h{L4!s=nl_9*V54*uJJ``Bin1!s(KQI0^*{m)} z*izt}V^(YU-%?EaTyxInp%U$8bT1fF3DPXD+sfqr`b=JQjoBPmV57lY>HS#L{Ov3% zw_l~KX&uh=`!gmdlN-UVgztQS@5~b{EylRqsCrw}ekGT|cf2d#6le7J{`D`}w&%qO z{oO0BtCNe@Js$e|tFxP4;Yq4`W^&TAP|kGYuPtJoiDmy{1iLIo-act=n=eGT-McFE zBl#H3=GqAAxfKrnJ+!@OHB+Q|KG|ubP1I* z#-S^ytiw1cUc^D8YW^OdC+w1eLVn`t97$-2|8JtvRF(8g63}lZW!xk!xwPho6TYe%Mo-fCcZ*+4UMJkq%@D9s#Tia zH{Z}0pzCe^2viz0zRe5xxzr1um!Jvn@||ZiW$=b5$&p-e8^Uvr6^YhOaoEm?N0M5lrspQN=2PzZh z>-?02wK2;tIet%0U6@xOnv+XCf_g27?k{hGa5iZP*M6jsssaoON$N7L>_kE$tTjYS ztO|MoS;Im(n*BE0!sp@HA1YuLqGAae7D<~lQiVjiDeRMp)HO-S!N{s%0ND{TS>3=UBC7`ZEwE{X?hHJl>j(!%h{hR2hjgtQvbkyGP z|MPTo1#(Lv!6Hg3uYLzwszzRp;L_^%mQG)uU7wy@{a;e=ojqIbEC25$tOD9Ku&;o) zmZRTqCa<53fBz;D>!Rp?29b5Q{tu1#_`OJN9^Ji{8RX3rRQ&GjwCA7P++6wJUsiIS 
z`Nw4~3%vN(Y^xHG6|}wjzGAu3l(xy-xULvW+09lrW3_N~I~IFZw^OJ0xO69C4xTib zI4Z@d4D0p!c0pc@WySxv>>nUYdAwO&q3*KajFpXWKMNX}jVyTS?qF$e*}7%#Y0s85 zpv_v=Kz2ztL9cVGwBY**F3={_e6jKji?3jmtl7NJsUApkkrBCgAnjgJfk3<9;fXsN zF4$xT)UG?19OCeE3w??kR6X*+FGW}Ozb{IS0e=I{F7KR4+sOlEHswZE$q~a+MVg9$ z=MRZROdXi2XLij-EZWFzq*$ZG{${sB9#b0=A)IMtUQj!R>qf!O3dpVf&0oAyL_8O4 zw0zyEN}UR!h8A%!ivJO4nM4>TM^!U7w~8`bo$c1^=7Sgp@n|uN3NAT)nr(ji7+mf0 zKbz61=x^6MS>6gm<8W{uEj#>nDeN#BV>#?3%p4mUP`uXx=_^5(&FV{^%+J9BfW2}P z`M$>5X`t-Lymrpa<-hD*pI)84yE(hOsBD%knXdVz!^!)bHmbWWE;Ssm-$H9Qm)$a$BPzjy# zF2uM86g<7fp@;9cpLkF3cX`ijGZ_vsfXW{P)1V)Z;`u{iUPoID9p3x*V;Jn8Pih

v(1y1sqnMf^LcQK!p7Sh`uwZ!{hO25{;S@}&HJmK3KdynPmw~v*c;X!SC(|! z#z}HGNd~m4P3<{1@l3>thY=?pL9o>z2)3Fn`0-@{m@MW`X>S?3^sWx?34k?SQdKSH zzcX|8o^qs&w6oU-1HcM;KfJrVy78}neEZ_^+z0wrSuuC|utWWZ>0b0Pn#`xkXhgKl zeIS(UpmVT))NeK$N8NV4-Wdd8=cv(W9R_`ua%DO*UCKN}sPA{H&9=7-KM?3oJm6yY z5mzP&JmLft3r!h&F`v@Cw;IzJSBa*z=i~XO?YehHB{715 z6fFoBGnF=rCIkE>n507>3Zo>MR%r}r6X8Vt!F*Kdy}LX=_fK!mu?GA$C*hJdYC*o) z-ceN9u0N@Hl$>UQ3!;oPvZ~ur(ho*5Px9!RkrWh_jzvS^H=`-1Ak(AAsf=TW8h zI?KhODZre1qd{&00yPJ|!{}T)gr@3@wQN=~Ml({(IwofBN3h)Kg3_YKlQa$^%4AC4 zrJPOrD=?pl%p$?`$DD)*4hjsX5=L_7RSbtM*$0NgS;4d^T@Tl6;-qP6@mL(0!hgAl z9z7-5>4pH52j=5OLgX2Sb?%41uB#dX7LpCx>O=jX_x^8qmIy32a{LBPdqL(!xDT%@YA`t zPnfmrx2p%hS~_%8;favFXc}X(rGElt+hh1;=HVebn2lN2;SxsJVHOQZKuu5z)hw9z z7)Joe4u>JqCzB)m-7J}d&dLahjcU(_6gXL>;?%56CbfQ4Q<=ln^dwg}52F$~=@8pZ z&Cy5ECQT2^P^R-^810sg-iVjnTOWklb6A5|^=3d}Nuh&1QlsgHn;z)u(PgG$g za%v180GF7*0qD+ShG2d*N?y)3|R za%2Gn*CeHCLilM77X?utMX8~#_2K;~9){7-JH5Pkb@tl7=-v2NKlm@+U;pSMDPRD~ zq>z#WC+IKI2ZrG!n8rTl3P<=kPVVyNn4#8bRF42^9cDrXzx^p~00IrZ@;3Msj~8Rh z7#+z@&9M@edjQeN7^X#L4No*JiU%GCpL~4bbGH;!FLLG@C+U3jK;i%ZK2P1%8ZB<2 zX*|)8WqfFl4MR8to)jV6Q(OABM`NHf^4M>sY#6hICu&UI)&T9(s2p=$Rzr(@2NS} zb|yK+?ac1fJ@2hl`58#DT14YbkQO$*y9lUd1z@x(Yu`i`C++Y9G?;*%uL2%3bRAb@ zfUN*y7Ukob5I~jO(cOur)eP;0M=>c$2z<~etIejjdvLg4?KJY>kNw$T!6+l6;NNuU zckakf)7!wz-XJBy)^bDaMSoUtfNd5wUDE+fAE-g$bhYV3j&_VicTg}>Pb{myWy?vK zrb|LSW=68r69g@j5>9+$>=;B?3CZMZVKngJWcWatmrjq0T^ILqCu=*DTjjdl>dsd$ z{NCGlH$VDUmzOs(Kz~-B_5E3r%(v|zMqH%=Kn9}rE)~t0gX49^5h_bNUacNHNz1ontZ`rWpOKF4v$<$Ds}<6<1$?c?7B#L@TUkl zDFT~){BhGcy+5rgoF&sKNqjQtXv&gHG?B`(C}e@WGh0lEYXbuxEN1W;$ha`ZQj)wB z*NaeH=tj-5S!#Ov2uk6c72kjS5LNCRAVkmK8BayCCLXRZ^7zpMal<&gZ5NKGIEP~QAE$&8BW%a=q@PrD1K6~bE zF}O!m)ClKd{awdwyW7Gc5V$Si&)sele|`ZVZ{T4?zEa^yz;9KD)rf@%#G%1-;Y$W@ zB(hAjkUrI(v+OSdk(1b?jGQYXWf@Pnz|INxQ~5!fjeOWRBvb2 zpXQD$t>IX!E=-Y*)d5p_B*0D8Dk6#*q;QZ>kReLVcYUd-h;+UXk?}nVf_y<0F;WON z#K>{IjSZB;@d?5l*%-l#vw=0leufe=Djn8j#CAj#rMN)FeI&{-(A7J-i0alMupj2V`P z!*~!Q5yh48s(~vxS$&Ppu$y_VWE936_y<82j6R?>XhsF+Y2onch-C=S^^K&WHDfLC z*O{mt99CQV-tJ-JpxSB{XdO$=&0}VwFo3ctWdxzRcOsS>0v^=}A+sNBH<@-|-!jUI zszC-s`j!_Lz?UZ>S1KcGmO{|{ULY#7fM5(q@CiCx%B(lQ15IDKQ^>JMjX4l84X83- zy?WP)8Z#iU5og2`1lUOEi+_}(IW?6H23i)QaiEGL5(b8do|a`vFqL|RAXeQ6GnAmX ztlDn^o(ypvBPG9&q+)S$j}^}HBwphJZ)R0iolFrBU$bdK(K`?CS-nDZ*J!=L1fx;` zDvFmleW{*J3RI|dfo&(vD-pPuo0I;Fy)#^-3NNu9OO1%3%QaY}$Z#g}&ZERLA-tt> z-4%Mo3AWVAo~>ZBUM3ZneUNLpck1xh%5{`lp^j2pLr1AOItsB6S;e+M!Mr9Onh|hv zbWhQRnAu;ZB#V>5N=5e7U?5)`{e+mRT}` zz-9gn3%*$tF$o}p%Q*sq*=~EgN1aBs-N6Mh#VqCxMf3h=$<)h4h?;9}WVoP!S|(js z^ahh=-lHf7r}s9Rdz-dgy{Vb1s(&s-W)h({NHBX)*Fa3Q33dYkBnN`%?#x{~2oF8&*k`z*+W3b!z zCwAQsT?<0oQKcZQ}h2qgl140dnKYvXd7c6UC19aFT(HA z0p~%8AP9UAor^PQ!OdPocte#|oq=Uesb+yyyuo4S__b6Ii`XDF7+tbH0mho~;djzeNox*ngLs08XIJUn zra$Vs<}Xo*Rr+1xsXQTOtGSjrCM&ZMI;yq%XUb5wLb1mNbZ0_J#Z>l#uxtzElIc5_(2AL&vsdJdHBO=Bn2e zzK!3e%~zLv%~q?8PCmQM*1q!gahJg?MWp45xo(jYIrnz(S=G~5Vq+3buy^_}!F+qT z&4PNWVv3YF!k&PII1pn4acM+akkQCuCBiMxKf1F;j%dtnvq04s)+AZFBU3bV*dogI zebKfzr)0(VYyv}6knVJGBPEB}(o^>?MGe5pqw(;;!!@gN&T4kjJ(}(z$-qf`M3;?# z{3<_VUHbgz==^#Ml>WJ8uDd|#pIv700?jU`<0*%7Xz6te5$XOHuI)}YQe=`5Xy{Dr zc!UWnJ4XlACi)5Nw_DX#E02}z&`@GEbNuUACSvZnLqDn__0IVmW=O`|R!Pjobw(-L zmXc5+=3#N3nJVfDa$PDMUR)*g5bBOKRkr{nZpHI78V!YkK= zWUJXEx#x^=EZu$mF;ewR;nRsX#`N@fczI&lUZ%P9aDgYB1^_0Ag2oJl0jh^UPb=Gs zd4Gn&sFSRnU3_g_ip*c8uKbHj60!Z$(O^JRmcJDlo=?3ar?Xl8?E!QLt@a zMblsEIPWj8IZ_?a(+-Ty?m>f=de~?cGPebuZy`7zKy;KVb+z~`p8ziL3;$MSRZrYp zsgT1kSd8YGaddOxC(KJeL=R#r$Z`^CZn(*=+XPq`oTkO7sHxUxXP6(%GfteXee&XL z9CULq2oqnlEy76*@gre9Wfx5@{y7ZMRTC>AEGFo40S_uejEFK#qUlpuO-8OMQM|Lb 
zsVCZm2_h43q2DRcO?c?e5i~;HxM9ewIhpcL{JRh*r(s&9^PrBOt{mi94zB|=(K~*0 zxEgi1xA_#B^P9s@L|aLYruv9aFAKj)r!;3tXeIglupejqF?uT3wJh$aj!c4Y6Ro)_ zJ!?Z~>5{{-YfYT<%Kz%&OqnEEK)JZ=U2r+2Uzb7V?d8j-nnv*|vktSsT?c!8=%yTI ztq0;TdlYrLM`1Sv%<%` z;FH4&47iD(n~eh?r=+s}dBnNV$K%>__Z4qbrZ;Xh@UGh^_0zk6%(>YS@=Zh)8RJod zX(>YJ?2uHR#{jLxr@ismpFd2aw0;MWEqf)q)`&XI_Wn`WKIjDX`eCkLvWOLAV zcdc|7-oMhy)L2V%K*9L;XU_HO;@RnQK_v&TP6w!ju3-KrPZLB`@fN>!kRA9m=f zAAYE-{`|^mI%Wnqn7#v>%b&}?Z?NvzQOygW7LBKK1v`Y5e2w%JF5p7cNCx0`WOa%r z0fuGXB1BQEv!GO0>icfdl6DE)27~etz}df8D#FW2eBg1kG_!Dlbzb-d2F^ zxfX)-`@?1sHM(K9)d5m<)E$JuLF))8*3mMg&yQAw`}tOg2_3Yg?{>tV9(@9deBwEd zn!2B#%tBO}0Z2&GL53Faym|k+cXR&2e|Pe_N4P-FnrB1rZgCq~dx~g-481Dhu%Jr4 zJ^8`Eyn5NY@-MEa|1}xP{sh2}CsFp>8ygkfq1FUNt+X8u<)56NU!MA>m)AGvKXNCW z8gKE^={tMN*thoqbuN(ciO=Mo$^IP<501LsRxs>#VgEWHVzhDdI?=wnfBF8rWBKPK zseoFy4{{3qfCoW3_9{=j&5imrnWpSOit+tGub+4<>B&Y?5DX9M%b0SBv8sj7VNxqv z{q?8SZEz%PXG}$sRN;vitA&ySZvKL!s#7R@Ncw+=g?c;q3rYP>hJ}7SvC}K4KdpvV zYK2iU9Qx>SWYR2*qKQPhffg~CK1e#-g4KAt+!1@w{rZabe}&dRspg)EVVzi#D4f1S zI)k=DP~plnyzmkFEhj#i>N$88qmo^NoJ7?cI9oBbv|MMvIzj_uyowfr+vwFuHf4ww zHDV{m!ZFK-H05QAN7PW`WYwb1B4Eh$E}aN5U{dzSm5s9SJkReaPk?=#)gw90!=vsn zY&3(`QM*;IccX*uezV^?YBrk1r&)J*53_zuDq{PvMX?PrYi-1wJ&Gj}6EfZh4=d3p-P<{Q`YF*_GEC6FM&#ud{&?>H zJ?WFiM(XOXHp8NAl7?h|m}+J<4Q7aoBW4aeQ?ye!Zi7OC&1dc?|GhT(AnfFK+FL6} zjQ!cQI=aMPdJ;6T`odtkD4jLiHPAtnOKB>U?X+1wZ08*=WcU)*ziun9l&cV~Gi#PW!VD6+5VUkh($EgSki(_QQ10 ztm3~dBsuO2wnZYDu0Sh;qN8Z_fbl>EqI1W_-b30$V ztEAq}7*rg?L7NRpkpV)O1X#vtf)`2tIKIVDN7m>`8C2gBFklu6V6Y=V*nxHPN(O;Z za_>)vlPV`u(B_>JS_glDLn~428Qfv*RJ7`_ce9Fq@1u9BSas0bFPPx?TnRjkXY+?W z$~=X)N~{%EH6ivSOb6Di?jP}j>B0N_Q9O008|d(SN{%-{vCMmw%;ZmOT~$y7!6_bm z7@^~{3@}ZoO@NaTmwt4!;%g+z2dGD}jX`*127}u3$;{ap@A)%OyWhS$Ilb{u&);9) zfaVV>y|=YRaD)H8!d?tLilo3l^}OXg>UN;~#~}G=^}o0W+MaiJWsCF4Cm6B1fLI+w ztZoy3`SOS*S9r>$?)^06%SO6PrtnqX>zN1b0mcJWq+YE}hxZFki0>1P^BacYi!!1D z;?~RF!zfnE(#c}UrIpD-KPNSSBzN|L;~{9km@4klHzyHL#o+?8XdoW)V6miuDjJlB zkt9**Z{GAL+c=ngka*hkL(H*aXuK2ms~KL*i}NxqPBt`D^W%Xq zN`7eljluu!37+3$a*>!EhLU^Z&4oBF`->bW!fU4?VQu9rzV@fn*a zhxs%bp&=>okR_v^fOwhf0gTsxnokb+RtOo*XV(g1kom~6X;VeTCDj-G&w|;UYmol~ zDXApkqsKf1y)z4<*#~L57>j0#$uWrR#*+mfDDELiJ9wiG+kESO*x6@Z|K;`x|0BF5 z)Wk*bzdo(O5ZMg^9)2MON-((d@noyKOqf;(0|uC)Y8O8e*a{F9)h>gsHUZ}=L?+{{ zc6Yzp#7qKhkaAmOUQvs{87?K<>Rm8Jie zP<7mOvU4e}BK_%GbgRZ$fhU;XWTbA-O5mF?AktuLPf9~wI+w$KDb%I+a@dzcUAnJ= zy;lr&>Aed0Wnh=?D}i4Mc9HmMeAs1T&rD?PMxmId8wd-t4!b%_l@SsS1wLXyz$oc^ z@Ly4ff&;0%eB8-tXX3KSaRbRrw#)s<5h1;g=YTSmBGC|7h2qL2&zxOfo}b+G{F4`; zC_PnPQl2IMUS8bvE?%PVD(66HgJOP&Sy0*s)pqyG(k}7(O%`*DXbN4D2oI6Upp`5O zITm^%&Pn2@6Csk5_|mJ+bm>+Dk%FjWU;FG68nKfJhj@!ir|?PGjFamhFHZ5e;7|Yj z?Dd-)#!P1#^ljAU=Ek0>bWMXPnx9j-!ywf>mV{Sy7G)(H zIhvv!m6oouRN17S&ho#}tJjO))uK=trP<{+;=P4?lV6%nL59Xn@8-=rRCQEy<#&=l z!*#ER&c<|0sGEm4dNspq2uX$Ap|n+3&DrTo1BiA3w^``e+2w`*hVI+xihWyPQU+>% zO~i;#kR!x->?h(~&y%Ueh_%it1TK_W(VT$9;U3}7+H;Z-1?Vohy?%ds+Pl8y-4^kY z9ME>3Na<(-%Tvby^hTVHjqa+nYl#TQAaU)2?nvOR&Ow80RjtmUh~Qt6oR`4Yf+bzn zt_jQ9GFaBiVOcYZJw06eF8$m9a|^O-(v6V4~E_S;IQ9lx59e8)$AM` zgpHuzY7KMtPOGoBc3RKK-p#g7{NeaGWdV*Ho9v(<-Ea;B>+l~V06)mzp=sh-^nnas zGy;XI)9xM<QXxy6d`y1{g`uw2kpCFT;M~!!&Lq%OXlWf6XOb1;qJ~j+GUjeid}= zwvP^*on~kMuz6UoN6|s!@Tf5u9`&0!bmVRt6ZubeGVX9Xz>ah2=s3rbm!UW^UHAUq z1zJR}zujnjY7B?CDonZBkLRStEbv0DuW}-!(JvFuZ>Rddzqr0Rd2!zJU!9-4h9O|0 zI0RV?DA-qIA9oyk1h;=n_eU;d=WWdN*RELq)4RGPS1KC9YkcZ78XgZp?!-icrlVlM znIzF`PrHoWm~m10a3_h59jTqV&16o&yk4FdAn^VG+yAvm)d*CW816i6NSC6|THS-{ zQIS;yZ#Ky@kXS;x80WGB(z%vuObPobiuYm#JEF;1+!N7v5op9B7@4IzD_!L!)lEs^o?|SG7%pQX<@2YyJkzdL( z*!SfHBUkV@m@WLs-WvpAILkhx=v}tx>IG9m+a3JZ+}XFtJsq{k?4A7=nZpy#%tf+0 
zcASq;?3m6Y{);qc2~^Qp1>)2oo2C7W(HExxisrC_v)D$P_gDBxoP_zQZ3LEFmz_2d z*G{KX&8E`&QVWIU;1xcsjkC)8NVLgBiXcZ!s(Lga53vQn(60K}%Pe6YDeHj1;N#e5 zBMJdU#``!9)L7KzSV_5G@ntM&DlRPbpP)ibS!I&X(PcCTVDJ#|c7md(J)87?s)JHF z=!n-lPkjBVB6AUU#ZHI)Xb$9}?00}Wtr=FPg#{UlgO6AQmi5DWVWxpz1p}sH!jyjH z3}x6)Ui(5rgus*!l}!D`P)&cg{UU94XM+TaxXdX@cru-Z%K_0KvVOAZAH8 zB|HGM5>(1WSZM)4fe+>focz!0byxPAUH+XFX*Ikr)NC5G41L)cYFn z@awvVRw2rRBzGq&`$sgPTzfQzgtwxA^&xslIc)}Jeh3~4QYPwoD4GqY00hWD6K*;H zc`Vs>ip6~HC{DRQBumf)_+Dgh9-RspkHhO!xSiC706363q}UjC;UkI(lMvW&-l3nE z{j+(0pr`5uV^Vwkokdk2CS9@4KmP0OGIQCm#(qpszN--z=q(2cboj8Seok&YJdCeuRnAEE#Cb5wZ^nU`B6QY6r*T6!ae$FMWn``?+E-^JPaQ$> zGmZSQY`6tR1|Ea%)yezw8xm-Jkg(sJT=l$v*$=OJZ!d3pRnEZ77jOFq&3p!C-hqLs zf9Wa$JNnmc3Iq$LEC>=&kPs`FAmt@OgC7b}B=8~vASgC(&Pd@YLbEtAkCR9}f@)8}vZGf2t0cdX% zc8F5u!mtchxwDwA38Zq`ZdmsEK;&}+|H5fXJ}jmc2XIv{Kavr?urHDoSL<*F5WelZTji1u`4SgDO~=cw z9FbA__cCK4H8^Y$H8||H^HfiQg{krrh9jD=z!FSgR3MLXE9R~YU0_>H2O{m}Pt59P z_P}i=(=u?TJR7(q7tVC?pG9H21&l>r;rdzT?XOC1JKRU;>>f4RIn4~@KT?Smu}qhV zHcCKF$wcT&o#={OhlzL`%{bQjs&{>Ja&>e4<2B8cB+#0EU&RisF91FAVT=`m*p1hG z+FUOnDZwiexq70cT4vwbCCKr$`pS_U*CFjH+xH4iQ7B4jo=_r7tzZc1l8Lv|K{K>O z5@yknApj{H((^A^-uAY+iTnzN)pv&lkoecL3@Es3iKu`lBRf*Lu%KH1>LhUB^b*Rw z4Izj5YZr8_kG_MHY&MgWR(#Z=y3QOivr0;iQwztE#}i5NW-%`gs)P10?J8gDfJCTmzmh|cN?}puEOcS`I@MP!)$Hel z`cd^2wE7=9zIUHb{Bk8r@(?KUu3xQX*wr1EQ!lTC56>Oe=Y+)IMCi3>yeh5!8j8TN zst9-_<&-*kx#CN}K>KB{Ee)(Eohj4i2X%=mEc@NfmEXI%QqP|uhsIh;+j3ph-mkXM zf3ekSN=};JVa1pQ6)y-io(G&eOXpyqG@-$Kj!(=j2PXQ&I}Por4d^>drZcP~VzCSH ze4|8rl${}xWUU|LfMa-F7wUow5OjmqlLHtzH!{kjWEs_`yYC{ZU;7-YUvsE_ZGEa= zTZ`z|95P!B5-Sp^955N5uE)@7)kl+T?WZ+i7aHg)_qE1xgd_uw-leNwLLkZ3s`}@7 z!6gbC_pwg;3&Kj|dK_ssK|TewGB#1O9BV&&nzcvGvZg1gxwK^+v-tNKa;Ks1p0PBW zp*&TU#G$X=Gx0>uiBc+&T4va)x1~w2HVwIBs^TO>bXsksK1ZEu`{?(hw|3lTar?Yb zYja6tRilI$75$=4nw1iwRHd}rE;BTjaXZ>qoL({tt=W}Yn>SX@$YsN2%o>TBPH=B^ zW{Om%4evfyecn}^s#pNm#vc|0Q7MkZ)KjX*0hSF~55IUor+?YXPF{Mif zKJVYXJwamk_T=Sv{`-r!y{p$f^k*QC?0KpkNx9HR3%h(D7?TeB^mMS6Q!8)z&a52$ zvi$NrVbZB$ZI$<)y8ooG8m?~51b0VRg>mVoVf96n?x?SX@tSC!$h9za){3R&aC!qm zUNVWXW-QF#b}!*5iGGAB>5+|2YFhcP>hAYwk@!eyv{U>^sqzgFT!)qZgYH+l_uF(i z#5Q@|niK{AXWd{Vn=Xh?!!Pm7d{oyc(cRfhEeU)PY@XEeE-;}~wkg=q^i^ikqNe;M zsvU+oM#xiq8ikv>w>2rNOal6`e@KZgp#+^XGVia*_$ovRUUj|ac~a< zN#AQ(Uu5k3xG=-5QW9Q_w+8L;`uzk@Orfhp@NRIH6GT2s&K98~yKEIE#(hGrr)eD^ zRxjI43Xa=txAq%uG^3=bVubZSlKUcvTj`y3b(cv=1q(pmfRh{0@UAW^ZcomOoEbcC zdRg{?UCRK8Pwa$Ol^x;b`b9BT+%AO4yC^(S_D*w}KRO$n-0qA+=Y|KfF`-_o|B_%KD9sIZIx&u-? 
zf#f&N1gB!2Lgh^sSFJ!ru_4OPrgZiWTI>8dhneDItlblRwAzZMlB7U7Bsx=`g(#H&N;yR2N8xQFZM2~lLAa+gz4Qw8V`aIL!3U!;rnbON>OB#n&I$}wk#Pcs!Zln_}TnY zKy<=h>2XB9<;Z=cO4<;n382%+NY_pohqcHbjDretXXr|2 zN1kK$S@A35-Sho%60(>_?I?3lh1YU%<-fY>_59PfC!8D8b?B*8n%+0xsQpwNZU2Ce z<$fn$0^sw2$B}#=ide7KWZV(3G%5{0rwt}WD z!gg^uYpbW)jCG?zXp8DG>qD;ZcqR+YnNjLL3N9 zqI<^_Xu)kMh^o}p|J|YW)iQA98oPiyJwG{n+k2UV+lsH9!}*7A31qgh64$+S$en_8 z?1*#bZxxr+X!Qr}VR&%Z4eIryu+R)n7-yLm}laualrK&h;w95L;ATV zL8v!{4$jG; zM?TC+`S}m!InDq)JY)bKcJpT?{p@o>+5R&T)f6Rvap|94zWdP*h%kj>e#evXgzDzl zA{=il-K21``-EaJz43yQRA4BfN12AS?X1ZjQ1G*vzZqA+nl9#4HmfKZot3*$_GO2E z{_bmEf(FhJPC+o3&eOfO!3SilQT^kX60rLToWJsyy6izX4Evo%`v5b*2hn~jZ1>y4 zMz-w1l7aZ7FKIv&p0fiTu?qdmB8Ei`#-V>3O(+E?6>9RasM0geFa7V{o}XR3 z|G_^$dvU$(y}G ziYbcrrh|CzE)4btX?uUvY5MRxQRgLZRSN0uVRJu=JPnQg-=99z^2GiINQ|)wOjf9y!=?_hj*zydA50tr3ixTj1G8j)Q zcQKs(&C~PilLjniv+5mkK%H3Z?(Y4;@V5565BwAL9*iHqZttJN$9g~9_%ALmuWyi; zeRuutTmSm|vzya5RL0UoEERsJ>3(SUF&!|pEYs%tw7tKNO}dYs{IT8eb{Z$mI+}^4 zDFW45TCOpZMwJA@!Miyy-*gW}56~j^uy_ytp6-35;9_0yDHxGIG8ZLybRQLV-)#1~ z(SEbJ9}epEcB9=Mga;9jolK(T$B)mL`}lsGJ^>9W7z2u0EER5$2;*<#7cj9_XJH<_ zg+~>92Eqcowkks)QNP9%^cDTi&5}%0K#e5(%$V%(^={_1W%4U}?K*GdK7dcrCT>5s zEza<(iniOQV9tX^wbjfDXB_+k^!fT}sh8o33jh+cQ-~mC0?n!e<<)J<|E+Hs*M4I^wfniv;+gm%LhM>1@cKpPUn$ zjgC}rUjp?^^>;5DDpR;q|KtdfF9G7+mEb5iJO=rT5JknH*a)S<7>t*bN+S+iQ%sLI zAWFxJ^EUzP;~@MPr%^?^V&iJgufw7Qa~&-;oO6^8hX_*E$;xKVBZN~W{cWG#Ze#bC);{pe2>u^T+9JW%I9B?=ODx{+!ZHtWQ#q?`!k7cbZIw z@oX%iSfEr7gF%$)gPh})L<>s}03}e}OaSdFf7{$kA5v8vgHzS)<*zUQwz*w=K2+RV zv6JfB^Y9Ai;STn<&DB#d^ei`#40&5RB^u@n%!#+|E#QQ8a%s=-5s@1TizRqya9g98b55qr70zX!Vz=D4z+x1zI@VTv>ST>E9IbFV@&^aIv zb(xUtOJ;Q$k0!pgBv)YRCi&!Y)BCDe#4d%inJA{>v7M;!Zd83tCP`cI6puBmVyI!O5l>XtXcq`IDD$9G-eNcZ;fb;GIlo(6!Zu zv7WX&Pce+80Yg2xUXbU{Q}3_uPHs-$c#ZB}bN?rAnAWDjd~g?~Po7*8%GQ$Mu-1QA z`PEvj_QZRc3>M>PGN%aTJ!)0EUBqFu_i32cF%r!F?_co$mbaVhhn7*;X!Ok610kui zN7?SEJSB~R{SK%fy@1yrY6SiIz+3ho_G?qWVY&cSK&rpjpU;NI5WBM9f!hyF;j?JC z{DaW1o%vorPF75k`t9#`y zoywO8r)B z3G_p|g4GSk$t<{C17|ZJm1=M`25L!nbGCB*mf~%}7TW!mLNoh4rk^X%2&UK0^!rd- zbxvUGTll@q&a1vlHv_+!StFK9oy?(UZ3zxwzsAyj#h*pkG`)gu%kk}B0GTFZcS+Y} zyFp!Z{ru%{1muRUb$7+tmpB0>U=6~R<$j~tf~(6c-GV7BP5gQeMi z(PRY^p?(L49nTV&a)%^cOrzOn91;$WUfpj$117#t6LJlzQ%zk8u2?RmYSv{4%fE-B zd`Y)ue4*XUbOVw%N|uWuc#u(;tQB=!akRW6cX=O^5s3OL017mxd64;_>bR1%A3A3l zy-HW24}7n~f@H4eYO$`RPctbi_|IDMx!HKt4=`mFrseys#q5O0+5OgHKG3g&h-QwI zHNZMt#}Tus+awrA@mk&7`I2kjO7+R1uGDX>b594j>-Jmc+!xIA6UBwB?$>M?48I+w zt-U@FiS;&8i#u@wefIhbDw-B9g%0#PB5Bg<$YnKF0M&u$-}_-}@9)WiQu?pPRDJe8 zA@7g-pZHfD8dJ8wR!P5QAX?e4C>O8|3V_oJOlrfZO>Rx{hLuX9{2(|6ikLxfe&Xf{h$oF|7Uy8Tvii+sON zbMj?fGFjZhBQHDAVY>1FvnIIfbh{lT{a~~dsE$;k`q2?y{kcxI-!DtGFx9Wq$o5mH zxdbrCwi^y=D;!(B^_^CJ>(?N`g{#M(x`>WfCSKnb6H5!M5#$Z1P>-DI$f6(kS586k~O!4s5UMOwbi}~~ zM^bx$Y8EC~5CX3h;jn-lCjng_gLp)F*i^lv$p7%1MI~Dr?W^hFNoGT2)l>F!-K{_ z3tgPU{a_!k`yd3ez68-$ja`IvtNM`BNMj%UM)?Ok2qSh#2e{R}vk7z#Le3OJ!aA2n z!qk3&mja58R266%)$0Akl%1A(D^kw4AKk`WcS(F`D54J&10o7kX>82x%l)75z$sUg z9!K+_W__>jo#dXWYQLjVq}+m@*mJat$8tCg%(%~Sx~tYyliE(#u__Zz7siQQ4m;aP zT*DV!0KDCTK_2E;h?F(th?p_GIX6tZuXZU*O z+ZRuEfAfM4IxxuU1vL-WSkP|qf(|-suSaS(Lf5+Y%4&j}ZK||0vkARL$|0xdDi+<_ zA9-T0cN9D%3(TA{^P*2Nd1X7_7fhOw8*=C+lNx#`(%hd2hDShnQi)wfAo+b=1k*ZJ zT=o*^;6PQVryeee%HON@^UoAdYaI6X51Xx{upQOw&0x?yYz@P~{(gq1<@+-^$v-Ea z)n%U5b?fDji*^5FH1JalhWFFxHmx`XHaM7i6pt~3%MSdcM4bq@DKv`}Z=;H4yfZ*E z{?aF3z66u2>)1C5E0#hWhM3TJDaYc+31ucyMa?tCHDpTIY6t$2InQ+Z4fUexty;qF zmmA8inZ1MXt6K8}fX-o@)N%+M2cLX8d~To@HimtG&8Wivr!)AmLqDfFrzpj~w>k7Z zQ}^ggE(iNlL1ur71Uan01G`9z9=}2Z(ptRd&%Nv3>CNSpfBoj{)lJoV{py{6ad~zL z4N`Vd-0l7F?(*u!zy9&UXy7J&}PDS73giZZc`k%xb--A=ts511mn|Umwi3_2P{z#4vH1f__^ID<5~Rc@ 
zgashdD<7D|TVVbH_$l{TiAPs92}Y{bKu|`}3`gVpCf$Dlii-Ix+LaP7djp$LLg*Uh zMW9*pJFJ6XO@w??X6Z-zSP7r4k4S|Dxpiyc#3ILjdm*tb7CDQeV z29UbrPf#Dh?7MQcnBdv2lRNou9>n__4`Z~^1c?mq-hWjjC((Ak)>yZMp25L|2M%6vPE=MT`mm=+m#r+&dh6(t*X zgQ(Sr+DC&U&^h)y!^YuZ=djTVN~n<;AVZGK3l5^(M=ai@pH5H?NdsIt#Fsw4B#I*F zX>LMJNKOmR&+8B7pS+#HTvE6=&8dFn4*sh$yev>i1nx1JRmned6n%_FRi4F8oXV$( zO)7VFXGC&@8UkZezdYj;_FVq9@+%a~Sk}bZMZ6vQK?%$_x2@Efs67|!JdH>$#&`h2 zXzB+kT2lNwAQzP_`DE?+AQ_L5pTR_c`Hwk=m<&1pRC|v8mD6B;$6vPtbacHy-XspV z2RX2nm)O409#JY})UW$ke?FZAQ_#1uNJB8wLN*^XkMBfOCIVg%fvomi?-Z9xN9%;V zxs4|1n9fr{e`l)aOrQyYKw%tq#o00P;eO-*rfUHxy$yp%fF9WyOAJQ4NeQi*Z7ieQ z&gZgE0Cv9#)}O3YaGtmPSV*H~hvVOMHt56x%>SLoL>~|qfnn1+1zGdI(4L)KU-zzV zDg=A)`7;X2Q~~1FCe$`I5K|w8=@eM8MlaLeiW^eA0MnF0f)QU_d{rHiF`u>X;hU7vDyoEXonyaippeDz^Gn zCAuSs>cXj57L|7=be~xV)zi7@Fi)PzPsjZD#mVWnSG`yM>CF%RyQ|)J7{A`TxViSP z&;F+e4R?2~@&trnl!r=u1o+#3jen~1f-ppbf0Il>hg94iqqO!hclZ|t92NS5Db+E7 z7o3G(kWP(2Foa)Ce&m8fp@Zt3D82|3%m>a>tEzgC5)P{OIG#|I0x{~z(IrTuw$3w` znmK1!1O%Rv8F8Xh-v*dkKTu48hSEK~a6!3o1NoZ=0tt>PTUW@2OR~5^R5Z$LleLI4 z^<%CaouZziT5!MddpweyRWxz3oUbN65uO!=X10`_B}`N&m1tK|r6mR+s*;1fF0-Wh z9SFL&cS^cVO~9aqvzX>=2}6dht9ne}C2+>`XghHOs3fqj2z|q9vgMut9XDlvcxx+j zP9y}3gG-yoIwC4r%zg4`GWp6h-huXq#8Nq0l+`dW}HdAxZ`kJ_e_vdHVH~!g8?+Rr3 zvzyA+0w6G>^oPsNoK8~T6vC1TOyUqpq%nvO9LM#Hx1Df!LOD+rVQw`VT(P~;mV%mJ zj=q#oJ5G=e>MQ)vWJaUOQ6x8e8*1`T!>+s+(IvSMeLex2{2hl?gyQSK~@T1+N z%o~4_dsc7LRTp%nR{s+kCQXo>{)un^rcCjA>A$kT^*xBzsw-!3fh* zuGJK#AH=g<+i^6;)N;O-5h?JD3~iipZr9jFGYvc|>Ajw_x6_AzRmr!>VhngbjVW!! zhLoen;6<7Xr;(+K@t&@5b*7ChXwlQd>br`%$K@L_h>q(~MdN-%DOIsuJjYNvOeh}D zxx|&pX>S>nR5En7*Q0zg%N1=ll|1z{C}&coc+FB?!?Ri=vcr$L$G=p0v>lZ?@5$~X zM)%53H&S#|57z%M9bVXaK!pmA-l%$9=sg+cW>7`Qa*;M{47w71{h1eY!75K4^T$Y~ z!e7h_`7R|qq2DZQz$t7=GcM-{T~mx-mKb(K3^KNLtLS^yIRa^G-$^?0=#4_W2o&nq zb&)a4Dz3*NLs!L#EuGBJC=XNYQ;qQP=!K5M5k274BK`vht|M>*LQ z&WNJth%*zr`zuU(X%iP>Xc>^{Kgq&h)zeZFA|zPeMOaS0r+r&s^0gE}o$1;%nBjSK z?kkd6e9P>N(UpIvJ4SW>9(4kX?n-^vU>3>sQ^GUxt-scmRx~7RaNjr zVb3Egj^g)apEZ#2>Gp9ER@6$VF{Z2$y_Lb{ee=wFe|B->zdiW@B}PsqWS(is(wZiV zk=dhww_8$d(qN>%YGObb6`yZo;ED7!K-tQ3w5kO?E{030@+Vg(w_d%@SPAiUR76teV`Lj#3dg+O!uC?bV zi1>mD&+O$`FBArRWhM;7#M|<&djC%>-j^3W|MKc(@2dBbGc6R4gjR5~F&Z==2gYN* zFO+t$Wk-@!0Dsks2QcF;F@cgMf`3{(1O%+gmi29mkrb01Coc*Ehq_Zf3?UlXYtrpw zE9H;PZAey{CDY&*vp$lmQYq)zEi?JK2VSQ@V%_wWRBESu3ZSWuD8VG2zH!W+Q8lm0 zEmRS>RLZ=eQ^VJiJ?HNW^2Pz_8MJ>rW8j2NPfBMGQkM7(p|jyE8nbFIX~mVN7b-BK zVByr`Nig!x_AYhi9ZHXi<^WQkZx;aw`*SI~$Lc#M?3k4jUB9IKHjm9~oD3qPTq7>_ z1p$&eAb}D$o~a{~+d{WWt}x^T3ns?XNWd+BIHy29Up4Bk(YyTg5EU1wdKWwuLiTGh%IdMQsP3YxN>P;sJY}Rd zV0S2J0mSpX%oYj-M7fXfI;6OK`9WivXA@rL4XBeUxzar6@9WkkdkubGb}5Vol&S_p z$qFP>CUmyITTvg*<^Rw-MIu({9%9X{Y~D>X8K3K<_M#CNNv{TAt@8sl-GSpf2C}SG|*$DwkG% zDkAYXudqf>sDvLvHnrLUQp30>=kQq3*ensr!bcUrP&m|I+!|}%<|I)79!0~sWCGi; zN6PZ17w6yl*EiSx*=6tQikp-18BeNRK0}nQMSm<>R{`|wwPe$eg=~&85Gf#Cd5Ac~ z1JR5{AhTkP^A$qbZ7sn$v$JH~sxo%NZbbw1z9!YQsF&GIf*7^N@-dFqqI&i}0uVm> zoHk$nP+zoG^s5wOQTzN=={^$%;VwMY`hiVEhw4yiURreI2=*wYVj;!Z174zhhzB3Y z>nf1dQhr=jy@rYpng|G)CEn=-ULTq<6zVu24@%kfK~D%~F$k?7T16=rx)~%N;egyG zNyz4WEWjAeKC-)QFqkhu7NsxGqF)vfmGDG?a{-?pplIn3#mTD0_^D54S);V0; zxd~?(tILzjt1=6>;Xq8Fi>au6E=Y=fYW#WOBtF2v^oiY!zJa zY#%#c&_)yMFl0_}(ScF)AIri~o=r1iwU8r?qDh83Wft4dShCE3MV4hp)XLKvB~NKy zRmPOFYb`rOJlEAil{1*jQ%epC3pNySlHrQ-wLgcO4kORWLpJEGcJ8Su(Iwc2iITw= z>kPBW-3&4yu=)qvC6pQ zz$W4u%~C5erFnf%G4O*XkH$TY`S(ub6sQGJ%o8a-UObn~>&r?1<+ltqkyvrJh)xkV za_vYu4n{~Nwgq_|o>?}YDr3R6x{df)XNrXUBA!(A-#MKHVRZ3M1!jEX?tGnmIFijo;r%%qZ`Y)faOKIXC z$VOq_Z5!oSWWbGOngMwpmK#QFtC*LkNg#qCjX|x;HJR(Eq;hgZm!c#Zz@M~b=%Deu 
zPcQMV_^46E42$i?KKpVMd>?0tv%ODdy5JW6?#u5x_($gc)g_qdyTf6Tuyx6>f*qFBp%1Bhp?-n&kTPgT^MEuEq}3cQ&S4oLxy11{81Ds5AL=DE!Cct9?cJP|TP`zu znc)|V#zm%aeMg^%ZyJxMqtd$+gllajn^2OAwnZ)3itO!JhLT(OQ#9~umSu6c9OI(v zcyoD;LLQ(*2-Io#|Fiez-EAC6zUbf7rzm-jWkLXG>?B1~*R(|0yp~8kl4?(Pzdkqx zpg^L9txx+P87Wb!EN2&E-rr+mZM0u64)mc$VCZ)xNqDe@Pvx{ z>Y<-B-zv6X-5S**a$M8>zkt*67j=n9z_*9Ss{|Bv+vt1i&)0`RR+iY)3pqtv|-Odg6cPvv z?yy6161QSSl_)SemrS#H2hctSLx?Rq?t96Ltsl58;7u%Tjm2g#A!KD{V=%j$?n;Zu zRxHt0U()kjWkiav+o56HF__$@7`uaml$Ov&UftR9vlP!mT5fRowA+uuc~JO*PEF0uJ-743eTMuv*vVu?aAj=u$46huRncrEZV{2xC{w>W8Ru0k_oBfLL0DyktRU{#y{kjZ|u8v z!g#LycsuhDA~#VdQ9pS-!^E0#5`TibpOD z-F)?muzc831|rJ$60wv*-AFTE_?7LR<&2M9Oq?fz`NCK3Sw6y#k-~uw(NjhI8=tZV zbm$z)C?q;(X5GO64Od?lw_FajOfO-ll?-Krw*%^%nuDxh!{Zk|ob}}D=kS}BVe9I( zEbtoQ!-})dB!Tbk#j9VtKVM#aF!Jol*=r+lzdO15)e*+~7&7W3xW|4xUUTse-aEqB z{H!I6+>G@wl&g5MJaynmQVQ3oUtEu>&%ZmZPIYT+X?nb4QzY-uzyv-qi1Zos+1$2k(0lL3ee?9gKp z6-V#klbG*Gk5a-X|Nevej9e_2QNgh(q7Qg2+R0F|C(+Pp!a|EuA4OE~EY9jMNmr8x z#@YoeI15U)wNKB)J}0vn`4imCH_?9QWhD0Nuqt zA7KKLn~k$+gf#|W>;#Ny2b{VyZX%Yt(Ud}1%|ol{$Fu@5w@@(>+cL7lXpB1(hmF;8 zBFO38GpFy#c#KVPnM+PoG|yVIAIveC{;)Od0YXSFmz3*iC!eb#NU|zfLtGfe$uc^F zuw3BjOX&k8{8FQ?!Q$rN)7To#3Z($>iI0FQexL*Lvc+nOq{nLyD$H(v8Q9jUy-Yb` zd5%}2W7q-3X4mX%m58lQ881+##Q z{^!wB=Ew!dW0o$&BM6D^@zhQ*Aw6lH={cl$ZTD=8+K^ zE`OVZy0sF9tUBW*fJc|%5^%B5w=~2R1`-@DMuwaWAKEojV!vP{z^4FwQlX|0Utvk% ze}iPmK)#ws0RRLHfgt&oP#km-kH-n7`P#G+;ZlGa++U5U{Eg@E=D-?|F2rTQ9vR)a zk{4V~us@pMuSn9dOqWw@pb10pQ$ojJ2ngFJ)Q2o2FEwN)4%A9S6lJ`D8Nl13^C4m- z5w{cU^AsoB48G?)cMOOz;!|Rcdp{!x2J$Q(=(KWa7bHNX>@yK72YFXAvq*1XHO}5> zx$A(>u0Fi$p1phjw)^Jf>RM;~5jtPEvCq2FI^&k;!}H3Dq)0+jW<5wn?l`)1@5`#; zjswupUSdcSgEZ6O%B-rL_X?>x*pDyc zc*6lmG{9$J^l?JbbYK=V42d2oWuoZNIzxBDdrX}=+`A)nm8^y$z$;d3u09vRJ;!u2 z_81pf2|;4yh!nO>Y&0NsW-o&DK&z7H-gcz3Ia=+__1s?6t~2EjwPb%gnlDCO(uq=c ztXK)ccc_J0D1JAvVe+KQtBrDG-bB5b)_U9f*8iATu+2GS7>2dIIlR1$IlNqK4rdT5 zml17e2J0gu4dHdChi_*;->Aw4R2NillWe5%W~;iPJdSSsTDv+&pl-0N(?+6{MvX%h zvW~mNN6HZai?%x^yfuMU(8h_+7cxB#P! 
zxUngaKpXK@3;|C<&kwu|WNUlMwxDAHwVUo`Vn0ONc7!1Q_)M6arc~%PNhI%1*dNQC z3nCd5`Fq)sn|;@$tOwHwr4WpD>n9oKKu#U;u$vsM~L_GrtNCnUhQ3t29s*=e` z+hT%YXaHv31YRw6`?K?l*QZ8icztqxV&uXSV1RNi!_&Ozcy)bp4Ns#CdF6?YDRPFy zYrsmY7@vv5N4&#bb&d#4QebCZK|)&NMipR)FYWcLZQOD#x38!{VN(d}b1hYNlnr7e&8-edaEq+6rP)a*1t*A~1~&Qgs9tVlcC17Q}%( zTP@OL{K;s-aES0?h&_tR7>@60m{qjYSnB~>4$^z2KQ9>;TnD|QyG_{*!6w9Me)+bh zU(y(i2Sj*+D#or0zLP`G%jbJ;SPT#e4SQj%Ayg*_k_sZhYEgOraG&Fh;UittOB@d{ zt3>H6KnUbn+5ojhKv6%PU0j`Bch65xUw41{)9I@#56-xHdVcZl^qq6q5K z?=LRiBKj#wZ<~4%)rkBpS|rx&>j{Ef(RaPfgD?6pJ`@^$t}__`JmO-U9EREdP5GGM|?CchWVBrwb1xC>s+0vlx(u;8tp zP9O6LjUv4{3aURF4}22GR+z+r$;vY5)7X{vXh zXAjZtvQ%W^6(B|6&nE+_!xlp+qT!6Jt|_1)0t_iK#wxS&ceB*6c9bWv#VDO8{n2pL zkA_JMpo?iV9(^SD4tXZ*0g1M==$FyWEkUT#`@qXJV&mD8a2TMXMR4oJ+p}jChsO`vlH}l8+H%@4qbjKjvmpP&*q3sl{&9gp`l2i<2}T&h&u5jo6bk1_Yhz_GqX@fz{?G zeqaicfyA@CVd!XQ)c=$$fXvC$V53@CPkOB%#aIU%zXrO3jkTD>B!8IJp~?p!-g65; z2;zy&67wYP--7e!-YQj*H4tM^_s;4ddJa48Nd8j!TYu#UDRXe0oxeRZ`Y2l9;r#j= zDH^$D);W=;!Ot9M!}DS(X%7CiF)BITs3iqo!9TcFh|FdmZh|0z<1%om6U(P6AVzD>Pk zYr>Bl6ls7tTkSs|@4s1OJlg8i#R3ffMS!wL|4rdjkN&FxMuD%o^tdoo?OF`BN)*Um zMhOe^92=EpD=Htf{X~>U{7x{1Mxa~Qd~7JeX@%bC>OVQ`nf(RsoGSSN(%Yo75|0lg zwAvIieAh*+e|}Q^41UJwC(|R;n-}1@#x!lvm;IO(Ivf7Y2xTzTVDz$~47mRK@=q`~ zEQ6gb38_&6DKw(>?vG@KLXWXIH>>Qd=*8(Mb+TFS(^v{QS2KVvbRXKv-j|Qy6XdYZ zJR!#dwP6tu%oBRE!d-LB%D~m~8g`nP6HGFlow57Vn35JHb=kFD!cuY=jETwKB>-)~ zIUsz`3Y(zmVT{t`@^jdCdPh7p(X(fm9OQ2A?wW6x@(JR*&*USd)KWX868b09`}*7E zAo4&(f3H`Rysf@giJsBdD!QX$Ak$47MY8%x`71#3wAvZl*U^_6&5j=3cp{A4IpiuD z&BqQ3Ncn?d`RP{my=^ z-`F1>bcz%}uaT@G=-tFr!b&?T*RcwQ(c8X{uHL`?_v%|CyiU`kdNzPuvEe9Lyoi2& z|F+uLtHBX(%L)8ImEVDU(!JZ4s`vokfr^3MbhcRfcfT^eug^DVy4S>K*lRM(- zOAPr~^hQP>Tin}1EX!mP(eiwcg;VKwVX>Kd$2LmjN8P_DR~8xI!(y-{I-}%|*sxSy zC(j#J@iPKwx=iLppM0~DNur&AI45^dW|*{SY~uYQsj6!1=A1#DHc4OL?Hb^H)7<@V>?Ts~`Bzk+O$v43 zXA)HdNc|FZlP2+^e{0mo0R-sXW3l8IWcA@{j9sim)8xho4WB6TQ(%M#7&b+?B7P_i z>}gsSOE+wU46~4HLmUM&_lUL9=j%`(Zw- z`E}bc9{1tmAP6c8$AgDIBwT?eUg37DiyWt*+ow#@vVYM?-&-cMAr}A2Gj5 zOu|9~;&2iyQ5n}t%Usp8r|-I*{gPh`7LQX4TzxrNExD1vvm7;O&-GBXx$+K;fv zZuN?ty_do1TmMekyJ~AdI3~eh8ip1=9!banRE|UZ_-H;$p|1Nqo-l@~n}cXH@CF`F z?2+l?+XL+e|KoSQ&N`3h0>6%BlsQdNWg7%cGQe&+rIT2@d>ey{m9iB zmo~)&atEPa<=;UxjMIds05_IV{02rD!VC4i++2>JI)+*QUIKwogqQ{NAXI8P0YhPc zUcF0ncDA5JN`vl_w1C*&mq>yX}FW*(viQqAu$nJ8<6Tn_bDrEMdF8^M_UU+@Ip zXy!k22N77aIV3-Tn+(ZJeT`!?z-`2(H&K6LN;dyFZD}(49d?3#*RAo{6z3N3MIjCk zcMfbxr)1@iF$DM{p&kPZXAAHWF%)vT8ZYQE0hGh#`zN6@v6{o(bjKm}6R zUUr|Gx$$_|h zjoKbfyNnxJpxPvmO_1xTnuAFFD#F=MKXvd`ubvI9 zK(LhhpCIH=B79`)B)5;Ea(U#%WpB{7H4*4ZHmY$YY8y!!R9+p$Yraxbk-X*@@$SBq zyC?=sSCnI<77~7CJxQ)sA}8e2x4cCY@*Ve!=v*3-7;6d>b!|M&ue{`2&CRPAEC0-= zF!MXda(jZg?w9-H$Tqc_u50Ww2JO5E387JTOY{rKY4Z9eRcBcm(voXvC&4tuQ^*O*L}`5Z8w5Ituw$}ohl@^xk9p6#y(kg zv4VmH3}2?(*x)G<55qShO7cg(DMm&(?BM81?~TMtfs{XDNG#<~y>M_rpi3+uKENEI z+R|D@1!jIRg(;VF3i=(`F47Ev60HMd}`_hKrpnM)sM`E99HgKO>;(36ae#x2@UQ%h5LEX9U6KH4-P?em&78kGZBvrYip z@axx%XPN-%10k(0i)V{w)w()&k-Uc#Dxow~qEO=t(qGS?i>t=I51n~V6Gev7=M=XC z5#=Q@O{{HdLlc@!F}s7+dY(*F^ah5oV`@qC9J8lraXJvMQf}00xsu~wF>{fXfiM@r z^%dggDbUv)YYr(9AZVXGix3e>AnitoW=9al=;1~va@UgXLu$W&*@G@w?H7-&+G}yu$PAo4E<@;+8vL=F%#`~bR>1t)f`-*)R8s* zA>@Vxl$sSvw6XKu+2!TMB@`qz_sV4{I!?vhZOT=TErz=zi>>CMnF8r;_;E3{QVjto z0B!jiuC7SHTvxu&ln3dx!byP@(fKyt5wHH*=BpcQ)x9GV=_ohO%|UhT94qg*?@oY^U2w{QPx0} zyBnb?3_=fA(>^^rHriP#wSEHJ7hI#%I0QGtd~70p3&BSmaS`)85|^>lD&Ux7JT^); zM(P1CDZEKcUp3p!O#0noW~3b_0EL7=w>*XqyWfxPMzGf&s#>8?rF}&}c}lhp5r)2> zr7Ns2NY@rKPYUC;#fTpTZYB@&nnApWojznCx(9f*Tl_5K`z2jHXNu6IUzUq}^ZBBJJDCLOuq`9n>~MP?fQ zB$R#a?lT>B7*CBUi?hibge=%gAo_s?b_^068KSZD>s=Db3{#3y{4wFz%^CAC0SnEL 
ztSGmb#hKFG@1@s{q5a@?1`R>j!hILK?n_iXsLkB!xVdro+FPpkbfk%*_%Myspb z1YcRBv?LX^uPQ~myCAO?*DFF5c>RtjZ@(eRU$q~~57`doFWe2~Z8k$Wvlq%+ZiOc< zVf#zzUQzrL2H0_A@8@h6P<{C;h#pN2(fWgY<1wC-ZdwgUeu>Jh=Dv|`vS61`gViJQ zyIAZ_>yFai4V`uI1oBTISoOvnOX^*abm(TT*YK$H7TtQA53tSqSVDyrnj_k=emIV< z{?e^C4t~{!KW${XdUrxl5Se!l-W9#^?)3H9hj+?Fsc*hLx%~MwrqKo=*YbP_KSg)Zy@QRZ|BUs6t;IAs?%MOcB&UK=}EtY9|@wukS+BrTDd6m)S!|HRL%CA)$4-n`xh#xL>UY8XKW`7Vp-2yV!V1bh)#^^@eeLxb} zAqaBZiMaW?S!B2orysG^+)kpbH%&HO@wWo$FiIQvR}Br_a#}okkRj~jUNeWi=p7#6 zmZ*T*;+uHHPf7;GddF9`#q**G00f+SBm>Phfcw1Q+QC@@A)ECg z<*5N0(2i@~Yy@}!0Z)oi*%4>y)%Vl5)3qk2slwRWtQ4%{02=a{lAQ#C5Belo0wJ6= z?ROmc-9SqugtVAO!tZ9WG&YCH-xdY2OP+LGYb5c0XE^BehkJXSq!rhCt;S*8Z*CDL zpU;=}ZldY20fsU2ZyoJoid~{j3#C(nib36<$o2KB?$zn_hxZjD-=PC&Gnf4*alsE$ zAQq&KEag||35bstAq}Lu{KF{E@zeN(d*R?+{&5oK_<8eyJRO&u z1`+`tlgzoe4Gj0en%xj0WCk&a09~K*-<#+iY_wWpKrYxF$`*x547U(o5%(d^q1(IZ z!nJ~oO$6{VTy>P*O4??|XV6;#pO2nHd`>s%eSz(YQfK}2H<0}EEdUtYmf;a-_pG6X zy;*Nn4t$#@BE5K?#7!noKWpcU4anBoC^qhlsS&v$smMqF6(-T6KhP#BdABMy^r1s? zE}{*iP*X=fCI;i&v`EfP$dEMkHn<_1jRV?ZjxKM&2T1|#iy3%&xAAu&UK4pC{nhatvfeF!Xa_57db=oZ$5&f^Xpxm8=Rd-kKpZG%VO}5f52MWeBoxRQ-Qju z?E_92Rx7%i!m9ko8YU6&;F)eJdB{ReKGckpA+AR;2bH!w^dDf=Ir_gq?>l--2^A%a ze*>6=VFbC-xw1ScmJ~>kE`dKILFyYULDmo3{n}yDsC8Um@9HJwkzZf=hnUG8HQ&7-*f-d(stu?HYw!q!}-#(w;o_~|S-Tl+a+YhI^ z6oyE~c5?RcU*Rf*$gioWX)dALL%7&jjPR(id0V;l0j;CN<$s}~nNr_5FsL$@T)@8y zI%8-@RhR#|u?FfWxxDpIIhnOz*s{eg;UDF&GJGr&&vP4^SGfs<2a}?Jj>>zANF`rIW+K z;gc8-J#ml?oK!WO2lA5Xi}S$}qy>8rnfS&WL-)|wCc3$5>19eiJb<(tAS*OfnU_*<7n3JnIV{e z?Np3rp9tdWIb?^JE@hfRj^qN3*EfMkBrqz`6{x8Jf(>FmjzT*c%|IO)IBX&K)&u%W zA1ep4$bDinYJ-2l#-D||_XLmN#gz9uw+wg^i~BBtw4ujDNG@}|2s=Za2R?R&cY{T` zcD>Q)$NPuF{UqMo8@3w5W_{Rb4C+NjLBElVNax?ATHF{Se6OX7*E~8EGUswbX zjxM_t&bsvc4#WX3u-+;oa_XrAO|B7|rQx(aocF4Z`eiee|ENcbfmZy|LV&xp=KU=5 zd7^9TcEU9^(QPk@`47S$rta$YvS9 zr|T-Pt)^-U;7lUeCrU4Lvq^)*C{$Y2j3V*7BW z5D82$V@*~!XP$8^vEaxXvcv@lB3SlDHaqN$Ucg83kYk`RsF$z}Fn;xGfbsF8dEB<5 z4OR^Pxs#D9dRWmIL=ORD@Jn~E!T<8%zXVYW!!E7JWjm;Z;;8I>i+o%>9yd?BPjP<# z^)IOekQG>aBk2IkHeT>!@6%rIQxe@nWyM!V}q*RUi^$;$T!8(PD`iMeWmws?htacHX^ zJhOXApbyDc)2wt3qH+Ulsg3O6!E7ei0Wtz$p{IcJO7h+|FwgZ>{~e(J$^+MiNNQ__Y}vSD$}SR`%w}CNva=7ng)}XMs%{0>ipp?7OLPs^ zgsX8O4Tnj^JG)Of=qZXbi&9~epUY-LSUQ0|1|h9z;y#MOh(#5L}3;ta`Hm(Ka@Dp zx3EW}K@UGVAKj=ZVv7>^I2p*9A+w*CiG>L*{x0GQ%&3;f5&$V?{Cyx?!yZiU{qN&- zmm>lI6)&!v8T_)7CO_ zV)>L=EM8aGm?jjlkmD-@CCnEsbJrlfyqNM38ZB}YjWTHTRV*r3)9ogiO(23Wf?w1F z*;Ga?i=jCYF>pn8h=|*GHHOGl3|QdrPX4_Mr7|iw4|=$foOt01dB=KIqn9tEtJ7E4 z7nj|uU(ViO@!V~+x5wb8Fur7BhX+n;a^jl#aZ;uxfe=z_NpOSDuDYj}m%GVCP95RY z_7LE3DDzbJ)ydWMZZZ+BF6UyfESjnTR+4rh%Xg4`q8dWU1cCfH8Pz3c)>Nh^A7xs~ zg8?gIaYF|5L^cgcT|RFXp1~{FbrnvCv&JLRfDD`G;{Eld71-c}%kPm4&6JU@_r?ig zHg6Vg`CC1zc~vZHMGU##JUFb>LEbtr@)qU~6r1w1M7Pe0?V-|UcAmwHUL#OR9fSUA z(be<8vx7~eE(PHq**QcpGgI6#(nb~8`v~qwisd#X*WfiW{cT&al4ry9x#ZeFkI*9d zhoL;OL<0_lYV&^uHbwLz^2_OgS%#yL&89qgd-n7B>FZsV%tBijl)gw*EA1@ zp&cdMOex#o+WD1jDTnYWS#WGZ1uw(p>D9%D%U7o!VMD%gmar);De{@6UQD_r_K9b_ zl%Iqs)w{UAm`23+32{B(`t1Bc)PHt?H<-2T?Kd6mwIJxR4 z#3IqEICoK?PgRfGQNmZPvJb(GjYg$e`-3Rfw_Za!3zzZD-V%W6oxEimMw79^{uS-u zf1`w3bX-v}ykm)?u5v{gtJPuDx}hIK&bGujs1tz32U#sH=f!2LK1Gp&txiaLL1`Go z-pq8f@XEZ{C&OYwqnDoLg zbLi5G`PjC3HHSqDHY5Qy90(#n9uA`;pNN8QDP6U;2Ey+ ziE~>)Uu*K1(){Eof1#3Kgqy(pCSc`}h$+6}6>mil~(&zjb( zI3R0X4&Fwch9c;E<71}6y^`5~_#q>q2TTOM`DCqyAYUdCFPSO9QQ5zg^u1rh8r1Ff zJ_R;B-U4NiL!w94iA4{lQ8f1}O&G>ujVq=;-Z;JoENrTN1F(Z!uQMT*j;Y6x!4y}~ zG9^JH)K?H80&7o-2iQs-VQr;NLfwT*NNKbo{{;~gR(F&yh80NvWXS4|4BZvd3`H-r zCcXzMhN2bA6#kJ_fxHau4L4=Sy8RPNOGD~)U# z+}9qDT*>z#x`fsw*6R`#TM&b{MS=AHaLwYE*twsbAp4N-tvx9oplm>q9oTnX0OClh 
zo#CG2Fh}Z)cG5X^>pXS9GR5v>^$PnSm7Zi9pm&5yoE9-M*s!)+P*xWbx%k||)Ox^< z0Y_y8SQwVGkkvbI8D--f(GviTMPFXzp0<5}p&^e(cGf61?VgKk{%={>*BUiuxzb5yEOgRM{HU z&vwVt|NOH?>wRGvXdC6v_0-Qk^3Q9f50ctbb+map>dFN}_l90}pUrLh z^rLBLE`-e?s9Y`M^!5SN>5(Nhu`O*D6Qu)l2@@tzK8nc z?tGRKWGl@^t3k7ofrNgn*13D+=?tbZFr9#}$yN+}%A zKHVE9-@6bu__O37)t~|uQ2nd-A0pFZtkB{=j6h?4Yomv>=0A*sq_9AlL)?@z0D-~) z{`+}?Abhc*p8FJm&k&P;o-81$FB!bpA_wTUC__imx;u^9;hq+fkZ!c~q&F238 zpm!J#w#Wg>rFHu^3GD)4-J5lty}?CMb~!v6wCz_H@7`aY8usuy`c~}&vS}aLEMG>~ z`ODk0pSrJ4-<*7Sd)<9|`lr*kJ3CEE4GI~a;3P9U@_aSvB^ZD=TP;mbkdcFEn9`oK zT;C|Cj>}7CwR%0raT(HzN*ADX>FK`O!CtUrSlri7*rH3kOs)NPtAALp)#FwlmZ{h2 z_1Z1-f5W0>;@7ha#Fx?kdjKFmW%?1Q1Oi=WR2nlim{8E4-EE0V;{mDrHcD1Mbr}EB zqK)kKRzp%^-2DY30zumE@_v_LRp>#}8sP+)E>ej?Jo*(T7!38G-GtZ-`r#KBNJz^E z7PrmN7{uv{3h7cZKU6zn5}>?(566f$c$GJ}+1-YStWGTDfw?d*xK03qUgeE5j$9DL+m}5Y!C|byAbqr!VP` z=6?7Eo=NX9=nt)^>CUH79Q}x2hB|ieEG$6syJ7mW6|qIH1;(hv_bA|aKEqtYnH!sn zP{tKa72Z5iQKyLkZt#^awT{jGbJSr;qu^*I=WQhKHGj6t)-TAj{yyKE=c%@NkO(v z3i!Jn-MBi$kf3T9)O3N%wVU{oKWq-$EFJ8!-vIKFAS+F4(2Ep7rV-Zl&hQg3TVkn4 zYAG2GNBt3jxqTWTZZ)qXZ#JXq%?)V)pRl4jB#FUyj@9fO@%4?Ph*V@Vds*OunL4oy z`7wNwWu1QPQl&9bs-Ooi@p}YLiN5(MRrJ$0rGyA&_AeHu8$M2c^v$>F3mnkyw}TSD zM!zS$=-csl70vDp+0d`!avR*<;ye49}I2{1G0NO~Pb|Sm;u`Q5k{qumFv`BM~3K^j4mr!2Cj!HLP)0!A?phk*8Z> zX*2LpfoAP*4yqDQg{@%^5*|5ag}D+MaKHL8?xRc!!YE^*qgq#MqwH3%wGNAgw_ZGs zr~QOUZHq$*4J24_9KMzQ6<_=t7XXo4xVr9^HM$hYbCc0vJ9&;Cj2te$X@a5P?XB+t zLbjh+?uoVF$(*ZW>Zy5t4d<&~Z{vK`>$OLmFA{U_IA$M8M&a*IkFn(Z)%#brj*ywC zJCS}JoNR{QB78IJJb_$SHL3I@m*d_rukviP6Pj9mcaV&`^W|cfD3eO76ALUu9oji= z$TEA2&9Ja|->OFxPA_3|=zp~{-=Ru*^^5`HYiIta4?lM=&S~~=JKFJrmcfcD^uUg&Lr1v2W1V9Xw!Z%V^jxEO$Q6CV|@GpbBd6Z z<*`AFU0f)=ZES*4N)*P2Je|bDEhvK$e_Kd%&04cV`kqg0WD_^-sp^jflLSvub-#&` zvv31&h0avOwUT zQ=Axp1)t7HS6d^Rce~IQ4GF*DPZV)UBtDB>@{sp~6RqpO6n#upU_&rftigxgKdeXx z!0g^b;WfY>l#Z zfuRKqho9Sm<4B8;lT`3g%g+UCw$6CP`;RyUBnY6k^4HqmqL;_s?6Ux$d8q0?wEiU2 zl1A-sB^BCQFo&5JwmTVfF9msd=&69P5qiwOW@G@IT`nfrXf+83*ih*~1{~?Z+cC-s zm}_}^e#Pl%0?wm$Deq8f^}VzNo4fPfYD&raJEvl zPK9(o0`ve>^x*V=+g7^e(E#&TxV1AJdh1J_o3a9o?%wQRP^S60MoswQDJACV^=It?;;44R38K`2t+fHWPtdSsB(PoH;m^c& zBZ1Dt(eJA{N^78@GpA5b)n)Irj4}$&o>r@~X76lAH-!uhOGbWp_4f4S{3svn8su>J z#Sq7#Fbn#R=?p2#qoEA_s8=lU6mUHJ?iLJ>s2Dd&@rU)-BCY_JXLMHP2=F<+hpwMi zt%kmz=3ysW$re-=I)%Srk=U#uhdWFI##W&%14Cv>I1n#?vh?j?@Pg2tkIhslJjKH; zk9&U9eH})MYK80#Pf3%9F_=Vpvd(>Xn|HKXFY95ay_ z5=w9t;KD8#yh;KP--Td;f%j1IyL+hoUuNDvQkC`|+00pfY%?d7Fw3j=+Z#RY!MoP@ zDGzDy%RjeN<1;2utiANWPOk4!m zT*NoNXI`%H-X54uz29lY=KouJdk3{nyVbonK2LSxelCirdmJcmZ5TvU}wvFOo~Y#)zmKSnwK0fSz73G>vXn5ZO)x;ncN4 zQXR+l$)HL_6*3g--D1M6)ehVJPSWZ(2lc(ZPOsJ8@73zHey?9hxPCJ^`t@&2BWi+cwLoupxy*Fk4E>@}K=B;G^=)JOpZ&`lIHoBLqofFF4=69)^S zMfqYu`Yy?p>Po8omo@QU1`16K7PGl-X$zXN;R+AxP5Hqe-#ore{UAu}TTCk2{50`A z^P4R+)M__yWzawvF4XY_^B3*J%h|+mJ;Wf@V>04Dlr)$^Wt6m(SvAb>nk0tFZNE+d z+w->TwC;0TtjvZlXI5sz({^Rl!x(9s{G-HSdJRnFZ5`COB-EEq8j#6k24TxpNIijC z^24sVAZiZGYHrP$eR-1*;ovZTh0gxmTWcOiHGXJ64yiTL2adgW_p~eYAw2^^C#7(! 
zp3&pz0)^v4(qFU+8tqv5qh$n-IuyA%-2dtYVJ-%rl`Ww%ov1C{<82IX4`?8#6Fzap zA|A}Iw%!)VgEr%R<9KZ7kIE}wvx(GGiymJV+%Dc#E;zp3z#xitgIVC$&OA{znE89i z=M$PuyifntyP4!OfS&HbDR{zcQo87_-t zGW(Q}po|W0C~r1`h}AR_25zMC|40@yiqQeG!>2P0e%-l4Vlxh&zEp7Fgr4JYDmINi zyT$#)QB)b+44MbXsXz^LAC;EF{Z>|Lg@7*k?2^ijZpY|h5jjR(w_*1O4_6+mFxE56 zb~`a*7+0ca|6n+6=`aUalI9Uu%|f0)UwMXe!rZ4^+{{+*XXe|N{$$>fc33yuuaR!p z$r+*b9C7iq|C#46nCHzZ;%$)vC${TBr&(+DI)}BS-`d-2B)w)U?hW^AwW8D$zmYX6 zB4-G-_tEFp&Z&rBN(A8+*!nozwyyF{aZP2qALPLHE75cNYw76!7gAkudXp3{_stP< zk?E{i-;a~wu+y&(;=R3o>mcs+59<5Lpix9;wt*~-*&8V3bx>M2KL7~H+4{H!{n^#4 zlgroL*Qak!{=BoZTW{1pynSoLjPtA0SKaquqdSE#&hT3w&Ocn8%FDa;@T(A89bJU;Dye)qA%5k9wgtOgIFMKpRhn;OY; z3i$csdnkjnNaB=3PQf8c<{Zptf2M{~eHnP3y~~~2 zJ^+B1JGBl&AwSaGaZbOTi3lJhPF{B}&R?DGdYyC6lmuB_UBYDa%r(;SWft|miN5zH zziOi~!c(sHqj@v9)MaN^KuASM4oa5lD@Ls`TtB**0T{p#GPcam`HWJBF+m8j&F-R@ z7LYRT2qIBBRYN&hQ$~#dmqCn?OZ{;?nUl~_SxY&};HNf;ZT}Op4D@p@@i34^zTBxwp>~ z>;yQvBV#2%V!~J&i6z=DvZUuK;3j>ia$6`;KR$`l4H2|63q&sa zLhwty&-Ywqt+FbvS!h?J0v3$yPmxL1#^~P8)Zf#{leYd|S#l$pyK?B^XH@|&K-O6D`76(puRc4d@H9S9$}qGmE74O%AulVId~+<~ zGBF>SzQEtB=;s!^5FCwB7>rzQ7Kgz1hpX|u!_wuK5Plby3wggGG~5cz7SIQt6|Hk3 z+j@}B$QQiXFBcG`D^Z~&567vuD1|~xSP*>JVrI%&(IbBSD#m=HJ8t)CFk{W19MHDj z2?T1buDMx4h*qVm*dx%*$#F!AjomX4yiL^rpYhW~hyh6|dl8sCElhrwl)T|&)(lbA zv+9uQuAJC42be#bcSmM}oBO+p8xZ^=4J1_8@OCs`j8rj0HNuj%;uN<~{H{W>%P@{@ zH5aD!42^1iDR)9Sx3MUvR`bZ04F@#yNPeNIWc0>8Wr_BhoM1uiC&Rj-xsnZd z5E;*t{@kLhDQu0;1y(30{DxOa&R0CWDA^n9u?`pqjH`vwop=mZ5eu_~6hyBp8BvbP zDEi+ZJp6q%nUl1YJ=5H==pys8q}5Y3jdes4VSDrb3{>yRyNhfbM2KnkMm!xZd)Mqv zs_HDpsStxBfk&H@dn2>oZKoD#@oRv;YR0%^#6a`ES63h2bLCsNJ;$ z<{E4&ux<0W(oP-ys#EK6PM>wMsVHM%9W@{~fhvhFkO3sMLnYw6s<)<6Q1z+o>USqx zRnL=C3rn?RfqILC)nCbcDB`DQjASDvl1FM`v9v)FKryMETP)!O+YQWeW>=1*xWAaC zR4x0}`wywxLl|R~-xYQMRNMi|TUK!&gyb1rLPyCekQI|x-ZeQM`4wdgc5w!Wxj-qf zu*=!mebC|{cG16$hJ$1ny}CGmbM|xh-PzBVC)Z~e=bq7sx;q)&EWmCvoQ>TMi3bCK z#H8HC=s9zIbRkQc89W|6AEZl{c}ADgMV}=(X?g-14gM){q)4+3R>|@~#~ROrND&Y( zp^^hPSSgi*ADjx==*wKLVfdqfL%H2vH&1$dJO_V(iB^d2j4+td@i7yiWo|~;0%eP0 z1SnXg`V#*b4LnkztP|=z6(58lz=@BrnHJd{IV(V1bKa!kv8VrjeR_V~{prJ-H>cno zy}bT2m?5F_J!Y;Pl4~|BBwisx6^H|aLAMt#`?tG}8`H7wcq+wDP!)`wsEVyZy;ysx z@J(n~Fu$>{-a2rqCy$OGMn4B&xw?>Zx|*XfRgB8DO*jP>Rq6dbF^e#;p?2;1eusmD z51W;G){rjo6Q=#yJ!X8{kdduoIJi*Qv704KLX?Utq+%pJMqPEGqGgpJizI>c%=msg zJ7CB%3@9_BNH)D#9$cGZ3n*ZKgy%h{6O-v%PXu9pOR{6C;C$<6U|;U3^L-s9*umv9 zq2aU~C-wuNv~u0)ZSie(B9u~G@G@0y|%$&n2(OKH!w1a zpeQUnNRaywwTxgY4f`|uTrn&TYFp2kB=jv8?}W&OT^FVbO9%LYkf@yrX?g&wJn}-B z0;IGZq{KSFAVvnxea0NI=G2FXEa00PHk{;|@=1y|>-i$-F7C|fWcb}f-X>>Dx-$F{ z5Bp79oxo;c@elIV!7%g{whj_DGp>!yEsswBWT=%Zy>VZ zGH1OGej|Je5`~$AshY>B&)$=0&i?*>r!yGFap!PvuW=YR8tsE-CutlMoQb)1GDlBAw<4ct`e`9aWb@IqkaH0$v>Hg83FPWXc7N2 zi(AfOr$I2Y^wV6zqM}$-0(19a3e&Z7$bbam(;+aCC}#K&$yUcH!nersNM#2mar$vD z0{`T9h7*pH!FPy^TEXf<1bae2Cy&t_;#s2AA&u&kdT#`KNV31Tg`lU_YmAO1a!4)# zBLz&J7Xq$diRy%9ra=BjLoE7f9+zgg%YenQg6O7X$oDopiXzZ+?X2fnB-Nu_*FmukBp0r5`psmN_V;eTQB_QsKG=7Pc0IlW zM_oJ`uRt6{pFJZ;L_fjo3OF*a0`I5qpU{a|gki*eEoAT>ci(UH;(D`JYwX8{yB{=K z{Z4y-*gi})aQC&WUGepLhqe*2A~j1tjEz3~`}e-xVej^3{t?hZ2FW~lufOOw8-2J{ z&Ch??U1GE)@0#u_td_e2m#XHx&0MxBop%+hY}r=0P)@xCz=LEp&<~?ctqOu?rWMq+ zs*Chf_M6EVd1n>hgqzjuOXz#gcNOy{{Hu%p3g~V$&5jIB{<$xIsxFfGm_MuT4Aabo z7^}O_7HOt`#WRdX^1yWv`>KGok-0uw%+21B39cV@h^h`Vm^w8(!3yz7_YEt9VR&O+Xc! 
z2W^6+JZMoRcSo+uvh`rBKBF#kjXV?*_V(i9efQPLt6$E}f9{@N{C0A7 zUCCaii)Fg_;r#0K8ZnK!uilBk-oCwf)qVBLhx1?U!@pf#nD+w@pI>xO-@P|F zJ7OvsdB}npx1fAe9RMcVEi#M9UWz;Zt!)Uw^0#~uXIsinaU&PxsW?oGW zQz#da9?hoR`D{Gu-=lg=HU~Cg@^A^`>GX9aeE%=taJR~_2z=P>mH5C$aK243`3X*S zH1p|IFGWy~1Udt#;){5mVmVk2)v@vEX(ot1+YTn%aFw*5;?!()H_C`C9}>!LT9mvb zx?r;+BZW0-iB z&bbvROK1**bctb(YEbqy>K8wgbG%YU2HlLP&e(j3kD)QctdV+Eu0l=fE|3v8YI-;J zX{g89&EKS+o~Q{=by$Ij6%0cK4FN-e%jW9l_tC#=c8E7}+YG1G>Y2YhIeSehbQMtm zp(%*=!zE)EsuYJm^1QjPAQ}>j%*^Io#HvCL+dR=R>=!%=gbaO607D>kep+tH|ArwA z&>T3d3cD;UxSp>21GEswlNszf5xNS`}O=@P^m0BwJOK+H>{#Na#7HU-c! z5g-VL^CaU$8t%WaG$b{m!@b2ZLuX+kB65}zLwb}x7dF49->Ye^=b-b2K8BQq0mOEB z=x_VtyZCpFTZhP(q_5c>2ROE(>JvMlSFf?~nd&%^TyCDGv!!5dsxR5*v3{B=?4Wo7 zbc0i2?Bm!uFAxn%ZNtd!d=3OPlyPQ3L!J`f;EvhG%%)x zP>8CeL^(;^6NZX-AR{QqY$ZA)wDc1ZF^%@S_>}G9F8}O8chDI)943?lsLz5*WG~yJ zip0K$eL|WcpW$?%6X( zMtHWpi~`~@L=0grU9E9YX@E9=*!C*ll2tXH%62y`LORK2*Z=1%%Hfg{Q8eEGlaQl_ zpp5rirho%hI1|YZjd+gALs}=-VvY|Ub8tABBjXNuG#o`TI;aGl+)+<~6?}4GkuZsH z$WcN9w-JMP8M2+B$XCUtOK|pyrx;-H`5wIo9CgQEp5rL5OQ=98ikU4MAxh>vy69)f zfi=4cPXqdtt<{G$zM!Q+GK|f!g75tF;pg4bkqSRyzU^?UD7hPDv<05^sdv~DM~8Ed zR?dgRqL=TO(Zalt#IN7jV=CM~tTaJiIyAatV;OmQHLYgu5meCq!p@ImJN_CYe_a`bqhjpO6c0!!o!w}`Aj0B;Xh#m+Xdy+a(@PZ-}ibeCl9yhEr3`amhc>;Tz>LN9&aXHzJW#S&Wf*S!PSWjhzHgV=}R!hHP(^+*Be-1$_OKE zAE)%|xj+7gGtaB==Hl|zDg4VPsTf9KR$k9gXX^nx;A{nldqgLvVX{!nlf(unYy@?V z_w*j-;*PeOW_8C+Q-S#7rl=flg!Z8{Log`;;)AQ7aR^o_zqk*p#5VQ*)p2;z>IG;m`y z9osKA=AX>R{$hCP`Dy4&lColTC9eg)%>b-})Phj^qCitD+Sc03rdn8_?NN@CS3PG+_bYNYA3nJm20A=M-rX$C& zUi&0kpFO1owRK&@EyL`-ao9n2->9!CwEK`L9s&BYM7Yb$(M3I(hAET&ookz7>GLz7 zt~mkNtE}Y~4}-m70k-)@Nx-2jCCU=vd+vJfM0H=Hm3iu(EeT+LvsEJ+Zm+4i{g4T! z{}kcqUf2@`Z)*>g`aR=7bfXb}N-2B1fr&C1Xoz!0cShMwHBI;M1XP9*^?;g}eVUif zBkgUD$Y+W5Md9LiZ6RTELgolTjX5Kh9!o}GMt5SB$mZDHV47?SxulDD1X!@<{%(e1 zj3xn^n4WywPK^=#o9zzG(Eel15PtBPBhu4&i4+0)=`C6u!=}^sM{E2+ze>6NXGYjEbPL>ugu^-nrpk>7Ff(mBF=lO zqr6@+t%pQ-&H7_)NDPO_?t~Z4Se&E4?6W=@RwBJ$e7i8~ZO8eAAOMxzo8F9N4U~4u z)(}1Icq;$bsME}~IH=&OXRdfXWuS6^fi`*CJ-xiVxYX$Kk~E000BA;izyz|J;VK%&So~E1oGiUJ~=!0i$cP1ojliFO(7H_bm|2~ zJQg@xodYU7bhxJW_roYWBMt@K8At9(k&*I~&nAaruSLP~hWsYM)!pRZ3bwDSvxOpR z!hKQ%6S;zNKm{+jSPBM_m0Jf*E)t=#6Q;>sF!|`AwMJP1No;~2M}IP-kiRgYtrB6j zjbyRGW|L4hml=tVOp}zds|#}6IGb%_-BC_Q+bK$Qyt+Pldz#6xUHAO?Mb~i9SHI?8 zp%m11tAX&lZP)`|3aUr^ck%dR*PJ%s``Z>0{j(AZ**}7HsYwi=z_y4|k12GhAU_bZ z_1PCr?4C-c!@cycxHf;I^m~%C@go51g)_;?i@-vqIwSvlC}5a{6)68k*O_xkemwEIgIpFDF@hinHT zyE8G+Ww4YLgneQ1^mbu@cKxk6T$6pjQqB=w;zu3j*}YfdIX-T+PMcmDdI6aZk4j&?^T5 z^ftX82+%8s1N6A1Z36VlIzW%!BiHBS1n8A_+($&rcHj8g0KM`f1n5Z*!vT85z5UVv zy)FB^F+eZSZ!bWPX<(ZGy|Nde$1ko8&?|=n^!U}U4$vzX1n5~_;0NfH0|9#c=)(i_ z%Gm(D{I+xfdgTWN=sAXnbYwmcGk~C9^bQm-OJ%(MDIMJW@>Tt9GSUiAyM07?$4rBaK2gXYCFqMK| z2grbX<|ptgh7hu|hC{Bt^wC9O0Q{F8dMPT)5OuUSuI4BuIg2WfKVwY3PeLapQYizE zy4!5mqOPsS&f`+D5ydjS!6KH7!?(cVn?}Ik*|RZz8nlBfT=jMn6GrRpI*{GhlAAN- zxyAKAsvcpO+x6HYrBYVA4FSJ|Ve}||Qn{VHX+7Qwk`O;Z&}Z9ysGtc1URi_q=L~Uf z<~*j#8hjBRBWdmB0Qq?1ghEFuxFGx~M{%z%-o3v(y}ELN2HwczEzCoV2gS2frtp>Q z`@H$G4-*Ht=FM(|9x%CKRTNA}$KxO(!xBhIceLX@4(E*YpBYiDU1@Zn5LdmWu#jwc zH&{d{RJ|)Y>N-G~BE%x0Y`O)PnUO%aDs(7YvRqY9g~+2tvhK6&+b&AqkDhZAK9Hcr zrodn|;#Vpamf4hQ%|_-+bem}*9YHk!u45T`iJ&{U17-;r5D}?5dS*~Z9r+naD*>D&M+(HAV11Ixv)9R=|4QD<|n zZl?Ej6tAd#tJ#FHFXGf3&zc-7QNe6JkWL9#e~~y;eYMS@aDA5;-LBDSL2}tdIs{Z@ zR6c%BK_Z%~pZ=(!)P2n5E&Ew@ z{p1{IfE}mGB}R=uy8ye?D7{Un=E`6O@ysASlF@2ptA%m*(H@8GfMp;_IC@$vHkj!I z>h!{u=rI50hR+Gp3d0Xg^IK8BI%H%cgs}6Z5F@V3jnuVy6xir2w2=IAthWnXkz{Oh z)w-L_?=0BLT}CY6!;$|&)T1r9sHAw)ddpuHr`mBh&E75io92jTR}eO^D`%x9H?9`& z(}$nC7w5ZAE%gCZQP-gmazkmayBI*2io^O(d(<9A4$M|djnYhpQB<}R5*E*3{^*RJ 
zdGp05Da7lznX5$X1OQXX>1!)uSlnkwP|(dpFSFl)z#~EjcFjgVCW^iTzwNp< zoFtPKuGdR5ETkyf#Lv(q@v@-0y`|W*ozP8$1-!YC_VS{ijSMQnl=U9+PC`%yA9NJ5 zUfPqgPb2fYJ-LW?O3g=YC@QNjZGDcB$OJafH0dvA3(VY8Xx#4Vmgk(<=;*x79(Yf) zC!V%Z43ggJ#>jA#U?jG?5#;yA0~E<*X26}pZR{p{H+#|D zXbD(_5W7k_u^tU&+a5B^K~vbP&$cgz8kG{Soqub@DLV97Crc#seeuWGh-^ zUxMTHI7ke-MuSqYxK@LsjF11}mywcn4;7Mf76aR|R#xAgzGDtySoEv6rza3M^!hZ< zbFjrMOFNDPM`y<$UgT4F_=Jt}Gv+<)&F=a~nF7sYyIi$I%)!`?=!YMotJ7E47nj|u zU(ViKm$KX1T8neL1zFEGkb^gxS>1uRynGe*1hWvgJuuucw?pj4)xCGRkL6SDlFC8I zx8{?Mck1XyLFpo%V7K3I34w7k4o7Nai7|M2=J!$)5cVe45GS{EZ$|EKv)Qm@%x5I~PU|XGCl-#xj_iVCspr)*e&KILk zkl(4*QCqnQH8;;@m^}b=2mBH+n>#4gV7ziz8GDWf6i3k`gfsmsRc&uF(0u*O>mS$aBgp}c##piY zSamge`7(5BAXsqrOT22_YJOSdK0UCW{e8pR2)N=3-QTh<(_qtWX|VS(W_k1QsEZX0J9mqdjiMwge#It_0#1Y22o5#X_CI6n{f$3 z-A0B-MK`nAfCL!^AR)vKdahRzc1Ro`9thJGc?X}Php#x#83K%uIS*zK{lU`@q7CHf z)B9-U!iU6UWoC@~MqSdpoF<81MOIDbE-c@fkS`HW0<3yN%{Z_O{B(h$4qqP3qnU^D+%*nu!nvO$ zlm7gEH}{N{^Rm;2jh0n!o)bH-xJ&8;#w8mJF#JHxs@SnSqZb{8`3Z-qRKrjcyoGcd z;O|__7dAi0zIY21DSrcaGH%|6Y70#-aX&@#8OGx|@HZG_IzxmKR=O}&u;*7uzZ;Fm z%uN&a3Tt1nd6?^z>$x+{TAV4~J5mCWjxFHsnNoi_iDDZ+zxwDCmunN`0X1^EdIe>r zX+s09r9j*3=9$O$)7xLWSJzkFi(iid70qDBqpNzMfU32`MUl{4fyTEAQZQF0l4_%& zB@iNH%`RBFsMf6PA3_33!vjBiyzZ(=#IjCy zarxq$D)DeDWR9kE`|!7-1f}EnQEcjQiet<5&$R$M)qwM^7xE0fBE>oOAux0GR zN5~;fy1^zD1{I%>0+Caeiu4GKE5a0WJkubyXv(nID5E2{XOmnoNY6I&8_MCR1oV=A zyh;-~x&|DOXirP>5~o;~G@U^GN$)EjP5B3-7-5X)E*>Itl*s`Uon1f?l~qbqF@e(0 z<^%)=0ZA}sYRx=QLwFC-lOPTp3oh8Bjl1D-7l7RI3Vi40H&^bPCgp4iNp*<;gIODp zC^cRY7E%Drlge;6Qc9x1^K()0_V2 zS+raK0M@`!sg%d4L&9NU6e3WkGg{zE^mmwFAZ7}(yrJcn_h2cAWxB!pKy#|MlwMzz z>~nF*uq^182*{aULS`VJ4=Qs(*)nQzhyvTPSLu1^j`LYPb#^2lyN{GZ5(<+6OIz`P zw^>Ec3#J6it021-iftB~n6y-R$9;x3agPd{^BS#E2XiO7?|_6z(pE*XbJOoCoK|f{ zDqA66@h#YRtP{$ZW>R>FhdYT!P*hjQksH7f^46O#X7dD6EQlIt7g8@^oe}rb7bTSR zq1MX^53D3GK4>(aEeXC4e=)OW4*v=AHuG8YV-kY_VL19du{#ky*Wt!{6KOb1 z0i$10`v^!jd@P3D#F&GCuo~s|K|3tPVW@w>hcXU75ZUZ!q;?htkT|gu!q`VxM%c`| z`~vgD=w%Y??yzfIu(-$a0!)r#Od+2Rf{V;D-Ap*A2M<>ce3g$1u)|@ij&|cQQ&77ZXG#ax;LsV(*_&g>e33|rzH@^m zQetN6f@cN(k9vW_rAS>@Q#dTA>oKx6QLh~$j^Td0xp5VMb!}dRoJh(`5=eXaQn@eM zd7zZFQz)*2xTSBFs&#i-G9%UCJl;Oj8%=U|cv>L^r_na^deDMIg0G)gauDhjWGYze z>O=Zv+!)Iqg^J-`QN{(Ha+!&uZhcA?qv1Whh2?g=m;iEOm_lvcrgOQOQN1b$`((jdLu&~U_G2^Y zU9<1Rj7Pty^aBA-xSDU3`SPSxX@seNyPa}vcQPA zj8Fhb)|mQ>$_I+SW)H*zBlV>6Ds^v<_f^N78i!H&pmvZ?ar=U8wX^ddre^?ixsZd4 z*)Cq^AVZ4-N2G0Lj?9ig)S#)A1kbJN;km8bTux5B0A0qWT#M+Ik^`8ER+$GTF+{K- zqOnD49weVe{e-GWN)DK_(D$_HN%SQzn#ry)ps^JO;YIiWxT;~H&_4}z>G&m2+&5G? zi#RQlN>AR7j9!CK9Mt-2wT+9~C z@$4?qU(=$mgS%x$`4WKRaPM38Zj}p-C9MkURf8fA?9^L-Q28lS)3Bm3nA)^5j}%j4F{#&%idK z;et<1D}2h8hwd{}5k=7uS0D!W!BOWgV$qi+y(;6&Bzec41X+VZ=43H?!UsfTATgeb zP-`8PJF(s}=ov)$akNp>%2?JopkbES0;h&xrS?g%48e93@*G2P5*V?Y|)I zJfQ#Ouj#%IWV~SzxKmtes#C>5>Urt=rz2lmKUP9lqlRc$2aSWrkYztpzTG8_nHUYE zMsV~oCf990KVaN7MkI^UIw<~YpxYRRd`XkMpb?$=OrIyekKk1Kzs zIL9{Rr&c)e97;m7!!Kg_nl*0u1u*?a>Bp{w=Dzg8!?4RBHWMy|7O1R+NokcbN=|7U zkIded%v=in?ytu2lEc6KL~S|?2*rKi+3Vb6`8;|M-*WdeS@=lyTeM}dY}e$9`gy8P zyrM+Bl+AjrV%Ym(vsF+u^`Q%i#}zF=?u+2^;w3hVGZ{_Sj*vLe#`!WwIXokpR`v__ z?@GjelJbLkp_;cXP%8YBhzsMm=0L2euuK7l;YeohInAFDRO1vPG^UYzo)@A~uN*YY zO0=4lR{PPGLJjqpvF-g85fV~a$!wkn=&J6?`}e2kuO<41roU~A}tNt@aEuF@;i zoV&~??L+mO+V*wlGF?zA;7n3gx4IRH#bgi)gCTUp_7m-ZYrpvWHI~k zNcf@;mv?q{iv5y-Pe$8>*>4JiqYurxH_2>*aij2yw(n7DJLSK0<%HtDRYQFGfj<8{ znMfSJjswjS0@Nck$A|L7!Cv8Rn>fhSOMjZLD zF1P=Cw7Ee=K&pL&r>*nE!hzSIm%#@>@i1!=4~r9y}X90s?v7HfGAtdGfdD%1uB+ovpo3GuNgvl?e;nj zZZ8Eif*l7OMRlI~hpp=SCf~a@gP9{z~dlPjA z3zP|I_SrtEYT=ntNKff8$#N{)Dh5xgU()+(ws$t2x46^1*_GP1w!2l2NVcV|oNz=? 
zq6p=jMH&iVLTo2C@l)1Y)CGNEVHCILcRjY3M7{Ax0Mjw5pZS-sfvBKni$cb%^VOGo z_?4}((K4dh*26CEDCn2!T@PX-Ya3#r0EwErO*%Tm<17dZK7ovHO5x0U1)6;mM=H9HT^~oyDAsgy}{Ok$NakYcQdSws}$#&DsRAiQ-6tBN&`yu7&x!! z49tTdN-4g)`ek=)wC3ElmEx)#4U`a&LV4#4niSAM!vSoiZH`fSBQiRj%g$K>*(#i# z(qn8a^jVPY^ui6?#*4uns7q)c zLxj}5D0*v7d=US+axT_T0hMdSu~cZMBtn+1L2USZK3f=>iqLE=7c}jek#ZB0)p$9o z@=GvNQF@;)lSxW0I;xLr7-f}PL6}y^{!pbe!Hq+^uZO8I)=gu3^*uv=O82-@Ci#|N z^Fq>JG#d;M;1^WCUSfn|aDGZ&=Ndn4_xCyqisIb-F+nGG-=jJEJ!tIz} zEmy-9+9<90Hbf(@g=uF4)yT{1iKmEcbRi77^oexq6B?Wt^p3QcVi{^1S3x)P#?D);hhI9vf1PG$R5WNr? ziHqN;wOin|E7vVD!_@xSyry|G077bpnWFnRHD9C}< z8cTcUSr^9;1r2HtFgbBprZln5P7N#b)DP>my#E*L7cD7e)bOuR%x+O6N9lajoD|Dr zHgZ7C6c%gq*u3}~s_^3_I|Wz%x0B0rNU7n#84R1i$eBelOiSd~6T^-A=$Cw~tN_UZ z)rdBik#gGeni2goSLP*eae(nDLjxxm(Jx>W!;m3$R=RCORR*bHDhNg3)wFmN-_M1Y z*m#P4N8dcY6fQ*dG*^hqm3MYcJw=Ji%t^PXyw~fq%j-W^B3wW&?UifkOE*+hmA%A= z$)+7uw&kQ$Dk88--O1IjyHiNXDnh}tI{$;X=A_1CsUOb9P6Ez?@nWvE)%&F48@yr8H~2weL|sXi1UCFf zAG=(m@`(y2B7FwO8ZQ>{y_bXn*`9d9W-y+fyXUFBM@vaVk(IGme}iy4hJ1$Nha-z% z78&W*X6<4g$cU>u@w0b~4ZXOhG(ZkJ=UDd1=;n4QC>A-e9XJ`;l7o%paivBnl$>Mq z8N33rUIH;)cZt@;IQSTw;T}+sd_2|NLc^lPA*!g1+z2yT(Y^{{Ddr>CR*b2vM$k`f z$TGJ}&2X~>pV8yeUlk4j8QqMg@fh0yU7gD{K$sc|(XMRW5+dsdqdp}J-U6{M*0J{goc3(EG_?}6e>e1q*tRcDwut51Bx)1 zK*tt|Zz_X)jCF!Fw2xf$fXzQ2{oe_$cJ%lV!$@NaJ%sGZb%@FSG=cgJQDzwMLAo+XX~2%LSf=EcqcLzwd>ZS&!fVKSe#E9; zMBMUHU?e%H+O)Dq&zTD^xE4-C<=#IUcE~pe=@M`LLi@7>g(f2{W%AR4weuETp@68gQ$X@t3Wg0<$knsIBvYt_# zQo}u$W}0y_a6vBd^%oXP=<@XH;=|>uQ?STNX+tC0Op?hAV*lu{bihL{XC4>IFlA?0 z@5*-OC?7IjPtxEp2O z^$hYwN7!bgM>x!)yu|>rWgp7f&U|Uk3@t)`RJT2$*Y0}T$F;aOWQ{^Ajd*T68Ydrz zhWCF9hqyi!%SnNy8@ zMx{z+7QfqNPehKP1Wvkdhk^}xr`9Hn*WDBt?b9u{OXSI>KJ zrfc4c2+4#FFj*wHy+kl_mlwXgtz-{W0VR<4(1j;`pt->!V%R#T9YogiABy0>M-{^) z*-!DwUMNkqte)mEKMm}00=8h7b;XQ0{9E$}cIxiff-jtxj`b>Q?C(3Qvz_vfAV+|% zMLRZ6CnoJ`3|e?b)=iLta2B$H1<8`|Z}($zzc=gs9U%S((ebgJI8W*fJ}KHUH4ajS zVaabVUUjc8PfxoSKmBhbex3b)?7ew+8^@J5{5SO}TFS9Z5{s}eqNRx}(GDL=qKBem z=gs8cG=K()5jLX%QA**@x$IFotK>*FJm-PP6ARkv>4^>^RM0v~um$uwFw zFj&(;zDA03Vg*_6^#DiyA7lZ?1wzB8VkH2%;g|;|9E1Tn4s4^i+MQNyFgx@b$OF)J z8qKKG?KaAFbW|8)2~!0uW6G`tSK&dvcntsM7_hbe{aSL9wPKmtR!lSw##54O;T$2* zUQ!n=CHx>dzgdv_tRLkESS2^A_yIz$sZLYVmSRU`2=io2&1piOlm2=J@HL&*QKeqS zj-q+|MaBoQGL$O3<a z(@}_J3NM|~*Wt_~g1UL1%#w^Uqp9P6e;k@LK0$sopUBn=up4rs#&d*DS#J}b9mJJf zCzAwh)=LDAcZ-S@DCo!_f;FLwIS=i}3>2<4%CPpH|2|iBzlDx`_f&)re|q)dPlGq7 zA6^g6--DL(`iH~xI!>?4zAg$Ek9MobC2L0XFg>;wOn!w!k z11nYxaamKa`-kdmjJuNVj^4Rmtbkr5pnPb}V4g5^Xl(=LPBj*}V5414&2qX6)>FIDI?GIEoFy#3(D-jn-Sdr$1I z=Xy%lOFen}O+%nQp+6BK;nH6^ECU~eD~3FYO}6@gQ>p$X$eWB{XD#qPW3;?zzy$DV zGNtVQg?Zl>yq`Ira+dyg6eGfPv5y>~p3IS?9k1Sgf^^BF4~Y4~u)pXSel6R+GO}4% z&)`QbmN)Tq2) z?RLM`85Z!zus4z&k@Z@=OW79qQ(Z!Dti(nx)KrN>V={oOfjwx=ib63#RM@SCt4;qp z{hO$9kRKho@!KPVEQ!mIlSN&|!?m9Tjz+=#kK%bR1E;7ZbB;SIYiZWe7 zARK^yACITYj4vxku$8|}oIF|fVThHNAEyIxW1OA8{&w*G{O4C^m#A~w1No=Ud4Xcu zgUgG*FxXnVfks;Ov-`Wu6lPCu)Ea&F>ZiAt0Gb9#W=brPQh>DOieWW6 zfPNsF7N4^~9>Im7Xfzmjri_v88%Ds}-TJ9J+xFwxL0E9Ek`>kN^pWD)fa=!z z9VU`<2iFO#s!nEsL^U;p5~s6J%#z}vE6zw*`X6l-2a3k)C5In?oqJ5Zc#3j#`DF!> zKT4rCJrN;eGA)2buD1&`lCKCD*6Byj=Wx5x)&Wj$Qlq2tv)dKMIRHN262iME&w~0v zuPT}lfocHKR5hUi-~?HuGl9^o$P5mKO^`g#*xv3A3ay#Dc#X8p0G55N2KhC74V03- z{Pyh+5K|}V=YV{|6oheXIy7wo`5%}tj2lh8Oaib(FMtM&TxouRvWF+xYcho`mI`CjsQOE z6HF?K9PmM+4h%P2K)jjuod3y+#<#9LD(OstZ!M&g)n zH)?$;qFLQSWD$G8H7GggzGLuV zr4@fl#v1VyukyC??=6<2<;`HcTx4UAa`F12-;ix(#wX>EGx1oqxPIakOQPziZ_du~ z;D&Nc@nX67GGB6mq%xoBaPGie+ja*DtklcZIv(;#sV(wcWeFiM0JfV#md6m~HcY7S zs?}QZ6_J)N5LrbX$7j#Z%{FnYyAs4arZTG2<@4tf|3?piWe3ayZnotG*zObc$tgka zK_E9y)OMXzxi~FWw@S#&4^5#Aj5>}glNki4Q$L{ku4Y?md2Ek)6jml= 
zL-M0BI0NY#DpFT)iZSt{f!4D|-3OpW^O=_zvB=n8(*muLu#H@kqLdskrJ0?Q172&p zNP!sNOOXI_)sOt=DdHR{*eXaczzQ8D?;dDa=Dw$Ix#^6gHCfTfQo^R8lyrn=Z?qeA zD>2Fz33T{#$Ta0gLI1{iq@0PNL|dG0N(sP-eM3WtO9Wd|(h8yZEM8xMOK`jR2noJW zXh{)n3{Wu(MYw*AP{h7aV!)Fm@D zigGu;&sQ7BErAUCS+bEakS>9Jn*Q>8qht)&$v7#bKGWNWm|R@R^U+C!{*+5Q+`_=d zg$)JtHfuUFpaf;(bJbAaT#O3+#~%=l6toos#~N>du*h?-=6i=+ZfSBGbhRMIHHEf9#ftY1ov37j z)sTUg?0DzF@2?BfQOQ2ca}kfdDRODmxHRE_IRH`XL;?u~SD`{? z|KA+lgrHHR0FlTTWzMrqP&)vlpHfYfz_O)6n3MF&Vgya7kI8zf29cI2l?jY$FQEY~ z>^g;D#2SF2LD~<-7Fp5{l~MvBX}SK0+A>^Q#cN2!oMDB4jg2J!8k2tz*Nh;I;f6QpzY!sTdZAIW3kIowNkzO}FOy<2@4*o<4=g zAWq;;nF!YNI^LKSU`*&!Uv7sl9$670j~%!aZe%YEcW%KT-~4KcXO1*wwi=qMe*VwF zho8QEcXo-fkc=Xv3C(PXTgzw z0Y9&CIGqXc@b{gfQ=(OHv^@q;Qt2?$P2rDT}piB7TAY7VLG9|Qhg*w;d!|9=<>nUCZI81apFppJ? z2Qt&4uny(+TTB=j4i<`29WB6FPQjFlgWX(zftE@?2e%2P3_xRKv$oaD`y z)V=q1-~OA=pWs_>JW0+8*>rQ=sTkK``By|C&2m4=y$R!bAd`1Ar0EnkCTltqjZfT-~kHu!H*Z_#+ z=A3t5!7!ny;adGxUP=Yxy?U?aIW?_*9-9bP3JqirQz9r{S60E}th-a%qlbZK6)UBg zjp)BRkl7M!Vn>DP0szK0mW5Z)$D>_^mK7{dDB=Li!R5tI?_Vq7=uO7C&jObOOe*SF zkj-0H&4x;>G&!Vt`_Jn~2D)rrv(*Je$iAY=X17yr+o%CxBeukt)y4xt3ZKjr35d33 zjhqY%5-$pc^`^M<-l8Z`#)RRiNCM6z$>NihNJ;ARdT!-i@*xWK} z7WI6L5JYGUg#`2r0cbW_)8K_=%8C67RSd};u_7PbeRw+<8yml7AM_g41nCaUtl*er zhmgE{u>6=~RP|nN!K71MX3oonc#nsl{#AJ>6{*6WhnGPV9fej>=DUjDuaIo{B3%0`t2&a~{kIdlNCz$rS3LdZg$4a+7o;@rdlCFVhMsjs z;3~p!f*5IdC~22=-P|KdR=1PJ4MwJ%bWZLv5i+N`A}Q;a5cLsUQbg}^#)tTuPD*o; ziNVxP!o!=8h~Ws&NU)9>6Jd-Q{88ISbX0Mjbr4RC2mMTGHxyK;c?SfLjRa{2nI+kO zn2xcyt_#5jxr)1oCBW1E=tw4Z=DY+^dr;=$Vb*u&(lNc!3i2N^bnMlDX8TJj!Z0x0 zsaURcIV6vgxYctl%dOUmM_R0ju$J?z)Xsjj%CiNCO`i5tbd$hL%SLo<=OdrDCj_V6!T<`#pSi8q_>CEjn?tWXXj!X zvcdVlM8Nr%>Xa{MjHvj{7y$9f2w3DIPNi}QDz+-)5*C-(&p1|(V zDnMsCl%%ILOoQ+`qEng++5--l#`>FbK}gQT)Y+}~k-hM&eSQ$g6s4_;_o=DcZ3PX1 z!5?De7r>$)=>f6~fR!b{QA-jYl$n4osHR2i$}MEUh9+RT*3C#P!G_REko=q&a3VTd zwmiYWJN@^Bud~`_VdVnmzFQSO@SZiv7Vc4;XqbWkkRJ~q)qB@~pqIjr& z=lwj%c0~O!=o{etTTS%<)Q0^(VBH)8^!l&c>F8s!Ruc)OP#vqC2jK96G6w7-nl)p> z-OKe5essHl5-hVXzUI9rFjAmbk)ASD9ljB#w)SB}5$^GC-27C@OVt7w=X6#mp_2$g zM=Sx@0pqJe0fQ6inwx#&H)DH))2Q^qB1-#UcJHtiG%K%4H`&i17skRexfcsoTB2e< zj3-isaTbJ)xCgMc&D8vvRvDSg5w@r%(Ve6%cr+_+6%6@cuOWv8F?<;#zG@eb>t*sZ zIZIY4JB(S*d${2EoUGcg%)S^27|xLwni4BvN<>f_&EfMIo#5NB&kEVvKUMGbpCF`! 
z-gcxv(W)FoZB4*?p@`-Pxgjyk2#x`HjHFXAbsDR1ju-|y=x&j#q(v_9aw|3J$Pz*& zCC4sR)av&zd9KxKw*71cvEXqCeu2s^5l)yU#D8$Om1lm}WNTGNkv~v%+0Wb_c{z9g zh8FmCtJ8t_`F6A3%0x_pG`(RMt3bx{k>sx_5hxzJa98?$izS3yCL2~!3vI(LS`Ye` z4X{Iwky!oz_Wb+7#p&zwKc8Nlz5m`hs<@y;a4?uXP;X-CgHFGzv*v8+1`D&(n6EVI zO=0}YZ`p|7Y+qDH8H0a!cgYw&Uiz{Qk3W{*3<^TT;+BCJ4%`vS_pc*77e7W*)T+bY z>-4=?tcUqJZv==*ovFlXzr!jtWR+LlJCa|h$Z>azr{Vqi>+_%4nSk%%Pk}F)d#(!% z!m=jI;T1e{?RvT1gQ@HH+?u2hHg|rRb)8cKa1Cs2|!hf9aF-KX%H?U33 zy>nQHQw!HG`e)#dJKJy0)ULfea=Qq{A9%j#Y;+<;bno|Fyq~Nuan?^h%WCC*Rpv&{ zM_KK{-&C>}PlrYl@nIF3x!{20)!dS^c)eU%w;2M!`Fyj@B0IJe1uY$KBWRLH`D##u zA1^OBX0PO3m4cXh*k}wBz{!$E5PU4msq)gP9G;0z+%Q;{*IYt@Gae}=ZynrS zIbEbxVTMar*sFZ(m(l*`VWOL_+8Q z%c*ke85ZP7jS<%#0Y~F@n|*{v>NZ+_+RrAkLB8GyLrhbgcd@OZxtSn{8+U5Qw2R3Mkr_daVPC`0i~U*89TgCNgJH zzQ|%XGQy2H6`cm_crI1exF{!0FqM?;wi;PNzWW3_63vd8)^nBRZ21#yopAd;9GYkY z^J2~m-Yd3o3dUyg2Yy;Zk!_0uSTeGsiadZyr@|jcFIkj;` zGIL@nK>lPYslhrkpxN%Emco--Ixw}l@h_|ZjaLw90%ndpcagx# znx(8!F{i$polCF<y%;3kMTnUdZ=9GLL#hRDo27G0d0UThl+FvTiNu-( zh3Dy-mwvrmpWcIobNK}trswP&N)&#Vt)Uwh=S}KNjhR6Y;?NL1x7o@TukoiA##uD6 zfaqpfz0@R#Ku3YO;JW^X?+9A?GDRD>+^2Xtn!t*4y<+4Fa@O*ZJdf8OtpH9AK1{MR ziX4PZzM=}!h-s*HQULCilKnO;iyKsa<#f=$I&@OT4Odm8q?Rw zEJ3`V?V90-ydX;saLq2)t*5DQmgIzsY)Yhk)a#WS9S}>pom#oIi)-I)BjqOSTJ}^6 zG6FvPFryp_o`gDmR_p_IT$?7Ico?tO(_~GlOEIv`<(e=S7*POo5h&+_k(H<)bZ`Qn zpDiJ1VL1ss5Kpk)Gd5i((PRs*QJ!-z^GTyDfc6890N}ibPt^x-MP|!MXl2rom2Sli zC8Lh$7}(~|7iX8JPU1oC-o+kxiEK6!iEy#C&3Zg8U`bn#=XwH_5UTCU8y; zDaZ%+Vh()5&y=gFyyV+NKi~fJJ=FgWCu0)bP(w|c)5XZ9NCPY7=eKzQjQNRSp=4Rx z@rW5>ys?V6xkG}5VI|bWhQ#J#vTn497boISsLu4+C-5U&JHZMBv4+B>A|Btel2ax( zO$qH@jVLot55e2UVPOPsi5f9j)>gKbji-$@Dy^_OaSTJx8ob*0`vm|X6zOKai()Ex zRV+XL;bWYifBsYQHhu53FAdRXTz5HJtUpz)2tWheeRSmJNM=e=hLvBJ|K)a04rsf) zk{i|FxEQk4K==8)ymvd2pRqsULqsk4ndVNCv&l=BO}qyliP$I#S*0_HVHjY(3B)wU zv&#b6xCM-f_pAqJ5|B8683);r#gVh2W+9G@q_6pKY1ac7eamGxpxDk~C~)08MKp1g z>3F}O^l-(3AZu|29V3vayH=3tO}t(l+KkgZ7UU3rLewF}nK3wR8GSXIZKL%~^wqfh z)mG9IzWT%4&**VcmtZ8&Zu~^|q`%gB>k$X4&>3f%8c6zbP7T0JTN`Mk4DUbM1!{0{ z`sxi%2v1wy*#;*V@8`$VZ{*ErR;HpoYqMwa{|_YJjT70zM^_LQW2mdz!RLT%%~uY> zk~hfL`MbuFmF$h<7lX}Z1(6pdMzT|k>Gst3J@dKmEwJEe|wO+#93Kc45YYa(Ttrp7xe1CEN zlRAH2oxN#y)CfQP;0%t}%)zx|kx_!_9KKKa71WmK!-BFyJS7taUvsb9n0J(b?}Y1l za)_43J?VcrxJs6Ds3Q=;FDG}^Dgwmi#^Gpcuq34&v4@W;2Sz^4m`hINwwo*9LF!su z{)Ju5T_>in&~B_4S8^wH|nBV;Vi0~ zgY4}x9_u42g(?D*^>TrUXX@zQj;u<(T=XSL8FK6ZQ_B~yM&RYVfqXpn0l|5whL+mL zo7O;rb)=nn{5ZnBcWAyjQ#&tE+nVZc8xt+gl6d(CTJJFfA(Hg5d8{xe*8Cl#FS)v? zi^sOXEciaQfY|#zkKQyhFdM#@G=ch-L@B({#aC-VWA@2!=f)BsDlR%e_1Vfjlr45+ zKG${s!)My+WX1$!MFp^o;Y1F*lQc-?;XD!;sKtiNP?Ndf{9C7kZSB$D@)RBp%14Fc zH?mRAK=D8nW~(}xgJ)gOTNycKRnMT5bk+}i^TICO?CwW&HeuT6BKZVLpwf;25pTNr z8es8KfCGuX|M5GddutvwHJY<;!GY}5rA=T>6}4YoZxoGhu!hFibo2FE9v&0q3LOLh zxxHe#Qo#>5&hcca9`$)Q=>6&n2DE0)P zPu2!7Q46$6Che@qfhEi1=z;dhH>YAc@dA-&X!Lri=JF_%v_YBgS{=B{Z94mcsQikK zN6AVyG02lq8lp^51T651dSemR?|<3Jt-k1~jD(^W#k zHXYJ3CG1*8d6;eeo?u#ME*P1u?OE;ci~W7)D6Wn{&ScWTP6JY4;Zk>Dw1uOxB}kGw zXRJ3YL0Sr80jG&b>X5Jo6TDJ#x1(@+lbGt>bq-wnE{I`t`a1T7>}C%JC4nMN#TI-D zC81R8GP@+*k}K(kMF}+*=8yv&JUk?B^Bl$AOyt4~OP44z{g(1(Jv_Ej)*ztm08aWq zKzm-U(oBpATzkP)3wrf-7s9(rT_`!+tXbhHx9rWWHxYPvjOUr%fjn9{sA?0y!NH4= z6#`WKP$S2D0HH8dlwy+&Gz@Xe!MO^5>-c(#27GgNKKSv~#pT(nx2h=xo!a9! 
z)a2J0G~M2!5ck6&;|NVF_k|H!XzZifr10phFHXUT^OiD*pom-{TtZ6cB)@1{4 zCctllL2!JLn&^_HWbK+J-(2pyQR`En^r!g%%P#6Yk9Z05j}ARcqv-h4d=dag zofZP3?w^;9={exa$b?P~=I~+g%)eo}cCXjQ3>Ni62V2{VL_L$u(8=3Qx2t4Li5Jmv zI)s@`rVuEdZU8=%Yv@Yx2&yTDlR%=-Q*11A61r(+b8|a-gF3F&ge=BNB$XOPQrrgx z-=R8x)#Uh5=Bp#?ugG&^)m7~)DalgSZDCNe2v5737IpfL11my)a-J#q@ z@WYScm*VJzhX=TS485y;A=BiP=m^_hcjBVEVQ6;bx0qUa6;W@e0q}B$gHK)Oq6dcO z70SV&$Q?nb&h#^QSpjYtuV5gw6rEAFms8X%$L5?_x#Hz_-J``YXEb?x$E@bjM+SK; z;F<`8lpaISX($F2j*Litkr$w~(DsKh33?QWaiCW0uBM>CM4OlyKo-mm^HmBi5HV6J}SfUSx(C>%JO2kT^UFFh;^*-_G$!!@WXY;FyqfYDtB zht1sZFq$%3Hc%?S*(wyrUl2MU-DjT#kCBKRL$)s}mwODw`0U!JOBP<8L%p0vhAD~% zjFLT1=F9aLoxegEE$ARX=K=9e$R(jUJUl{ZJnjG@&{9H<-t6j&kLYH*Q?A$Hh=x)F zwQ#+L+bjYOgq*4XHaO%oWUG2PWY!=II8Ezx!4C!AwN9?KGbpw_oz0Ly)5D`GHAh1z z1{hHFH0C1MXF0M0kn$UamW1IooT5P1bbF^2e(z2EF&S)Eat|aG>lj1k1oEOyB9Zo2 z80I9#4PKW{4^pwzx+n>~(Qz@ocFoaFJfSEXf=RL0nW_enxdG=tdUPGIPDJZWLspNy z&!2sEEpJ-fFj~DZ-&D=DuF`E@Fm>zI<@vj_*Giu_yF9FuN3v?po)>$@Sz`6mYz5e< z=x!M4v0Y2xhYsvOp&kT(pb{hC675tq>F8?6WX}g#U;9Yd0!20P(3LT!LSV290X?7G z23$uQ-4Re5CmAJa8n3@Ng^h3BzwXuQkdQOj4h|pvjYBVdhy}w%POLk5F)h)7AE0Bq zsbQkm$vmDeP#GwBSJ22~#u8OvRkC0RIsSHQKSU*XQXibzPKzAzfjs<*moT!oQ{4?C zc4GLl!?m63c`gj{ih8bhk7rheW#)O}`4vO8XFuAJ0%jS&{Vp!|a(MoLml74hA}cXa zKQAZ|ViRXaiRyE(0SLUZXnY)ATAfJN}1 z56xz~*8PLhYli!ru`Le5(IwFfJ_1_xZn@rqBcR_Sw4#SO0jw#(d9+Y&a4yDFHJUs) z*xg^Je=A2E<6ApY`t-3J`}xg=>NqUHnwI2B0`IA8oA zql8wC4r=4^gj`yNhKLd21%tR6;Y)ZG5t6f1uhr_c@`Ik^lF?n^+NKzn9+p_Dj=%y= z5-_j;cE#pUpFCwRirszi1i>_-4@r_Q6jIF6`VR1xnF}Dh z^hVpvwV@F%0rwm5h2wNeZX<&R0TextKhvZEsZCRW{Qj&M%P^f*UMj5$Z^80Y_0Sc{ zE=m}9d-nbNLu}+=-5C~=t=SIX)ouraH|IZRdjXh#bIHWw0&lr zBuI1&6{U9fr{zqDZL5@(!6Kd~h@li6&(%o@*A_%^ST|srZc?LzX!ARogb`tPmc$d= zHA1PTczSxwD3LLI{fJuDRdVFJg~LA<_71-)pSS`d|l zO+)Zk`YwVhpV&`smYaAs(18;Eqt1F>)DSB5ll8W3Wm|L7#u zkLvg8%Osfn^wZfJEqn+tv|>SusE$~aO`SehG2o-QcZZr&DLA; zD!9AOy8LL7+@OXO&EZ7hZARO%X8l*K)+R|UPTH+_)al|re5MBWY_Xlq{OxCV59CuT z*A%-i*E?|HJt{qW7Jc~f%`cU=YQh$2QaKw#Jo03kte;2U|M<4jsMerT%pb6xg&Na0 zYM*XDKbz01*DtNhPt0qspV-&upo%CCkC)Z!sPftw1+0OH858yjZwIS58(w}+!Wr0K zsCT*tX9}zV+!MM0fT zRHrd&6dnau^EDLiOjpZA8V{#R0fVX}+0HaH6@3+!_l!4OdAWq@on^k!vqnxNXP@eO z(J}rgM~+QYrr_vN2@F}FH4omNeYhO_Nr}aW!-FFe22hqht7&OdKDRFGiY&3s(RnTFT_& zWL|(jPOzR4cB~D~^iKS@9MOw#E-23uQ+?rH<)wNxI0oRy)29(%yrSY*Gzw#KmA7#P zCBDjFKEGKs&c$;vVUZt`YOjV8caypr)N>&&xCeveax>H?2T+192=uuLS2&FRs79`z z{61MEYl>}-({wqSVzEh%5>;KA!BCZ&)OvySfHq*_DAxGdQ2J*9k(v`I1uR5fyk!1m za%AnGe@E35iV0JSM_`%2o+28;#}uuz+yL^nyI;!iViVK??d_^&ObU=-iG9C_>>WU4 zh$FvZ_(n~L*b5FFpzH>lcse_L%G4+uSt3Q;t!)TVKE$TPiv*$ZG!;S}fn9W{$&gZ6 zumgCGX$w0tEva9(W#|p>mmAeT!81YeMUZq-+aivV2b{oE zYK@qEvKZ<{6)w@#ONN5QLSJ&CrJzX@H6`E~tDJ9+)xmTBZr8 zTeVW+J=IcQV##y{Ta)d?J*1j?rW->*^LRB&D1QnIbHXoE5EBmAR_g9o#pKc?yI~EV zC-I(_g&uu@A5S+3s0z2V;qi2l2*f)pD;00AeE>MKdog@#Yq@(1k|JMsBIeP=uI-NTQ3KHE$z7UqXw!n} zgxI+C$;8GDjghh0>br7!`itt3-{6ttZO$i$&L6+9AQRzjImePUQaGXXbG8t~BfQs# zexv8m|3W)1MfM;(W$%4G*h^{Xpua(OPb0`=tSR%lI{u|&{QKm#&3(b{(06VY? 
zMelJMI+FQ8&dJ7tzaEaolnSQ3P*kwFSE=O44mFRLodS;+1#J&80nY@RLBThqn5o00 z84?Wg4tF%Z6tkyWsRuinZRV?79b%BilSC8=)i67=WwV~Ums2lgSav|}VhInsWQX5& z+8eIJ27&AS?G59knbdp3QPOC|``R0>ZF@tnM&^d&o#qCl2rwZ`)Ho(=ym@=^)8O^1 z*MB-4{7KzX*TGsCxD~KefB-?Uua3onZ=)y&H#F71VuB{|li^EK6mT|NgFTrKlkpf8 zaVRm5bhTY4RB;|`1?o5K3ng7Ox7%4#jXq50(^;Q)OKpxA*( z>`Ty4GgOy))T4<>ltx}<%W*Qq=#bs;wIeJn>JJe#z3n`~g621IDhaWo_sOYygxQ2s zFlx+ZX++%n2ou<`M^g@+1bi(G?WG$A)XCl=vUa%##YU`^rN6s9;L8y5A1Ft>sC(ukk&d|V|9dp<5*!Fda>a>F68ij)|} z)CVe|;pTcex&}(efWS@4?_bE@QTip_B=eMN(yEu@MRi|(<@<`zmoUXQi6SE0GSm~y zK?;8JYfuHCd?&}U)8}I7f;|$L*m!mme}RcXPx4JdJzXQ<*D@L>YRP%6V3Q)HuaXg@ z!m8(@3TF9kB)SRKk>qjeh9&e(n#?AUQz@^3&x7`#;@K891ImYFbz6Em33Y_#7qjB2 zWqnj*fW5N7^%heNP^UL*W$FoSg{WrnmtW(sAZvZb9I4ECjoF1EWejw&_zTUElSx~q zse)6DKV7v!txO0VKNzS&29r$Jid=3KKa*Fn<%##8A;RpArQQ153E2sQhxpE|&NmGU zmuV$pkKX&nUTY^BA%nMxZ9J9SNY#g!k28j2%ZfWvGYAtJ>dLl>;>XJASsNMQ>E zC|MjG9Q;>-459>yPim_qhpdg3qeFN|$s^R&QI15SO||T23sj1>^g{JDE|^lG^u@xX zCBb;+VSXouXJ^db=>RgS@AW!_2XKY{@Ca^clN6!*&=%WoGe%!?7+`F?`r14VqELBo zGzR>-2X-W!{0~@dPyXjj6DR*Nqhj8h_R(;Nr9cqJBu~u$`_gkL!UdM%$L*xs8x2Rj z$#7JycI%VQxYw*T#_d{;6Q3UTWqQ0w&S%U@Ye)?ltn>Bv!5%g&Pzw#DCc_MR_`xc6T}}Vy^PCbQD(*ln&`igSuh)ZSV+fcC;O#7q4miTlXC}btQ3c{(34sJf zSpcfUgA#)p;t`47!zBt#EJ!T?%7ir&CAg5Zg3zoL=%b1q8sHu<#)7a5x~bJG+YT^B z2Ba2{UDsdOs{G$eJn^R~_C+uy(Xr{6tkWo6p;?Zfmwf1 z@4%kGbPWp8Oq>-4Fo+DVf-i*53w#Y$9NMm=5IJ@;*{(t0k1+aVyCT#Qd-_hQVz^otYo|4c0+Th&2(qyW+r}4zE-z%xA-WH0eX_qF-VrQ zmU2O_;&>R$wY3{y?`jVcfpOi2!GHtDfDY`|E~(*-0YU3T#}pN#GutC67%;k4D)aGV zyTBft2F@`CCp;ziVI*vsGLe!h~>L z6H+bSBKzm@1$ z!vwQ6ar@AIaR?8Ysm__uiDp)lfsTVOZiazQ$=Ea(lAzTd5Ersb8R!M(h2%w)%tg#W zf^-~3i_+IBUMSgaoWLdn{Z`k6k0$X(?NWS$V{)9$0O(+vrdvee!`@+ zO(r`}f$Q8;fQ)V(tb2_IfMVXB$qRJ<6l`7wuEW_6Cy=0-vrkVP*aeiT2apM}*h~%^ z<*0eYN6jNUNold9CLcp$ekL5v-S=~~Hq~mC3L@3Jy>b(xRl8m1a0}DcZC=CF2VZAj zSAEADr41^^s>C(8F&OBGFrYhrm1a8_*#5tO8zN`JYb+Yj=;-L7)q-%;(MRXgD*F6? 
z4QEcycfo6LLZE3M=?qlb~si?5^Y{QrNMkdDKGH|BiEHM z2>5h(KChD%2^e6&0+SMNbvcY&4Lm#V9Dqq*bpRT71&|;Q_`m;>40ST3z95|F6MgR) zxHez}+yGwyHkwhw?BVzsH$_h*R`i1cQveoV3e~FrKU}_2hsNc_tM^KH{r3Fg^7Ks^ zpVLVRhcwtDeI(&81QG&bE47w^ZlIp19w)y2=T{f+4|5lnrl8CeL~s7#^x{2a!{}}f zp5TC=Q>8{v#yh0-{AzskUyCO)coN%$k{svTmbh z>q+t8o|LUGRLy0CC;HXqoxfRhkiU6(G-b7cU^@;5WotBp!P~Rfi*}oGY~X&l3PTQG9SsL8Yi|JavyO3wlNABXiUgkXhpB6eGNvLXA^xA2!&H z%LNz2AfPISRbtL+OxhH2@=(3mKBa_J5K!15`UjGntv4jV|IHg{qAEJ5suj@RA zcf5ynrmq~~nBr|O70Z3Gij`3e7c{!7_98mJSBxhtJy*lQC^_s@6^&m7u)~xTAP_T8+DeaVJn&Lc zv-whWlSNLG1)>Zmgkj@sDZwlf>^1VroiFeJ(k^O!n&@aqj9ybG$vj?cP+gxyxxhfI*U>M_L&togUyMC0{x<-sSbQ*o2TbRMd)(d!t`nf=IQ zkjRE;-g7BRcRZ#_w?3wP&YgQ!WumnF^UA_u5cmd!SS9Zi!4q4BLlK zMNN3gmz0QZL%4L+3}SV9fWB*JGZN1Wq+iQhKu}0`*^zLU0V)eRuh>5FQ%e2=O6fg2 z2l*7brgR_{gnfZ}8Bv|DDo2v@qhOl6#j_$x1GYLmg;7U{Kd*jECKE_H{gi+sA?5%c zbg7|W0!0t#HM1GAD0Ju9&IVac7}RuQ`al@^Wn^ZYd>@D}59$H-At0D0lxB#^htY!A zpJU#HQ}Hu=#~uT92dM*j)Et^|VTbP6?lLY>F6_!zAqp+rS?t&cj)|6X$4`96?pRp1 ztN&(Y2LZx?@&;V=Bq~K2`Jn=6^|)ndtCYIkIzv%C>~}{j=NWsMv6G!&yg9u%eKUA- zc5(Xp5@1RBbAUG!Yyx~4VBbz)5vf`r9?pUor121|(}B2~xsEVQfp*8>U~#Tx0B0g)4aBmxJUlVg#Gp=tC=vk9K{R$8YXz1GG%*AGeloK&h z&i?1r-YCkn>+~t*?Ms6?cQy&xU9K@ynCj9*pkMA+5I7>oAhF6@~)v z5@404b2&J#dEzFQ;Pz)LgL$2)C296tnX|y08aMxXtm@x@`1UY$Ugw%2#hKUZPkN1E zw>N3`YhdcE)w+}Zs8&;}wTG!QDn1UAhNl>2|%_@>tTAq_hJS_|s^blNCi zr#*2z-1f9&(2B#-k8~bv3V?^lDZ(fhQ?BrG#DU&>VFdXsQ|}v?@qf^W@kD8rY-f5zr z_s$W09ociUhS>M~(~+)z-7YurKy8tDI7@cM1X?-i^dL!%h>;{g%8@n-8VDx4u`V~t z!DT)gQ=n~WCoogsp(udSRM0F3uF1F^2Ggx-2t^(BOO z2b~eN@S3oNZQBQV>L@A{P@jO&b7locWI^<<=<1)80*=Ku868zM`y;_E4i;!9M2OKyfWHnYbP{nXh7dkCwlPvT)B{rV$azVfV^3x{S`tsIwBg91RKlN1 zF!Jqnh$V_~1svH|G|hn$*3p4^6ws4lNa`b%c2TKIiSB2|g8q_3KmI4-04{KrruH;d zIW^Ch>u`{`+X1`Lf|1@QjGF>or7p$xFTa7z~xEeyRkgG3lUIZA6A1 zo}y_Ikl*wMJmJgoVLWxsdbiwzlc~|lu%x?vt!qBVT?wW4_9Vtih8Pc=jiB3}nD;=q zwG9>CWQDESX=ZKp5DbP~7Dbp)$l{LO?{QslF{j%hnn)qpQ2Uq{OO$EVQi0PD@Bbu4 zGN?WtrKBROyleG)T^qSb!D@ z7WH;5K=L?|D836a?ZB#6V9446?k8@`u()C>tT_9KP=E!scn4P~g{q1;w9RRnuEA|htCnSn+t`^mMy2&PKJ-}`34SXEcnz)@XV-$dA|nS zH-%;xI>Y>+VFJ0(>m@kLHsr-h7O8NkSq$-OScL$KicGmmIF%1qeMmNuAoy3P zd`y-n)J`&t;v1GItO7I83;gFb)m3K#%#nU4p!>x}B;OaAw>{&xiKuA3(m9m_kA9W= zD2|B{(ut?KY+AETb**e9nv+MmoYh+3%{9L?%#yR2>9P~??)T^*!yjeqT_M;wr#zjw z1#b|my2AJFBPLY5up-M#;_$L^cI3Cml}wpRlv01LlZnU&h~{LwTS4Ks`b zr8`<1GZ#)ZvXPFu;?NA72B(Oc1ZJ!?BE3~9uVktKXr%m5jRmKZBYBQt*F|*H6{jhj zj8=QO608!bTy+6;4ltAKgj?aJfKRh!2U^NrquhjJq|5E3BS*Fb(&{4z=Xr-{?EPo-Cg zW`MIO0(gPsKozuLJXgv15Xo!1fIrKTUUvb6gUd(Stved>vNNqYkt#Xd{5*M{cQ9UO@O8l4E{YlB8vwt-20{27>#w&v742# zkSSo2M$v1;iCL-R8_iTA7Salbdb({#NR|YTgz5Jy2;4!`F_!q?1C)l1vMTZpLqQZ0 zV$BuH1pQW@GHCjBb&#}k)`q3M9cExh_;GYKyRSRyAUjkpN41kzp;%Bn=AimwTQccQ z`j;12*!T3^kC%TzRwwFrj-AUAS@zX+43L-RK!&^UmvJy4sytj|8TimRxmw471xI-X z)CZW4$YVjaBmy|kLBF8h*eHVf61;UgfURyKnXo(vG@x_Du&S!EEy&>UB@Y~d)iCY z=ugWVXudaJfvKkmglDrPykwz?&|Q3CdT_)295U#VmW(5g~~ zvmNFu72R{emiD&C9e8Vy6?tfdkRkb zay`8g^Dzypp!%UBzGj32Cp7|o*VW88HzJ#P=ovP=8lr5K*p(e2y_6$YatFX^ss)Bz zT=CW{Sql8PVc|A+G1iRR&lQFblcX3N1*f%S-?qmj4+s;cc&;mZ&o$21z2`bSbGM(k zk~=H698qurEeGLr+=1|;b-a(85HbCx$YdAWISKiVMy-WOnvHt1mGy&RVMAS;e3y`) z@kASD!IN++iTm~^T>j~BE3v&47TZf%OQT4O-%UB&1icA3SNiOcq<_anpj5e^&DjLg z%*#>lg3V4XN2?u>2=6ebtmOGCDcjQ!eAowcyJN{HC=cL@fqn$GSi54D>sz~GEH8{` zF!%GqlnT5sr9v-E>8@Uw(%ro<(Z5twDBV%(xQpgsf_Qhkd`eEe3Xe+X#py^K5Xi0jWWU2vD3)UW@>7(LRBZHX0qrrieKM zz;tPp4r%Ud!6MA3fDMZcuEpKBwy3=x@gfHNQ!x}Y)o9ilC|dJRNQaE2vR)r;Q*u$A~uSk z^-JnU2h3f(``PHtnN_gdySeJAy9PO)B|wyu+Ej2hpIiIo)rO3XYBNK^Gp+%gJup=_T+jkA zD%2^2C>1PpL1hDc9=11C5l}6l2ms`3e}Sl3L~mi_rp1ONtZN8Blow)9GclKThq!CM z7L3T;hqdHO>QP7@wPHI%)`na19yq*hyA_mHNMevv0c=eTQ{Fq~Xw{%~-Z~$J>vqwK 
zTT<*-N%2A9eUGW}Qzw7(Pg*>>AIRWgdkO#78#`ozAI8uT900u)_qg}A9h3X-Y2?Vv z*qsa<(#u_q8yq9rXxE6bw3@YY$J;#;S5LA>I$CdsyR98~tf9BlibAsVveiKAD*uRT zn86xo^=p2s{j3xmg{#JxtY(04$w9r=j=}HmG%?h@(e7j!>jQkh9nlu;7tvO%1C;W0 zsDQ&4Ke5}ybeB@2({6K|S;(ghpSKsYEOaYVH+Oa_@8MFu>s$UUy~`Ck9Cuo7u+kB$ zu@GmF@+o;gqe@g@R3Xyo*2*m;onF7(^mUrMtu>>+(d#NNx19b!$1t$7>0+DE9R9mI zMT;DwcQbiJ3>k83&(FiTPxsm1w^X|K<}D!d7=rnNmw zCyLA!ZK{nDR>F;*s)L#{VlaDPsnkr!0b2|BU~LsCcgN+Jdv-k%l;pk0?0>Z>Go(4_ zB5DXQ&TjIwtbORH)KavS*#9s`v}&7E0w>*`bC7FG`Png-u*kAnLohJ}gelo0UV}yh zA!H;?P?i;pfJ^u^+T})5aoDaBMC#2ThkXbQrm}B9{Vh$952zTKkNvlk-krU4f9c)o z=dSX5AiSJ+>Ao~mm<4|w$W|9{o$jdHj9aZ{zu8Z!)$yd3)Z*bd8P+GoS@0QH9eo|4 z6x)cT*cK@~H1dBCLok&F9edv4Vi~QT{d&YOI=x6SQD{I6l%Au7qu8ZIJ=gm<+{zl6;qDFbTCP6<35by$fpZsIEF?8u ze2Qn&vA9+-4_Q$gstsH`?ezVds1((YT%-D9tJa#++^V&P?VMU`)KY4#F``Sc#6<+RkiH|7_LUR|xwlL(lzlsq>Kf*19%u(LJC|1Yl3=&A$F)YM)~XMa zR<+vckNV9xX*QdaP7ayK;~s^`W%hNkG1l3qU{x366`Wl#w*=57<+ER(zyI#+`$4Dm z?b#(_L0qz(cS|L!dBZ|I1Sr4)G7glBN);s#_Q4S}!MemK^eCwUU~YhA6?QhgWp6s; z8;=sKxVKtQKLK$HoaJnJMF3^+i8*Vd4n%Kh@QSX6SgZFM!_GKKMrs4MNBvH7IP7(s zd8j)4w7)QP71pH*RrX5EJ*`qpPheA4uxi8g6Z`sfIoiy;YnW2-1umNMQC5u zhLt2Tz!9Q)qZeptwR-*b^woQ6dsx`h zanv|66?U;{pqY|P8|{vTjqnx<`Vo^O{2nOVy#~MjM4xzyt*Q@m%6F={syC#NdTP}v zoW~!zJ}|NmOu(>;#!HoWs9mJ^torYgnZ_4y)hrhsCA0 z2_jcf&=3kkZ>H&F3IR@1M~&;iSD=G*77xo(-VmyRU|JLiAjr`X)q%>9Lj@FoBV2MV zYzpro$BZsH0@Q}p(G~F_NGs*K7#v>0B4$h;*oXK`HTsh&WxY*7+<-FRlP^{U?0TI@ z8XrjE^@}B;3*V`?PN=ttAs3;H9m6iBTQ&YQNMKy?9TsQ4Qu9waWl$9z)(DZ|Ns!W8 z8^4vOf?q@h!ofs0UfwLIvc39q0wb_S*C496)3aTHHqLCbZ(qIs;o|f=-f0)-=a(PU zs{;Em&`|Bkde`M+fs)%!-EJyc2{p?LM-ZoK5M@L5P|K3={j`8c;M2Nl``7RyB_%BY zN(zbxLQO!mq(9KC?f1&=j#rlUcx9vYgW#U*m_yA+8Cs^+C7jJ#50ZfMxV9PsZR{fh zuae7F{y`ecTd;BgLd$gV(G)6B&D1BTGYRd*97fNb`95;pjGR4BEc5t zyyc<}QCw{GWOog>8(m*YP9Z>fREH2WgyLKHYQ7cSp=w6I9Y?qwsOCmy*S$Hr z7`(nXeRTp|rs7u4hplm$Uia81094Am_Vk`{|BV5Ph>1TqDL=L=&=Lt>j zNr+pT+uEm$OvhX%H8hVeI{n)Q8XzJC!^=^EKD}ymw)q;x7R275 zd}1updBXufy*8G~Xx3_6*QsFY)E% z#1(PLL41V>U}nLnEyUdkqJ4?MsMmw*=@lT7{W_rMouOG&Z^V%t0mEeHn;i#9yF|lS z+Fq|OmP9ux7#vjHX+fK*;^8U9UV>X?%Z1n=#%|5sFs7$zPeoloTE+_mN~+$kw$p@q zVo@4uv@8;O21~YsPjO}{jHL0L^)k#~gqPFTz&CP;sJsf=$73p(3x%!e+p5+Bz>P5) zbE^x_s0XPVnTi`U25)Yb`pUIb55U3TP_cr*bb=-Y9tHzfQo-wzFc=W-q}&RLc4&Vf zzC(v&Syx~&T~OUF3=rjW1)ory$h0xm#tZ?NFFs+>5oxJnXz?~uB)H13LfF!~!c?t2 zIk-z&EwuHtHIQH);YJ5orWx-1^>6=j(a|4JR&LbF#;Z4fe)azKDT9I6GdBk3@8AAq z@c#6dOKec7v-;6*cMvk7-|1wddjNlfil_zvZFSW~M$}(GbVl%DwFJALCF|i8R`~cw z*a2^Fx`~GgT20Y?D+EkT-uMo?Zr_im7g)BBgKn+}UR-;Ay;-1Psn2v-vJ{CVd zd<7b=C(9}ma^_M_xK+K`>y_&u10Rr|)S~jEOdT=@ zfDvqoUk1(zcj!wB$Eo_)%WV&+pvSyBiyLj`93y~iK|@#K-9Z{pl0&9@)lf}0yt?wz z>8UDGLpfp$!-Ip5fct$Yc=OP`4`x)<5w7Q)B^x#P<(F`K=DgTEbl-r1B7}gG(ZZq`8nJDLt|PSQ|=)1FVr&qXxg+f8DS`w5hGm64Y`XT`y_+xsz#RS?2g00DXor z;J6LrzwKb#7EZVtHKqK~PU7NOt#&s6(+lGDu5Cp8h$mx9!Nu}*WsU%Q463p}*Tg~e zoyyBw^E2yN5*+W}0|_G-(Qe(-3nBM@!V~lC^7LW=2k7C`EeIgE;1U9cD#8nzk-;MQ zygAgf5^?jR=rLh}9_jO_*GE64`o$AEA8tVq&1sQ{)~9_t0<(tNA;(M8O>~NplZtFD zob788wa_9D;do>fq~i6oft1EN1shDd0bBv4xlOh+bjx5pEz0Dg4~lN$*+;HHIZo6B z%|S}1_tHSvk%rljtYN~Z+v)YMSU=QtsiR>sis1rfm?6~9IHFY&<{sP3pZYN;fZH_h z9MBAY*QxmOMUnf%1wRfRzp$;f2wB*2zR?Y#seTal2uhV$`Ufu+3w~0I(LtvR@jZh* zDeu+{U&-fb_{cXLUyl%(0d>Txhw1NyX+|yZTd+!xG$OW0cS+=xJ9I5?aVix?OxBT! 
zGx?}JzC({_t`m$3hU80CHauMwCoCEBs&#ky-Gc1Zzg`KBN63ZYNn18p*t=sYR zSdS5RFM4RxXt^RQOc_L*o8_7;F%nMKt}AI7PmO-xjjeq+F6Dv=%lT&@P_4Px?+C28 z;l;7x1<2k+f#eV~)M^2vfOdaEx!syy{L3ZA9YS^vO=rYDD+VlJc`i`bN~M!KN%D2f zFWg7gEFN11o|Q0zWwkcR97!xlT{L2=sZwF4jM&=4qM@aFuhS#>Cjt8)*)-s730qwB z--GdV%y#{<#0Pn%ozZqZu$~Cp2|V)-T3$*I(~*NEf+{gS0e9$6WB>ClO7!b2nV=-f zzNLN*QW(`7^_pcrRq0;e8#&?s2l84)35RG7TA+KLa%2AkdDrRt%jmfb729VGN|Xde zAB@zeKOZ_iG;et!T|)0Q)vxrz0y9dkM7lgDK$2U_0kx0?*h3sC(&Ebl#M zt1#bcD)vx>xwdK^-fo-Txf`!)=PkABGSZ~xEf>&vq@ zwpLrWbDe^-ietp_(IS@iZd{Jc(5+Etl7HOIPR`eQqh4-7l1^RinWk^k1@zeggwk`d z^JG#qx#Q_t|AM1=WGWWYae)4cWAGesMltvhW^PNI{G03Lz!=`)FS9W4*BtANqonQ@ zda)fedcjJz&jp159-+8nmLOYjl})ciQrNCvz?^iBl_0^|iPAy=0iwle#3|&#Sv0Uu z^D^qO07RetPL~#51c27ubZ#xColSH|f6Nm|(UX?1lVI8@;so?TZl+!hftsT3ZX}*T zoOQq!D(rDCLrmfn13T%Kyj1NlO@5Cwz!ldDLtLvZFdE@{kXUHF2ly;}`{N?cU;|Bfz9_#xUgf8>j4o_3ybFGPY)%7{ zw@b*bPS$SaKwerq$u|5=TEi@b07{gk zxyzk5Vm2<@f+T&f)3jMG4``?%3dd znjmp=#Fqr&K)jdQ$6r80Msrg#Vcri_dU}Ws+0P#R_vigs(V7Y=Lo^6@(7<}iyvv?` zk?2A$&A`H1j~YK`ln3Z79L}3iy=rxQR*oEX!Rp@J%KAtT^hqQ)=J*6e@!*2Glu>eI z0aB?MHqXtK_ne>_&qPUR1&<(YbA)eN zVBrZAt-wV8pm;hsDKAFX~dBe>Pu zh*RYt?1gr;_gYvfWOXlu#}-Nqh>J)OvsSzu4uwpXn#FV79tM@;(FtEQFSEy4GnN54 zQyMJ>P(q~W+TG6-wKeIpzg9oK<`giB>zw`_1bj>ppFotM-c@?4BvvnySUnCT<}QZ` zVgqSu#*x3xgnwfPZK&2)knsyRoR6r^CF<*sCA5Q8YFHn8$^4uAtgNAPo5<(r=#ec- zR8W}nPJV|l!sGPhgB;mF%nrBu3?_Y-Ky*oy`|OD9mIy%|dv6ZSp-rZB0<0_*h(Z`M z4ngCMMyFM-`(_K5Zpp{CLw?dx2w_6zWV;~0DJCsq zcIF7YE9gpA_aSc*2XmdwR&dpi-F*Js1iV)HGwFp8!VWk->U5=aTpdvA&3Pebb+kY+P6!7wcRjr=t77r^hB zgbX+S#f*(U`*6ERr&o$;W0CVC)ddtdw4yYnQ$8fDcW_$Fp;TeiEy=0*!|BEQ)3-+Q z^Lt_^EX?eQ$afTAitb1w*m=>2x*PUj(*#)`>^f$x502#P@HgE_rmT`PIICISh&k>S zNBdjAD+zBk)a<=H)Yx%eddV_Y0Hv2N1MWB7b*I%B{ zNDcV~*YC*08JOq=k9Z8pn7hSPf#C~}42m}ce1R@=MXC1fSx%;>!2T>+%FvGVD!t#IAy*D`NK2L_@222`+z}|* z;!YIqPf3`tt-!?M=tsL}$FXCFQyLc$m!^Ic3^+ej-(+}M*-B91CC5^{+chqsrF-Bd z7QD2Q&M%~Cvhmbt2(-*B?_4C9!G&Up81Z)D(= z2LtiiVs9}hdT74PRiFbgAh6ljPLfe@K_V$?U)r&5ZiT6Jx-pvgjZPB>-3j4bW-~gi zEcL?Z5nz|#S%C2WE`roIoPu9>n+B$nz!BoR$0Iop*^(uiaZtO)q7-nxA^0j5cN%P0 zh|S9q=uT2{XbI78J>I`YMj7J$BsiFWblDBe@DCh+0f`O=2b%S!R_>!_yj*o*4C$H> z-HWlHfV9Mq(j#NU#zVxO^+&s(I0eYi1XHy!lQXrRj5m3F+8Ac(W1)yNKZnM_OoaSi z@-@!LvB{By^@r!&tVur@#%>}G8P-mq%6z=JMworbl44ba&8+e(`Z2iFLaRkk@y$jS zD*X;bEI>&NMu_RcR+(+jNzPCWR>1XiL7f1s>v}9%?t|%M0Q9TQOA9Ek*m)6%C)w3h z;s~-{jl39=R-b_8o85XQ-0BXq=R{bMjaXz;Lfku%D?|zGax$BYlMt|_bKPnm9e6aR zZXCN5=TVr|!0tgv$)Y>;7t)%h|+&Wx-Q(HY;IwJIxc=&i}PF(ED9@>$CR0V9Z zM4iN+v`@e!Rz!1%>;t1afiVj-lmX%}S@XznAdOxl#(W_du?>aPTp@#psUK5nJ#q@^ zQNF-|p#dcq0Rlm2pvY~han7m#=2i6Ibd9}Xy(Y{islW}!VPjJ#;Tdqkegex3^gYJ* zX>_<+Clwm!5%tBq)qcMjqKtdBq&f#xFy(GQIaIpKYP}^r1|5x328?h@Xu}{(+!)bs z;>iXJUJLHEHOIJ4G|MqT+Hj=}4rs7Bc*#n#r+{h_KQy?;Uhx)WJ^#nPh9^5JZf`}_ z$NONvrLE3VN0LnCIRpuIIGEf(=yYJ{U^$d_3)(ti&B>(0J}w!pZwn>SD#&t7)}jo^ z+4si7&mSXikz6=+@5EfQ51joHNU~EGW{+0F2s(97e>wYbslP%ya=rBNeZMN>PATC9 zi}uvsm=FBH<|&CtZma?pB_sI#9YrrW^h^C$49(;gKh2?ObwSrM)g%o`Rw0U(jHTSc zVOP8k(1#E+cT0<%ooCgQ(v9dh-&>sZ6qiB?y{`6Te<20z@Pis0uyqQ9AY;Yg z-4DO~@(X0eeE7?|w`cEvKvWQn!CVUJBp_&lsggD$ux#uuW*j?Y&tH;_ecXVW2#Nsn zK8cV>p3vhQsN|BUxctAe3C$FmQndp{*f68N2zQ@;NNH#f2N$Rjm6HaUUqz3!A_VFuH zn+PHiNyFHq5h6fF(P(yncf*ekNM~ZiF9L~AKp9P|OozP=(P6Wlu~&fKn5j^`X(z>a zLE^9oxws4{(c0Q5RG~5X4=L{7~y~ zkKwv)aMr#*gdM6;(oCb@sb_THTTcuJWx-@rH(yqX3C;q=+;~$ouUnfIMV5`dk$mSNmeh7VUvxho=zhBRS3uPFRCA#GticuiYrs5pW zE`!4{x^Ws-X`6l9yTM%;81`Xs_CcMghkl#|=1d^%0&>gD2Lzn6LFN$5Sx^P-E26@hb5;E2#so27%&)Ct?d9Elz5jLB+`dbq*G@~`$L9M|HV8Bu^)?txnt&PE^>cTDpYA;=#AV5j zi?-U~7tW(ajSp&BOxJK_o>FOWXlY?UOMsO+y~fxZt7`a2PB1ycHam*TVz8d?<_unh zc*V-T@`Iu7+q;{yWtT{V=^5y2v>uHvk!`P?F<{;C+X82UV$Hh_hJ(C~qR7g!^M`;A 
z?O^Hx!nd6OZ&2K0#CH5ff*D+{o?$Xax*dL6&bD);h$>GNU^jMQuKKNV$7h>&o-B#s z-8Y@CMyDuE3)s&x8zvm;jge^TAqWR{;^)4Jf(#*}$=>&ij(BZh?j{-DrT3XtKqk9bn+1LeQE>j>>3Z;fF0V3J(AW8#7R* zU_GMc?3$7Dml64g%=?`zwP6VZ?@$Vq*#Z=z)GiP1h#gvrKG33zkR<4-XYb#f{sNMV zZlX6AWWOI(>W;QZo1BI?NN3B-GUgCXCqCE`62In2B@NlEV7|dm}4` zI*Vyd;QOUTCCEFZLIr8)I^5coLEROi??K*Vebj2R&U}sg9Tu8M@RXqMbgvc7)p)Ar zOR*_>E^uO~dwRO`9Quqg!xNpr;adI#FrFrbIU%dW5o$X|6x@2z&Yf3rH(rfG5^i}G zROBomR7ShQeBlvB?rwT`>wgqH5k1G^kR8~J;3<{8v6zC0DsC++$*0rk z%E#lm2r}(%xzUG0L7j$+%6G5R`QRWdwUBysCwh!>C~_WuKJ+h_9Xcgq&J%I;99H#; z!5GJLrVW4c#E9paZv|n9|orv7f5B&6K|UzFE8{hYVRglQ7~>$ zyV2M8p95IJ@x1a9SAU>&DE~%Ajj}d*2Ks{ZTd8etw(-j-qq6@$FoUJ=m-=ZIyQYkG zX&=-+wMl!ua^s;yXzc}MHUnCu&2lRl8?g^&R(t&2>N_d-nu^3dizdWe@ z$%0x6QQOzrhxAQ3yW4GbPN%|)N)qEq{w<^orZ{IJh; z#7x$avzl|fIQ-(q%w#>Yc~9&PmuYtD{Jo9eYwe4s-%vny1~6C{_pKW}CbYFg$6=H!S>9Zxn&*M(s1+GBgMJ=l810P!5i-h0He4sh-bnD%}*;|sa}w3|r8bh%xR67nNr zNX05yt9hTJs=rA&*hD`9HO6&;1}nOdVm$K!B_vr|3knA}JLEn|K|r0$Nwz*P>apsC z_2HfU*?p8`#PB?Q2s|$aVaSSAeGwKgzK=3Zi!7-`mO*0a&L54ECTM+Rrzl*4Bbh6D$MVP@8%;V;Dzy(Y0#=auTE=HOmhjNJjc z*^DtZI_S>mul*x;AL*1q`p*mG&8(J5XvSbHQx0i}CbSrer*4*5a*>v+jV`EGv(x+? z*q#MjWc;R3`C~buTCLUSTeoVBg?BJbxd)5~XCDB^u(EGftO1C?EFsD ziBYQ8ybDFMw0gb z`_$-5vX01G4&|v!YRbN{@$MdPL7AHT+MO0)HI&pZ{j8Pe0hUJRlELKrS}u+c@@J+C zgt7nMu)7QPw+U?kqzDd12(())2cv5!U4NwWG@{G%H|GFr7k^RwlLTUsRqi5U-(AKI zumIQa;Q9;#Q^5JQ6psOOpx6Jgqhv1?eY+4rKF#N>7a!4qt#hk>_WNFc%d|VqHk`ET zXIAIJMM@T>@iwuNy!+3)hISzGQ=txZlTB-yXEEyz${B(@|xnN zK{$x6;uUw2Y;KZdkxj3Zf?k;Q!;V5MPC1S@vB?L4zl?e0)!Vc0-`lCz5NcMAeth*k zhgrf3xR2ny;B#8NwI6tC$LY-D&y*^OTM7Lbft!`c?f@JN>BoUC3guwIRZh?Ue0uTS z+w-5v!rkt+QF(56JK3x|T1Wz!hBw?{6%SGnmr5_VAfrU_SZ)!_LRiIDEF*#^`RoJD z*Xf%>^(K3E4f<04(`LTuc>%*Cxf1Is+{9l})`}9!CR#3{H83M4CII`0qxp8WnL;IK zrWSY)_W_?{T*DB!iQT9zvsx#g;G6%Rtd|J06R(vxGM^u^#W6f`Q`W^hYu4q z`yglef*kweE(axTI>tmj@YLVLnEHTkPpr3c7xHeT4a#zWa*z^9BpDRY!N&HW1A!Mt zon_)J#yX|iKxyS4efurS0p&%N3QpfWS9iD7O3dvg%b2NUnVeYq5v44JLYHBO^=ef3 zu~+MG8XaH|hy!^!+RP}(g!dGY*)c26E?Ti}Jc!pcYkObULw^>Q}D zh&f&R33Qh)SJUq`ou(tM`O5&U4anG)tj?l2A3D7 zr=dhT578&*-I-L{D7S54H9t%2IW0 zTTV9dBwK_X`xe+rJTY}jpE`l3%kdNN`Q7bci z?TAFUAfq@k(3ZPC(ajO5@zF&0eeVEl*o$v5{`y2W^39mRMpguC=(bmjyP%1^73^lP zzo_A47GJ?Fwe9#1ahvq&oi+*}y?Vb*#YZ2SKL!$y_n6AcK2upKp32JZsjTcbl@&de zF020dY2@abWEve=z@s$(?pcA=@j!mE+c}I1m#NXkG=g3e>UlPQmt(BJ!Is@Jp}MWR za`V(_ef9m|{GB=jF#GG> z8y8y}HOj$gE(Sm+Q%wCEFs0zCf~6Xd*k-TK*%A-F`#ydmU>YOEE8Up};F( z3)n|{R)E7n0rsd;xq_VLXOr|9_WCT{s&lS7iiVj>k0>;z9Id zQEQmLT+U!4E+ z1h&0bF{dOH7aTS}%^9y>3TQq@u^}WYq4(-ASXH z(XBPJIYttbyN;*h!N=wBzlB&o zQgB;z48H(b63+lVQ8&d~0*B?H0m8ij#_n?PFFwV=y#LT?Jx+1B9nVAuRa}_Cas`6S zQ*5rUfG|=OB*hom55xOv*T>u0W`Kkbo#1Wyi>lCdyU2gKN_c76g)mZ9hiPP^>+Rwr zD#e^U8=5t%j{OW{uI$`ow?F&?pnOxss?+P`O-_e8D%=Ey<1EA*r9lk08+FpgWiHl$ z>?E7Zj7i>{Oq6nsbG54GGA(OHQ%8FU8!VIHkgUZ)R#*z9vg-mAqg3^qp+qr#Z zrp|eJMqL8?)*;5w;WhQ+8|Dk|FO+tC_GWPTmmg0*oJ6IPL?MosH;VyyqSZl1RSIbd zp`;^61qUK}hF1{KL+!1vN&h)_SR4Ii}Hag~6Zc&2%%m zj?{f6!|3Mn3IsM`?)UxR!~ZvUfBydTIrRfmN;Mu2bjE7%nGf$?y?x6M;k@tl)VtF+ zXFt8;$7pr-dhYG3i|=KKxP+mnetRTP-)h2&R>*?jqb;1wl}4**wj4b{n)+({UyCP3 zSC7=Wj--{n&J>otL4=0aojQ!rf0-z?%=B3%s?>?=f0#}buZUiszyI#+`vE5(oPPV$ z_d2KjBKdWjAatH$1p-r|vGK$POn1@fF7h)fMcp>>;txN9;Bof;yYs=T%gc*HbbJi{ z4(b*F9D(r*7_8q69VLK&lzVft+{81uOJj!{gNvUJ%{DJbhKZE}Y)vtt&}HCuFiWOe zr%#)amGhVziJ>`+P<1mHUV+nk7EkAc&GkA-p^R%0Ta2vWoLsg@^WVb+&%PIN$q!lW z%|~G`>0kfGXOK0YKYAZ;O?{hN5bk@z&0Z-yN0a#+Vx{1)AWIxxj@A)WFH z98yr2M~!wdj>qG5%73ZPoUR1u))>zJ`OoOkYOH}{V3sN`lg|J$nLxbGYNdWa8!;l| zJVG&tC*$cDQ7u&?K)Bi<{FRcSB6&ReYWyUZ#Gs;ljF{3MUD-F7nEJJTD`~W9jeet1 ztqyzR_OL#Q`*E$AM@&v9J_R{9k!5rm4Fo=dpG{Q$jVX@;c0eu{hfBJMS4wc(6w{Ce 
zP-c?(sZMv@!wvnJLwW$CneaEv5TkgwTst*$v0$D=ekoo6*dd^BRiF>Vlq8=e)^U^A zo<(wF_sZy_NMIOTwgwOo##8w3B}T7JQ|wE9Id*C~wnEsjFc9JZVQ`8@+f;8(p+cEz zJ)0{1j+~^hkpx2&FwsDBnlZkkrAQG@%_=4!2!yQ0*Fbn)v|XW|rHGfoL)RF5j;pj? zL4cDxeGI@X(g6k2C`E8RM;9*Qi(*e2%v8`wfs}C$b-=;dxtWQdldsh+-7P*!V}PEx z=E-6_aF%jGuVTVl&ehD=4FnJrPpSdn3BeC)jbvaqcBv0<3=mmO=`i@0V>Gghc?y`j z(?FNK0Lq7QOBAD}W*)+1hRfwFqAFU~(@n^_M>VcKG4P*GCp?!gjv0tX^m=qUpnqg#~jEK=d=@ z-)xb^4VPm@G1p6p*<^n&gyLc7y1mxuyuVS;4gk-M`*c186NvCF#m(?wP;5sx7m}dW z9#CI}OnUHU;r3F~|U@kTI09G)8Wv6}Xkv{uLJ#dZ`Yd zWSj4G4qWGP>1#O$x|7bR*`Kugqn;8f;%=iC4|}y&my#$A*Al5EDUZEgM@Iu_kZpkg77d6)+jF1&@>Q?y zY5=4-dv_5x5hIe_)%DEE%FN3AW!sS-DF2wHm(XQfn0`$bAxl(ScOxDL;wMfw_SM%S zilGS>>JjHFIu>ZR$^PekFBZrEKS030#S)ecFPA`4(7TgeKIABQi|ng_V<7K?ZchpP zUqw-HG#;Y+2#*CVp9%~6M@ah7ik&eLq&{LOq4<{wy*gKva{%wkkkv2a1)o)D+2wRG zSZS*Gn!0-{;%LmaWK?^5|MjFio(KPw04)4-&+s7((T}ESe5l{OK+_y{KnhU(a13Ge z*$sIv9K4XCWalMG2C>%RKn;gL@B%w^5nf!%)!^wxc=YD%{POr+g}&R{dtTyR?rXkA zzmOOGKFUa3PIZxD`*aUAnog;3Re9{HHT0{%IC)*c-ZSdt%iz)@A(VIte?Q(wyQh30 zHB#41H858f*4j}I7}!P?Q3)ybtgnQB#>4ehZW=1O6dfqfHh1t@CD4Uc4-a+Irp;fi zwJ$yir7soOGsybFuwwNk3_qTn9>X)$P=)CO+JiE`1_EM?jv7xau`hFWZ{OdCPl63) z^tJG&L!5w`4hRs&vTQmhGy2FAmY+D=G+T^i+HG^Nr^TZd>uvIaI;yNca2Qq{i1#pF z&PfZve(fN1D9xl`WSIuI3T5SBljDtCTJJLF|M$yEWb)qev{iE>jifCW(8f?RgRRu~xoWqT8=qSM}y40xZB>IDcZsanQQd*(Dgo^eAX^X0%_zApv z!XzJ8gius{jXEzP{ac9^gMi8?bzJ`*l=%Ltc^gf?QmUyX5QXxL zN;%^WhF*Y@LsABYH{6N_avf0`gm^}^SeSU67x0Qw$x>b)%!!H-8j~aja#_U)ueVMm zg#w0gDD}2Qu3OD+qvrzM&gLT9Ac10EL%Lz*H8Lr}wruxoR_{G_W#KDA`IM6G!75>T zFBKYZ%Tl~v=oY!UQ-Xk9mKKb5fVI5V>gVgFHrF$!RTC}C3`ED47j)Bp$%X(!dcHEn z)afbg>7brXZ5skw1ooK5%Woz$&6#?r?GBzUE9}~%qqNjOtr)mwx5a6Cx~*oV`x}sf zvRTtAhAPu)N%b_Z%;_~|qOp@;$ucP+O{fIe-|?R>k54bdH(%brKR!nf?#sVm-WjN; z`g%zIMVyZt|KqP*nqd-iCVV{;?^+6!)a(w_&gyr!?JSV>?=zv? z8g>(C!G=*gn8x4Y>E90j{|@_~+F^7Ac$PpN3ERtAiN<>ME{JMyJl5-5_TW7UX44Mz z>^#jrlyAiU*^OA;Wh<&a7q5lcr99a^*p@Wg-CjOUo@V*pd(yKd=gZ_ObhqV(U3rg9 zd5=B0WlQFFlgt=^LLV zsB!a|0jS0Xenv(jh2D}!UCqYQ{inw#9@n4 z5yoKIS}mK`Igo1SX2|oUpdy7CWWPZf9r;leJ!uMzA zZ;!*bXP-WuU|f{W^tY*Ug@Q=W4ka*^D4{1nlRe6VS8&=OfoXEs>8Qye+uy*x^`$*A zYF(h`2mdN4h}zAofRn>xuefM+Pt@zc0`V7JLxbI;rx-ifn@Pc5$sbj^b+d}+K)Y$a z&`l5XGW0=Z>ObCmnR7M=TdkS%%|alL{Q%WmOW{!Qa|L}_&B1*Lk~s1op2lUo4NBa` zFqOar;-=&IELM}n?oF+JyZ4yHwfl7JG#f$jWLR_I4lmFcNUeUKlEc6#yZuU|^LWD~ zZo)`tlDab33KuWiECU%|-Lu#4NqH}`oPm4B;@Ls{6zpk@$esO8EX9|EQ-U!fvpM4_ zAu_tUFctJU>R|>M-3A8dc@-bwU$ZfM?pjo_RXAbrH5?iZ0fN}Q3uZ!|zOq9EWpVi& zY%vlRZ|3t}oiApDfOF0Hg=HEt=16Q$(=YA@8;5y)O0tS~REJ;jPIIoGtI196Y;i@^)(t zT5gcfch0TreQ2Xdd*3PQl~_tj2fAExiU5({YavOka5ivOfCLrafBN%%;;BG;@iZ@HT5SpG~I8h~o5}Jd<0_>HuZ) zlhd{O*`k!xXPA~j%L1hhvn#OoW~xlpp- zynYoRN(K7we>{16$?abV3sa&3D~OmySzL-r`f}_+wLdW!D@9sKRU|@=@zyC$u3}DS zVcm_VLrE*I?*7!t+iiCdQ?lG`cU)Vvxq`)E-D`=Kak`$OdIuz`FHFcyPLPb~_GB3^ zrqP|N&(Z7&sbArj&!0Qm+@*wQUZX$RcGu_enbB{^g&&oqazkJqa2F~?{aZ9$bJiuZ zmScmzz|JJ>NSK56!bQS?5(E*gk#Zc7=)g0Yxr@J?z73k4E}l3q4kQZ|RI`?Ig7Q9L zUD%6TXphn#v4x_tLDQ#w>9&|%DCATn6dQ~K=i{_pQ`KQb5zNR{M9JQHI#u#rBMqzT zX>`Y2&evDh>W+|z7#$|5LYi$gy;)OA3FJg>09_-{rv|+tn0rB^FwRB<4odir#b0QM%_WLR%;TaHRqyyO`wBqtkc(&tJZzHI?Z_Q3;At zUsP4$dJR=EmLs2v@PO=#5673``SH=a@YBip`Pun}j(tJ23U)jktr5+c25=S6VGIa1&M1;g z=pnoXW`nT=pb}RwGJZc;trt^4rKDO=XjtAv;((~#Bu2XOYml^t$(38_Frk!!1W7N* zx)}+`TRs|dd8y-NAd@04mIkD8l3XG1KO(kgW|b>Q<{>_N87$`L zz_-*BW}m{T9`QLnP0=ubWKB#Wl1OFb&C~{P_?aG^>KO75Or{t!_(z4RTc>OCp=UQn zlXWlv$pt@B0dU?j>xe4|`< ze6-J5cvQrU6wH7V>DGe-`NL$R#jjPBzhGG)W7SD)gcEoN2k?8v-vJ$uj#aTH#GN{j zSe@C@{aea7-yK+coV^OkQ@gt7?+QxnG;U6PWm;B$IZb6A&X)85As%!vkt%{H8l7&K z>y2wGq5R><&bUabAtQ8sl$B|L7h0wTR4_y{hM9Y6GoyDW7jKWw--)#Wm;Dt5OqpSe 
z5%G*w&^hNb$dR?O9MBFRb>8=!E8;`x~tVx#rX*+HP$s)mGCF z2D&ESy6q510@2P&itNqAAV7!RC>aN2_1)h57<_3p5xkKU+=Kfyl5L}`+Bhg~Bv6pr zLcN&EWEt8|O0ddW-KG_%1HgxJgs>*CVLlyQ{84N(+L@Hl)a;$a6lDEzP|$E7#Evm! zvb4hpPXZ0_gmOV@@_SHr1r9*P!ch0s>e{lLZyv+N<@w3yV?74op~wo5ALe>o&2knP zaGrMsuZX$n<r4_;MZ{W+1bap%4g^c~iyCZt`^o;)AsTqA5>3^+F)C-lN8b zXEn_iuUCbl({AwrALB}k;cBR_XlmpdW^*NX2yVM6=A@vwH?>sv?ny2#(q|CNH*Xq@>>!Y(rleI;|Y>DqBJ~l#|SKNm+>O2Sw|{q-`=;8ieDmq}k~ziYz2Xt|?2A z3dHz+XUY?kjnrAfgUnp$dw?&%9^kA#bhC3Z91ZIeq77#Sed+(fI0a&KFd4G4yx6Sk z=tTp{cc;yk8;V;i*h1UN*S79PSKI1{pgb#-{=b%%Amv{RMEot3Tk|a z0~Yk`&Oen5z46Vy2$J?~dR7}Zow=!yGHswINKLj~QIz+AeOHH0z6IJE6qW7;uV@)+ z!aE(dN-tdAYO_@PHS^UhceG0wp1Of*=mQ_C#Yx;zE!eCP;)SN!z(uNO**dUu1_-h? z;Ch&Pj6yYAn&358$$~N)PV=(0Al_5lZp@tIxLYikMr%{Uk#(C&%6RK>n4}@dvXpv8 zjm`hMWIZknfK#USYoGmHU5Ryvy{O)qbVrkZvsUZHoyMfuZ1pF-<`&C-?ncbKp!?@m zrP&C|9dWj__AKFz^3iI*+}4w*uz$Pge6lw-q}iVFoITCkdXEs6`#iBnnz) zANba&?;cZ#Wy5eSMDU?d0t)YZ!k?VE&E(8OPHR>x1z$O>N#+`(I$q5-v&L}t_!kE5qAPIyd>0r)5 ze6416nCQfO6cSJ5((Yg^NT(&CAdh)4IA4K0Hj0*GE%2D)@@5A-B<;fLm{8dq$>f12 zW40))@Bum>2v5W#;xVngB%3|Dx%beH1V=uGm|J1ATt-{^==qWl^2}IZimw zw9A=fCio}s*Tvkz5wM^HGV|lc;U9qXMbGygRf=07Qk1r2+xeF=tN2h2~l%FJ`|cKCH$M{K(~>b8gvnb zgSA=m70G_9M$Af*Y8R&6szRRbvlPeoUO&f-A4mEHm7*JFPDUC;({zrWACy)!zPiC= z9f5qQGjP^|%XoA>o7bc+tY? zx)EyB`0!CB5jq|uV_X+LaCp%q1?k0HCKmm~+@&+Ch8|@!&uP3K&%MjfgHs%UC^!*L z&-5#%Zi@`PRAg(nBq5Tai0aFgx3@>|?3z8Qs$Ff z75w>4&imK%v4uql3fdZTA)=2pXe9~S#tf<+KhK38x(riU5;ZSl^I}!o1U&>)&j||R z<=kr$JX5bB`8>a4ly-6odWzVS(aPB5P&$3*q{!U3B$nENz-k~{p{<-y$j~>Q=c)uh zB!t|TSVJ8#W{Qn>8>YRaUohY8w@{_oz$CrfBr`>Ue(6jov0W!e|6a3KY4pB#Zb{bk z9b}cjJ=1Kl-!SDD7k<24u?~t4`hhh@xFVDbIp1)*(^x^j;u(RC|GCW^x?a+pHEA^66(4f3C%=m z@FBp2ZY~FpP{!IIgXP38OY-6x9xQ&b zd){N2khVY!X_e@Bhk868iPW4EJIcZ_+)wXaTo%xkQK-0|Ml3?LxxSS)BwWm=$>>hU zyS)AB%jq9M4EcEWmTW2S6eRiPiTW{c-i!H&CA<~wKu!7?*M}LVtZruRtDS_JZ1^U} z&+@J{pr7=?5dIaRN!Nk5qxc+{Uox)^6O4ac;!j@iEDhyzgxP&Vb&~3$zjD%6mhZW^ z5Efi?uptc@X|WWGibLlsONN7$Mdd{5jc%vTg6elqvdCX>-p~~v$E7`&o_|wcI^vgf zC=gQwH!xq6p6jXJcW;i3AB$T<*$VRbLU%mp$Nv$YeWpzm9)11{TZx=CfXY-dD&(@1 zo(nXobbYhnC?PqT1EWU<)96n5m)oD?G+ak=PtoXhyO>rTCKSy)-A9_yTo`B7N&PaK zFUO(2{pVvo^(B|ZO3}|zyGqmXrp<;#^+@#PU90zR7j_V_T@o~@@3d`lBn=&imm23N zIFP^bg!i|_O>J*R>BPw#=}i$@hG;O?ueqS-D|qRFzK7-O`AkOzDEXwsBww-To>;?f zwOCA|BGc$rC+&TH^s$kjockuslD}a3+9Kb(HWn~yLx!8Rb=y6keGDa~} zitK?Yh5#eVOCMto9N&a3jz`vv=TOJ<&2UlGK7sF|q^GF=zq^CXgS`{#^MF-rU|w|o^lu?+fit% zi>OY`mdJL@l*n_=>_B6)Zia)`2)hBga+w-&%5HHU{Cs}){^X^*|N=SEoN#lmB7?(vHxrlYrmt{8eNH_c}`>~8O2JHEh#RFGPD#%NHCV0 zd0Prfk{Le6>6N6gMe2@dAhsU!7DCe6ctbT&C#fS>~CE6;uPm3uT)ok1}=NI_NQtyNhwU?6eqPs(Nv z(Usf!`Y>00+4g>OixLvZlPW>mRV8zs>ZbDiwPaE@zM#%ua1USL=t4()ztgTXQGe)n z>)G2kj9h1FcSF6(q`bPKtGbLEO5hr3*+P`SNd{tc3|i27Rni}70hKOmjRWVeIae|5 zQ(#ppSA|pnsc$PLW!LQ9~qI$awPyvq+LvKNGMV@AXXCK=DVTRq_pQwK<%Q zbzHP%t!J?Rr<9jVnx^|0{AiPYt}{+_I1i+YihGbpc;|@j{b}GGv8uU zAX?3Dl2M3}I_3qO+sd7xL+H$%4&T?FBvj=N)hmUd`DRHHc1|%}2k2cJ1H&LfM>$Gqew;WF!G?C8!Rd*l`agPLx)i`|1}h zvqcq5l39{o@BX<>g3(O3PnG3HXaHyQ@3Kz`zxq%KM+o->vQZ=J@RiMy_AriTx#;FO zUhq$pd@gW35|3BV+XE({Puj0(V$`|;mrQ|?dt*GBJe)bg8XggOmFH3fg4!~7!f z4R5*^tZ8)H7z3j840d+_CFc0gm+kQV@zLd%bHY6Pd~_Lh+iy-T`4V}@4^K|BpMrML z=+qBhn0|AgL(VF2op8tL8HcoFhPm{9a4y*yB;Anr%ZW}1k@@&N+I+Gf7EvHxN2@pm z_nFEEI*uEo!MG7MTH{)+)#(i4s5`9p`i(+5l<#w-D9;Hiw%MT|So!ICxiLj9rpZca zdC{sQDN$iC{2QHPR1(pM5&RBx24r6FK`H*FDRv*mI@RR_a8+&|q$Uj#&3LQNXYp
oX08ns$C9!wg8ehl(x9;wp)*j-I| zs1myNIiV}v0;{=(bC>X6=YG{>3I}1h(kKO_jjjl=tl&sSxbhT3;xG+1E;PmAaYAcJ z-Hl2964<%_&^zZ3#29S!HtL?LBHq?LHVE0jIMA5h8=k{}{6wp!b*#*{$7}Sa56K zngIw-zf}61B692c=0kQB*XX(7H~0HLK+#vPpwtg`o^^~&lCwqTi!Q~6G4yW%XOvoF z@vL6PSJ84zAZ+l}?R8A(4)D*N^_@)%WaL`ag3CFKi{h%pMO!VN@q)78bQnzNhIV`^ z-d+!G`e$7)SL9TVt&KsP2ZCOM{zDUy>zhHIGI-H=l_Bd0%&o*L$yGk*0~8%EKH+uv zJC1oa)xHXz?(l&lOgR7)sW1+rl&g3aFO!kTurxRw3-dLZkA@h1I1+Wf-D*(&o_0%l zTS%1-Mny8`08YH*Pq`KRgImByuW-Y`R8|S%L-Rd_XTu<&$_sOa5I%&BbR8`TUug7I zT=4LEbs7KnI$5&q5p{;eI3o7Kn<)ti=7xeYX5$wKD6(8!lcV2^VSmU+j%X`IKwGT6 zMt009b*OC!OfS#}sR4DHQ4UBRDJjo#F0EjUwHf`pq0pHYP%pL3+zYHl@}gcN`- zjhDZtVchAsPv9G>DDbdWG2tq5;)j9+q$au=h2{!S=G1T-kzCimm_-=wCR5prh5-UlUL~rVGm{Bjfx=sx+ceb~l6h`&bhdaF7wVelrK*D*;7(--S7Mri2(G#)W`1c>GqJc@X+lCW-ylX^x)e z&r&()l#E2e{Jz|M-{+f(*#Z2yr0Ot}Exh;S?Cl%q>d!|fmnu7l%BFSn;ppVl%3VUT zDoy%;B>7IgzRRR1mQ`WOqcdIu3UQg_=7NU)RhnWvRXmc6HbKxs;$nR2xVJrT+;1@&f%E4{PIX>$4 z040uy=Ct3WIUO{A8yaRsRV)@^x|%OU_q0*ba6?)HQVM03ojkSKDD2}R`qfN#-`PerP6#2kH3R&vN07?~_!@CRTXjLdW<(DKMdf|o%(kC8wsBrz&SO_e9g z@X+lvdcA%p8ch1NTCd;j^y6k+k9s4&o~pif6;<_=O<-NNfOU(Ep-l7f6{fV4D<@de z*~l{fxeA&|N;`{hbp(@Z07bYFd;O z2PwM=VLEtmF$BE^`UY|y>t!e}U(ctwLNE*Xg6c(p-J2!abtP^I9_|2`ZyEv7(i2)h4@QwLCGFi#8E{Hhj(FT6~yu3;e&r^(>0 zV2|!SpB<8Vx(|qd$I(HyxN@0PWN6;zNsb1Y7@>L{!Uv zujfE$B&sUi2y^J+PzdFDuF{)ueiBm=41lsmtQmq|H>$GkNoS&997kiCk19_P`sWUb z{>O|vMO~muSb20wO}>~>DijP}oyIFl%BT`ItgaC-2bOOfa4mK(X{=Zwn&pQ|U6YmJ zu}j}58yCK%I=&@y4q7zxjSDP5`#=MUfWu%q2)P4*CV7Q3yI{U};cN+}9aN7(7(y5T zJ62;Q&mF7{$Y7K#N9!Bp4V&Y)#RDF%8NxYemy-Z`g%^4F5_<@jRv3jzc2q7y9 zR)GDW8Q$>(;M#MSaQy{apIGI1u1A6FfR9eb)7hP0?wQD4&RLAv|AK?zHK|O*s2lA8 zh21oonIvoG05e&l`7rYxGPX(1;2p=9vmE&J@6d`d!LS<1zVkTzy8Jl&y8M{@x@_@l z!{$P~Hhu`lE_2jR+nIkq(=NNvL#zCEISpA8j=;2q1nop_e(0EM+?h5kuj}l>G(kkM zsPL`x!NDZO71~#EUIVfKH@Ynsdx#2CpBF$5EBi*uCyf}xzDGo2!o3d1@ie*v;mf+r z*>Uq2D@IVXh3xC#l1d7W4VK_73bXRev<=VqQRsFuh!3y#GtPx#8{%YsSRsgbL&Yi) z&IB1dJ(kl>K=PEa2`6H$%znlHvClQd&|(~^qDqy^JwT%o94jxV4RIvt_bSZ>yVjfExE4#-NLTDYNSmr6YPiAQ}6Y?X;N-HM6Ha}<8gF7y& zF9=ql^;k?sHlVIe)ohO(UR&+S&X+KDc<^=_N6XJJFoL>@$0eE%en$$XX-yp8-+cLi zp(Q*Stc_8?$D#ESt^KbJHzlA{t_WL}Zc8Ff(@8IBH7XEM1p6&8r01>T-6W^tm(o2l@LnyWAqqP&<(Wd%$+@Y9#n*9?qL)#$ z|f0UG(Zg3{Gf;b>ZKw`k0oG2Y>@t7wK_zqYXnh^Pc^ zpTJXs|3z;x42(vo!%?`92Bwf!tbrAo5v(NL(rrq*PQr@iL&tZ&8>DPCFzf30QJU29 zU^tO13Qr6IH|1Kk2D+r^PpbS0o$Y4OG+QcEmtSZZ32rb52kM;7N!z+wN6_Ma%%yx*?r z^g_~jW+AkBN-${T@V4p(t_cDw-7brX$a=}e(RcyF9?rip&)4=8xzwd)W%E@!zd;1O zt7K%18<#oz8bMT(NP{y{tJUO&yqY&OesjOeJ|8ymKbFPPIPn6RE zFQuTdL%N!MZ@|2n%+i>@e7_X*gvCbl-z8r3gscLh_N}zX2$xl zrMDK3hAR45ZAO^hXm%$mk@(k;*=^>UFl%CDlOS~zy^<9ZmBu#`cL2+ChV2V0O1o+u zkILI;8Iyc~b|8})6G-+39Y8TRBpQE^v~lN9rY|=6phX~=h+&I77Px6V9UFfXN|!r> zNhN61n{DJ$l)2><@0AIiHi&Sn-u$)xH+cC&^uuAU0;`ys0!gVyGr=iZ?c<+EMT=~+ zAckJXEzG^E#K&yJb6mSj;j-d#4m z;pZrJ={lS#5h}6CSE3)xDkO$JiK27lhGw#arke}_z{WYfr@jWZ*3@ZAaTnN~>m|B` z0AmwQ6A^;LElkvOeycy)%h~FyuY<2QG+h28RLD`tkoXbvhKS@`dO|hjsxs#E^-A29 zZnte_b66s~R`zXXkf@ci-wbnHIs77?B5zRIYI)GM zA}WcL5@HV8CAzEJ4o7Z0?qoj2Fbn=6k~RHdbQDY3&+FoQS^GwjR&UGiO0D-BA{>>+ zm?LcePfWo?$3~!kGrB*BJjquesS*{Xoay5*y{y9m1=U%V-4&(L(Zzps=;8aW(A zq|Xtr!Ux3J_!Zd!=vT1=@MDS6=NC$0G9c7SPl7{x6o;8ryss{}L9<~PNaIBFErwR^ zFzwM|Iln^dkW!VOR5xlsN&)ifKcH7$Tw7N%lDrprDjqMx^%V(8%!t3n%UL`PV2wCB zA~ndnPl+VZHsq*svJ|50ENA3!OCaP3;FG_fbRx3Qs$ImK( zHE+B)+IsYklu@Ji`vmqq%P^=Ga;q|Gn`}g=174EjBRYYa1c7R=N0An|4o74Vd3I?= zin0NlO`}!Cmh^<9b0wM)6Q9E~3OM!>ks|`Ck)$3L03t2r$fXor6|Mx`DxpB9>GRGZRPN+Vs2F`zKedaEnPLIk5zGeR=yEa{(BCQ!ioH-ovIH2W{ z}`GK-#tHRMrH?N#DZ|K@3Lh*2lP<9h_bIejga&rUA4|6=d<4Fs?V2IN6QOXDFcGIvA;J*CcjK6xOEn&AuujFr)MuL5=lrHj#&YopMbXxe 
zXFibEd?xqdR3j>GcxSsxZjygQqE@NFr(cr=UOT`*NvU08O7Q@}6z~d|E;;$iMSJ;( zGr$uY@VlU=jvId7gu-?cZH_Fy+A;P<4kncob!l;YIE1ry;1ZnvOCX!-Yvs0R-T3d@ zNWj*)Reh~2phC-y6o-9&2;uN=B8pk)lGl4bhWf5{x*JHF%@yi;2`ocP=HeFW9JvkfV zl(kFCP$BxFm!9jtfeV1VNaBZjRD<8x6;$pA6UxMdH_NhjKD_@No}RtGFqXFL$M@bL z8;@m)I8#PU%Q&0i1NOL_dC1IyRZx!d;N5b*2xR2u-sG0;>O|o<5xfj+CtIs&0TwYK z2h+?PWQ}}I1PFnU(7edP*z*34cY_@0)FkimMt_hEmj3<*oPeh6T$4StV8R6w6}~2K zPbvy4PI&`XQcbmr6Xdh1nwwg(*-F}zX0uHxR-5%kGZ%a*m#mybO|$)U!+6QRl}4AJ zC7KUZYO1frZo)>48-SI5;$Amh<@@N(Wu?AXgb-b)^oLv~mG5;?t!pOS+R)rM#O}rM$taQf8}4 z*<)3)mU0`PaZgfObLFt5PC53h7L)-PF&dw%!30JKJtB1v&4J0)dcIEa*60dd4#h@E z=2NPTR$7@Blq+5!RyRw?O@$o~2#KTFZZr!74R6ObA3WPUn+#RqhSMHS`a`VrTRaLD z@J}vCe8yyxvP~1}Lo7E{X_jE{>%|q5t&mEP5l-wPd@56cX=J_sC1O_yjGDcYGrL>w za4>qiF{m`_k2QC09K1VIIv74+rh~#pNI?}}ZOmx7u|Py_2j&?Q0YQm2irl&Ja3;xc4ijXhn_~LXPlCZOloXP0aXUawPqB z7T-?q0`@FJ$Zo@O$V{Zt0>Q}uM1Yq;)2aGYKn&(n=d+r;W=p~jgmij#dW?!a?e!nk zL4J=fHGGAaO+b}y0Wil| zpsqCxtJ$EA$d_ohUal#$gB)KYn%)sdB6%j+YIKG3f)~|T z7#*BYwG@2^^pV>LnD4%ITcN%aqb5-r3tKXc$&82;WHacEx>%?M7wgfr`c@~8hvIAW z*q9I`M|49DB6k6M=)V}vr_*SW>bVuUZh^sUl)*(@gC%XdV@G5=rDmmylRPDIMa>z8 zsY2(?=~$#)({9zpcQzgPj0di-De?x(NwnC?o7;=*jmNxYk9q57fi%%pRag~cF{K6i zgXE0c^V^3tQ*?&7Npx5I&>ng@KlE~T=w++xTP!GMh5{KV8lTZghEGlFx(&WmtG!3o zrdL6!WQl|O_6ZUE_@g9kuGcHgHuBFlhzE^F=#9-u*CXC73qGHrSSii{8ZXy^-q+vxnE zJVf9>4vXTLC_9MA&4S+w`i6utO05}?H%uB}h~zR*7rngbeKuov5vR=?4tkG8-;PsZ zl2+!W742Lyo|ABa*Z*<>PnV#MVN%u&o-u`Kj# ze?3m-*czP5CdDha z>Tm}1y{n?I!YzDaE6!7}h79DFc-tvfzz&Uzb$V?VS}&kqY$N0f(BXWw#xvLMw?eK^{1o1Er;lISZ0XvJ$^g+4mA+ zp}o;OaS%ZX=7vD@MySve8MA8OiMY^69QknU{aWzT{FZuct8SJ{9AP1ni?3;&qXd!^ zv=AZ&6<>f!nAn^ygBSl?jvD*Z`-{Lp)#*u6{^r>7EXE61ORi$= z?qR0`I7o*!gvgxnXe(N=8-ft6vrsRi+fdzOQ?2?ziO~A2%!w#+-*mU{E$gSJcfz%8y!I zsnnHDqI!05dGzsEA#ZlN3}M_G)HR0jqbx1Tqj55Qvz*_~g2rGln2%PCj)QZmyNZ(L zR~d)Sn^?+^gmZsxcpKy=^5XOH+mj=_&?JqF91FZ*Dd^vOLs<}*S4t&k=bvFM!uJ>Z zvKV{pS2V|K9V+n#DevRi(YxbwCP3v3tjNNQm3+BVlQjMs9Q(|s_|c)ZO?gLPU z;xeanGVwu9CoT#P0%^**NjlAD*YU+O2t#;ZK>jMAdx^!EMV2C@ACh*vA3TF++`%0SW({Gk;3oZ_6uS|7WUS}}BWzFvt2yi-N#=9*}L zGAt0kFy>^ZDJFb6Yeo_=jF}l&!g&W$A5AOYP~mzaYB)$UGZ2S}(zUQ@LM0H9i@TNe z{YP&i)`>g4L5rxb+seDceh1hlW8+|6oN&eYaX{Xm@h|FlL?=LgB6!6%PdgOXl&kTA zYgG@D@h|E@Cgf1NFlxwP8a*hxK!US7@y8K!gj$l-e9f@qXfXRX8G+6bE0F1w0k75U z(2a1n(QQle#v2a7RVcZRo*}cO$^-}BZYKN2T{!e0)v*a(1(LX6ZN_(){GlCy>OXx7 z-yePX2n_V(!<#P`huX>~L8O_w_{01}@Q3@O=CCoDbmH!y zQ>%?ejb5|fne-;@K>_}-e4W7`mZt>9*6E;47=L(gXT*X!q%>E+0E$F5ycvp>KYEBZ zUy8pSvd_0n0>71qPjn6=Cog$8ygt-otZ`on9Oo7BVq;}P`8DWeWj|wS+BD*w@mX{e zC(ew^6P5C;-j3sTr$1>m#M5B!`+ek7W%}93da$s9 z65|JIs^JV=hWJ1!hagbmGc?t>JbzAi2+N8^%rRY~;@3td0`ByUQ_q{m@H=(!t0(d~ zRA^${__KW_FUy}I6!PYfIBNK;rac6GJt=cfeswRb803b^ur~*T+9qhpL2JK_Ob6X1 zO&)j!-Tp8A%S(6Q$=q3B{Y;j)5x2*~K{tYx?$&DkUcEIL_gc+pIP@>E`QBY*^PEob zI>Wcu<@`2)l5V7pKP>(rL6wK52aWK`l*MoV!2UWs*~VX|Rx^swQ+8^TC@8(Z_>)5` z4^f>DWdgz@Pf>r&fufybTJzI0VXbFX~&aR zT#!yreeIIJdP-J+9;ecypWbtYWl+$<#OHOxv0|r$97?{;beII9EQ8W5a;f7UJ|434 zs-2`KDSJr|6&RqAN?CyAouIx$cN%B~b$DAC4$9o>yW@+u;b+j#-+VlK`$z0SPz@S* z@x|jGG?2%W5yUIL?>>H!KyK2j5}HQ-czk|}9>fMT7r_+R$&_HUIqN0X3|id^)Q{gI zhC(8&SPjV>Wt-MWwfRskdQ%)57jdS0+5hPXGUrC`+rUT zM*Bg`NBw$341P;SL*g(WHs z>qY6gP#}CF7uI{(J*yaopwyRNM^KwBox3@00p(ed2)Jk8UB2B{>My# z+xZfybh=KjQBg^+F+aV2v(KL4k2W1;GaY5mbhOyXg;K{}B?tnoTvHfYnjtnA2o4j| zys%d}#uU^1Rj7&(1^XAebtq<&b`L#|;AgYabW#yHg0j}(m3qi_%wkR=d&VigEy{F= zUX$t%pl^fWtFWs%MY9`t$E+liAJa0SsIWJwt@`@!>T*LN5Ie#h7MExO_7Oc8DiP3* zD`{ks9p&;{W}C~Wy42~6SR>FZG5aMOl7vmZJwLgWl%mhn-8RmfEbdG3>-qh3x+a6t z@5lc&dnO+o2x%e^C~-*#37=fuN_x)m<3G^9FA3CX-{z}4vE-u5mFnD|3ks>Sg)9!) 
zlQp+iH%n3*PxQSku(%Phz&_M z@y%#)M-M6p4nuYB<2eV8FL&Yf`Mtt4%uUElnu|_p?8aKKnOIXicjF~_>&MKwCL5J`_TKmh^Mo#;nk54BJ~IUHjIM(g%lKP@ zA;+|GFN5Ew&j^zM`oNNGzV=Vik|1R#kf&@0V|`A37GyN0 zFN}w0v|25VXXjpxw*1~J>m@y@h}UP;mGQpLhkp-OcZ*ot48GD|SXtTE`lb({^>NI% zNts(RzC5Gusow9YbcbpH^jZQ+RR5uRd9qvoCvmj8=Lj;N6pn$)J$NAWwH}I>-hRRF z9eR@|LT{69chs6h@t}_M2I5Y;(;oNQqjA?yZ|ZB8-qcf0F4L|w2{8#oCZad$D;1Gz zgJp@}1JLNL!e=vD@RO?b5!(hpZCNHJRKa?WZ|*=4!yhVH5v3ZYNFDzBTQptA94rFN zgKMO(3vO-w$$}~ZV-f7$iG2II5+)#)0*vQ|3kRFbVmIu^(bGB~IY9y*YaK_WbQ(QAauZ?D}RySM>#CdC#+O$#6ct3wgW@3=v}k z7LAByU0zj1J!FAqHV&#miOFhTO8iK8Df3B5YD0lio~&C<|EyP;;;BnI0o6Q6QR3dV z3f#)>RO0EtkU+`tW6-#U6xR^Ao!T_N>T2eFqLQ?k02aEbhj^j{E z^03krI!xth^+N_k}*Lb zyPHPKtGJjLwPiu@eF+Q1v;5lAl{`)wUCK;PRMY4dPz{z4^Fp)Ht#m0~vsvl1GcwbI z#UH`>;OlA1^@8{H>zVw4LqZ1z3LzxD9mmK%iHr~dN9AOS(8_ZdHz&g7%l-xb%!9|! zs9y(sMmaK9b#D{lZ~oWCWn|pbYP6!;&G#5~me&if^5iUF-4853b!*kXzJTZJr+HyhKZ$GmjW5^W&-!97((} zPDi#-=7#u#G{X(PbU&7BshwPzgDMm=2zt--MV=$VprM*gBf4bC<>>g&zTE%QH#4hN znZkV=ZSHY8>B=_7dZU}m0T<*`(LYi5cs6I?l(qNLVu*zAqkdGCVlnrq7776RVah{w zcilnX`6=S97OMnq)&ahQoM#36>irZ2Py4FYLVo?`{mJ>oCHAQzG)LZ3W%+}k;>E4f zdXFNe+A#XgbI!M(_a?{r(!j27*fq~gsqH&v-%fH-$O5VsQO}nakvj^wp5aQc(`+f$ zXbNP9t|Fa+-%(UtoL`=yiSMVi4gL!@Md9WWAb?81J%e3{?S)|le*1WGdK`xP*qRMB zV%4p0Sgg%kQje;x@6M zk+PUTR^2xE($+>a3D}v`O>$~CYV=9j9fBKt%8wIt&^W+ZhD8T1RWyE_M_LYgFN!8-dogJiNm325$JOzI6uA!-+@y4?pW28Dqjh4 zetC8OFWDS|U)xR|cpv-u@?5=P+nve>mR)gPUKq8mP_Rm^XkV*egab2P*aD)=szb-b zpRPAm);2-Xj06Xy;OHzT#&2UX&r-1J zhbG|@i-LD0^(Q8E80!8{xn{idM2TlyN)~cx;d7dI^h!plgfb)~3+0r|kurpOP42`% z0u`A7O$+|o%NuKdPV-fLUEGRRNBGu3wWz2xn|w_@hIKVz=5n3B0tN$T$_(UV=s-A& zZ^_>x%qOUp)jwg(uMmTC5C&E(9)o@hqh1Ds9{k9Jc0P=b1OK*hwuuMShJ%BDKXkLT2u{ zXL;G=%0=y2e(sz73}hjJ78+uE$&=9RQN(hiIcS)e6RM4KYqIyHf8gtGTjE6d_k%J2 zq0#A2&n}N&2B(BiL|I#@VpdquBQ#{hQc6BcDZpYE%tfb#UQu=MYsz$|GO^APu?)R= z2vU{5f}@W^j5&qx>N-xA0fzruHFK8SlC{8+1ww6%AYP4X`-I-%mNGOeMI7i3J$czN zZloM^l~F!^+0>dK4CbTIs7EhYe~rghtdi%2NWZO4w=rnN?e=I=uhqt_QLA3>bX)N- zACu;*L`%WGm*j-sA-6;P(V{DKLNO635d&71h`u|$Ghu$76SP7)nVJ#x2l?Lfh5PZJ zKcAgnh8KVN^ychi_~G-%(uNk;b@|1?0WD*Oy=c8PhE*M z3>B1&E4AhmMUWiprutx616rskd??V}79q;<1(oc(FgUhgeZOI4r19$U(TAgxQ(JO2 zuPEV}r2i<|272`DVQOtbak+8V-`#pR=oS4-{S|4J)7Sbc5gE?OJ)~0R!X(SwNT^2- zLxs!TdNYr|9!l+Y^t?yXC{Q)sGtTy{cLy0hNRg%60_H*{f73=W=)gXfg?z9`%{rFL zHuX?Zkka)Z)z@Yjvy_#2Z&Wjru}ED3$Y&dsIXin0$+2OfezZ~Dmnp)GQBB>FR4;vwEOwCGYlnmr+hOU;g+L6(|Fn2xWByHy5P_41)1g*W>cmi^24!D z7@}+7!c5rjLkn@vX6Zx@2z=NAdKk1Hj~;S19|I2I6=telStc(ES{tfv;dAcU9pwmK|d`i-i)()8Hd#w(I{r501vA%T)9yEKB zUzDx3SvmoEgyVmMZ95meGT;7mboxO-ClqX^zQA#4S4>ECxAGmmcov!_8UAI89Qh0v>uDG zbI`B1dyV$6(d#s7wMnnnsMi~vUVB_GFm~EiWK5m*D|USCAw+GNe!8{{@|r=wnE7I& z>3EC)%XH=tdAZa)#?3sYyN!R+CA0cgO2ei)KmHF&MhFT5{(AC`&JrbHmBzS8t|;~H zyc*upPJZ1y!m~=ydT*>dP8!SL=xSohkbM%Mk^pV=GzW!{Esma%rn5&wVCj z*X$0ulXffaHCuyPZPag1I{p4+Jo1oT=6g+Zndfxx*sCL@m8$vY3K-p_0W(XRX}Bv=nA9vqpRU;p81kKQixU!_Z_V#Jf?Z5zMjjILmAYA+IBw8 zHtxJEY(3ET4@9>}jWmZuomP`|X|)nGWbrH-9*;{aD2e@jcl`eN98QIc@bWL8;c)rz z{$u$5{P-AlwwVhI!sG7Ww+?GaQVIJuzBrI{>2*Rvl%Mb@q;b)Zgq$faS}7}(XSN*g zI3yoba8XlDza}V4P?k^83Vy>K_Pb;P^Afx}e)HwS&U-%C<2B^S$q-8I;o?a!Q3unY z)7fu6d=%Igpk9M+~Y zT3o_iaQyxxy!hh@N`n_~kIp&x$7*d$VNrnS;NWxDX2i?_*m6vVxpm;00!pI97S4V? 
zrK|_<&qDIdet&lUN4AV^=)W$>5k;GNZx!9OA8$Urxn!KjtsOB%58Wx>z?L`n*RIiB zm||Y*W=}zejWV2kNOv`cliQqvw-HtX#|oY;+;fY#XgjofUx~0-A9uP@f86gj<31`Lqb_EmXtcVc zZNg&K$=&~&p@`buN(*h*^h-~PinN&P8U8E2Vox}7#H=)@Bt>I`dt8u1791D2>duKp z@;1Ck=tYdds=7AxhQS2GcZ+xr|EX|P2p)sv_BBB`Ls@FP_8=P(n_ibI_(Q)-*+)yi z6W=xcB`D)5y*)6zQ@xIVnXS^5`gTP*ey#pn9mUTR*9D$S3|Apq7Vtfr+MXjQ<1o=6 zX)-7kN>X=ZTPAYCR}`MlrycAB;yGF2iF!n&yNt(~4OiW|;mrKNUQ(03b?ez*ak%XQ z&4QSjCooGD=!FUFiRM&c0-Lpk9gu}#xD1^J@jIDO?s2Xn53|@-6EQhG2zh9c9Tjyd zp@Hc~HYH`tu590J$7G`$K$(U(&e7!&A9DMmJv-&I{qlVl6avJ`>5Ly-9wR;h0%DaW z5gZPcz|X3o2&PJYL$;lh)02yzpd-hbR!+`%c zY4|=+rkTCrGLF6;<|gFJX?99jXdV(fPCOHmn-5jjNqBw?KUjk*Z>n&&;Xn6|-`ko! zK7pKiX^Dh&4=$PSl)5fHz^)Z&e|J0_bcT&ar$4CG>WxuzJZW`Ct+=(*F?qfDn7n3H zrbP)F%J{QqB{It#3t4N4bsp@QGk}r4@F4IVrrDD;IAE^p9;1PrqQ%E3b9AyQIwz=m zgj!VO9G`)Ec)F*j%hK(W)A08P56_;?1H!2$CpJ@=C6nA|u8ixIvoi2f?t$G_9NFhS z?QElJggP#g{`j5t?Vfac?RK-#?V)dX)F1V_gMPm^8E>U)si%;zw#{a#JG41et0<(qrth5jcUL>$Fy#UIFOPf#gdZWP3P2%sc)Ue zHA=3Fz7}&z@{RS;JkVB3E1bcUA|Ckq2-CGeGvs5*nY|dlTuwfOz96UrY_z8R#@}M_ z&dr*VgHc-C8T)fo+06sT1=Fy`^e5?PZMqM8VmTkLM{yb~=17)F#2HeLKD-Zpe60q{ z-;Uu6j%{QQbDm}vDplrhn7!*L)d^!kGg8&jLzR(FLJw=f5%f0wD!H?l>lr4v#i~_5 z*T--3SDEcHg0i7o#Z6PCY`W&+(0!ES51AVq+j{!D^wl)0Vhd9sUWG^H*b#g zU@_mF(;-egi*?~+DsnxaV&u5=PJExyr3ZLfGIjFKt9hqBLnVmi8C*rlEY;PSRlPKX zK@tKJmfmId z8u}SsQ!E62JDbZ!Lj&jM?qrc!3Gp|+RtVu5(51Re{ByQJg z!_lPOjiUOv-p^-MDz2*NEVGJm8COeL+ePxWKTSP z|Mu+5>1BBI?%%&$AaWZzP+uIAY1J4x^nxU_(Gt0}-bHv2f4HB^WK| zDIa`CKk|*5SIh;Z&w~)?sc}O2-~N;KTq{hpeN9t`82DiRVl=&2DkL zG0gtZEwzZ3(G`>eW^WbaBfto&xbsdfLNO1M%f(atnt|r92QXZ8JPpQ2LQQtyVQVbs<;lzDNj8KrZd-Rxj);MXPme2cdlGcf&%z}9Z$W$`kQ;sid1e<F0X3sH(Xw;iT}U6;nByBXKz8&{h8KRNeEECB~q5H1wGWo*alWV z%wOnR-MVG}eLr#d?(R2#>*K9uXA#T!MJ#)G@&;c8!+y_?kAKUVp}c|*o0#mUzu~+T z_&7L+Q7&b(g8ilv&z}dm4dgkhEmaw}R$ zW++@2y#g;qFl7-9LlvPuJGM|v4ZGf2_+3iOYq_;(cbbz{z0>JVI#I1Q>b6?#UT+k) zMtNZ2;;OdZTCjUbuY*_c3b2r_5xrL4js6Hv$T|e`k^kOfUMZ+z&#SZq2fT?iPB%`! zw1&@$Bq@I*w%3YJrxVW=M=kUDqbR-1p>6joTa|OaF{g@uz^tSlTEIu1?gpsnZK)vG zI8);gR3j9;{gsgMD%f*1FlC5y#v@iax8T8qem<+QT#An$2~(_4#zVt)%R#CcLv)!xwI~SWgMY>1ExM@USySw{CDCS zeIJ!S>xx{Y?l;@*ex=nx(rve`um%t5`c-{vgUxw9O3yh!FTXb0hjO3c!jy~6D_$C9 zXg&V(W%%V3L)R~lPdAYA(!L|k(Da6GeaG5S)poyqYgI6e!p-3fW7{rykYxplf%LMM zxyL@I@})SB7HQym0zgsbAhx`6?Wtl5+3)$P^($1;ak&UB?7xy2whn`(|7~N*SLaCS zo^g3+s(47KALdsm@8zyLMXtAxGQ3<(kGohjhKLWz z%V41)>*>A5fcCX!t zI)g^k=+$b?aWv}pM{(5b?~ms#fE6ifkxUHH?Ku=^9G1a6>i%N5u$i1;f=aY!z93B=H&!Dz(Err=>Z91QSUBi#q#oYI3${FpKHd~ONU-LIbqi52RQP{O#XQt}si_}(@ z%`;^;YV=y|X1&=PbSJQV;z_R`MU8Q5;)i&#zIXPG^_+nC+XF%drk`4z6;RPInIoc_ z8=!GAk%X&P?1Mdyr(`Dl0~pyAPgtsK}di_aPs0TAb;}t zLIL%29YKm2oR{$xJR&E2HC~3&+c04A@P`)&zMlqB`V~92PTdKh8qozkH$v>}mQjw- z`l;d?i0@E!v=Z!B2@_5Y4r3hX3ezG-7+9d@9Q&BBHp2H*m7Fc{7~=q76h$4`KCwK| zjguP$Oi(|=sxRYj92o(PVtB?9W-Ov`jOaGHbNkKTr>ah&m^YmKgWCPh8fmF(x<(2A zfk|JQZ}gfGiYm0B(of?WP%Cx<5m;{6!!c$WL?{xDOu&Q^$ERl(pWpHskXGz?1z18% zTaCY}Ce#?_Gu>K5GiWD4;ZPM0M#Auj1x9g@nYXzt?v?7Aa0hcV3 zzbkuND#uG^0fMHBK`%gr?DcHYfZ$(5e;Ad)wG8i8+IN!@-wSR26sw%p+y+&Vq#U_3 zju(gn3Dhrqk*Z<_maNivI-xKc3}K>8X=fu*p;U_~P3hpO(brl3+aZHj!k9v}BjgmN zbYLp&+7dmwLy(RLDN=y7F)5$mDT|k_LW|UCAcC@|u(9drt0Zy@#GFaj>(v-e;nK~5 z-Tmm4o1y}MRswgq*E6-Tmhla+)Es}SJu47;^~e#&Cwo|1FZ7AsYPQBv2mWc)YTdXw zsn;8=R=*wm2xcoH9;u&T55yA-U#IV*v-Y6)@=E%6iW_Jx=YP4F7N}~zm zZMN~JhXL4Qv}(r~yw&_B8DXSb$X*kr=QyJ3YaFIkR43pS3bkwbF)*KCcD`s;Q(t3{ z2+{J5fFAh@O1ZyUb^ZjuSZ>+WpP5@dUw7n;$<0h2ugEUu&P!4_D`oGjc*@h9l(Tb^6wXL_+l(ak zeE4yea@|;&eBy2nFle|okEI?})o-j${MT#0Yv4oVhSc z7)l|-3yu4tdm7x2jamZzhGT*YJhyIn3R!a2Vbl0UGQy^8Rc9D_%W6@Q5z02qB`uV! 
z^e}novXwJh%6(Uaw@ub9w2=L)X(*d%NH$M`9q&ikp_Eswh0aSZyjf*|p?M9r*tNXF z2Gr(Io4?&?(VbLlpc0sDlqW~gY!pwoaf018`qo3>d-v~sC-ggr?tY6w$9fJHo6%V3 z3nrc18ANZHsw8n@8}ZrDpx7ATPsO-_!>e11QqeiTUF^@VVNUBk+^qHL!0$7(`(aXc zeIuex@9+z3z>`{ym2uj28|7^cALA1Rti469R1Ohs7SHQ{6Pa(Pk}9S+qZ~dD{#RDp zd;jt1!-a|+-nF!M7#dkJTT8YQLhk)^bn!>mr|4dv-N!;;)O}8#-6B`kHfHl|6%l22 ze3WWl&KWE(alatylU%09Ovj;)#r74=mh{eL_G2{esQ_c$Ru=P=bpdbV_V@YE-LJhJ zk|%bRCgj=ek+RR(wOU!0{Z;sCa4T@|{o5=GmfM`u&00hPiC@Na+n3=k76X|kHixl~ zhf%b^eB&a>=8p*t9f|*LIwL00SK@>OEV*4tL4HA}Jalb8^M$d7WL_A{uoXgFq&H2G zXH2PW_(>=Q-^)I?Sh_dJ%6`+blh~D|Wt=Ux+T~5(mQ0s(DB0Sgs`iE>SM znLFt0$-=#B@P6g}C=1Mz5qde@bfZ~mA&~;Hw%VJBlvn`rO1d{B;17hKgF(LKlsiVh zNd7)gX8M|?Zeiq^DSzc0r|KASXEs|Vu(zH#9G@V49=&@P;&t@Llgse!>7_>ED*WK{ zm(#aDg+HBrJeKkq&6U0q*rMnz#5tHu=eIO;>%z!Xp1Tj)R6~xwGd0;)r>`h#@Xg7G zecR1F)53ajwT#B*W~-<+gUAVCV8Sj9k(vr z)Wj)cXtJ=M=GA&@bGpV)?k=8S_@nEcsp>xx5oczPEbMEA$-u=%q7r z{E)d0TazaUe93VcVK`aOMq#MKGCm&v>G)&#@$6>~*5E5Z24zqF5rg8$7bEOJ@SrMh zA_ZxuYLp6hqHRLmEU?n}_>pCxLAfhOQXYbJ3j2&W*}Cj&2#46*f^-|W}>qe(oj)rQT{a5#)waidf0y`(D0c`m6JZwM>R7yuwpS<~Uc^rN?Kl}3eqGT6m z;n1!5>wNfkWB1b)f7SNBvXpnMhFiZ1_G$|3o2fIsOVPuepP`Cg<0lMW_1vjOzoByC z$Pr#G=W8nLl;x+z4&*$?*S?8Y5g6lP+#R)g<573etJUhm!KgEg`cY@l-?F6FMN6t- z26_f&AQP3Y2Y?>S%V;_+W!3xOfUY#zHiFKvK;FS3n!#GEkndohrXc2`*E53T!)g4B z^6v|wL}{N8??DY=?m9;oR$#G_R5H8^VB?VQFMdZ4fkmfZ3F|$8MHsE7BVdV;NnfN!0CxPXs?y?+K4wN#+0XS4Y%eir;%v0hvA3PqkJ zlj%CWmNJFRz)CooWAn~C#Tq(|R(;%bNU6F*V$^(&H#SN5M%ml*ry$j0kPY>qu-|>?2m>;B47jf8`RLo^V$DSk*49@FYu z=}WvE*LCtlW?FrX95&-x>UHKcrA2X4A8Kb#(OsXHyhVNPocl`Uh0^epLsnkd#NifT zk3#v4)<5^Hcg@|RT#J_iXc)d}QE2S0U7W6N>X_rL+-@`qyo&9U;=W$RlsMKt{&HT$ z`Ev+=-TkoI#lJXn>d>g0b&AMp_6*4t4Rot(F%Ao7{2s2w<`Yd~iJuz1C5wEE^YnzH z@z7JADy@R9kLzzhqq}*xwp%R;06REKJ4Juisj7rIsvo=ESKLa1OPChTOZuRMUJglo3HBfPF2&+x7}Oa z8L>Uwx-H7W(C*bOUk1DS-^{(WP-=ce+VKdDSZJ#!Ted=b4X}ebkt&+3S6T2BC5&wtOh)YTdoJ11JWYM4Pzk<#Enzf^c^?o>;oONxd_1XAw?4*y| z(~qG<%H~EARZ+6zJ?;J5?>qnyAVG?9(%o~!>@V0Izg1-`u#4{vWY6{I}fU z6gmNBA)zZf|g@>O6uy>U{jMOytG`ZLyt`RaKx6yEe) z{KWf|D&GeXnm9YvrIsFB4fSSUnneviETa)~n^guZLfe5d? 
zo`rE7&To|}=T3-dEW-}@4R1J@3|oz{2Qw7-A${_;gV!HMN=NURr+&$Z^7*C`F$urIGqvx}XblQ(bQ?d+T%{kTK-8zq#|f@b8&T!}&)L zV1AG-P}|WwM~v64ZVwAoatjzv7TgwL&86Jys%RUVN;)$i`|c{70awQY%@;7~Sg0Ys zV0H!kDvGbXRm>hG*{G@;7Q?K_2W3Sr^y4^{mol88wT3t|nm5wLPc8dacRQQX!lk)2f7;v;gop$_jFO!x# zeDJ0p-PIj#c*NGKLH~)m{;%s8(Zo5?=nWDcGkv_xrmpt0H#xhBV<6QX86%J*4Praa z%zNbUkHDH%Aq7ZWM2j`X*Yr7HCH7U+B|RK@;}3x|!)(rr^^)R%1~VD$(+^`{r|7{( zW5l3;qVfusJ3hylc?6jXh%^LC2d}v#4PtCdVDrEtKGwPZfjpcb6i~w`gJonA`%#7Zi=mo{eVg}j>0XY)T$KJFP% zkwYGg7%NsJEfx8?RkYk0>gcS;z{uX9vloYsWjgM0Fah|Yo#Cg+7@MS#;!>nH5ol{f zZ*n{sc$R{UuBaL{?ad!$ut}PcSU?hCR2LDtBJPbiE-Ln!&jZ6~PV?l4J^~Hr&nLK}2rfQP#k^Curpj@A>r zcJ;6fg()6=$|q?16-F^5Eatv21vlHnE-GvA*}}uQydnJV$V+YbdLDte3pi-zf2eYY z&Iw?V!eXX3{1w?}LjIw3g+Bzzs%E-^i3rv}bGXS3=jgM-v0;U43zl9jAH~{?1dU44 zT}2wt5X*eIp3f;VF-K!h!XIEr)MSlw9a*31CaI6qpNtr-afe= zGo(>~&d9WGacc+t<@uF`VHD73iWpDLU=y(Qq?IgIKLf%rt=Mbo*6mpDwF;7*@H9F8 z$ugQbu&0S&-@tIl6$k-QpD<|dEk#~6aEnwQ`k-Au+=bJaSPo4jtOSO_izDH0BmlW* zE_I%lW+ivgR%~mwl3aVw(*=^`XBKbO)JwSw?GA!=FbqZ=Z&0sy27bTa^!h!|&)l8o=CpNzQ;T7-zMQj{TJuFl0-b^$oFB7k7_wPOw!Bb-s*)(m_{~aa#hf zVmpb24~wRz+bpn{rDCt<(y2Hg#`m!0NX1lL@J6J3P*qF%>*DOm8P+z+agFSM;je*e5hvbaeSIcw)GAin>QNA*_fl*MGO!2H!k!d&%gq*f-f&OQB~Rd zhdFoR#&jocY`GK1Ax}k2u9l3+wF%7}JZYncV$y7B55=U4Fqx0n#%TbZFX4{cTCl)D zk!aE|Z8H%sE$3HMmQR{gp`w_iDINk4s!2s!uX0&$*>jB-m|YJu9oLFZjr4K_kwa7c zCibN#EqqEy(6;6@nI@qx+S>*O-bg07SYZ8}bxSb1rS3)-%>a@o)y<~*508Cj)JC{!=A zIw7b07EbmX3Cg1JVZn_~At`9doNSCHlGes?j*cw>LJi(kwK2-7jWG)UL#1#&SNPB) zj4U_1y%daaLo|*OBXmT5tQ|c$D(IFcutR;I%`+utiY9uVmVa}c>wxy@uw!Hsl`!yd z!}Y|XlLYZp!_P*=Ffc6RL1ff8=eOU;#>jugb)<5~_9Dxa0m?MW-LcG{D4{60Os0Ax z$b}|43ddAQ##B&48_`xVOw&rYaMDm5Zksx#VwrXtrFGS(x{rQ_*!mJ& z9`zefY*o4-d-%%KmQMr|Rg2XYel)ceEUr=*gEZ5)4Ny)T(o8Ql*8Kx_)*P|4Nmt)G znsbMh26BRy76#r_n99oPMakHmmMw?&&=$QDk74jy8;4@mhV&1P-dSRoaBRlq+_0W9 zATwsk0zbQ44RpzQ(4Q3Gb30{}1bHUf5{}H3RvCF|f63~NOG(pSNt!ETb1myN*hO(k zfe;6p*Px9|#AY9V48MUlZeWg^?2elZk2~5#xXR|p9*IhibPNL5?2d-v z@oA2Pe6eK67bBW|E$pm~kdsxA2|OzfYw2RTD>cgO&Ze0)Du}1UzSHKd8awe=C^OQ_YPWYUaQGM&o)cfmSUpLJ#-c-khWma=t) zbF^f}h99#G4N0Z7`1IW&=cVipdRkmYZN9B>88>Sh7WHP+DMLCiqxG1fL}6lxXe#)={gjwv?dI43rv%^YpUgh>q3BD!LcG%p>Zu^7n0 z8l!Px4bo^mwkZ>&(PHr4`amkzFJhLHYF2w8{2{vSPPfqs{GjQz>vgZ$YYzQhztQYY z^8Fz+AX#4s?Iow0?NmDrOf^fNM(7x%spZ-)n;LZM)9P876rs!6b}2%aNuq<#N)fs= z127k*2rZF{GX?E8?N(0tRGc}*OQqu6+<_9QICF{@pXaL-D5ZJO!6-u|S%I9|@Nd#Ml0!j~F6y4D7b_md`Y|)DnNI1T{`2O_zUCDlk(&f6K z3U6$EJ7k!Zw(3g>gwQszWKXDU$tB4}^V`MkXUS%BkyNsxjyGyS_vVe1A?EcY7%{S*XPdnu$ec$9dN-VMj=qiw&2yEtBeQNOv~t;@!+ztBa#8^Vl^ws=v?HzwE?W@xn*KPcZ6tk$(fsyx^`^T^2d2? zJ%I*nj#sl1iC;BP+c;fmx)`GN+U9VT!`g%kIQyT86UTV)cqlrHw-Nkd5k*&>s0ZngCOJuy8U7Q9oIj+`RGCdIjGg&RY! zqYFFBgcl_pRt;QSUbtr$mnWwjl_xycl1Irp)Gwadcqlvx2OTZ=+tPB}TgLsVgz9`<;NXn=!znS~9vq$GP@t5bND-j5=M( zZAG1`e0h(*UYkAsIzugdh)p~^(c^EcjWM0gIP&~uM?0FzoIIr7TNJUhU0_F7ht!#b z@}txJ0i_w$Xx;D_+?ZZIS9(-RNDtjLF=OR$@)A?q>*Qyq&Xg}oO>G07pPM>YK&#Pj zPEP$4(qTVHZsm`5hU8eJY}}M?N3^k3UYW+LnRG}>rJc6=LQ*W+MuYw9FpmGgvNWy! 
zb(=L^i+24F$$NaA@p{YC858Hajde8VI@Wi}*C^*(SEv=I8!K`QTas_xGRL>hc091b zx2{OBEA3lX3}jY1{jv3Mu#9J&-s}tp!*(#50CqMAFiWy``GNwq(@|ib4+1gF_hgPRT^FVD%}+P* z?{}`7B)-nP*sTSe*t}2*y~H~|%79AOdy1+lR!#N%V6?v7SVt{ZZVP^HtfKKfszkpj zT%xE|LcV54R-$sw_)65YkFrF?+_5_8LqKf@fFPU~yUr@2uX`Hg06t%cFP!hbH6ZCf zNG^pNDis(s}Z%GF6nHn%od6SgGyp&dV}@xmLnEmU^w2vG%WP#4}3P4*1j5fH|A z9v%$Ly&1c3u$|2sV!1n0))K)BU;dH0LWQArU zMAN$~_v-rc-Q}K!P_ZH?-_k@+F3uT^C^8JjAZDKtE+SlLRjQ*&awXi6HziX=Ipj;W zxVY~}9xBkiqw8;Pv~)_`sHjJlA`MPbwX%I5sn+HO_XXN+-d(>xe&-5bCo~nffaGB6 z?_pXA*j&NKzW&(6rAY0h7YK1nCW17ktBJmgj${hRac~b2**Vn}vNDAd5E6^fh#c(1 zi&w4}qYXg}m4;n=4TD5u6ouZ6*Nb3TdyTFP_rk@s)#?h`ZncC~v@L}uZ3&UC?pLc9 zGG!Qjny=X7QC=Taj};o6R|!h8MYy&W&_EL9VhNM=aqk%p|7-v7YoCK#!zblDYkd0p z6ZeL{PT~=7)9F{zxcX@ncqRz(a~gR@sB@h2qkkt`Su$PwRpf92R{%1tLZ1 zXfS$_@#Fx+)cVuGD2#~HTb>G5KW_S7uRZCF2aQ3!?)Q7WQP6L8TS3!g^)?91>uql+ z9(aqQfpgG!9Gle7zB#*e&yRj|zdt>`p`0Y|F=>uq%6zqb8qPv8rKJV0ZrgL9EqCU9 za^v7O-ZS3Jm-JAa=L=thSvcPlFRDf-WQfi_#V>F7MdEO?Ws0sn3K!%n90!3f#R&ES zjc&I~%zm-(9;cD#J1<5Qa7EOLKn#X6tp>ga9vg#l5uvn+CKG_GJwOu|l7y)n%9?Jx zn1afYfrn#f5Hb7lQ2?;p$pRq4zf}zmjst4p{=8y^?k*p)4H(LbK3C2Q_#|1RCgKHS zI@@G5nMRzx(c>zJCHZq+9xP1OJL-v3b8Z6^mY-J6U&Qb@D7oh_8nM)h0-H~u*f@w| zZ~0iXFgU|Yc4>&`VsSKvgSI~z54~2SU9UGLgJ!FZw_dwJ(c+LWY|BHwdL_5?ePVPEFnst4OAibEJeT$^&UU^TR0p=mDrUpv)`d)hGSs zy?{Xb{Xvzi|Iz|NMeb)VZF^rPD_drV;WN*joxSFJ=g)t3TKm}|=cm`-YDL-jG=y$#?X~ zxim0VK4B2P`hnwfIQ%4@KKro^C?V9Kn};~gTV?bEJ$l@ulap&85crDwAS4M4razCM zGWx&qo~UL_q?SF$c&_KlG3TCd^z=HwCa{H9P)QdHWXiDgyr(fFJr{N^|mQXz$ zqCo75$g5euR=ISOkqcPaemXDcV?XhPt6|m`%rD~Uc!7zG9X0q6-p&O8F}IkGgasr< zFu;O~Wq=z@0*IL_=pp)tbJ~(oK$$5-G7Y=~K_wjIB*2_tBVKp-(i?@jf>x#>-h7J) zASV(*gdyXJ9WMr9kbX$*Ahm;P7LF;^i^$M(8^pVn{DCHuw`rt!c1EQM91U|Vp5zWw zye%9jfm=nNH%@9OCe8yoUNjCV7m>^vGNXid$$aaR=EUpq7?cw^m&vqw(+BS{)`kV0 z^eF=mL6Uh)9?+3MiBK5f*_@MnB;c@q)%gogx64@~Czj$>AFq&V;9HE+dI@SHEH9DDC*d1^ip9BL1C_CsOa_XxD)0qGUAb^sU2+%e z`S=b~BvBqv>*#hh5R|=|y1!=`Qc9`jDkOj49~>{CGlArxoob}9IFIEvbv6!7?zY%vu8n^nPa)Xk}$;<;Y;mj62AovN8w!E z5h#EmhN1Rd{8KoWr)D3B^A6VaW-1on$tF;+`S{z&Z&~&p{8(lr98A3aaMB%(+MRlR z+!=Sf!JyOZcKStI4*|lq^AN9i}k!sa5C$W`a)(Scd^p>kM{{Zs-etH{AG9PN={9joi8p z=@&ox23IpWK+z?8TI}&1$rF$TqTj1rOKF548{_;EIZB|oP3Rr#JZa*Pg4HY&QR*3*t7*k;Qecb6j3u<|96BQl>;U#sbp ziS#I{bc05nMHswVfQ>*+dV&9fTstCXc6JBfRMKxqvg|O-q|@5jfao%Iu=bkH07dR?W!E+P{>tH^^z>u!SeI{7 zrg$}+R|$sABHEZ;|hduWz5no@=JHucg1XIe>_cTz(kTlxj| zo*_0~eLbD7_r8wzQE<`^ZB+c5;HYxa;LqzH@ktE8j`;nI6+WR`%Q&@%h&ol7nF)u7 zNNmm1XnwnIR&bvE_N{26ci~@PHm@VQX@)ywN*LTB^VZ}J1w0`03b7>KB%R*K9V$=Z4*%q} zs~6%?Yi01kO9KaAV0k+Tm43;ERCb~NGgh5ty`?*X<0-cW(fY%8AJ}v3k6ehB@(faI zUnso%qiE%dAY18z{|_zW9H89bD-xSRM=H-4>5dZN@Zzs_g-O-cHQu znm~Hmy~`-`#BCT%Md2pQ&SKHYi+Cj(*T`MPT3N7C+bbB$mtx%1nRHKzc6KH^v2E~l ztk^k~W8sqjaf-Gq?obT16cU1Xn2!Y`EoMA z7*5hbZ75>;YMa!~40ld(^k=-sdq$(?5>gnL?rC|VV^aghQKr^oNkPBM;e8oj1p2}Zk(%wUGRvy&@2%x!mCBv>5b3_Mi}tCkZ?j)j^u zagl53Q-(gn&i5}PJvIrC&&422#5!J)%j^HgWX6@9J4&r9JZ(NdfIfEej}Q2*I6dh1 zN1ayB3-F|%UJpj&@ucVVJg+$_ae5#i6&xOj66`l7?uo&tu&!rSG%WARGj+k2Qk*AQ z$20Mvpip*`T;2|tG;#jU37>!sba_ucb z={}n|2Fkt^(>Ur(8k1qS-E208^}6SK4X-m8jwY>~X|w<`XVFrCGjWg;8-6tPGa#nZ zFakP8e=gFGTyJ?BlM|Tht_RL~9r{1BXn6nr?8Ln}``@R(yrkqx;rswI0F%cTl!fI> z+JuYec~An2q~5Qhl{XbShf$y=3asZ?`R8_j_=_m8@W@M~^O4W{4#a`t)@Vwq%kLmoQs7Fh_qFqoI$%avn?>&iKxo-=aGVx+2|s({)gH zoXe>XK)9IZW)!W*M{?haWHIpzZ*?btoSc4t2E6U+ z=-qej#nCwiZTj(-m-_tj)ySH&+p2dO^~Ub*8+7gM3)jJ?6&yNmFn({dfnnXDF5b8w95%X#oqD%np(s)|c^$ z>kYe31uJr2;{$IDOkuoOj9vUzA3N5+-vD8NtBO%|yjX+y{8ewpQvEllKkQIdeak)u`s($2IXx0|%bR4mCaAUn+ z$I;B*mRV2o>(tcT|_o z&3d-Z-U;7ScOKB^^6c{b{Jrzt4^F&ZETZKKrPTs8_4%rf&hM2S0Om50yxRHs$3gEG zI%4$0*c(j)`tAUSA<+`d@tKp!%0i}nk)tb+JwR{KWrVJq(K4nQ 
z=uQMBMeX;o(E>TdiV}o!d3g2kd8Ut#U^(|40s!TogwF+vp}mRmIeph^mVEqA*OvJ#x|Kz8+5a2ts`!22O{oB0=a2)K&}>7e7`H6D#k(U!G(r~=U85tUt9piDV6BI< z^$Z&pAZFetdSWLa?3E-U4f^y^5(ZH#PKt$w1cfh=-||owFpG;NN--0)vH@74@gWMjt^(vco`jjIKR1ekIqkAWM%H@#nCr!Pd^}2 z{P2NvIlNxV6#C%<<`m)G8{b>-cjW>W6Uet90h)7FyP^5Q)d1_^J?Mw$j<_u#E)KCA1$`=jH$dt@ z!E|qh*l8ec2ekNhK&#rKj0RI!Zh-xHG_T=RE!Z{cYZbeKb3(%EN_%naExb{Peb{yW zj5XY`w{Ty7a8chPg)WJlM3+@O*d@-Vk2tB74{4NckhK$Bo@4Uf>#vaAb&XGZTKtLwzI=#Abvtw9Ii z4^1N5B`XmY<^7d{tpW1Jz`M!x1c()FMkc9}X=%4|#X+O+mNgQK+z_Z)Buc`*Y)6Jk zr>lwx6->NhSfPvaFc4vnV#jVI!qr0n>RtnT-s*OF!-7;h2|Ntf?@&%XlK=KutD=IM zsvtE6C3%UphPQK+aM(9_wU2s(Q)ZoaGAfhCbP1JAeB#U~St&=QPw2=4C5kW!lz9NF z=0K=V2fTuChmHULNZWfCMZy6Jf;b4AL%{8!kP^h`>f&x`Yrqm`IK%+^@C206^)%oZ z#d0>8=I6nKAum9DBLRi+443jOVva!u&^6%=3hyfX#>6M{0EFDc4AGfJ3KPw)uJb2& z(li#>2j4|L_5~YBR4mf+{>t{ua~>U=aXHbmDK^RoHznhMQ{r$~PZRq4rM#1jZQ;(K z6ddY=t7I3k|2xq9+Q}Vf+9t$@`o>|8cW?Xj3N~RVRP@(EuiO_qSHNX<^RdT|xD1dc z?NdNM;i?Cs0s#=jIKBh9I>)t>@MU2x*31z&sv2&cQyZOXvPt3PBNgbp&9Tr|b}J66 ztlq~!DW4q4g)Y%*rfKBILLP`sR8{SrG=c(vIc#(p+=j6j1&twv*wi>~@&0h`0I(h6 z#g{jj=2KiIyjw==+dJn()Y3)$wP;;7&Qnm(KHXmybOIdP+5>$qCs`%!FLkhj2w*jPHKzro;i^H9~&+!M?qyA`sFe3nJT zZVLegr_dPkL^B8m8pWlYFy;)6acd}1kOF!031gX7IU>e=G+lYZ_z*|XWQ6Z`D+)a@ z*?vfDn>H#}!gxm%+}u15qBp8jm$y&iXBf_wEVhhBF$PSG*gz%h_zcO zVZ|zMzhWghB+;TJFKG|-fiZ6cj<4RwVvB=?*R-|VxiqvHbx2P1a_Br`_WP{rJQMoIGvsj3 zg!cW6;O)p#XqSAaY9l2lBc|R6b`98N;e$=`a@bMu%LCC*3nUa?Mq2>U)F}x=4lWC1 zJ(t`_@C!6SYLMs15h^H-wR4?h(;?KgBTI8a+inuisQ1qpHXRSfTg<=nNwPfYL@Ki1DwKxknK{JIZjR7jEYexg9I{ ztc3!Bpg{zyY1<0DqHN?4v=HjQ2;N4osR%zmbQ(k+Vr5jF+UeOP>qZY@%+LwbInb}!R7U0G96 z`HUvebQufmdWgC|#fD5DOIv41M(NFX)+;#ZOjZ}cDtcWGR?){a>HQ3$ac5S`*ua!< zQlnB-Vltva&I|n)htDcNgaS?uhm`}K)f?#UDQoPyDteVr$+baea+^h!4TCUC`Z&>A z(xebuNWn5X=;Bn4jlNeorV8801f^j0#Ta15Zs1gmR%um?F#JV(C(R^Sx&m5fJAwngPbkZ~+<-EnA@1!>|4SoIgw2 zm#eOJiiuyp$49>te0_)*VqZ2@do6Gh|9tfR-KBeT`c@qNkaL}e#++K(O8zAo3iQ@J zyKt|LkM}eX`_6B_Iod0|Q~P4jSUVwxKRXTaT*Jm0N7B(~CJ!4HbO?N?Uf#UF!c}&9 zV(kqAz%QdyAf! 
zgeeL3XN6cXqdR`|Fc;l;uGOi%#vdZ|OohBIy$Ny`sb0Y-t9-^g3HX?r^2Eh=A)U|z z4Ir0QTQc(Y6axnh$)_bWp7{wuRGs7V6Bl!$opO`w#pZtf?gUfN*#0X4j`Z*j(f`t1 zXD+TXP_nRNBCw@8oC3<)Pu7!x&wQa3r-hbe)rylVS`u>W8m;kGKcHn|x=W;%V-s6s z(N7gmWaFV^(+}uoTijN14#kP&e5+1ijGK!+kFEF<@Sbi_wVE zrG=B!p>eLmObR^;?Cr%A;g2@fxqM9Tf1=R-W0ci@s{YC3bn*Ss+yBm*Xh~M}1m#i6 z_5Y^I;{R9C{sAhQ&YClpg~am>_}UF0|BByq(!~cD+KJ8RQDGO&-3SOb)Zu)MjLEXk zc7S1g#Pg|B0;@LZ6e2*F8#*n@_t(Tbi)3sl(l{lNIR<;%cCWr~q|V)%8`U;&J;A-p zZPB&DRp-SloO7SWPw$iZiTCfH!lRunc&B43D?C5#CYgh}!!Em5elF^FD1iEoHt-y7 zd2)JlhKZ@<7JNs?PEMV3WTL9PhQ7S=M?cE)dYof%>?j`C^FEQ`D;65+ z&|YGUGQc)_jc&F7MKFdYu#iteJ)fA1Jg4x+bQgsz?z8oD6)tG?zK=1j4gB`z0UrcD z1PsR;--Q7k(RwTAHu9!)Cz0Oa&CZl8(k4j=gMtDnBZxj*nRnXE5MU=q#yMqVoMVx3 zJQF?23IhrrZ% zi+8-7kMrmucJ87FI$^?7d9Iy#A0!XfH1gvl|fNia;4x3nH9N0Pb^((~A?u^oY!IIHJZ9H|khd(pi+&WVhL4?l>5H zIqrzZg)%3Mr?#b16Hk2!mV}B$|>X*h(5KdkVio2W9u_=H1b|Q}^WT1~<@2>~fVsRs)L9baM{6 z9m`Y1uD{VdRs82f5#btH(O^LwIh;^e=1B9>TxNTO%0%LOB&* z-Xt#ndn0d(1M{o;QE^pz6?*s3db}?H#IP5B6doHQg$Lb%5`89Y(2|qhPEbobopD9Y zpMSwOJ5+RM=Vm2GnDODV{ZZ8hZ2ILTX874;lcI8agGa5M9WgRD{i?0KqC0x{umd3E z56p~==L>q*3A-+j4mmZ971-^`>5rHl_vroGcc|G_jTg9SxV%0;x;Q>HUwm`)79W-< zaQVaO_07@usn`1B--&#UyyTd57zG~vu%e%nneY6Z83ew0CNN8Z)_%4Qhl*)vO&7Hc zkg8Z_B)6&@qgfHBDAO^cXi@|>tSJN;87YyY@S@p(kw!5llAD82A~mO*kz&-&p^Q!V zvBS`bCl6g_-?bVJ;|=54VP<#E%R#&r4^~{#=*WfEoy_-99oyIOoaE%DG*VP0^gp?H zR)=J`r&DT-b`z29g>q~r*5;7aU6f#Z;j6|=Cxl&}#^hn21kWlJ%?A1vfSZqJi#-CX z@p|e6FM*%KU%176<5(pW2DD_SC~d>PYhx3!r_SHMJNbCrh(gN2 zW>4?5@cfa1tMCnoNH))iFYJOuQ-hL|6D|oHQeuk5!s>D{!gz&f76Scl@5e#UeRF#B z?)~*C(SvSP{wfVyn0(O->y@+gXYq3C0wD-96RUQloE!IQuTTF&#KYfto`#s%gaN2& z6Bga4c@a(SrK&rxUpuYNzVqB^e0uZ7Zd1)VahY9xjt#$*L&iE*6Z)}Ang&IdWG2yW z4vJ0e-q-#<9SER-J7r%_4zEm8?H)}0K8~J7F_8{ohN_dM3&Gkk-3ke&A}W!m%|Kon zL9jeaoq{luEkvjYuAz3eSXQ1+8WVf=i* zFbng{0ct5(8o1<E4JsO#((5){uevLpI8A8#`VN#_Mu?i9dZ=l$+8ym42=uv6mUSpaD^+f4#3G)}ubUN#&_!9yiErUZxc)KXBulN7=bbk!rBBLub zsriGwI=ZhAU>TrQZ7{u0DE$6*4yMMK~= z1;Xcx@-DFQF2lcVTaVL?-bVK#9qg`5Y>yK2zN~)d7%Cywd@20`Bu^Pn0hR8`3{G9u z`l&GBSdP&0sqfqOXD6p8`zU4i)G%OcCgtn5m*2Z5KV2N11HIh*aQ5!_yR(aLr6Q}2 zL0_qk&_dDrtrm-;DX>aulr9Q;|EKYm@Ss%KY!U6SQA8f;V`9)sr#F!KL2atk%3H$H z-bikVjicN%tJ9vNw$1%a7z85EO0(RwA&Dlh#Z`Z`S{ zyh^XxHRvymK42MuJ>;Ytg)mRgOG%B)F1UUYKkF3@;H8N}5pR^s%HTw@B<#j*$6OM0 zx_AylW}Rh9`4m0wX+oAPjQ6440Fy{VacfzFO!CH)(Im?Z zj_}s+5Z?OjQh3|C9f3g`_R3?fEY=K+9lQ|vDMaAW!Jyh|3`&fVB7MZBK3^oy1hrZ; z5FOHfxuGgWdc#m{nq6^~OKtUwR+vlOXzh)nRYe&OWd4)6Hy*-i_? 
zU!1Sl`;%CKs9AZXjn(zUaQP(;J9eH`t%r|crSgkV!HNJR*K`DC`Ar@QSJ5)C;V{7* zA#M}ev{NbSjsQ>Lu*~~fHerQIYL+vTph1vOQ|r&qttNS($|0y-y>Kv($KHG*iKx|x zpH));^!*#oP*dL+8U}raNCB_KG}tD(3LEFbHd1zeqlwvi6?}5pr6~VQEXPumvFIs{ zMeZ>f{Cd8r0b_UP{M#YV+?MMuMKfD3wkJ`DG*WWIJQOX&LH0Z*4{3?hVNyFabLn<+ zc~?xl%eIOW5Wl3^T3XQ)107ETZ@yma;cI~?-MfeFUy1I&*{#Q}lq8pY3hc~t2ls(Uu%9qtq!HlH&HJ!y+>^9o|@Mc+~7VyeFFc8 zfWqaOkto^uZWwf#p*kCoGzejka75vK&G-%!^Pf$ZmS57prFT-65B{PO ziExqpGRzmN=`v!zWSM*%WNI=={o-t2leuHbn!i_5BR6_nZsbMeu?ZtqtmThAOTJqS zznFDcv0?P&kH^M#BTb>nipq`HNHvhbfC1p2fq{bQ`ooAuqVo5&3#e3XbsZb^T)uv^ zm)RKR8i8~cm(dJ`KYM1AmZw7IxZx#IB%?*92{b%P%PlQ#!b#N znD41zg(N%a-JjJv5e#uZf6F7F!)(}n997S>69-iZRC1|1((l)=QhtrO?J6*)2+?r) zvxNGK4y0@y)yI9iShMY-#Jr?E?eUAuVdQ8p-6WXe3;b%?L#21cIUCHB8>MA~sdW#* z7w#rAPMP_hDW>HiXZ&6@Z`31&mQqFsev9rQvtTy96SVcG|D6A+D#~TtSDuP6bEQ!` z3sB}3PR)TI&Y{-clkiBC|8Mk7ZO_>PXq?_4uH0p$pTp`#)=g!thEG22*;F-U1*}-g zKkOBn4RUHlwpvyr9|GXK3vLAebDUaimS=^>Aqi2WMwaau_gnCRaBg>Yo?CFqR^?s; zlL1$T!$Gy(Z0k1H(!g0DapKTQncBEL)gV-Ix@SJOPHVc5K7sVEaI*4BSSLnaE67M) zVe&~9xHQ%9)FmN?li~7Q)`|b^T5l^Dxvv!nkt$oa2PKV&m)r!w#w*8pokyh?4>GwroV=KI1&pMXl@C&1iBW);51Gda3cjYs9~ z@w>O~`OP(7ky?o4H5hsaV`?YbL{QrM zWN;Xt{2x$bK-BU$GKEMjgLkG9pap$O?1>-$jy_pqQf;Eio;oW15sZ=-X%&Ih4;`lxPasC1^;% zBlSu=GGM8LKCPTQDb&m9^rw~89~Gx`%2w}F-Xn;b%YLg?s zzO)rf<|a9J#ii+-t&&TM9FdArY6@oqUPubsdTElO(FM8MoikpSS|A!)9V&iplGz3$ z*-{INmUB9qytj{6y-yLB{?jOzM^x75FAdb^oby-GOHz4PZ@j#r0mpa?g(yx+dY^11 z6QpUZ_eKO~5L^P|DB;Dt*t@4|$||XY!{It6z{DsIlc480-Phlh;T+VJX?agtQ47XF z3PMi$q#Br;DYX**Ec>o_SK41%yKGU)=qep21zgPq&Q%fF6Mqx2^0vA%Od;?HKPW!lxV-HPr2x7iV+qI*Ii-ox8Gj5HuA)Y}UYn|{#*N|izd55I3PDY# zM@$k|($F^Fp-ecRevgvH35QowF?9q}VDK2#ivq8VFmvFoM;TZslfj*bmn}r@U#`yS zWnvMD2j@yfH^u-p2OOux2qakNss}byY4?}5Uo4E0hC(B12x32##IH!gFqM+KpLO|< z2eX~n)J0U@EhmYv!`({0yX+=WL|0>|a4BK*6vi$0gn0^G?&dR*d`T}49j&_a_1e*u zMs|YZO-WvBRlUZ3YF(-|X)17B$|e_6@^?GQ3MvGFZlaC@lTdr1!2~ugI2k z#4Nom?W$2q)W5&@>&4{{7cbSVRFv&9exnn6vjFdqqM?BvuyRpGA1zc!^(c#_bmg+j z@)u`D){C$?%u1u2r!cGUez>y7t9TO2D(!e%hPVB}tFsJq<1Q3)w=_$#IL(#4rJKW_ z5+yL7zgdt7V+|*dg<7Eu+^pkLmJroB`!LN2&!N^G4S@n4nf$)?rE-R5_*X)Q(9|{U zM>yZYRgx54#`7P?v<)>2W5AQ>5Y1AsN}0%B#8j6TztmPlo9eI1c~wF)9Gw8Zo;e&Fkhe1Bn#0g%Wi5DRNx2X0pX811^g~iOjvW1hKF_pwksRiU%oUT(D zv>)e`!Bw)`p7ZcQ4VW$UdcMZ*e&V8cPmA8q9tc8r3T|X z5PB5quT(T%IavLgVC!?c+EJvS|k%uva z&D7#B`c!_Gn&n1Kz1%JAY#5+iUFI{ddndaf`*7J%?XaP`#z zB6B(qs;+{t(V>j&CmwgRis-26VJCbs0`dpkOZBE|TolZ>4_fZ=$!>E0582=s%1qiu-QZ_QG!-V8o|Z04XIlb`{`!LYmDwuyps1Qa13;Cu<5phiUPfF zkBXLaQ$i>a7^q3RTamg-q2Fa>MMOvUhWL0=O(Ds+4GJq8L2s0~MOS(^Lf5V+Z;EqO zx(7z<`V<+fIH6QnLxH!PK8kf<^3n7%p_wmC{xJeFj8~(YcOT1?8P+Ty)^Z{pNT=Ct zH@l6X)$m94`UHe|uhVEX2SdL&9f%#KJuis8EJd#D(iWwHpXu;iSj&YfuCkOYqmeS7 z$UKCu^nG5CgRsefG9Z?qW!-Otfeb@!scISRAj?9ZtA-T!tnK`8bbaAoUhGMpzg2Y% z&7z(zmH|)%-p=FGTy*}uAjZxsW{L!RYOaz-I-kj{# zu+gEwfbvUD?n0|Ng##uZc3e^-0w=8Zcs)iJ*U5TXrOYs5t%*E5X;VlTb23L>+Q6dF zz$v}hWAyf*bTBpHK9)0GIay-VMM1J`KDj!_2Gj%ik$25V-wE$rEdL z57yOcEhkp)WYB20hyDJrG3WsMZZ*AO(Do%C^HHwFO{jDu8yz+5m) z7P+s9Co~463DRem=^^b*h1#v`tsrS!)n4+g8-A3&c0^e_5}y%##8Ie49G{os zBrPw2rtC-JJSTo9;c9n8L}=yOVr@6CnaK!SibX*GW@pGLq+8+uC-?;9H0Qw5m?d>G zxl4k*vUwKZrc0?3?&y(RcTxpe*&4Ij10ZD{(fN9&r4psY&&~@fkTf1b&khc!dpxYPfYhG7X1S!>3vm4I; zB74)p3+Joj%ZoQ>-?~S~S7+?z^!Dr1Kn25}a&2n09> ziBe2~dMSQGWxj)VR%tZF{M~w2f z=eRoR6o=A=M9~W6ZR$s^$;DC;*$>6>_`_X5z6ofZx>EPE;lCTYL?z0{4pk?+Jmro3 zOX6@Wx{g}vD0Qv zO#mlviHCP6QwD$NJ3_FNCwy6j!tIFTP0_U28Nw9qw(6Zmy|KId z21Q?EJnnmq&Sd2IqjunRhMmErISd-DZqV!wCY=CjK$gEjZ!+lgdTr-Cnmaea!fE!M zM&pqFb6Sl?lfWE0R{?VPe_sRB__OE3w)roM)w0=c_t9nK_3jC*!b9iHGOQwibLcGW 
zM%wWGu+uo~0GDk5k?r2B$*1Aa`T6PyX4q*A51QRyoR9HrclUGIcisUz2p+_bF*-KH@=JX+n)2%V4gPOB6LysIzfQss7D+gOA%+3dhT6H1+--6jn`FTEuz9 z-8qbmTid2mud{unD5s@`w+!R)MpUFKVYrpW##&~jxIJuajq>=eM3qb>7}Vw=r7C?I z^bUYWEFZJDQ>fdflA5G!rByJGH!nj~$&@3&08)!6y`VbTO0p4q%}h&cZxQ11tKFiK z9FA>^7b|sX=_Uhl=V5)E+c%@B*^EX~Tso;V7&sOASED}yG4uTUFb+0@`ORr!Y8lJl zuG8u`lW+=pF~{Qdfh`XQfQvX-q8DDgSJ|&R?T+II(|{k;_IFK1qa=47l`JUKkU=o= z#*=p68;mFQ`grKKx|2cIAM{d(7bStRR#LJsDJjE76_AD>9kLO0yMrC{E05+oBHWfc z1KmCF;$R0;y1415SY;@xIL?Hw#XNZ7>fSm@2tg|Ym3s}@9vYNp!A#smyNhOlI|oIU zo1KTQ==*M^+iR*mB6+w#W;>#H^0#gBtWs`N@KQ{YHy-vny=KcFj2iX&urX|d>M#nL z&8}^dtbp{1u@<1o=yhl^;DaXvbjNKcqtlU-v7_NxV8_6Gr&|v5IWKoBo$gboAy01@ zZ7Lxqq|xqAVBs{{UbhL#dj~!`lVPLTZrUd#51KhEd8NoRU`UPuLsI1vmECoZuFu_z zYZvga@K%JY)imDO*;8+hFV8`{bH6#dIdv*dci&Q36gj;($*+jEG)&mFTT7~I?TYoi zvjdZo7}4^IwB#;}Y=T;Mj=yt%IJ!WGkQ?{-?d8Sk7V}3M+y-o78My&#bi7uh+xB6? zU{N(Et9y~Zq zP>v)h#~VfWd^?A3B%u`%%};!b!$LqCwfRxGa?;taH6FHx{&3i6jq3G=-yJpmW@pqI z*|_sYm>Mx{EK7``-$b5`A76lHKS6;0v)K5??bWDUIWdc)=A=FBG)Cjra8R#z$K%Pc z)$R8BzGs=mjWD%Y+*p<*Z~w=pzug6`sW z*q^j*Gg=O^KCk6!@a*<@c6 zo!Js_jd?9mo;dGdNSqfwl3AVOS-}=liseh_$yn^^Ge@2JV$&w94yDk=WyQ3$C(Xg2 z=lMf_01I*w3(aErhc<1!&UVv=M^OKO?G5$oOU+!g z#e$2B1vi+qI_;p{_4=b0u+n~KJVIINTQ{Jk;Fg`aNLz57_JG(Zd?Yg$ZMop0atki{ ztOXZsx!_7zNJnK$$}MWpX-&q#sO5KoM|OwJj^}lH4bW*UQ&s}1Hen@7%k!r;pFjN% zwkDLZQ;$j)l?I>2C}_6&oj&X(>h*S~KNxm=uM?zQ%1eXQXRkzYN|oL2ZbmJkys9ZU@JsiT?>-V|?KY)eU9Q)l?yWbg%{FY@dH^S5=a${LmJ=?5$ zwv$IZ`^ zI(<*NoqByd8TY#r4+N?g*hrulqD}zCCG)2LUt;I6`Fw6vvY@=z+a3TY^hUNh+z3>k#*KyfpoI63!+*6sYihK&|X+qlu(a;~!b#m$A;x=KEY_|f0E>b6s1pR&N7u)fSG z!{ZkC&pBCBU@OnL3-LN{2;-)^Hn(#Quu^r#!E)t=bIr=W44nCTHVQDh5>9=6@%Yi%#zz4_cIr0 z-&3#dXC5URFk=e9V^=x}W?EU#jdpAh9TQx<@|G)0GZZ>Czhx)Y^7bt{))p!Hs*m?O zH@5B~8M(k35(i8JuTZXl#^my|5L)s|3i7ts2!i%#JVG;YuQh3Qfi zIqfx_*EZ?qAbyY&tryRr+(+*=5Lko7BXMoRr%bmy4d#r+QE=zohY^J{umMv{Ljc7( zi_o9=RrA7o7~KbBK=wLDmsa>rQzt_|-Dld<<_#aKDJ$%QU|f19tWYXeD3Yh>)t}m> zWq|j((TB=iz>C<9?9@ZR1!Kj04#UfoUED^KiLJ2gm6c}mo!Ca3%gBud>;;>H#(3QA zdG&gyG3f_AztQm~IoE^p;hE%|Uk>SdFrYvo_(7Mm{(CzVIA6%6paMN#&rv3DXT(}h z`LrmNC+o|iy=rL=3hfr9or*<1q}9^%Lyk8&6?ci0!ekOs+OKhxej*L)Lg5Dl-R@vzlswwj}H zkP<1X8Y5W!B+PaPt!lRexWzC1bO>0KjzC0nm+x26O#$b{Ji@s7xBJPxUKzij_Squ6 z_GFR%57C!6RD=zr^9q1@wbxK|HhU@JGQ<9aLpO1rLuZc%!C?t8t*3K-bn?A>etP}w zDPA%<#PP#Atx?LdZvrOBq_C#kngaOUCVeIdPzX)ErgUu7 z>R%mQAG=2O7y#YOi_uiFlqnu&!Xe z>DoK#>iCLM2rif0J=r9!LwNAJj7eL*xk_$p6|J#Ks;!C$ExeN;vG-me#RV#-6G+Jm0jZPLdOkA+Lf7> zf6^|LV<0G2tr7tiVRVaSUP%_PiNMM>_}h}uF!Pg1LY3mTmP96>EywNCP)?3(Nl=PJ zY---JBDP`|u?qKDT>&)%8k24yDa5{7!bWSxDz&yJ={q&srCWAX%N)f#turP!fkwtp z=|S2HyjG**cY;>CUPo=w2NiuZXtpxCV^US=kI5r4cXeBI`c9wCCWUeos;zr+`sVE7 zw3}ACi4v+MYz2T1vKB<^)nPU$H6Yz=sJ*A{r}QT1yV%RGQ=54i36coUVuss^g`q;^ z^OnK6P!)!Mv_~s1{;22`f4t10WV)Mg*b`h4=TnupEH&dTXapdMycMWK_p>lzUA)S_ zdZjYxW&^EL{Bgi&60BIR9fctoCpu^S5XA7Ip4P6Z9UIXXUd&o56-UCd#aB*KDiCPh>@x_Nu;%z~AN zx4r8ukcsO7?RXUGb`N_{&46;>+#G|Fdy~S5dwK0*tN>J3jBX2r!DHv&6qfMUVP-^P z6YNTA&J64L=!$N@T$~-Bl7b%2CsPcF60-FQyX|N}Me2#>{uSj9nFf10jQtdXuTHPe zv)$n(l;mKZ;l`VzQ&I5OkKuGmpwmM~3Ve563{WmE&VTV7mRHq5$v|1QcVHkc#vMUX zzDMs8nVz?fau<^9R#p@xl1_-E=kkdr9`xl_v?wjD7y?Lw7J^9rB!RTCU1OaiX-uQ& zBj)$5!e-4GE%9WI(}n5PewX4+re8~Q0BuS|R9@D)cPq)99a1^Wg23-|YQ=ZB9yO*+ z%LmJebt;Pp=7MiX(4=xOsAz17R4}wq+JxY1>ZhPH>+i|(5q_+atw55(~huNQ>GlTcT`RRrBg{VZKNCAFCer`43BoZ z;g1@l!NhM3M!iP6=QaItvope=9_{9&*BK6n{$SMoA~8JP-L0!o$vIlc7#_pJ_TUge zRvIl!43B1mVtBwpp9Zz*a{Z)89w*UwjrZs%MGR@Dz>LD%VD19M`Vy!`+H;&{3$M-R zN7wiWT<$K4SL$LvUe9;>1yMY(#^QE`qJ}F`jX^;TAgJ4*rB$L31?^M~fQ#Sk7x}4; zbZil55x`FA8Qb$VA1+;A=6K2&NjHY$MpS z5lE4CoatO@N-Qwqj-yLb}v2^*xb4~n|w-rZ+0jL9r&dUOvU#=AA> 
zmeUb#?bAb%{YqCs!Iu-kAzcL>g9-^)1c&Jx5jw=YAH<7QV2OZ1LuolROO2Jka4i%= zBuoT3iRFmlTZji@1tiP^F^{w-IWal(X-xTDWXG%zeVDC5{Kj1^gUsmV>dka(7R*5M ztd`N#Rs|LE265lwV5_SVh)|mX9UEq&R)mNd5vbtMuDR6%Dm{Rf%4D zqF?hUmo=vPHIR+qB6mSzN8=5HS0P`PWk=Fcjoi=hKyD+xn(byU9p9x^(?Z3FFn)2c z+SFO*&m4sV?dDMHfLLV>$%!c|fkC3CM^)~_<%(DbF3}lm0}8lWm*hr!m9)EAYFGA*eQK|J%***m$g?D!i-3}&Ir>3PACVz|o*BcW)bS+fTXdk{YIfec4ZY^o_Hr*?X=;>ipY(z432*wYkYOkkZyn?L(LV0%=>{gxx zg!-dFapGyn3C^K#^x&+5@!dT9bsZoG89#Nk^u`}C&?oL8R{-t&>_VdD!v~Hf>S0(` zewpA5UM78usYL@tl!OMlDubMI-g$uWbF=DbaG<-+!(E6OFdgJ2*f|(1aCj%0?A9ze zddbOdA41Gdw~`HEex&+t#fFZe3P3$LZ#teS>FhlQp5)0VYF%J@S>U5E(j)FC8>gVq zhYuG>Js2hxgQ>!%=Jt+OG&<~#m*E21ixDaeA3>xE@nRom>^;cMkHKzDLu!>{6&!OiJlZRx%ZvyfB2^F%sNgEvtzqVk6$U*P2m3&&nMbsQM4);n zm`uZ-aQZG744?K`^)W5kOjxAv7kvcLwCS&5vJd&t@0Mnm=V#ZUAwgp_lt(m- z0_vRcG=gPP!&wmySXC#i2lXl^+QzlC48~J0oYh58a>}s;`&M>*q7oo*7l4-0BZrZ> zdUwjqZx0hx#KEs$Yb<5I(gZ?=g)O?X>m*75;#hW*>?&(qwb9f!x!JejJ)xA>$bB&W z@Buo#LSG+{JTnTa{NV#8!+^Glg9UTjiex@b9mn&#Ed~Gt}Q812h2*GTz zdIZE{NbD(+jz=^=ljAYp0Yc=l6OH0{fL2g7pm;sFf>k5b{7S*Xi2@uU41fu!J)1Zq zV5)K{0Nnr+Aaa3X1et3xQ$S|^0Na&7v`4*xbu3bUG%+JerP*m!n=SO*rv*&w+z)VN z00yGnG zwG3}>>3$2PJA@}%Q@a(|BcPZY1%O(hgh=-Lnc;?Dkp1((COL+-k@Xx+-!XKnB|*<34uMW8Zwjan5@C$V8y`qC2tJ-*hK2`AS01(kdXWLyh;X-Z zgfpBjNTBheokI&!%&jt*oB5cdz*Tcz@4dKSyBu`EUVIYOy&GoZ0n;5pGUYAI{h zw~f}TDSc*zq7piQq4t4fcPp3+7p8Se$hop~fSqo<`6J*lUQE3)rbR$%gWL&d@2GW4dg*v#-^UdPxp zF&`ZXLRkV<0eWEY3B(i~UP8-Q9o;W2GmeuJe+(D$WH;cP3A+{2r$?xK#nT{I5FZLr zQNtcgQGaG)L6rzQehpg8!PPqgtmILStk|vG1Zavxpd^BzE-g$XQ&NYBS0dF>mr40! z^&6>r50@dadRis0yJH<#?OI^7yH6tw2&~oBPBaFmZQ;%nQ1Q`EYUH z;!VR94FIpH@kHWoJY2+yA%N^x$hMJf#KQUQZysvoJUN&j!g-AF00;F{+(TD@bY>AU zw7ZCDEG7|8Oaw=UtJ6nvCtOm>BiI6j>Fiz2K>MhZS$HbxwKQ%DU zX_72rBySSrP!O#+cd+J=h}C`pXPlUu1KK6Ukpxb_DUBJG0qv+}-XpUv^_04PmKq$= z2+-OBHgOX+EL9yt0ZLH}GgjcqfPUVE#%nbcVV!}wS2J8C(Fn7)#LX6}Xw0j*TQCn| zRz$yeGbK6|5Py|Om<3E}-b&3fNjc!E%%+Kmr08bSVfdBz%I<1`ql%WS2-mUDR>r@#dgWaG2%+|Ch;0_K}GVj_*0;jfJ z-n|!+(~r{nf!(53l4$rsW@hXkv<1?3y@)nL`aYm3WQ^7vu{8o#2>LVdDby8}tnio# zqmaaZ#8j@58&wXgawN!iEBD@#c5;FlV2+{~7C&)RScK#CluCQbegybJ0jHEw&@7D& zjB+U(1+;(y^ts|}=h$~%aJW`S(E|F9v`Hz@5H07V!+_M($bod^nS4Q%jX-O3We%SJ zP15UFD(|Fx2T>o&LDIEXyj`$Sa(lha9`WF$^9L(MJdtHlz@{+mR zrwI{@0L?(We+uInPt586W$#Vf+DMW`;m_({QS^PTM?#BL+GQH%U>n=Kje!Gry8HO} zsi!2BKx1vAl8oKU`R#9PnUz_K7L0pl?!BIABvs`a85tQ784Ch!1*0AMqXjBHV*_CM zTCS=7?;WJ5%qTC;4^M@qW%|ofCUWyBJVOx2D57m2p3iAdPn#=HA9+VjzP| zdSi9PVyj1;y2Dd{zcDd%5P>>f};-w;_NDzxDc52Kq&Ww?-gcKbytlM1*FOT zod>&@6Q+?Sz@wuuUmz=8kS{9k*9&`;DMpsdV~XzuKsGI{i*s5bRA&Zp7iJD5G(c!tuaIJBh=3edq#E z<>d^$9=Q>SZYfWh=CMFQvdE&*I1syMxm@mLp!ghCx(qpwES?!80EixyZUvovHpZ7S z3q0?Eyi}v>KV+d-OOPs-OpR2X;UXX%vDv$~=&xV|S1;bNg$T1+>TSMIk-=h7>JW!} z#VBTvb*RQFHSWFup3Xcp;DI_XW>X+<$c5kwVwANcKc&cASP-}hHCTC@6HV60R+fss zQQdG#-sQTL_|aHK4~~!7Jft+@xK(nw{60bh!>j<=MW-FipDnQou5ix0ikz(-vlR|WBtIJFJX{94|>Pk<^nbNhIfQ&8FT%+(HK^8DrkX3 z?pfQ-n$k$8ibMgdd)`x&*N4Sj&o#eG5=;K}@R{|;DhN=3i5+xz#6E<$;Sx)Jc5?g* z7Vyx)^s3cDX{(bXVe_6zzT+izXvIO_@AyHz+3i;<^(LGi?N-YjG|~c^Zqe%6BP3Cp zf-7ROlOq1**op)_lV3m!Q&L#d%`Urg!}CH8|9a9U(IkHElpWU!7DS-FIq}f#5Fm{3 zbqndQz2|2aFMiDBsu`~@k6%L*ZpK?$O?sUyfXmB1J!coW-0PF`x5w8%=3Tes)C!x* zd)F>~J$?DJuG!#<8>?OF8tszP+Ny?kb&;!YSMaAaSi}onztLmpmYmKOW4M0fonCJ> zhF83t)h%FH^WL7k#b0_f&23;(|iv-T7~Jm&|QgzV#*5 z=n}&sC+yRgRQ%$n^OGwN0c0+49O2N=ic6lp6w)Ed;@^q&>_}Ju|INjBgidW+=+w4`PTJwWp+f56zp;*W_-`y- z+q(2lhktqN!#}fRboj@HwbkKYUU&FIgBYXm(GLHzb@-RbkA2(2zx-XA%7~;`x~D}U zHlskW$nEndo;(ylx!b@%v)!gEY81o!UkLbWN?|FeTHXpqU^f406pof&fBD(jhd&gm zm$g{sqX7$vDCSSS(e#r)_U5xb35lqa;oF)#)G8m5{W#YvaJXMsY5s@BZx{Y_1)ZS* 
zEmvTbSAk>3VQdXj;LIuLn(AMx2j!g#86!VAUI~X(u`7_yi9M*d>DL-=yVDyqy7fwB*z8rCwN88B4;yPwdu}52l;>A;y0@7} zgO8dUkCH0)CSIiaGgWlaEY0^yp@&eRVg-qzsxA;T&lc7Yz@CPo;2S=>!;9j5{3{m- z5uj&m!Hl;pc`TqA#5rAKs@es0Ns!j9wZP_#%n)s)Tq3O}#%D`RD~e2&1|}-v93G-B z1Q3Q;)IPn^dx=#ktf4Hs1r=&-Jf=}SA!Xx{nocPVV)`CLrfBM0jX~LK_FJ@U9=zpg0)h)Fsa$Iy_23_3_r*RRb zWWpKVvyHp6Pao!917Zrq6(jpB1G*@~C#*llC(&->&}}gtGe}Cej)CQAy&)CDXG>=s z_VBG0){Vm&vW#?)5>7^zI-%U&`cNs#d9Y50n-Wqnb*B6H=g^^31inTygTMMY>;UJf zgYt!UefnP%S5h5YOLB6Mf@qTR#QC3WmXG%^y#DF<>fF1)|GzlN?`4rkY7}~z7l>LV zD&iP_aA4R#2XI}=M{GFr`|mXTqeJVR#MD$V$kYrNS6~+L@VcrbO)#2aN~uS>g{Iy- ze(O&`I3{0sidIYW5M*=8IJ*+hjnRyUF83R)Mg+K$6EPGx#q^+`_FZ*ibh~Us2}i~M ztUZzl{3%0c9K|arUc5ucTFZSK@8qf~Duq8{W!B+4qdW0B6>0&=Q#A653>F@$VbfzC zE0*PN`1Fqhn@ZR$rt;!Zo%MUUh|I@_#+|^}u*?(qGI+k0*ULRy=FOpDi|dIlBZU}bGG=bs(G>Y@?de{+z$G4&#_FxNg8{1~L-7cMOsL`|Z zZ5H!$^Y@jnryu z?41ZwXz!=2o&NZ4ZBbI*l10hLqNAoA({_!lrrR_VH$oC2Wj^n7j4O~h!w>@oq}U!Q z3nQ^u&A+&O=e>fn`}pd`8?Roi5-Y{6#O;hSOrP#Pd9$fE58(jB@YY*cJ6&GB@Gh@U zu1-&`*~BSpP;dT*F;I2?|JRs-rM~f=UtIiHzXqUqmlxO9rx>jq3PXF5(jWx|7{ZBl zN;Mw4y!h$l3KnOx`Plm4ZyUBG6;c;3e$Z{X-5&f`#fz6-yImXh-C8eAg}=VRErOG-4^ zitjVVjwzKeCfx%eK;%+v!{p>LTk+VN!^7v!KD||GTJrlCSJQ*jGjW`<#NX^xOBmS@ zljOA47!kr2LX%>TeIa;D=W@R}76Zk{Zj=GT4hw70zVFK06Vx)E`qIs2a#{WZx+N{$ z1G^AbklUe&loKfofkym%`rwB?jjv%k733o(4hxcY)oPZS z(wl;qfqwzHZo}Zsb2WBiZ7Od}xD<^=6PFmINduv)ejVJq}aoQdW;l~=Y? z@4CH4uU-p)F&+lpzTa=u>K(Ve9r~u8fYy7t$0mbZKK6po%K#>_ z*`=VvYHR%DqfDGUcUe-(*o8 zzL^`pxGlydd>BZ9jgnV#>8QMth z!+LnWv)aeIk%Q>Bx*wrOR(_`NZZs4Ehh;IR#1PIRj}KZ5-R1)&pX*#DAkPpj#_o-4 ziIVe#T8ff;?a{r~cRC-^6b@bKXY2d1=xPkB;qBX@LurAi8covf(8EX*0W^d6Sa?mg z7u}8GqxA9c4Ih@Ae1uX|QeHjQ&DWZ%uh^}ZdErH?hcrLWcU|T+7mlAY(_r1=Aupu! zS8`wB?`2t@H>a;|Fawe+DQI{hpKZKeh)zZlH7GR_zF_Je#lq1@AYnl$W(1P$0j%X0 zGSiLOl(3@CfEATY;k1!c+{9N>FZgn^LG6d4JxX zyd_>aw+R$)U#5d%GT1nS$sh$3SkD5*0~*={il`406sn^FAw{X zKuI>TIdgoiG&?=N)fo&>N!+Vc+;*?wHd`&X)~RhL5l~a>*=@+u2X_m|&Df$X0SoSG--OlSLU~0K7@^ve65yP&7K;z& zFhiw6c2}atkV27CgI=174k;1SeUPPCLN9li0a$9(R5nPXG5teH+q}t%#Kj_olJQCv z7j_g%T16F?ilqp!wOSJcR1{madNrn>7Y-C_gsUD^XVyU-YKau~cGb)~Q&SuMusc&! 
zp2yF8>`v$njrs1AdH7k-AI$Ex#+D+*?c?7Pk)_deIAbdr zhq7nC!-VdaCof?EPL92si_4c6Kb;?_g5zL%3tRlqfiIYTnjg;YJ&KQq={Eh(d4pxA zM1<(ntHS{#Y8ZA$(g_CT)UPG-0TK%Q51PZc%et&T1|WD7PDM&hp(Q?=&>=+F=n~)N z?+URxv*9qL%#PJ{SRwgxdTCFU>2$r;?l;=KzT0stm0`c@w+H=NtKUp>Z;Q2%f&lV{ zxJ=+J(H4e2;DNp5BoS=C-t)Mo${+k&AV6~6F;g$h2ZgV^$}|2J)1XpFg#w*}N#uBo zMUKx-U!OCI0DBn;FKH;SCP7$lQwc$hTBFga_8LRC-)vVZezn~S>h*T7K1lPuU(=d_ zCTmIw%FqoOeJV!`E!|)XL=Bz&oT;S4qn`q&;SH=)46+vM&OX-A_Fr%NN7D2y$338Par8fNZm&9aHI(* z1XzzT(j6J%bIcs*Gs-GJLsM%u<&(~BH_(Wz=60&3nii@RXG3f(XRQ^H^( z|6~RFbEoC~=eyIZAH7!~T)qLx?TXT%pPpZzJHO}tJ6Z1SPsgV>-ucPjPOiLnFTGdC z@6K+nDFB6B^PtV4$9fJ4N3E<31p$Z%9T=|PTFn+2)^?uQk}c`o5s&s>g&i?}cf z3DIOlkHXDO}w^8l(yOm0H&~Fce zfjg`Z2N~yStd-a~8hcCUXOoz1S6Lr#+Tch_@%UqxCBn0fb*7)UEhz;NtP@Mt{uHpR zElqJeq799~E?-E;s0X!i53WHy(I63Bu+vaN-3&()qOn9hZGRSs6RyB81+qI_g> z>g(KqkdMZ|gdPUO^eCSPDWtJp$uS&PtOiTKe@{Qkjjrrb$dGdGk!JYx|*G zA|o^g7-t%@1N6C}N*LYPQt4@MPnuLHPw!{r0snsD{7)v2p4;I9IW(LrlhvqC>mFUZ zSL#RKiv9qJN0?H=zw2EdD(-!dmaCwSp4iAYD})*ZV%IuGmf~KV)0v z;`zt(RK+Q1(S>dkt;)~XF!t*(s{DGe|}q`crz04<3Gz*mSWf-U)H41j+g z%`hpHXZUoCX8j)Q2KWmn0Zb1qcxaL(GQ)Ui11z< z1F7QzxjfJe3X$s!$(3VSsD5r#@t+y3Q!1WcT%EkTKFOul>PW3(G#X58HJb-h=A(x` zrKJBtye;dRrhi$gYz{&6UQ%6{AxBmJ=NC7puYO4%01unSy=K8Jm2lQ8S^UPZ*6P(7 zjbUqmi$83%hW(D)?00(W7C)f`&U{UDq1K_W^YmG-uXDppjTFMB7kIw0&C3)omGPq2 z|7=*}Adi8HTFQH2=6OWA%N9A5g2IG3Y19BsZhrBuPS0OcTqKaR&{fBATFqv?#j)r~ zWps>L+fWC)S}0?m5oU>?Na5@B%A$0{i~@_%Ah1Irr=NH!F_SZn#!)n5%Ip?#bUI~} zWFKNRnsIQUytqLh9!ZcWQ7Q4n8LR@zRfA#~CIMNXGItVC&f(<(b8SlEFHFS))ULDz ze9Y0&_#+tHsEmVil{*XPx>1$S3;?Snl-@5&#Gx}pP{xdB^wwpSyAW1HDny_jXDVF; zV@iajb4`<#p!#Kxk6MF&!k1K>6G~jOj2iFH;8+Of(1K*RL-qyJc5;m})#7B3p@KF1 z>jk9E4_7dzp158mp&eAIabiN62a3!nZ!3V<7Xg%bmlFRdq~;t> zg1y7>Xs!mqteiQ10ul3c40X;SD2R{AlhSRpRg0scPJKfv7@V{#^Or<6)TvJiLnQqp zX2Y}ZSj+*M-RJJ@%M?}r z81I58mo}$;MXlCk#kXYo((8*8wAcaT>Ry@{Hb13zwFUS+qVE8`mvjioc{*!TyW zcCsdjRG+r1J({9njheO^Em}bCIPv24ZCLQ4O^fwLp$7+eY85u7(?|@y+g^y zW>elWO_yS|G~Id{MD_65@-PFM&;t#eI-{SM74R7)zfc?$jEL@(rC^y08r&rzyJ(hx znJ1ulo}ecF{#K-cH$4Zs#W``1+nUh`I4zogKfv z_THSFzJ4QD8qU#C;>E?QSJx*u$rzHcQzDp19q;Md!gN++oH zf@%w;y5`!$%4{h0urdo!Ji%^*;t9e>W}2;8@9!%8<$R?PF?tF+vG@1>EpF|3ut0;v zMKGkIPv}MY2($f6m+>f*7UhUiYyw{08u+eXkUs%-V{$KhoR#qR-<^IKs;W=uVfhFi z#p5qp*cSC^%qyis9d${5%N>F%cL=S=0Nrno51SN~--D;W9=XxV6&p^&?dYAL_HbB- z|JCZlR-+4oeBcJHT5T|>x7PDvMmwpPZM^58siflLkMzY+`K6g3<5;!=1y1wo?DMBS zVkqgMvyUHQ68kGA3qRx+9{vC^KTp4OgpWZmUi!MsbRoKes1OBF#KDkB4}5{blyZ0< zvn_h3=f^K@PXBh|y*fQRK^0*%MjF2S#Ia(6#zW@`kGg!sPmCCGs*21by9VkpSY%V8PtO#JidSS&ab@JC+F8RrFdP`SzC40gG60c!!$@eOCRZ+hMEZ+ygZxj zS8i64h2gpLvPgTerK43)MMike!3hFIW5w1Y!9Q!gyjK@zrx$q*^^&6p%|Hfz9oRYk z_%=XJ+hD2+7ou@tv^nvJKvfunaWO+K$CLc4K1Yip4+td0@)!hq3NTG~csR|cle5NI%o;fq(p;lR6}d!wPpW(;a&N#qZFjijF@e#T9Q3L=zsYC%m> zLBLXBj@~sL$ft>K$1!`kG z0ANz2Bxv5!q-Qmla++?ToS3XU(x8umMe3<&zSias{c0<48`WAHc)n)M9kyM6P;Io@ z>kme&sr0kaDq`h6tZC+jkAI`6jx>hq8~t9d+(g_zofP~CLY6c$@9C|6vhGngMFcY& zf$>aB8KdNICNcL>z9Y9EQU=1b(w1rpLl3aUE??{74Kt~a$DI6_6v?7@$9Iy+6NN*S zXOn9Vo^d;L0r1&JZ6@Z#28#VLoRB3lEC9BEHQA6DGhU)IG$%Eb#F>;NjuZLRyF$*5 z04knmL7O4TL3Av2UVLpgHLU-6`tx#cYT|&{rwCyle~9PHw206 z7m9@rtx)e|1rT*!cFn68^A1K|%WQEKQ{Uw(#>q2LF2%xx;5AIR>pe_#0h8hYf>#q! 
zbMYIPV|C|(8R^q6T{16qdS7rROXb$GVWbGzDz*;1tIGEdG+8yqgCb`VKo%DZR@ofY zVXJllX}X1eVq@+^@8U5MHc-3VR?tO`AydWMYQYE*U)==W8N3 z{3g=19l6cqQ&7J_`pFJ?ezj{=iJiC=#oXc?P0-DMlNJB-mtieKjRH?9E6p#IjnJUh z@qt|)bb|&k(LvDn>&*^u(e?FAv^0{=KFdo|m=cbZI1}%N3%vMCl=1LIG3|vnU5&?K zYQ1}Zv`l>g%LP1o>MM$Wfp3U8r5s^PQCI_>7 z&eKlz?|3^%5l7y?Jv~>No06%<`2f^-N)5{ShldA{P@@Q(@RguKvOk*2z{*Ji@3X6S z?AkUPs=W0vdMq|daK`W%iy96x#zD*5sUzYuPm6ASLM&2r`NSg&eP1);a8RN020F8a zbeO^gdJx`^LMXRX4ORJoTvuNt_3Q@JJIFF;snS4QbeqSQ*Kpn{b!VeWI^{;SWsgAwgoxSa2w4D5Vt-(0AoO$zr|TCN`_$b zfOchtZ)eNtdgI@8-Y8o;{2OafBlLj-ft`%QOu8boCbeq97QzdIe;h=O2A@XE*vHxh zK0LKmvKb}F^WoEQwKhL}TDqs+Xo0c@n&~OQ$y|o1^LJnq(10>!+*k|vl*v4NZyrd- zJSIp-%Xnz0qXwNh{9i}tGshN$(2JfeE`(Oc<1tNn@0hBXJ-j~#4E0bsiPtnA)Vu$> zI6uMei^(=*_wUe1#vBlVaL=Y9C&7sQxaH>QK|Kba=XkHN#DD{n*(by=A5IK9IffW6 z7LO+Yom%r6SyVS_os!#Fzv%4N@8J-=Wz^F9bIc=peY19$H>W38dYG`W$mc%^Al$j) zLP+2s9$20nkgc5&JO1s-+ZV4{aY-%F{j!07bY$9FuNUCxN#Z8G<*MVzJ&rU&3{4o^ zk^$~^4M1sWU>+>WWF}+Rp*E3--;QTJ9|IQ<9hXQ`4BxwmvGlkJ_q+T!8OC-t{DR~D`sVls8!f9wqXQY% zP%UgSiU-4hZS5uPS$LojJBhPc!aQo^B69@`wJ>*(W8AmS2qap(HDa4YH1B8~U5@xi52|=EOu7&E4eWk!f(Q zS~A`R+M_j?13q~pz4hW8iV1xbo)Y_+?1~24T1#GB@Q(38b^Y z1B1}_1VSSXFrdi}s-IawPbwYdM|rFFx`h!vYiHN!g$<*jtT@Nqms}5KbT`4nW19wBaN-bqsg{87L}-;x`#+nc5lpqs3G#{DJyYd%<{gJ0i}G4tqYSd6;@Er1|Bzaw<5V zVRwtXw_H^lF|n)zDpyN+@%$8CbU5v$&An0YzeSSg#xqg`&ZJaEO%UI~Rq^f|^_Mp1 z3hfgsmDUV!F(HBNHB z^IM?5ni;*Nk-!rlcu;)9CPK+-1c_`T>w{Pw%<#Ioj!J-M50kjIU~}zQE!v|yR|C8b z{BY-LcwQ*u&P++7h?)xY4~B)?GZ}hXv#br{uG<&9s%wnFw_0PmAN_;Jr{cE7$47Y< zV}lpnG{1hlQLHm=-z>3d--Gg+W_ARh2B3se;}{SZqL4fd*O7R$X$> z>X1%sy$`{5O|NNSn+-2xD@T61YZ}~+3PhN^uBq)v!&Yr=NiVi)YYQT>^-hhww05dw zULfzaTH+Q1yGx`e$>nl%OL`E;Q)!shI>b#82gtWhmJY(uKiIbrmYyk>Tkkaxy>~>% z80pi1jbiOzbOui*?1d83fmu4=TuyyE(5BFAQ`}(J=Q@0L%|I$CH0-zJ7Pdh1F}(NZ zsN5o2z+^842%@ga$!7`_ltRlW@pM0p=P(%z``ngXL^Za+@ts-F?hg8~Q0R&3 zY_^hkEU9+L(i=XMS(D)o8LoO*)==hKuY03gzBt(LO_f?Tc3<-2@%8aJ>Th5CNYP<0 z7sND^U+De9>tXeLfwi82Ei*3BBy@;@C|Go~xZvrGw>&wfe)L0-9IQuFTNs43wfKP% z-GD;kh?!z&CqE(@V?lstKUs3%WT2`+K5~p!reki05x>*5TwBG1SMr&}JzBYC{EMEG zXuL>ep5T-65qvG7C#$Nf!)bmOYds{FS#fyHa1yr26DiAsdryU^B0p?Ig2?|>l*KJ_ zPj18#CkQ;1$8JFV&wV5)_5=f-VZf@^6kl8^z;($!%rc4Y-}wuNl4auz$}#EU6UYXK zPTqQ0aJa35ge;O;S`|)QQ*g$$#N9GbGwrV@`wSh#=AQV8Auy&C0SSKRxpkGr8+Zw~ z3d}^c&7|@1p_hWmF}LVII-+3TY0lARpF02@TVBp+>UpkPKH6y^)-~YpoePF&ugGl= zdiX#yPaL*7bt|s@!8qM z3-9{u;^rE!pg6c8wMytlO-faJpXK=~h|?H+^@ClC>}``pQqYH^KjJVF zeH9NKfgQZ{b(k0HvEZuJ53N#iDOmM7uH43iT}c<;dli$qvO5 z`9lKk&^)`%8XW`hrJCk;%aLDt9&iJFFPx7@=|_4=sF`z2kYTei^bEr#MIRgryTS23 zRNAnyM3d|R_(*jo?f1}a4!<#;3o$tmI=h$OpNs}vv_QVLIvDhLB-A-7k=a31B-RU) z?-k7cV=(?=cLw3azbYV^~6e~F(;bIf(cEL9bd=6WPtI0 z!A+n*xMt=u`m}WibmTI~G=~Q!qeOF5hionlG6xC1ip0MXU(&#_W3f$9dnH}p5#pvd z4h(J-dV^pZ26;6wJhpuuAV?j~J7K z$DB4Kne}IPf|D30k1p3+1pY@pL6U>R zu+}m_dw4j>2x`tz#t`ZB;P1sEbpnk)(<`I!1iHe(;*0nO;kUWOJ{gg!EnYe{$iHR` z%XwyWT}(E?*+s24da&*#S$@v|B&X0lW4KTntw((3@Fb*Jho4$ePvW+W3ES z^oK-Bo&ir}aamiH&{;^p$ZWRPS<8m7#v-o5v^AU)!aEMz!=q3VjJ?h#Qz`6cRRGay z(zQ^%RZG*0MSxfYt1#GtxdMO-@5h`X4cmdqIf@^2{sYjCU&i>yN-DdkAc+AtM$4~c z7k6*W5!Bs@XN;J|3?->RA0;S+em6Zhlu5wwqH#4}Pr=lQrN{~it>U_chZS;Fm}(=> z%oHD2k5052?M?@y^b{NIRy8}C90esMi{2@TcGV8mU4>3CH;3MRJr@+=a1 z^Rkn%h?a#ZUK6!1aYmK+jW8r@PkKR6C!kZS%$#J%%Z)DQ?rn4%F1sOSL50qlH9$!T z)@qAr5@mdd*U~7C`RFz|EJ=X!nR4aS?wir3GdBDu&)*urNOhx&@~>!wT)Sf@$}J~} zNceZtbN{pKw74QWRgJArZCcWd%_+RgWNILFvs%ZL?8Roa+O`(&s{ocE`51dt1fFzG zsGKvI7UEa*BWC;HMbQB`!SUzPF|2?YF@HG)$7K0PD z&i=mA`r_QjS#{!~vgF}#lr)*VgP=i{)S&QvvRC6KIe8L^3mFrOLv5C#NnRw2GfK)e 
zVYAw9;3X`eK%*{E*&Z7OQt_{x!x=Lm@mt=!aEWli0BPE$F6L5JkT28zeSo+Vz&q{sR}AWikpg)eg(H%bXx^KxhmeD!e8_a5;sV|F92~k90h|4zkI5$(6HSj(hB|lJAIVpxOHJ;3;&eU1h0JZ z{N4302b2$uE=Oc;^CkOz3aA)*KQIfZK%;p9A>jymr3cfy;E*97aSY_3DNkWkG@ml- zuQW$nj5VQH2(tm6R8+kfU8}}Rvfe1xBteF%7nuk7LzIwqAwxt_p@W2}(jN@T4yo zNXPuz9}22lbY_5immXz?H*CwbP%(Q{$RKn{C0~tRppF`N3yVS#(&VT~ZVq0Y7G-E{ z)*Fp1eB327M=3?MAXi0tX9*Yd2>7T4kQheBWD?RsD#pN&n=np_OOtQ5C49nH&z{&o z8y_p*3?Ydh{&O)>b@_EDS;@jlCrO2!=n7@e7G_yfiN_Nkpc_h+oF_D!Cv-K!xPxHg z&J4_ycYK12ItMeT z#YmDj^b`|_>$nTt6q$QS&>2poMwx*1hC$?b6?Pl-!GBwvg;3(abdZdhzWE7f3J_WtIf{%{!eS%~v7s?>qrr|}AD+=!Fd zFuH`zltiR}FtXKV?NF=Hva~}g5sbQ@vGqeq%Sh{$%I;D!vRt98Yc*DYfxW_lKBcch z@gZFG%49Q5@xlIsdnikA*8U7_@>wlUFY?AX3KV?-Nex$>Or;(*jj=lLv#hekbJVQ? zt7SrVi|6j1?bsXDoGhR1(3!GJdAoweK~GcO6pIl$0ASEUmY_I#yvxont#+zSbI5vs zG#~+FUClS#`{#0o-rF#o;A8O*VObSR>YSoy;?VC0dN>`VUo{G6uN34bKVM#4-FVl( zynTLg=Dof=%TE%y!m$0P`+^6tj9Q2Wm(gzk{cLr6C#ldlUMn>gIY6@z3hkH|H8fX+ zHmD|-dm1#oBF9w~36@QLxQ*N4oXfp_b;;?3aP^P?QO9FNc#K6;n(8VVc!ev9@u!W> zkk6e2li31eKVg_+j2efbij~Ehx+wbU^kfgqP9`aIrWoL9sY0?fJ2gr^)NFUFcFwqm zk1lz?q6yDJZo-S{h-380e8kfTZG?Of%E))X4rBc*5<#|3Iq+JDt?e|LwwlcOUcJ>= zmz*lD8c{642;|nhBmq1=e|^$wXscOQPJbcIY^q6e*Za{2+ZbcdF!>yPV1tU!m?Q*7 z2;bh@s$x#`#Re5yH9=J|Yc<#8VJTw|o3*K8%@N4>RjU-ivfH4mpH|luD_KC>6I zCVQWKW-|wYITW=Fd|4zIROSEMawAuwQ&Q?Mr$y0=Rkd|6;}(mX>c;>k;{aNif~edO zA*khJG?_dd`h!n+LxuiD=1=i0>J6)nVbJfihxKNqGVHbc&0(WH9MpWenr+J zT$~OrNwH(>T5?t!^*t$CfJ){mcc3gZKAC8=42Y`U8OZ^5XpF{N&^%A&W=p zUNDdS5XEfbbC~)@lT|PS5xn3VnLq{X3su+6NqC=_Z9qE^vW7@N-bC5seF4xjw-XkcW7 z^VoBJwg^)uZU-6AlQR>?rcGsNP|@sZq?`~ScNwPx{gRWR<#)S5chIl4gBF|&eytS* zjX}RLXr!GCT+=)i_zC++b!+S+)wO*kY;y4{9Cvel&MAOT04_d7$K79!d&-Dj*a6yH zumo(rl4E(CKJZ4HHZ``$sU7ARRTysx`6b%3|?U3S468}EOu+DdGJWbE|Hrs(v?5HodMT!H*p5DAe{bXNnx+PkP-;} z?KmFuY@y=EAvY(%B1D;pYB@@7Nu9$YCs%20jy)rYu|1%85H48}j!f#v^6_16)=<*c zI&GI)AyOEK_&jANVj3Z}59zz?LbqFY2Y$8P?$w5sO0Qk-RN+Ux-Wnu#A=iucApd5b zj02gskE9@U?Q-rWkQG<6Xgd6BBZeYDwIqpiH-IM-a% z$PQUML!Ec;4{a3oXW~lw%z>EPdWq>vIka>ZK)B5oM=`~b<^aK>ncCb22RLjZ`Thc$#?yH%>!GWmxG%$da!nkMWtd!EzL-6+)Rm^mM6<})I&6dstyIKQkRKHf(E$&K0<7^Zar=00SVl|(mdY{T zL1nT^M9&~LJiyH*Lwk32asJwSd;GKZ$HroqHWxnQ4BhLii=VDvUi@^9a=xQo(9!;0 zQV>A{^H~^DWX#BxOVMI72SmRjSOl7dOrKQP5*@)@3N-{VVez&_orqd!E}QX}svR+_n`S;H&qj>1eg4 zF>g7W4`%n%ZHZmnG;uw#i(b;7bmQzlgCtVu1Uh8Y{kk9_GqU8ui5GGAGmIciN=Dj8 zl)nQrWpVBXzMFJgayRL=YVA_JzTL9z2I)s3v0jAxyO7y`2O?`s?NJCVT~MK&Y1sm{ zBE4lGz3lRlB*0muR}kLl;9rg)AF5rcXr8qqi3M~D=!p-bydETAG`q?uAvJ)nYl8oa|4Rh|`eX4~XO>B)Z*BJrZtMp7U07wCfLd5O1O^qU7E1LXsi+OQ$yjsBx%4l?MaF$4_kP z6=Li`e>$6@5b{?RBL95Z2nPHU=Bx@)8vY2QT1h@3H8RdFs7yK3!BeZcU2^8Hwi*c# zA~Jpr5R(J_Hv&wZjhAZmreymY%n&a2BD%$2+@t@gq?5>mS))cFCcmD95`$mWxuOwI_OusY}mqrlLrLyJ_TyXBK%S5M*0? 
z^QVJ4P{ZN0@uL6V1{8Kq6}ryLSHSfIii6Lztph?*^t z%1{2XdYtmY9~}u*e{=fkhF^Plw7h!t(tC09Ge5B`)Bz9Goj3zFI!H^LYo0_jlFzjB z+tz5fJsvW=S5x4>QOk>2(a7#;jmj)@u*^&()x%M?>PSs&J`;8Eu+bb2-Cny@Yx$K* zz1C>MDmOaAAWdCt)i>0})^pB4TB9p{_+b4DYx7caif@%ooC*oEQlt!gk_2xy;X@8n zO{`5T&Kc}dMzKo*aw}r%HYv!l!Y*m&VR#>dFQz-LF;p1H^gv2+svM4t2#=r=8U-x+ zD>8Bs8XIybT`a&!BrwngX1QQ{Fc077CsU!o8X|ZN*k6q1WRuBBh4pl(ol>kP?=Bb* zq&vePVlpis&E|Mg#6XM}YDET4W*Hh6UnfMmQUMmHQu-|@>kC*FJ#S^N6H@GAfWH9RMp({RkcD-d^2c*ToGmlR!#lIV)W0r+E~LeJk|j^$9#*6Y5x0TZLN&{4#<>|B_`@m=8GBL3OfILfC+TQ!Qj8A z-|INfQED(WR2>u!(zp=+yIyaMA|jV9BW?$Z8;h^49QFP)pOC|DOUux7s0w zSUwYM8?wg+OKh-g%3MmWD*2Em-U%f`%%mhN+c%U9j6T`Y*QjzJVLp-Lq-9)%m?QL1 zF|2OA+2x!B&2A(kQAj5o_|AJ$17sT9TZ1DU9IWR5;%JjgB8;9vYye3HicK%@wW6rz z6J_zz98}#}tL1l_y-KCp4|>%>P@J&59T z#5UD=!|ag^@`%}0rc}c9s-#VbMzJ>fvqkhiGA=T5^XSS6n}zgIu|dxmukh^w#v#2h zEEyK2B}2W}0Qfe#)mAsCRPgZX`2)Y#Zq^deUBoOI7Lg@Gr&go*=lJQG3Tp{#G9Jy~ zNC!9t(>_4Th_WjAYU!h7>Ipao3hEzkW)9isvM=3iHkX(ZEYq<8(5K0LWxfnl%<@#b zM&wu`OQHR!j@TX{`PcBSMfT2qRgrK*>7wflhmqJYp) z4(bDh0)>>YDmbGs8rB#`^2NCYbmaWCq{pF5gxnaZ^(Uy zMAt*t6M7mMy=`mP#;{LY%oS&)6MfPFx7q^D#XQB~! z?d;DNKIxlpQ|-?qBT*;^Od9@(j@dnA+7OGRbUtF}Bn>lxokEu&zTQsJr%#U{e!&qi}@CXVYn;CM8a?GPM?XWNEi-bsnJqFB77>7`*? z&H<&0CM<`_m0FB)B~fTExbw$D&TPwz0xZ?X(VTQ=e+?}{Q^D~Ts-QA8>+o+uyr2u$*o7iz&0PqA_ zBG%c2D=ctD{Y>-D`!osr{#5aF>X))lZB>2X{>6@o(YQ9cC zA=66^}W>)}$ei4TB(S9z8>YT6Q&km7kwY_6SVYvys4 z+4Px9M^?JgI2nE41>-rVy&LdtR}TbiXuW?o8zIj76S`qpjhCajPCCq}Zs&QT7aqm{ z@F;ilGdCHK5ifU?D+T5=Q8^{c3Ivg8Nq$6}M5M#Nlu==$StpHl)3syP65AjS6tVP- z7QFXE%s#4R|K%393pP9L}gm0J)h@^{YWF{DGN}mfFgM)0zSP7LAdcCGR}9LwbO^+ZFG; zmnW|<{UWOKPG5LGp8Vn+UtJyl;=MXPyFSr^S)ZbPo?of$=Dj$+yk^}8MO)+`opd%j z^8E+r`26DBL&@>g>Dh^TUMv=J|4kfhq<@*r##a(Vi2{Ci7sa!^&dt4$^7?xJOkEvI2r@%5Mt3s z{YPonpYjl_p4jbca_Ii>gWD`Q9}jC3;EKdgil9E9g(DD3F*4Q?m>FQE{>OdV#HgTC zYHfPxR!h}tm8!w38oh$YHF{jap9@ZeiRoqGJVfObhw^RxyWNfX*gchkv4Ohz*MTG)rjDF=X zP0#49TX9a8e+eo0(lo#e7=J;|njeE;&W4Bz6##h|JpiH=4p)di--J0Bq_7vLZH`w6 z1BdK?sK(Wcch2ns{r~2$hO+^oZFFlhKn;_`|MGAB(UcLZR&kD@9L-%9CKc5PM|=;^ zxx*uu_*2|F6uAv;;THoDc%ogQh6X6AH-=hjiIivJPsv7nB`#?@`q@-97)}r_90q{* zqfI=>sAY_txhll7t5=-=FPOrDhJUd)HR;q42guNl=nstL$LVT$_ZO~fWSo9}c5(3* z4~LtRD>k6Yxy|nqDd z8Si88MHEQ*lh5vbw5#BTSM?gt_K-d(cqSK2&ID0F%rTz7J3V`;{3O#$I!Dg))0=DW z5+2_Ca(R-6t_lThSM$F5LHfiC7buz1OhF9BxPq%I937>ME(QhSN5~wchi8n((9Ys$ z5^z_VXM}}qMfAS*Atf6CH?PcyU`pE*KTTJwTINcW(494aX#I%>ViD-2${!F$=KN z6y6)BygNv(?5?T2VO7Xq;bDuSXu_j`el5V&%oX<0Z76eEd=r&dq#ro93<8lB%*W_o zTKkpGsw^VIMtH*ddL9}}yg7aS#`#@-kPQr_mo^^#bbNJAo&R)odULV`dd1hwd2Ei} zS;x^wwkasWV%bOi_f#ayZlmOOK(cHAv&}XER`7*EFwU}JLqS!WQIcH)>I3+KG8KIt zmb^q2A$kYaA&`jx2#Un*>u9_V2YbN-J(wbAlMg=k$Ey&|^MbfES#`w7#i*t%BSYb& zB0s3a$`em=c*vG^GW`mIUPrA;E1%pi_`H0C4o+f@xp`W>V2GEKQ4Wqoqf37@&KR7N zHaISy92`C`A904VasuY-;Fl*Qy9RxE9TfLc2U@l)RjKzKfhyz9sn8^31}I<*GiI=9 ztPE5Shgd!uj?nEA2fB_%&{>%F6kP`k>N3p9vFZsn5E=pHlMoT*PQB47bsM+^8*)iu z?ZCu(yAXO4H9l-3yn%aEDZTu-QhNDurSx)&ln&46`D04yZErI#OFN-sZ_lwN*xDZTvIQhNE}QhNC-r1Ub;?)6f7IV}h3 zmQs58w6s_BYoE zQy*r@{yKSDw(MVCBm1XScGpyPGgswN^7pD9a_T$*SR1sME_8+c<<2m)-EzpyeuK_&`to2;iCsMR-+ z*y3oY;s&ru;0;N%$gvG)s|B9S@H<>G{~Ah~4>J9o$3=u^+yXI+j_}8ldPVr@NVEhF zOHGCZEKD5Yk0}WcZlnWl3jQr=JR{;gDTC!W7*fif!Auea)r2`Ek&G5mBormkMx1qt zSg+BN_(&03*!W#{W=VfC*#w6!J=bZb3zQ;64v}2*GPPuPl<#q(?Jt$83RacMW5rgoC@zlcgu!F>SZdk+`p6_GHQ6*czzLU5RtiTs#6Ib&V^GR@{(J z8WVK8+6}piTO(H*Mm^J*#0|l}G>j^0jN-P%9-Kg;J9w53S_^e_FX4=(L>~nn;1jVh zK<4u1gy$EEScKNTXev06K`?T$k}Fj`lQjypuzCb~fk|-@jK?Ggfk-rF5mYavf>i|# zIM{qK%1NLVJvzJw1rJ3&3ywEF;cOh?q@%celq1WN!!c2dNHIBETh)(tsk#@{oi034 
zSr`tfR#TLs%j`>dTRMK_U?@k4VT8~Zw`oXw2jaeEhS&7ENDh&E&ITk2k``E)2NzOoKSB}>e9de?SRj9%mMDZnv6-_DvDf7s}I6M{auT%R?wwjjYP{ae3bb_3N z#;#UMobM$krY%GZwZTkcc%;nJc%XEpD*nKxsfu5lA!k@9QF0QyLLzI$cnc#FwkSbS zS4;86$+Ay~;|wF~4F;C#B&HdU^tJ{vkXjp!ckL&sigEFTix-UnN`q*tUSen|P}|Uj zS_|akc;|8ab%MvCPS{THDn169ND_`a%n0V8cLnN>(Juyj7w<%ayHiNL-JU4#jwOFM z(SE`4Az67Q9h$_iU~k}vz+fErvO(E+(`6PXN4&&@40=UZ4;Eci381-oSbO33QGBr^ z2*t9qXh5ZuS$w_O?v%PH3$*Pho@t_hH=UUo4%Z8N>h#$9yo!#S$Sa+R{ZVX7F_Um2Nyv(HJ|6Df>%FB|Bnb_G)rarKO}d2Kk-y?CIUf%S#CuV0pW)<&OS{ z%PWZxAN8M7-=_at1RR!s2s{|Yi$Txr^`@qmr!O5b&<5t~8(d6B`N`|88pQyCPaS22 z({wBsV{bkS@i4UIeShGomz!*%<=Y6gf20!eC8KxFN%XT1f5h@RCZH;L8Gf@)MGL3= z#0X&&vw6<0!AgcZ7L&2o9mOnC8X7J}iD&Wmdz{P%VP|2Sa}S)6coe zdz||Rrzo)I$c*rrlJe#;0Gm|IY~LU|y7-I57rdD;G%ls;@txrj!}QyLQ-iX^Z`*)*VU<0iH;AIe(CRso(z08jX; z8IFutH_efe?g+vtj7)mGrz@kxzWmy)~*O$Q7{&alyqqUq$ha)XhPjIB@DC>sK2FvVQkz6@UsO@ToHZm#}wDVVN zLSkuVmnzetrh~&4)m`eZ!D5zI5V8H)cr@!HP2y@W{P$H1_7cxmhdP92Sa3`lMGGi+ zk_G3fW5mObYA4KYc>oW=lF0mYcbwqcqqxa#6k`!qOv4~rY+0CSA@*ot6Bf@Sy40Hv zJo5BL;AAd}q?SHBjQbzPnq>X7!Xv1G(v*o5v7Sj~w!Uj9POTj}qhv#yF~tmonLv`n zG83i$s#Si_#!qZdPv)^b#zL(Gjz&904MUU=Ne}FBHR7~L%NZ$~DDK#Dw)Dr$>`5R{ zeU#%#3+&ot?k%xs$>ChVluMF01so`TnGGqI-?C70Y9*(m3>mw%IurhO!%Eh~?%-qy zrlZiHNf;(foea`5R;AHl5N@znsI}xf%lU{xkMg5&k|9$WChVsSxA$a2b93Qg(uM21 zjiM>;3_?t+ig6@3nh%UPv}u=^5%a_$k3zH&rf5A1dJUJM*KH)#3y%doj93D#a8!N_ ztC*)Yc#A)AF5kY?(aP~s3!Xk>D{rC{RF^`R;r}IqS%bXsoZB{hwuyc$NSNTlB~22#ubIqrYeU{+125)qbH(QjuS53mJW1e6M`p7 z9chOIwnFgjSxGoyh$>96q4J9HPFdX=afqY7l{Q^9hx)~=F28Dm84Gi9_VR;7S2JMX z=-UX!Bv~uXeG&gUS;qc_jO7;=?xf5PcJ zlHhc4<*n#9q4~tAvh6^NIz(Y?MQQpE7^!R^DkbZdv9iBhygPqQYsZd$i>IX9bdh8XBpKZ12mhwQ-oJN17+vN^o-`NkAv(kiNKe=dVr`ZfdY$8Mkr8AG$oB{twGWF8r4p#WAhvaG?#NY1_We67)x6&1s`g-h_3E8 z!cv+wjPVmR;s5%DazsqfskER(J(XwzH;`dPpebOP;@xefkBZ14R$>RbaTxr+bNQQ` z%aMN~<5RnfW)ck-1l|oN@tR4jliHH&lBFBal6ndlk|y+c5zHv_I64Ij33t>P#K*SH z2%weU{z$iaOkq`MvD{jf87H>`98}_Z>QR;}qQ^ftW;8XbgJc}JYy3DmwlE^y+ z;um^nh8tJz(nbY zzW#(U!jSJpH9z^|f6>MG!#LCvHh8F`NV?&0G1J~lU%W@hrZ94DgG?tH7zObg?U6L^ ztu;gB8x-?&XTEn0Aj|IhRd_&ElUrs6{s-ss=8An6fs0d^h9sJ96EEK}r&rQEo@(=n zK<5eGdPFA5qNNlu&uwFPL|lk^r__pF{POr$dL0_P4vk&bLG4~+J)e9+eL7}41y>8QV?!4B-XC#bVf5XOOkVH*LEAGN2f-ZH`H8vEhVK>D7F|yi zio2{3ogx7PeDHdl?-}hI^COO07#E{*d2nnxFudZ*rch`VR$1h)Aom^itrjft{R71kV;v}z6 z{xkAT!YH1Y2$Ir3K7V)p3!7HcT1g~-^zFd(7o*!d@r@@}1B|rX_hgIZ3r08*d-Lrb zMn-+6QM9&)j@jcB0FVE%8o^>ARd9!If!Ty+Q|s}qz6?tbBye909-uos4ndi z^F6f!uT9`EQHb{~&_L@g4qcuv#D?-w{GsK(kj;ilyShFzNZSntY0NND(vqlFff-G5f{heCoYQnQ5tB?j(0--dG|EJiCYuv1jf^?PPE>U- zrl4e_!B=P;6lpU16nMx4<)4U6B-ALy7fM>Uz^vSkwrNFHE29}Fl3>bZz3FxCu6UN1 zM%}IQQnc&IrNE>uOP>FR70Dt&2|G&1x+6E8y8jd+q?o7EbDMceQy%PDQNyLELg9MN zWw>=x>>3^pH)KQ8$zmEg%QC7x0kWeWF&-1D8N>2PS#&Bob>qKkx$|24>tF`!v8E=)r z%_-g+4Y!qKm`(nF8#r6+CLVIemdA6kq!&CQ)qk=bwf2wpMvH&s%_5$5lP174T*kvj zdjlS>hfk{WQz=I8HYr3m#;o|$z?X25(_U^?)D+Wk!_cn+bIX@2jcygcJU&#NeGp)$ zgW+l{F8g7{xyF320~KQJezy4N%vMW|=jN->Wf++O)e@A=X8eVl0u~MPm^#65IO>lA zv=Z#C&~MlAaesvHYY{HLaO?weHVTy|z6uRvE8Z++WMo&8s)T&|R*a&f?74ec)zi$X zb$tm^s*lzT?>z-u!=2Jnewa!%G=eikIYdIOHA{JCcpGZb>#>H#h}8hZa=VnKsak3! zLeo3~W@V2w<|g}u{eG1%dO;g-q3S`;MmZ=;Y|#E}YN)RcOf5swjab>689@**jmsPm zc`*WX{NWIj26>lPrx#bJH@`S#r&bWX^a##r6i>X5qcP=VO2#Rtac2>vy~9nzRl)ES2`-As$M&NpP9hkI4h@I} z@Cq2S;|pmzF@6WfXZgZeEs>L8HP}E;Qd5e!5_FCF^WOg$c&j<8EV9bk;iWj5!ykWg zk`^SiBl7s8D4olI!XSQwY%Pu*V@zD07ON>|hTCnY$8UCpc`_o}NptbkO$k;g*3fjJ zEYl0N_YRK-C5mwLD{aa3`pJw;Vj1pf_9#G>XQS=-3(KC{er4ExKZwn+z#gMj)dvb2#TtAN_1l}`$LC8v6(j<_5n610M? 
z9cZH>n!j;{KpiqZT4lB7mM{FwoLGtKi08U8$*H>Y2Tp;nRU-jXtQy1q!iVMeB}9|N zlT~TH84vIMkb`%_iV?jxXaTN#bjLBHu&ydq+Ft?NXw*kXj(NTpoM*f#8L&niI0;*< zgKhON(9Ex2VnJ_JwA+$!7-o!OZVJfLcpP$wUOP^86l5&aVbqM3s5oyhxojzlTQ(X5 zFoX04MpoUT%Td#GT3gu>daHeit@@F*==Nc^T?;>?wV;Ef7zutch#N(3=IP+VlTbmN ze@Nm4vQ-!bd{hZ4CC7c5x29I$hEybOTGj|C+zReW3dm+ea&wLm%;9WG_hfVj%Pr}+ zYd)+diXRwNS|ZTBcb^)aCk9h1e4y}d+>nlRjjT%#XF^Fm=fH))`LyEjc&NvwL1yDj zyai4jt6M;II5%OhGH>+#*uoNkvX&KF+IP$;!LrscwP04Yh!@h4!8jFwux-GlgmawZ?JP9dBWIVgoD09Qb$zhx~2 zoL5jT9Bi*XEN)Wn#x-~7h>TIv(_+=~iB>^n2B5)Yu7lO$&<)J(`{Vv-(QMaB4ay|msg~+y zppKaDtxknPSm1abg(E_@nemqcJG_Z1i)RQWC0b(%vR-`5qMkaga^h+(E+?w-#&(1` zm#iDLECUOWH*q^zY21-G^oe0NGrOC7u*K|R67oK!K)Ib!LW!y5k6=nO_m_9X(*WD0 zl3frtL(ncsYK;&8j#^>zu1EFnh*uhei#UWa#N?CL0hvuj{I5>_fjuGHg>iT%fI*)m z05NjoxMzXi0-kO{qb0_BvKlW(^RcwU3GXQsug<4#b5eWy911;Sn~AwV+B+kZB_Ot# z?BS3W1CBFG38)5ic+Gv9MtL+v&<<3rBR;;^R!i3Z_upxOxDolGsmx)V$~ehf1E%(v z4T2bL)M;)evq!^F-l)-EI>415P%5KiHzLfJ=@F|`l4L&|@n;c@E5(R(Jcgc?cPq|( zr8c9&Yuc0Z$ha*=i;6ijrJOSNU86$IuLqsXoM5Lm9(&NJWhkgE_HZmIoBCn8ut-Cg zSYiW;a_%VU7OfUw8v}nmBJuX(;;n;vY}}3@eu3Z}Nlp$P=p>iR&KZT=5D6T_^}}|T zA%NHl(#j`{;HGRM=d&;z^+sbv+EHjW1Y1zHHfw%)|3gct?WId_i3hDT!}?gB8HJ;W z&BUFomceK3H1`Cpp6NEyPB4d$73H2G692|0ORLtvUc)G{9-KTBTMqktuRlwknqVl}FIb*WOtj<_0k=J?vIDg&b8h}AyIbb#27P7N4-nm#vXSlhS91SHp!&Fd$ zvbg4;C~nt9H=T^i5~@$UcZ${k2>CbnTQvIBUP0S+Cz!xbb@YlT7*zUl#jC6dTWA=3 z*dS4Le^?XD@F`k>D4$52*w&RIKPj%TCS)S%$@EPfPs<+aHbaaxK%?Dily>yKHKTN- z6TVPY)#5;1+oRk{5?t(x>q1IW#iG%tEX5WzxT}ppE2+G7T^TzUFs@-L`dFd)L@V$R z?=7Ve*yOyGvK`8~MSsN#FA{}_E|b+8rY%~eL`&e~52GtuSRJ3scSc~h>Nk$$_TAbb zX|Y4$tAeE2FpB#jGe)>)96;=rP8w0v$~*) zFg@8Z5xGY%@-kco2 zw4dA@pPt!IF3w^_C;|yQ`h)iP{~8Sk!O)SIi&W~nNOg%C1+BgydxS8^uznTgqby(M z4K~zQ3b$gK-fA&hk{vv;erudEXLBPSI5esZI)HW%6;r=^BgHf=;3$C&wW`W4_(xo{=s&Gc$ytY8ieLz{ zkxep)L|U|wEdy=Zm{6G-WKoRUC>+9Q@DPd!RWwIHw+YaWig&$+R-RwHJn>#0-yBnU zD>gv<(j3QO-Pk)+8gPcLlge(@C$k4 zqP9JMh)$-wrB23V?`(V?4tGOn!eWctRkkP_U2w~0u!V$$UJRg-PuNEM-wmNtxg5pT z*hhf1Lc6l%TXN+HMq^nCf1`jE0{yZr2HM6(ot26T3*h<%DoaXQ*l+CNY}g z>pzK*Y4r=|{VG357#9sV)mE^VR;a#0p*7Q>*iD?#Nx)bxXkth;Zp_OPG3bP@= zbeF^at55r#$#F#7G-(qRL>6(gH)S{*YU9SWvf54(LQ)Krwvbe$2o774Ix4R{W!Gq3 ziWFfO&1LsYVb!rrZ5}MIR>d5*C_zSxkDo@?S*LWcoL?S4BP^>zTa@9l(REV-@PxC) zQjUuCjvUFHmzX=$8x1}e!~_AhG|*5*%#|CQp_Jn&VZ+eUAmW2q8EjYucfvmCxTbRW zr|{29oDfL#V)BPDj-BO&d>lLhBYwq0fXUp<%_z?|Ib~hb~d((|2Xnj<|qgh!t ztKs0f!ps7PXuP{$jFtgv?xWXeISc<15&zIrhXXq5+U8A;GN#=E3q*JHueilJwHk-e z>9(uMR6wI}MouuhFHvM($aZ$Ii*6P?SJMgk;h-DE-3VkF<&!;+1FdI!WYFN+y`O!aj zcG^t!A2Q<4Dba?3-0m=tYqduPvKc`E5lk7w0fCcv3P(D$Lb)uI=L}P$BM+TNsxXk- zR~*a75$+LX3A%M~&p7F@WZsQ|J(IP+MNrRft$_(Jn$=dvqE}zP1|B?EO)-@E|Igl= z_qS~%3B!L&p91zy98x^Q`;cO9WLZwMk!7zWPv+SC@B;}@V$3T9X<0MLcYo^YMmNwP zDcigIzVkeuiAVx{R##V5SJ%NYjpD+)AFfYNB=l=Z0--3p*LT4>TEa4tkk_;M3jHOc z$@rG+AlV&1Tt^%@L9Z;{=qmCS%tCR@GO1G4JJtR%$>t)LL5>PL8Gnq(4ijk87^4b6 z->Mxl)&y>Si#A!}L$Ya@3^>O)jZttbI}as03gx9DBurBgbl^#v{dY}qj^uQPYY+Rb zj-k@q^-e3Z|14JZ=;dehxlaZr{ihu4u@s%uzN{PVd`dbUVvPsnMyHro(K^(A#U2f- zcSPr7o4)fFYu7`fadj zw~$KSLb}{?J~Oh@zIMs^8WEff9jP61$?eip%f2v-|I@FGkV}tes-?#>)%>_eayOlX z%0PE>1~+^|k3WoSGkM^MiW25S2$R-EJtC5ekV=n? 
zg=DVL##l&M61Y<=q)fF$jjk5g^rw6cpd78?btIt={=wLFL~rX=?-g2uy5gx85fd;P z#&7Fr%^OB`35t2M;R7ubZAPVC7m-yt7CNJ0P{Jg=!5@7}sHiBWt~k{PW;7BdX?%rV zoni0uVv+<$O1cwm<^RCX>Vo2u*4O?#ZdLt#G6Q_ln9Z)%<1fj zQ4xS(XT(QMyLO8cWCZFZMLux-N}bQ7#(nMQwr0-A3TCE)GpqP;TY0S(j$nX>_`(C- zLI+cq#aV}NbzRJ9h=B_&S0X#9a2y!4V-_Qk9|hy(OeZ8JjJ}=B2f-xi#*XJysare* zrB4aRlMJn_LV@_V!J?4F_9z}Z!r#dTqG4{Nwi$`th{?N!6z81P&CuWy8({J|f%qf< zo_0;o4=~R7=o8-jX=F7fN#XT#F9b+2 zYC@!btWy@E1YglA2};d08=|C1Cz1J(B(;GkXEYl|nzq^hM`N_fC?z zRNc)dGS3P2+C5uxB?fu<9{u4K`5n(xe{ zN~!$w$Jy~$_%yd?9mG;1RDyD5G`GaJ%%}_0v1Vlw)K*y{9ayBv8Ky2t^Ol)U#dE|I z`!2h1j+-hNk}{XDRBGGi_Jl83(^G;a{SZY9-t&lhu&Iy_3j3B*r*mgzIQaJ!Do^pCFGLPuEi~n4%6IPcB1*()` zGF3cga6?RJ${15^8a&_;8+Rk{ggIskrGj2KUShO8&0I(%(i>B2;rX@_!>^4bLM_%tV4O*^!*;a56uf8(gGcbR%qhzX_4H~Ji`)lpHe<; zN};!FgFTrBGobV#wMgnHCY|ZnVutEQs|H<1LPQ6-L{10MH8;G-XSQjWaAX|FW7#oD zK=uuGKBE;-X}xp0kU{do&geokX@uNvB@^|$od?FkXhI}?eBA1QNrt_$1VBh1 zJ}aD;zFC?t6m>D|@CRf4mC}v-^1w+Ux7{9XB1;6x`3!_@DPh`yBNo9-<_@~L>lpLTWw0yJAfDk_ z#!yPz-G|C#o1x@0(!;Z`L?sAh zIduy!(xR;GKm_nDSTxjFk-gNKk7#9`s(b=N1ys@=NSUoiX+khKBpNP!bye*yD{i<#4-WV4rj8G zYq*Yx*9N-nhHV>=ZTAUjk1`fdloSmOECUP^dk<2$+o{}$CnP4)L{MlSehen-r~q4z z#Q-r*kl26b#UdHw#5z7V?7>)l7o;--$5Mw0J>p#qzYF+Gc^%_`@b?pgfH_9NQNvxX z^IxoQ(zgbK32cGM`qslgI$XD<2GgY;o$wYWxyealJei7*%3kWK1kq8-L_p;W3&a)xTB=CPNTS zQM&hhcPi>TtJSG&)aq0c9`p5O*O%|lFJAdC-~UiBphJVnX!@PEE;eZL2m5s>Y5tJY zIuFWzp*_xQxB=DyYGBA=U!A7DO!!x=hUaKQu9Ss^(Vt(QoZg)O z`HYOtw5+g+5EA^7#!EZ3P8OBw0Gm`kd~U-rk-5nDB%I&ROblYBCcRoO9BWN8Pp*@_ z5r|!gg$1LHBDxX5fBIdvnMx8(!@8dK;PrG-P!Gx&nbEqiKHd;~NcR0_$IYg_=9Zxz zsRDEfD{i62*dXs`>=>i&y^<@hV1jb|gT4p&M-*1QYjsILRy24iHXm_AfZ|gl3(#o2 z8q2~WOB@Dk;2e00(zn0rCQBKlEtPQ@Mh2gtt%YHZ#s1i~ozhN{m==bGh3%KJVp&P} zszWR*r6ex3nkkV>70XaxUEZK|P9ueqw3u{P@VaG-#!lNAU-B* z{i3k}c`$HK?vc`Z%kgZZwZCN3D@v5;H&yfs^kz%=46|Q^)_1tF^=$Ad};W>@q`f6QYQC*cMV`{35;zjc%V| zSnp)rOFwVEdu~H~m0_24bvPw_%Qsu8!s_yyZ$uKmr)_SYtgXqdmFM8e5jN8q(p1D| z0#mcmVlZ{;Suhd(^y#3HpUhD?_;2v>lcdJG18AdbSvnxd=eVbtZG4%Rntl66Fm2Tu z<$4Q%+HLmnOU?#Hn+h#?Q8Nm&H%EntAL3DrHMHizYSr!w*)g<#@1OMcx5^Eu;O&(mNQeM~P9CYFb+Lr?HQ{d|~T^ zKkB9hnDlHlD!_}f_w<|ff6bnjRkl2o8mDnIj0^cm_S0s>y5xV%trWvzdq7?{hU$91rksDvf zXrPuo>0uJhHZLEJ;B$H=zm1K(+Lp~oZV1>aqDH&bsF&+?+;Z(+xo_;auLd3MH#Q-j zWSSSc>!nto-T?LS{ga!Us{)3#(U61lNT<~*Tf3}m4|%^*Ow6!b0O)FRa&k+ zE+ptH?{R7MD{b%;CJcjx63GM^nms{=Zquf}Uu~))l28cgyCC-O?m_mQfeIp zk{4YWrrnHuFC~&pv;V@{#a!$&|Mffi$y6F*=7cW!T9rYwPY7#r5{*Rp+ z*=v{Mh1HHZa+vmYy*WR-y5@&$CW-kkVfG?4!t|e-QujCYTKJTwSyWK&GnD(ydM4^X z8puxg(sCSpEN1h{p_{P8#=+G=uj4QptZzq{;5C@Q^cQhq3MW5ks#tkJ`Y}7XoUNt* z_&*kCMS^&4wj-q=pI_4(jn<>n$LK4iPOZ_)tOxo{Dc{N69Y0d~6P3hb2V(?pFYPeK z$M>CX?KnZMHig;620YuNBU3LysatQ7bkwP}`uHUar{Od#5Oaes9G8J@6UOpuhF=_B zGD=bN#I4EXE#|OU@e&Ijqb3oJaad4&BjIqF#VPj>Y5`SW1>nD`q62r<0+hYB0+5##@#MqVm%d9QL%iJI?&!#D9NM&|CUWD!}r%Q(5{1c zN#qw5(W@Z-pwLZp2%_38d(Yxw)p|vH2a|}$3}dot2Z>b|3O;g@t2`~aioBIFO%Q7= z=R1jRrqpyxB*K-m%eSy<$2!Ib>+H2Qs@Li>+bn0i?xVDG z$}#D=XM0bhXOJ!=GLXU3l|ZQWZ4XYi^(F$R-zqnSD6(eW>T|MizL<;*H^*kPb~Ss-i+>PQAF}w^Ode z@u<^i09v;)XpR16+dH-QyaW+<+?JZ!bJw7RA339fd!G~L+sj7I#u9f+fab?^gn1QA z1@EU>bx(%uz%$sk)>W3Ziit_Cr5dl!qE9G^(^9;9dw%`)vMwAqjckZL7jNIbL34r| zl8nb%EZ&pUQRwnl$ZRG~Mul{Q1I=Z3YEr^9(v`fCe;7Pg%wABRkhMQD#T1{A(Ni>! ztgaDl2>%GL`f%RsAX;^RY}WpJF46vJ$Bz2yRs*yJGawPAjSUpjtm?C7Ba%y>WM=?d zubn+PexWTXpJx`!xr9wg%odE(K*~ZwISpsHVj_8DUXc{&u!~@1b}xM*R-0i_6Vaqo z@74*Q+KrwqHJgy3)n44v5>uk=Z7J2$6h$IXOvJ51KxUf+se8Ol(|6Cll$pH@*f0|? 
zZLB!8pEV|@#aB-33}pfaSQ^|0Wl**P28LlSCViNylKM5m&{&LKJoaV4F_b z{E%vXP}{URh+^`Q!93=@YKbQPT<9|a;TpOo}(M~ebHDtm8E-d&ve zCl?oQPyOqQw>NY&EqfTy;-|B#%QFh%_o&*Xwdjfq-=8MsC3BoXjJ)%}V zM$7SN%ufGRX8wId^?u!oD?FQzDej-SCc0}2{cyUl5-U#2@aDw7zB#!$WB5&1WQ=gN{uAq_HW}q3}#o?2z?qQ%?UpT~Usx*?i`Ih?cWxQm6?fZ?>Cl z(qhAxt{oxVH9-u(hrQ-bFdLO%8b~Ap((}>Kga+U>5=GZ5AQZ+J_pPQr0gc*a@7Y+K zFAM})p&5UyM}ms4X9UUx-pm?^hiFtwqhD@qM@4T>l?j<_`1-e64R7y`(&@_kDViEl zdR_hA?%QaP`;ay0w954!Ekdt{-@bGeb{Kfxz|`dbm7%_I_I1ELY5%}$lJfI^2@oc^ z(^LBzOZzXqxU!{st#YG-TdGy-;kPf{QrQ`&hK_Sg2E(ohu!fi!B_Q8VoRH7oeSmCX zI+Il<#R~a%PiOy<`1UW1ZI2@*h!4`$V!mKmA*ZJuG3;LsSw3gS{7YjP)3a8c^YOs9 zW(U9hZ_zUZ%{snIa2G1iJ_CRM6><0f0yJ#Efxk{@*y`iAFGWKf3?Bt_Sq-G!vFcwQ z`j2KaUk$AP52!l>^Xqe%*ZxSnmmC{m}%3X*k8;Epe~}SVgn2Or_DG`gTO3 zbtXd>cn2IwhFT3k1cnjAjpCY0DrfY*wI^^Uu>}G(=TE(u@LHT4* z*0eDwD3%p;5Wg1e4@>`@W}t1gKmAhVa(@UQ_VgG%&I={urP0tBDlgDrHC`Yh1Yx)= z9_xAt56XE}5Xfw*=apoDVuf#(oN)%90}rF?`Pb)vJ2T>c?CnWW8720-niJ$?E}aIAoaU$V1xPnlk5e>|hgWrfpaPbkN6;+GcZ_GITs zz)&e>^OIyr_T6cjHZeNtax0%pHmMtqW(gZ6xX}qakOlW^O}cFD)GX=E@}9CrH($ir zAl~okZO7*bE~Agk-!0qRpcI( zeodv9(F9XWIE%_V_8bG5X}?iz)vC4q{nyL+)C>B9b`bR%&2GP2t2gSkc6Zcj*M@^( zd(?~YUZoCnIv99wfHYl43$NbwYPBQ!zt^bM>eS4U_YOmJdH;bq$G#({(Cu3OV8%?j)KWcQ2YJIO%1F3HRdQA?IN8aDxQLvbL{jlEt z#hVX)MQfG){cA#~%6v4c3?8=j2?FB2$7U|l!eJGJQD_4r9;N>M?x8XuUH;{IJPDlv z8lZf`%d~Gh$I8>{>eC9#_dF0x(>2}%A0l!HarDsXU=90UMgFDR8Nf(^yHx1R1G05$ zq@_%SY(b+!*EQ5=GIEy38m)%6ZG_|5aIy}g^oXl>Ij~HZ$L{ac00Co4+q;Mt(Qq70 zR2Nwwr~=t(K?Q(5c2TdB$4>0=o&!SLZvqVk5r&f46-q#c+bvEbNe?Q;eU}ZG0|y7W zuasG2tI_O)gJ!4O9aXEXQ4qAku-h0l+b)|gM>D3am*X|r`IVbqsZmBX&%TFD>hM#q zb2u3f0Gz?9LQ!F2I6HZ&fqj6-r!ZRVm!MF*9t@}9;id`mCQ6Tjp2{?v%~2zY+Ci%m zHsP?>9EPJ_FzR$RPm^14&5+;VFbM=AFlC5<@`?t5M=OGQAPu0F&FWl3UA-vynBCBL};y+n2BV5 zhE%e|dUy z4g&q@33`OXKh76}K^&K)3>ieD`BH>-Sd%Iq+Ut!fkqBH@>F3BBt!KkR@rV+kQx+)6 zJVPtVhSVsiK^Hfik2aWLE~a3m=LAoeYmU=LF|8|=@qp7Z1=Knjx<#LWgsq~GTH?I2 zIlW)dF~bZJV=@^<@&j=32J2glW~VS~9|VzT!bz^6 z9;W$n;}nIlxQix>>ZZ-6S$P_-)eG55K{MzNhmBTLi-6kongiHC!+O*mbvA7!-H2=F z=$A~;(buZMCT9DdFh!&|@o+g_tYTffD(^NILY{!c?%%v%avwazw0;PvJ51V!+4-Q< za=&tNwFmO}p9(*|y}miQDgt#pO&*+{!h?%f$%BhmMWl}7QS_TvNN8sn=$(A`jXvx38r)X{^jYLSN`?YDYS%=vx|Tq zD*jfvIDaWMn4D9E`7$BzWj(6HqEVb%PS-AK)>#eqOUYZP?rj-KX*`-S%v8rk+#cy3 zsF`38Q>IT{ws@#J-Y;=D$(*WcE@pZ5;L^oC=gbbxsli@-`|>|--oCv+VoFOS#k4RX z?|}l#d;VRlfNVHtgzlB=K=~Wpa)ZJ?onM|_ynl6ejpZGDSK;!10q4YC9YsjYQJ8xA zV^K?v>tHe7p&}rSRc~)Ym6UJ_tyi`c&FKZ>rhZmM=rgIBT`=AJzj>E)I8l0Me?ex5 z%B}ND5N{~>F{etGILskY#M4uv>qydhizB_oR)C=_f*7MNa@=I3$*bWVMud%SgCVA7 zy9+*!=Sv{Lx))k}yXT@;`FS?4#K8WBcNLYV)_g{hA?d*)7=Az#7W_Vg6E&O@qfap0 zS**o0Lcq$;%r*X0NP78G;U|vTP1B&l(Df}mSEyEGOh3DL?el9SX3}cudGZp0TQqk? zs&T0VJ655+jO{CqOgF7Yc=R~~M5k%*WJ0-gfq%lLh69*L7#L9VXukpv;=6Fk?Z?0$ zCtftXn|t2^9mX)wMTLIfaqa5ei*Mmz1+?W9d)3qV~X3tlnRr`LE92U7ekREC-CG^w~obUX{=xf45<#*Yk11#Z7a< z%=JFN;5rFs6&HmP4$Oc1=d-J;b0K}Fr_Z120YCLFUa6nJTAz9+SL!G5;-|QrCqMrL z6g#;8uwQ!f6Ehk*VPN-)4ga}Vt`j@?Pw#ocAmE4hy`uln5~B;8Sa29K3CYH$LZwc< z+@xzYkW4XIur9l=o+ySb%tinvGz&+Wh$9(7hf0IcA;0Wbk_9`z1hJcssB)~T?0paD z#-6p;Kb~D&6g`l09%e&Pm&A8m7pamRc8LJ&rRY;wg zmL<{_WrMkfZo{xbhdYKh9T<5zPcP0+E-}BDh;u~^$*VWe;S!EPE~w+MRfGKLWjjlh z8dhJj1rS@Xj|6L}{C^flaDqxiNUuVH)4n~Z&cF34q=1VMspRp0RS&8#?7UaW3k^!? 
zlUvVMg(YPil)wVgXEcVpl@|C2zHK;drWL;FBVp>jI(xQ8&3>!a9gN!jYPCD8ciZ)T zFRb-9s&Ue7xsIUeGUQW7UT2+lCQ{cTSjLfm2O?Wsux@?k3r?E_pH^_BgBWdNu<9Dl z^<=f9Zn!An#J_tr?}+lrb7(|YqihbuKUC~LXW+gK<-NVP?{54buHL?XS9p3tDwz1_ zX|XIXP;K<`6n#c*qQ-D-TzD2E3lCY-;S~0@n8kzp1};_Dtw106#E=~ zMoEh_n^Arz@BHNrPGJMG@ zlP(uO>g^Ohv6je(N#d3LVClygKriNeu(7()lve#D02ksB`FY!E7z?l-Gk_cx!LUu( zA{Hx?K8?&LDA2^dgo{6j$1AkZDA12$jDbt0jPvWmwLDVdVY~Cu2zCq> zq4M%GDOGp^gBng31$vG25m6`6|2-{wzyGcZd#=K^Zt!VQ3?A?lQ+WDKyhRHND5@J1 z-&dM5ir1LnV;P~7EM~W~r zi}+I#FsQz#x^E*jXN42Q{egebQYKpAG|5jpGehb4S!R-dAFB)k>0nqy0(h4bPo6wsWa-I6wJxtRyYIb0PN#IWV=E zpa|MhQoJC1Ns&qUVNIc^LZ!^+g%RJuxf7GJV=gk|3EG^&!(_tLf=pTMC$n8;;pkjb zdr@gsU0ZwWaLfhYQ`1bXxgMxawZc!NR?f<&mY&yMEj~41Ex!p3J&g?i(nG=p%p#iTa6QOb0lfgU#w3>yViflJkZ?&7G3RhoL zAU)jh!^u33oS5X0aQrwJMU3vN>4H8pG%HChO#$C198NSTJR?j`h}s^~7~>886mU~4 zIov}!7F_;VWWp=V3@=l+sbk)7s6@_y@b9mqd za$Ms8DxCZX4`K7=&>6I8vmw&98B-~N_+Zefjlx!A(CUEH7=*Q2qZI~?PGj&Gh;xg( zK$}aFuM9=CY3iUa5}%xH2M{iNx<=7J#kZoPioi#kJgoryK$PZ)w`ETqSjkL)VgS$3 z^TUYA#KiI-*(WLhlxLrS{;5>Q==R*M>8N{l8E@~0GoWzSz=P$Kl$yx_b4F`66+43n zP0M&jMF(RvoJA^aGT!GCRHg%GS>Feg=1@HWMZrp+02gS^R)h4Bs;;9`z0QvT|0J^P^Q5qGJz z5Lxh}Z>$2c5+lCXosn!yQ8T2k$D51bnrte^n8F202_BgXtO=)qXU0KVQCy8_{}x6g z^aXqS`t|kMjemWE`dI(v`OWoS>oNO1u_D~I?{rDo_u;VBZPcTBA2xio+Uta^VALPB zgP{MIeV??t%id2))0)!$uc!7u7l9)JM{Z*$pATash2z^}>j{+64}N88>Cfg1!q6TX z?c*7p{8v23w#4>)*_)|Ngl}LcNvuH%rQYb3I~#_XN^Kt9bA4|=l`Dvvo?sknEQ8w; z1FLeJzxizPz^NWLH$Osra$b3y+(J` z=?B0q20_qk51aLNqqC7)B(1oxBza5Vrqq*-juR_LCbI!fBnUmi5Ri%D5+4yWRhd&9 z?cm>Wv{Bu12|4hbvJr<{_00jRX29JK`qhwzC1Ysx?Y-Zb;5061Yt~3X z&Eb5}8uV*nuM;(bsM+lgT2U{o2SIz#=?1N4y%9AAqgoU->tDtB;^uB$_L?>C4eVud zzNmMP+Aa9MSE@B!&KC`GzM#Y+pXGX?@4VO%`f$9Ot_(|iG|6w0O@6vyJY4|4(Pg2a z81E}y2lC7M(;O<7K-47MfJ#I)P|U3%i{tSYZm|1T^mc7SUvt znQy1Y{kh8CrOL9?N4opWvd~J`2;Ly&r+g_|^>*5sqS0$?@}|f^t3l2S#Z*V5R<9QI zTkU$iTJ5yq(A#Sc+flD>Gu50%n5X8vBvROIm74$${H&AsjD&%F{sLeV{&2BQ@`=-x z)nZ+g zPL-ICunP~{;rG-cJUZIDLF09JCyCjEIGzKgLeG7&GiQfXbRKax`#sdjIvbF?-0f)Ntc^&+HruT*6vz?H^;!&C{fW<9jthi+$csu4yD5I<;C z0J2nFW~~(tXP<{~bV0ww2;LlVnbD>w*;00`M(F)d%GN@^Fz74AZLYvGtpJ1<1$w!H zg`CW9AN+Vc0J?gMZ>B-~p`iO0)h>wz&@pz!jqM#C;9r>g*(_d@b4I}UC2hs957Fb` zBg39N2WFwvnX@Z_Mk&5;M+~I10_6_Bzl&y49IvHmWVw+zsM3?!90U3~BYL z-XUFH$glwp^Z}b5L}}PJz8>fyD$5Tj96^PY*aRVlxdt_MSSASLoj?K$08XidpXhSE zOI@y));s32<17W#?}W^@+ruF2HoEn8*soS=je0Yzbvi*med5h;$4zOI_mc3DlJd2> z8Mzbc@MMYEt9&7L`k(95AJ1O-*FZSaH4&IrMI+NRQt;$`5x5%FAn?*wkQYWi=~NXy z6G$Qy=a^0}-e2FGUD@m5mU;&6=gM8H4S?^By6sM5P^~t4!>B%pYJ;HFwb#Mj2Csqp z9fyVQa$NYXr72=qfFz6?f)0?K7Tv`xqmOKtAZTx07_+h56*3F)5sgDi z;wP=u^((RyFTm&Z+12^U#rfaP%JB5HfBoah72x1I_&J^g835hJ!Hz#@ad1|7q5=I_wgEt=mW8G33mZo(VIjc zqX{s#Xm~dp52=Lq+`^)x^%BW0HE93CG$aF{x{?|XDuP6_o-(mlweh%xU7_Hl#^-A4 zK3Ik^-r+^eh$A03lRpGr75QAPP%KuNog1(@@-F6Z_#6b2U^YaLJzN9DZViy1T>teA zK=|~h^UEJBs1tCUKriK1cdbyeM$2dfD*_-`&}#8QA!}1?QGvn2a@UDy;Cia@IqbCt z?PhBjM%8N6Z$!1AJ*@YG)Mb*Z1x8}m8@eQFP>?VFNGWk-X_mj?t>^FqB}&waoZq~I zC8qW4v^W(Cr4Ac;g~Axhb8?gtv=#*uN~6P63lB|&%eR*|C)YpurzZg8^P9i2g@km5 zwI{I1NK6XYLAJSLP2DSd=^^3?imLILh{!o+;bRJIs~jh=b4}ZxF<%ikJKY{f@53)m z+VZQ_{hD$`s0!4PircZ!0vmt14?YO@Eb7ePv{l{&OLmYX5{s^>l&lCvB@i}1q?=OR z^`Zh~8}qNR<3estwLQ#*rJ;s%pBu_7Y5wq&QJ3kS*V7?N09u?C^I&OYtvBPuxjVIc z5m`Wol*9S?NO5SbU=Rk_Pzm(&)%ndC#|2tcUih4P+Us9vQYs`;9l1}uIz$qnqy%c0r3^2@?BWXm>F8}lW*?UYI|I^v~cR-%hgUh$4 z7jI90;=3BeMc|k|x_m8Hh@N-RBm}?|ljJJ^1SN^iBrSG~T3Fms9RB7v%D20oQDCp- z8qf!Dq*9?KQnJ#UchYJyG4Gl8^K1Xj`43m(QnR39e;%~QAD|gb= z_N#14E%%ygal-7%SHkQ{9%fh6G4xTGjYrE(m|c;qb(=A}vI(;*>1@ZiXq7FPUD<}& z6~^p}W<@T{uH<8OWjD;mVz)mg8h4w`FwFi5+);Xw+Z5Hi9R#d;nct4I-7W)#rn|2;5P~vC=lQPh6@aaM&>#M 
zS9&*};cLo9iy##E9v*m(J&nN)NUebAO>A7W18L^YNVc~F^mshVVo!|-5C2qHr3FMi z34Q={f4+<0xP+soJc?X8;n3*6%VS0rEy}_~A*m|BKE5NtT#6*sNbdr5MG*I9^ZTk) z<2ZHBDdLM=X&*n73Wutf$HhZAXGuBQ@wiWUJ)(N^53pC<+w zk>E%X#aumuJA0D)dcIzX!Dfoj14664wwt6-$uyxwg*&V#)) zq4%YAkmWwuInoeQXBy%1B&0p0g;CXc-rr-71}S14`1kC0A5{%`UXtf>kWW8$>j+en zX_ITN!i8IxCR>Jy-^Wu!+tkx1N=L~VYS+=4;n4tyFq8v-EdMeT5+6~ zjJJHP-vR1JH~a9Vkx7Wl2CA^WfaT$2CoWsGlh2~UFo{95TN84Z?0IaQF$u8|r5nzn zpqQnPo(n0l2$@CYEV`Q*mXjZei?=4qVh+0_L4DBG%A2l-cdSZS&tMCz)-%|NlLsk( zk9q@)Geg>T3KRoGP%VM?F_^4j+Xh;(e2)==uzaw*T_Xjx=<9ffK1(s``9%-CgpGC| zEs52wVo{~X^J>*pA+x132o&L@I{z3gQOJl`%-B;yVSxCHHJX1hPcE{`45L6iX5yvC z&AardX_vOmx+HufZ_cG{Q&!n?b1=yxWbg?3=OY|AFf5*Bpfy^0o{GcGJjQ6goWUSd zv=`SC0K#H9{|GV)>=T@8yuw={QgMxA*`U2TiYKr^dnyt7SJ_@{(n^yJ0Rlk(d=6Aq z5V-Ml(8_sIMM%I{>A9L7YdZF1GKvEu-}yH_Chh&<;TH`yM%oih`CCp<--jn8h3@3P~6!FpAKD z(iPT#ihAwFGmotA7Zk;0b+??aRufnz#Q6J28lZ*$bseKA&|(tJ#xY*80m&}_zo>d3 z{!c-)|Abdxd@oy+uBHB4R8QWIDeBZny1E^IjPNokgHB}w0Fpp$zi+B)1uHoVkjf1X zgiHh~+8Lon`N|8dX1eR8kilMa7=u(vqe97v3MfUC6gntk^7EFZ-(}IcCBE9-dCeIF z*-@6vbEfy|?DhHOnZ8{)xxR)CQY6!|O-!8)HZt6n9;?udoo08?X%B*0H>g&7VW%}3 z1oh5v6m4YZ+=P>t^DDN*ZHOr@e0H^$2auPcNN6B}-&dG`f{10z!1Yv?quXrv`>k4|F^X!{>M-aH22p!h9}Zd@ zmP1-_tcJYd#ppG7G2pX7UR-jHLDC%)>T!&5wHCusj3=uD{wzDM#|~n2KE}(D1h+{) z;}I*vpVMe{HxJ_@v4=~mE(aVVkZ(K3A)IXWy*sM%@~%8t$0t_+Is|`m8!vCl`tp)* z;pGBehz_0>(V>{jGQmiNHz(SJ!!lddw8aWe&2)v;aLMJVfL$|m#kC=^ssD;XKP8H%-$hv_Q+#BPV8BC1UKrJ^1xlO z!JRRF*bMH98{8EO+!ZYtI>24YfSdF-N;i~TZ!@@A2V>F{f>bp7X!-!y#O7F^E&DAx ze$bb`rGybS3<4$yMgC}v`v%5opE2RL5}bmc*hzx#dHBs`BWCs5+v41w4@*g zUbvhukh{zMLofp(M;D16@%f0`xt#|TO%uaO1`92ZR=w*v@OR)t^A8|200imBZN%Y0 zCs7=u=#7_{BqE9sVHkHn*C3C^c=rrr6Sc_Qd^TS%sTX>Uc2%kZw}d{CgZb)CPfiR@ zlDT?p`~>3!?p>u>pm?B~Av=9^@dTV1&wTw@4~G!wSPocLEx{`5nU9jo6kzn+vz`;K zD6ubHDP0(|P^eyaYu>A{eOx>YFd+{YLM2d)Ry?{f)cEY!bLlH~?2*$LBhb$ghvfnU zg0&k)%FF5N`bOAC?h(TtPwdC~Tt#-Fh>ZW$rPBH%Iz2R|&83{oPYhz&1 z3#(sG&ILvRPRfxyC>s?t!eC$CcV@fKo~0o8^WQhE%3pue+CoD4kxC&DW>J zjE{;1c=-{HMn0XHQIq^0MAA3!kY^RME;0r=*j)SMFRt1Mz~?+xLM%A_iONx+kwP~_ zFwcz{Y%pf(5UvuY!FFtz>fN8~wH(FC$8F0)+f-JiOzElJOmmiT~Vv% z!qUwEKw4S$3@`xXnwcgy0E;o6hsSs_#(RQ!jKT&RUCKeu=m$@4XrUW>>)Bcie{r+N&Q5w`=p3<{H;3OQ!<2^Me?70Cd0$ziyG;Lc+t$FQ#B>3V7?jZ%{b zB=A>-b*EL%hXV%3h=;3*oU!oOIvYMnOd0|#X|$1@(4jf4;s(CLFY*lDSV5CC^un`- zDN3o-nU>fa4YMd$7oA#{@~6OOI9^@DKzcLJ6MOlmNS?CjdejfHB{) zIc!DcJat*pY1BFd=SHp1(X_p2#+pXv3vft$#9eiAv1lpfFrVW>T&GH1o( z>;aIf%L&|PdX-0^2qX9*UX+Nj#~SIydYKq$Jh6(Hy7Us7QKH2#U7t7*f74yew!50# zb`2vbG`f0b-2;_z?8;O|T^plqd>AEbB!_6al?390vKdnj44@*x^~bRo&Xa073acpZ z>D&Rrz_bA&r+*-QH{(dR$2ih&X6@9>p?p5N=vG8=iP1w8OIHj3jC6UUm@Z9?WjyAk zGjSwcz}vgSYycnBdsg=8P0*^>X@Yoj*hmMlpH37)2*y*Y7_eU{D%jvq9*(KI zV2VVMoJYbng}@eXkrgHGi$1aE3D3Riy^PSfAufwl53Nv`J{|`{iBFfJ+NNum8AF1u z-Epij4u8t&DFVFT7pu=Qdn}-C+L$=q0P!GJ!SDlL1SM=ZHEk?-n5ICaHi>;2YbKpeXFiHJ@V$@+xUCR(9+0 z0$qUah0hI@nG2R;y8t$*1VTBUuVdMQtuAlDo{ErRj4@G0;XRYD?Kp|n>R%XvSQ)|w zAA|9P6t7xgqzYRx!PG;|rf=?K*ot(jYrV{FgZWpJ+dJX^YJ6-+q}q6nX}I-@+USG{ z1H*_cnL!kzqdAL3x~Zad-cyfRWRr)prE(_n2kHPQw5``*+SY6L?fY^9Su}};s{}@y zrGLgW@#H#)LuR^ks|Juu?k&BGi&F&I8n)1f2w(}=MORPQ4@w|p11%SN1ijgkV8l0c zR*bEbRgYvvi4hgWw=Z7b-cm4p)JfoWdw+9&L6?&l7%N(;szfk4wQmqZCzGMiihd@n zK#VZD3$8M89oS{xh?ARDyRmrTZ&UOQ7!y_pyeMMJPj|g^(K`sxHeJ;p-pyH# z$C&59X~*bw^%NoUG$Fe1vJ!n7$1C>7Q+1zUPDNg%IVT%H8ktAakSH=eGwhN`_OY7* zdPL)X{#>Qc;fR?WGP4c+!m@nlHF39xK!6s(On_E2ODfE#CTzU_PUHQa1M?Tf{CD}g zg1b=xIQ6ce$2#0qg8dy9Z!tVw%8zQ_#(ojBAH4-wg+hbccf|0}%89a!7}zl|SmdEEE9F(TjC_tRaB+@eKq)vsG@eTJEdQ zTiwh*anM~-MIi4JKQi+zxt*KjcWGCc&n^NPWp2sVUtH4K0C7^K%R5$+@^Z5tT5GZF^ z8W|#j;e}b!yPfpedVt^I0O|S)Hxc2QQ@IRB9Hl7MP9Vmyw#X_M&F!$I7aD=(=1foe 
z^>~T#>=@N}$RtY8hz8LLugeRSx){F71EQrhWpseNc&3{WanLby4`jm_W1h2rl>rBG zi4hp+{bU*s$XlR*#%ol>s4(yq`SX8rV3e~3oBb|Py=Ty)Qv^SC5Bdn9nx-xfVb(Ok&~C zD*-=5$7~SK|9zWg9OIkS8ZCnrEa&r;rq)Z${!L;d>||ij%P`AMWG?Kd&ceWR33953 zE@)h-+suhe*%PAoikKMeEj4XyHB&03|4n#~@d}Yazgd`X)S}EjdJ+6#ZDO}stMgiR zv+mSiyq2z6fv~iU%sCMlVS$r377I_qFNh-<6I;D&u>hMe08gA(iTYV4Nv1>fnzc$=H&azdw7m>YYsN<*hCMBCBz)7tb>*J1XA%jG>Rj686$7I-+hI`epU(PPd3*Zun$@XHtNb!=y$w`|6lOQZ(Ad$-u zuZ@$xUT9MeW*kV_v>CdVmuSDkmaljrwf*FxD&oan-*rCw!JzN}XYT?)@oMu4>riL}$r0;3%Dnpr2|Ifty8H>FgZo>;Y; zdab1Eo$xv-o<{46t3i#eIwZV>(Yoxh@iGzgn7!H)R3YP?sDMbI&Y&dA=dClTz*JRh zgKS#z7rf+NArUTLWUxR_|HX52{yDgu2aeLN&`2NQ)L@M_+v$uP_x4byMWOJ;Sx*ks z2nA!H-Eb^Yyp_<)|RV8KV=4#bHvFIwrA?z91(VLxXzuIPZf zHW{I$$F8ba}Ff8m06|D3iK2Cyq@(rqBm1toYXXqx>7?jg!kcXrFbWMn_$L4 z%&-&HI*EJHZ|h*<&6nH^>+3TLu_DG#quWMI;|mKcA@B6awBDbavEtiNKy=izu*^sX zn#%UloWjqdGg;xiEa2wc3x>n_n(n#mS}bRdIX^Ln3PfYf)wk09N?lt`qtz0&23?rK z2-qA+86uH3APOc2@Fe8;!XUD63wmO5mus#Kd8EW%;qAM3Ky_GUhjz(}C_EfQz*8e` zV?A4<)i=dVk!I9HUsdAJClii0N6~!DCDbl@r1G=YO|=1Zjb5%# z0J)1*v*{F6DX5{yZf2JRYNOYrWkR0G12=jokf-J|Kxc<)NS|@X(FHZl#@-WJK0Ve`iiPXB_N( z=(2={69$G)8ZRc1iJ-CG7ph`*dyu?_Ds+wqIpr(bP~tqY3!~5yn_{uC&~UyaU%?=; zN>ep71+L1I!<@OJD#Dn&&SVtIo-pCN^)`Y*#Rtex1Qpp{y5)^XE`>!r-W$L^`XE34 zNGmqXk!8Ety%(bs>f?{pHcn(i6@0{uiD31yS}3>YTrO|%;nK#Y(l7 znsha17ML0?8df1r5!NNsgHpdy?@QrRGJjOfCz2rF?hw#M#c@1#VQkC^;&;oFS$s!b~_oQb&n(4sD={2infzNpK z$OT~aseqi%gN0%eOPjA&kN9pipLCSj>oGBal%Rt|dKoRl6ax;iK zt#L9PbH$qLi|1aWhyfYc-!D#9MhSPePpvh(9k@)DHZ2CS$eCOco*Z|saEvN?sK)Ez z9m&E8*rYD*i@RvD$S0lDs)8|72B|CMXLHN&GcKA?vaEY)>n!MWRYn}YlKb~HX5jJBPENPY&@KlqP<%+P*qyiZE97ig$k8)*M>WG zrYae68tCzOSQ&sey}8*{m5$+?xlz>$k#7oRx(-LiC+5eb*fuDq9bz<@pkhiS!oAB4 z;Z7#AbxrVv?EpR@6T;O_<5kJ9XT`V>d`jQMM8?*BCdq+Yaq$sWDh(-KKW>Mr);7_z zgl%#YF5anaQt@<05J5=H=CcYxhw?M9(lYVi=8U>2oI|IV#4J>4wQb_6%zr-@aZDdr z#v5jUZ8EQ-&w9&kUbS!ZrgX=^>d=PdfbNI#IAFs0Zp}9o3v@4sFVcr7pby13Fq{)~ z$$gSgD;ksPlJR!FG)Q{}-}y2{`8Fzi0z??k_-abwpDnV%yqImFpEDHkFW=ty=dUi# z#AlUvO1xH=x`aL|wZ{}uOEy{L-QUe8oK8iNRy(lOt}L32M6K`>G)P%@%LrbD)nJ?b zFNUF9&eB!OuA`+}6i!B7pf%l6+(hm?vCEuq*&_@?Wa3RGRwMmZyHT#=X}X0L{f)Fp zmm|r8L64dRV5nL2PzQ*3uO#NdBRLQ7P0DfPARYyZyTW% zIqzbEi*S4;m_3)KFm?xivGDfY)!FN_EC2fZ<;D5s4=7n0@gY+q`X0u9bQFilXkZWv zRLBzDiPiAP3`{}*$QW!fNbx=QXf=d33{zdXy2@f4C?Y-day0mjHK@-${HiplA^<)! zH#99|QRP5x)SDQY5o514o3_KN3ymGpWfpOYNhp>qo)KccLJ~cxI$VHHfBkYSlCrl1)=Ekg=`?9j9bjI@1g!QqUSU8Xa z^F)@b6*e$%@uD?3RFDU@7Db*Ijnb++Mu}dF8;4QiVZ5OrAJUEZ+1lzT_AJJTP)cuy zuBODo^oTTjFx2Gve$oo z<4L|rZ}70dIQg)e$V(?lKAeaJ&mBRRgj%y1 zZd}XU&5;C_k`oD+F9MT$%aJK4Gqxr9wo(`cRCrsw0z>D#o^9m}CKwf;Z%zG2may89MFv=du3K zd!rAQ%6mLb9RoVgq6@EeM<9}{c1!x~hO|!-oP?75#X+>XkKl=QXxukL#@27M0lilhNUAN;@79A#_+Gd+0P{@FEOn%y0;vhf=6@C-Z3GGPs zLaW=ukaqQ2yVjAdK;UC1I<<3hLjzk9CYyYS7)1SQG`H2doi_gR^$FjvQ?Ip??T69Lu zaPFD7t3|)DPVuN~@RaDjYQ0!sXmB>uD(lqK%4$JZVQ*QbJ~gy6A}Z`gM-?I6g@z^- zveZ;LY&{N?TqO(3KP=Cpy+1Yu!cJ1Vrvp~&Hxdv#in^mlbJ*<+!e+G^jlyB0KWqWY z);9&jPFisV#ZKOG=qnC<#gaE)UcWv&xhdS-{QTahtW;&X0xe1eYoiE;B6)Z9_MP0% z7c(z0PKl~rO1-0yLAq%j#^*ea*BC?O=aZ{TmQg4#L1Ge^PCry6qnLODyeaK{Ch#~v zRcT#D&3did=`}jFVYOOob_TVmF$^0U1CL7!4j9WDiXqwQmYZG)KRQj7QQnn|G8Iz{ z!@B9)Bz^C*qzE8M{$ZeYK3mmI=P_Px*@=xL)Z=93PvX1rXyr=7_t)#wn+qKi%C2vQ zU!rDY74Tp~mhsSBs-vWY4ve`%d^Eyei^V*a7*sM=Nel4i{mt253Y6BNXn9~urW)npO|V1uz!@M2d7dTJ=A`!A zG-b&NO)0nML!jx<>c?nUkXgx$WG)_$o<^+%i6ur7DQW5v&ee-2R5y7C=Rr}oE5UBl zxQZT;GLrD>N7x8##|fIFuW#>;)tg4{n-;w>GFWBsMyA-m|GdCVATdglW@B$6_annuqcIcT7*W)chiVC zTnXyGdNo(D-0eL`;5+y&H~2ae0S470RhFI#$vKY2^vzqlZ4(lgFI9C0L6Kw3IOC}LLc0M zC?!Ufb&jZ4Z}iIz2eh5pyR%?OW$#YSv-Dhtxh71GJ^@LY?IOTP?9pHl&nN2@r&j=? 
z_j{e`;_nUlY|3X#KHKuykx#afCCL({8Gc%ke+Q|5xzFuZ8tzsqsehU0b}OxHE9n-T ztoN6K+?p*iRHepmlB7*4-?ssGvxC;i(wzTegV8oxJx_ZxwliYy%9TEgeG{|PEI7kZw#o(0 zf&9~C>u&br;_RII`*Ezm`zH*s^ckC-Dmv|9_IE~WY9n8oq`z5lQXZ=;W!?Iy7lwmw zt=+6vYyCzytam!i`e?90o>WaZ1WNUa_3`yK--gz8Rsc2re=`5aD*zQx17o!qw!q}z zi|=CgI+>Yn#6!HJM2N8sL(&k-JU;xRLKHnKm=G7H^K`1D2zquCvioe$*S$S^C%Y=q zTXiqjs`C|Rt6}B&;<;yAHy!*Ooo9jp9H%29q{=9lMsW`0&Xs!}9#jT+mvNAOqJoSy zdVRJ!p)VbWl$sxn0o`|v+zv^g%?+^)6wx_kFD7=TiL?2tR4fAWPumuJ02->9jyB3i z$`Okoan!e>V8`0MR@~0-$v}i%^{K6V$NSp|zKh@e#=hBmFgF@PD&zuf4m1SE)>LqW z{J02;mq(*fM7Lln--&UZkG)WHx(f-bI*4#}>2f?3AVB-%Cy@eJC|DYEy^c_MKE>#f zxKggzAMCkynBg`ycI9RjvohXo$FyWqa9X*;0~UOu8F2ogO3Xo*_?1l1AbiXYsjsij z{`39W<>_BZDSWi|-Y7+44ceozQLMz+A3gvb_4XUpR;^mw-+zr)+MP~4Y}cD%t>5bo z+U-$uP^&e%&7j_@*TcrB)r=b5Q6sE*Z=l)hXyMhnUafXS|MwcTTAi9X^4?(tc<(>f z7{NIRr{mf8i`BB;Y<8<*^zp_1D}ebCwo&#v-s@-pRU1vO)<0@Dk5FW&0iEB!UJrhS z;U0N^e|K_o`lDBG9@g8xc>h=i)BXKxT8heiG^z|9w)TgjyZ357T;sA6Ob)43<#r7_ z>TtkEdS^7}u4H(0lshhs%%Ep=ba5-D}qSUMc&pvbmkROgo;} z#m<}FXgrB@Sg5ksY%~BH@%WAz;R~fA!Vj2t#J?-WeVg|_X3B%dlvdX#t#-TK?)Ha+ zdZStm>R|{A8I6WP>iXm{Ei(guOby}utAm&!`s6Ke%`}; zz3Tw_jdpZO2UijTxs0CtPvDQIW^I1q-7E1_o39V#(H{p>)Xhu zLVM||;~7QJk0dFV0LZA}$7AGf+iQ7Tkd~5{8jj!G1T90D?b}`#UagQr_n^zfgj1Ge z*We{avjo`m}tyAw+tF2DAK8Tu~ zVYk_}>BHu>m?CU0gP7QCQkZ)Dzz7lOI4lvNzXPOPf8y-XqqutaBKrku;rtnR<-W#jE3|*PHs6)-J}be=fM>rx<3tmoiC5|bCgTbB0pGq2XYtX ztDQ2(dnLIE{oB>a8z1`fPv5?I$0w3DXKbn2e&wV`p`LJe(~eZ9OxkD%__wUCF=<}K zC>Hc&>7d>6R7XW_XLl zx2Sx$8t^0hlMP_~A$rI@9L`A5FBeEj4~Rxd`{}4qFyiB++q=maj)vSbIa`$ea4shD z6@RsSAhoswP#PMf7-5s17CGM3>uo!kS1c=nfqte7IO5)-ebe1kl&2 zPVxUZ0P#N-3myClXhZql;Clgom%V4mC}2R(K2j!Bok-xx0X|T+Tu;Ke9Jk;NQ^jC8qe-+?+ zF>~o3&hKZu+n!;e$_u<@Caky9`^I9lmP$&=cY3;5h#NTdSvPVx@$uNE#ca>N1Rp-q;RJaKR^ zi|)||rhrXUUdZ!e**ie@ju~#!%l8)-$2LW`o5%Al({IIxp-~~3oY@QegT|H*^eT6x zWs+}r^>z1H<5+QKoJr*c$UEM5q|Br5Y*no$!oAZ}CK2B#8kvZa243vYz>>0z=@v}u zZS8L?oCJCs&pyyJ3eUI~o)S+4rUs23b4gxB*jSVK(k_gvVDbeHYvZzy$}WTXB#tmB zq#I9&lcnXy9Q2^S&pit{HsS(K#=-?!QN{(byfScsr@R9$;KK2m>*bRrgJ02for&0nlL^c3cj!NSz-LPq?sydZgcwxnAe0f|>_yH&gydv#;6A6Y?Hi>4 z7dtp=f_ug};a9SFB#DKffpUwhrTET}9wo02Jdil0BGlbRE9t1fa8c*Hl1fB~@63oE zS`<~oF`H7)GL69)q1=ZlY1#^^SlRpG^*jHkv#ZOqi=k_k1HBDsol)1$)~XNW|JxNUYEuI+9KT5-Wn#!c*dCfck1pN6OVr>dnzoj zq$`5$BEB=pNtL?gHDnr~S90IYOEtvpx45>{5w|i#vH? 
zBKN)>DTPB)vDr|ZtZd>{hrwrHOT&;yoW+;!<&!&8~e$mBY8c>=(>+;!<&!&9p(yG!$wE8yo#?#;y75g-Gti~bDpdM`xy7T2A zruQm}s%pTVkB+(e-#C{>-drx{D8XX8G*oIiSva28KVs$uvgSHQ&JCJMk?($3rf@|x zUaY;0RPCA%{9lq0#tamVGu(XMtRFRPfA-6x=dIQyZ#g(5I>qS1gdBJqfdeV4CS++T z+Be8GlmIJ*3NS9Bun}9wDAO2hQ$vB>-DPJXyKNN96jhwpg2E1Ec9b{P%V6zc9VcqJ zgq3=-pAJZ*vPT!x*+@KmNk7DoDldk>XfBqd~STFWw?4L2xRj_tzndSJ-N8}(+#_EtbAP|9)uwsKMT*e7z*bb<#(Qs)fIBP4@BJX=wv8Ivc6-fOFwBCj}GHV zhZ)=XP;4KAjk~ae+4iGv&I!hkgHa^M7+~|#zj*N5p^+-*5XT~&0Z|r?%0stPAN+b)MU!LFiKVF<)!{4)$ zR|SRMW5H%=MRo|bL|?x%0LYJ^Js!OCsA?TjX~I-YTlZMSww&m;o#n83s!G{wa>(xc z%U5St{?9LUZq%(c9#i)E$rLh*iBhAdEgN{K^+aHO%$S!iy z*{pHJGy+yl^<3Bf=oKI|MO-2rv?q`1&{a#b!T0B#TFbhqOm0SK1j6>V4&e)oLwNiy zh2}9*<$vCwB~b@ujNzW>30dnp2t(HIH5 zm5SbUhIE}+1fX;rErU4H%$gOH=`MGd9(O#=?Pa6^TWsG+P8BMt1@kc4CdHe5>@Y|U zd}Rs#o0aK&!@T84K%1b~K!Q+e)Ek`*_CrY9|Nmk+gl71LZ=Vx67p_ zF&g@IBe-KQxDvd%n@>Wv?PxUmb>zi zF~8829o+i;#mza{omzF%hKkYx&Ar7g`i*X*W9?ymRD<)e1nrVtBN&B) zQMVJ+gJ5?<@Vgxh!P&{P-NB`63wI$$Dk_0HoP&|EyCba)t_d#CTR_ae`Y+FK>SeUE zeob-12V*nLA%O$>cK&ho4R-gU_a<|?O$Fo zGNYjV`r_n=>oU<3N{UT()V-eN(aHxB^0N4+X60-yw(@MtzGLioHm_1{(BRW}3J??j z2INl?zgY_d5?u<~o=lGSjJoW~662-mB3RwUnR?T4%%X=?4WFWG4Wc~3#HeA)G3GGo zxw>p|J5f;YObzz(NRZJJ&>+g3Yz>W}FrF#b9?hczLQcy(JC^#sjlJc-V< z@=0v5Phz|Z(HnQZTzU0syW0LN2SxD=Ib^R9Y%}8;nDP=tA4O+HRG`h5v9mt;Ha7Dr zPR4`0UZG;Q)?^1q9IrUpPv_x!5*>zGtjah>rpF6?P-ll2W0}0`HvMx|L=Rr{3uFZv z%36*=W5sqG9VyfGfF`?Pqw3XL=*a1=2q+s&b0OPqI!B7Ifr&2Ny<)|B%UyA?-hc%J z8+VUH2i!}sOU1lCH&jFdG+#XU{4?D(R&2SKk&qQtb|2A4KI>Ksz}cmu%a1l^sG~Sr z+^*Q`=T-FcF&!)R-RpR_9&PTi(ZD%6YQph+8wR+cgXAa}j3?-2J`IM;xrOsY20G4) zL5K%{EA_;$R|8GL#2viWEZnw;#|mnc0Fhb4u_NIV>H>&V-^FrA}-~og>2{b z2pn{kx3M<%NEP#m>xPQ!(Wap`y8zV;?r>GLmae+L1AFTCZ_ZSB^AYQt=oqL@G z|9Hshwcevmv!!lMryG}Z5>8$8ff!kRTSW(b*p3HK(Z7uXr1zOs*;sMDaFQ~_gWkqD zV#OVYn(#{^WN!9}!ez5Ac#u<@5ilGNGh?Q1`?=j374IAs^T#U;%`sr)*c}uScdO}Z zYCtu6&BxZ%6o@%*wOZX&C&fx#C~K5fnHh6%J;kyO>A+{%KSxOL+U|7u%aEZ3K9t8qsz0~F-$4)i)u#t+);ymjOoR39? zR<&pQpyNgPrS}S>Pue_JnuPV*T>4ZWnqCQ08WSznpfZTGt!m#N5HJHud z(PV-?^sj?qG?>pnl)dxWuu3s6?b2_eRp7mw1S_=4kGUA9{~W=Jjses$<)-&eCKHdU zq1_!U>qm@drPAKB{WT%AH^h~JpA()<$QG>LV7|H|tp&~^UOh}ARl+Nb=4;IH&52aD ztXgu=c7a#Fl{IYl2Awc$wS#`AU#)h+_Hfj0k7|QabK4pwE#)m`Qi4~styi@4|5nso zQ36kwa+35oAqmB@LA;kxINsl`Z(f~UUA?`!IDhG%ygT=fD2mN;KEd6+L}!8cVLF&k zV(;TP9`hN?lb9_OI_%%VXoU9iR!e_AzdnC?eu23*a!O!dIaT&c`E6dDd6IcxhWbLQ zz>nvzUY%XSOOL$jeE)~b_ou#(K}B1f-j16M;nkb-%O8GjHhjNGGDW?)N#J+i3TI75q^1RxOoV*5V0hv zRI7=B$4l*|1PE->jR7aT!Qcxx!^xW?3aJEj zEUEOWBx5@5``PcB_Edu;Fixk>Jv`kpNL71Wd+jy;Ry?1b4PFr^;2tt#r{JBF_@+D4 z+otE)&WwB%e7qnQ@FSYN?HllHW}~yuUSPB@KHEvpGSMcRgBXqgO>rF0&ckG)31l;~ zL-5W`=xxrP+R8jdx$)>~i7XLqW!8Y9(YjX5!Kd2n6y5AlRln^911v2*5B+3zLE#Q| zw%gf}PkZ!`89O|bC8=cgiqa^%W30eO54F?lHjZD%)4O2We>;tDNJ_*C_OIvEaVJAe z*~g>tWIj6@O=cMB-{C>Pm0$tOLWf0QeoGhYl&Oo$ePBG@E|q1vC&2sb5NJc9uNw!3;gaPUJqg znITXyionA4=UB7L9PdVbEB!Eo&IiK_ZAn46%U~GscB?Z8J7Lhqo|j2{+)Sak$6x=CthZd&YhZ!u z^+vPOZVD`%6_dK(cbl!vc6$pxb|ekwW5oY8Ac^WQoPcP8;Hg^N+e7DO?ju-x(0M3> zDL0Hpq#Az9oaXrSvioNLXO;ITa$5VOF5tA0E4V&hW_8xZ6{~|A$a3m|;$}51 z9J>816g4H9hGb2Z*UAIsRi)+CMMGND6Su&SF-DG6={3|MtS`MHAp!jWXXCpUZm2Pp zvEQZRcEM%0fwGg(es_)a+$Pf)?t>9=LpU%uSQ$IOng%-cm;jTbgfcatfl-B81j0fv zN_dVk)T%#?<7!_95cc8M-Z0S7Y?upi)9c}-);&1C>|PulygNUFTd<48dp!-cM zh6vAC!iPm2-{fjpxr6mcw^XwGV;(VjR{G4fKD&>s3OAVsH={twtEK0%@Vn}lcn%jf zr|d+SuTCi2sV<3(=pNTT@#ia!F{>crHZGwC+DYM(57G-)&>P``n~@Ymkh4^$=^GKu1LsF4NBb8? 
zN;Q4foqb@lpo`OYZ(f1KrAj#?TLZZT|F72bhL!h?bSG6Pzv3@NI6Ea^T#(0fxnwS| z(%j~tOP5<(;KDVpiHjM6|8guQNNy``--a_56Az5TAalThPA*E+6-dTcbd@482N>QY zA!ZcyU#_#-8HWwSxo21U;_;3L~esmYgGO#Jjd$F z8!4%=CDESBSIlh{=p#qF3Iy{HQ6DuHw-sz;Q4{KB4vZcqsdKGpc6Sj-YM6D!9q>;$ z#`1=WfSM^5Y~Di%4KAUhnE)g`cuX#^ z{o@bt5D@vTN+tz>P!~y5iK7BV7sEhBlpIO#^|gR9m4WuCOk^vcE+{<*Zj5ARQgEEn z2?`*^o31yuw<|ko()Cl1=s(oKO*ngvM03E@g*#DVGS2`{@d7*#Q9*rPQP1b&_v83( z{G8&P%eW6uH2XIy;gunsdG@LBZM9K(5SURp`)GPrA}Bxq_Q7| z4ralWf{+7hTDAjDPVzpQOh^c#k7`WFTU@Yyry;PnMaQFPgv;iswHB>0eIE|7zO_ao zNgSQYzuB9-4`dy4M?o^|e zue2)S+o%LI>0pXD*R0v*bX69GK55$fP1iB9M~4AtLPzYV$mVS+bGh+6s>O&1hWK8cYPM8S}*8niO$3Go1e z_W^S>z9XkG5DhFwKhaQdw&DYqardPMUX@em1n4Vw-J@9&4hM9%;Rq^t@%>%K4ax|; z^Jf3&?(yl{cbA07t7`7$eLPnBc1#G#E2j^-^@Xo{x2)UHs&{Nn7bVO)!Ev9j69v=neJOxx0$d(lI28@WK4E|eGWtvQa>b@#~QhL^9B}ai{u(y(!e}C zod!UOR2r%GUdR_0D&4PKwRPMXy)m;m<_bqfm38@AYQIVb1cVNBd2g?SysG~3_3L`w z{r}0yJ7h!V^LOP`!wVRuZrpyVkAy!O>KS_ZZ_8oVmq~wr0CTlwX+92bu!bdlv*uij zd?N9L1$v-NYW^W4D#qrRs0+`e5#1U{sr-g3fib1#%dz?b(k&W9e9UOJQaY{CEJR?t z2&IoJe(uVmjGMvr{YB#Vr-+Fsf^oXpJ zO)1SxdtGv-!0lulEa?Y;>%GDNvWv+O)vRgG_vi85n_$+veT^=QDSB|+2PTMX1>YVV zM!nf3u40mY1SlFlh9PL5&}*sOmwphA3l9eN%M9d|RA(js8DVW=c8~yF(1mUx8dE02 zi_87XmshMuZe5tZSiT;C~h95!njHuGokev5_Q8_4-a_*<&qutWjV3SQ{T}RGw8AKemaFHDz0uJXKlt*cLw4TKd>F zKGt6PSZ9xFdM0hbk^?wxwcdW}iIE1pizjF_RqS zNGQfOf{E!`;LCme9MiC8R3=Nf2h%OtH{_iv6+Yq@G#utaa|{P28H ziOlQJxZAJ8*&PA}HT4Q6sub128t?&+E2ABHK1H1OAxd{QF;^bW>(Shdbowb%o;QSn zPn|oB3x%2sMr-)7>;0&(UhMgyTC`vYhD94be)JDF39WJ<(g z!%7bjLy9aQx10CRJAd zFEjb#^+?b24jPA_G0>uj$~rwrW(kH;r)-H&VIzJj6EQZ+qS+<}1h(J=?Q9iBKq({c z5Tpf6`)n>DIO(rp6?!3qoE=DrU-}VXns0z%M?g!5D1W6yuYOmYnBPMOqhP!bD>k|w z-v67(R{H%7S32qET-qq=_%ck+QzQ^ImOX{?5Qa%r7e0mJP`%~K5`_;{mOO>RP`!=9 zg@_z|Pvk(l+HR7?b-N=~tqU3R1?3(CL6M!+=ph?xFFm6d9T_@f^VwuRvr#oRn2po) zK~3r1E6!-fPoYD&%RayfVQqwqfmZSOR)E~BT~cSD-a{`{kt$W2avcSVlq;&SMF zJoVau)IWX`i>ROhWJ3`S9J779&?Qv$$M@x&cAzsu{LUzPU1xJh$F8XKC+M;fJ0h7! z>B3@GJ&tCwLO^*mB$hZGX#+(yJH#3V1Umc7p)ihs&P3(D!nZ)-7P*6P_iRnkG^w0L zc1D&GIvu)MI_-9))mZ3snTFw^+tH!vsGlHJY^)KFp3G>U*b#(xkMX!TN6EVETvK|Y zOvoAqzcK4@Bq4Kdd7W?}%>l&m;>knn2FSf3d7*aE)b*8JsYA0}Z0|aT&!S)m)7)UO zS{{%x^hnaUQ~q!b&X$BC2Bd6}xVPY8x#Y>UK(a{0zJLq#>Lm;#P62}Okg3v+D%A$<+rKm5j zPR_t_#lwS`dY%-7&5oM!UbF)vuznxAdD#13d)*nTSc=G z{W0wm$0ro^ChZ=NeG!H2;<-W!A2;!eiiWk*AXD2;XPZrJxy}a@U;$k57;nUQ#2k{H z1Uwx4U`216whI%kd$CRSh{7#_T!Mtl!(FK6Dx3cZHKBfLdOs14U_ArEan5ZLBxi;o<~EujAQ~4#i-D+8s6*t zlbZSo#3WM4oId@cM+ELg;m-=u1E(YBPW^-WokZ|4E~+;GxeA4QkP8sCO7f_Yb@4Ws zOhRpc;d6=3_i+{IfZ7?)(h?lb=t>MAS2zZA%u=Ziw&9VNnSD}B9z1nUsLLf1RU#$A z+BgQL17P+Njou)ediBc6g) zj|D~NtP*#GIT>1@adr%s1pPnJM-p>N>A0h^pR_@u5FUxdHWG1hB&}o->3Eu96ZvOV zrNT^%zF4ukq-AI*7-4CwL${ZJ?0ix0{XM-yZPMPbNNFTD3PbxS3aI(U5y>1KGM?I@ z`SKdV!#f66^_G@N4}%@PFyoqye0b2*=(WP>kvWGiUB&5CbOsU=Wv8cyLy(J(-f`Ch z-^g1JdV1lStgs#;)^iFwKR!iK6I%jXu{kYGZ}@ z9=1KivC!TjV!REQ*P+M4H4R2H-@k=-BZ`cYe3y=FO42^3%UIb_Jokq2+!oKPs39zb zUIo|65Ko1WE!CB^QT%B~o)+&wR*i;f&jXF6f)^0#o?gE=1N8XEX;Vq6-G~&LVbqJz z7iG7WY-~fH9y0eD&AF*u!tv=OCf_t(;Pf@wfRnUu?bu}L1nS`JJJIAwFx3ilMGNxd z>Nva^MmK!z5q34IKtmYuxObm!nIi{isrr&2N00Y%ojKxB)0rYHUU8iDPJ5fOkQ?=8 zTdeS3;#k|CcPUf0W6#1(u{74bzCYJaPSBCaU8jAXI`&!8PBZI1MwbejoH#7c**X!B zM{^i7i04D}xJ5IPg{FzS`EA&Iljxc^JZ-Wll~YHNg6DwmBq5o~hFGxdHoo(%r#yZd z#{&cHe4|Pf<<*Z+fa)`7$AG2=Ub9LiG&&EgBBW3b2&NHh39AgVJAsLc0OlMw_kMYdIfe;hAyh{afIgA0z z?9OMsm?M6nIn>qkjWrms+0AJv(%XhUB6%G60kTW>o>vs1W3aWWKc8l8mL>NOcosth zKrNY5+?v#8Oh@Wz=HYT5u`IzoXx=MkJzr2b-0$-h&rS|p-x4Q`oFAIcAz{pQ)uk zE<5D88HUJz=CH)W96npj7!MfZEf`x{DVdj;zu+p$Qn4QEvNiIl=Y)M}Y53B2ooNp! 
z>vSoeD8i_SpZSua(Cq3XE*@;AXLBB&cF1@xxaDHm`3oK5h?7oN6<@|6;vja|QWj^4j4CFo z^*ZE6(JSe$HhC${91u zqocY4Mn}%Btb*H7{o25{d@;uZA##pLo*>q~A%htPbUe9n+ZhLATQysx9yWCGr>A6z-OK&+ z?~X1{j!wV3{2tE*T|ITh$ZYJFBYkMf5$Uw+k_1UX{?;0&kqIJn)6atWEFQ);$kph^ zR2ejsi_3G^)bD;Vy()=`L6xuY+5MlrXRl|_=QK#epm^~F)Iq7CUcOYAqDbLRp-JRo zsdf~6}gk6Z3+&;PXt{L<$Z)j&f9-W`; zzdbqo&Y5^!YRA^eH{NJ%?d0d7NPH<7Brf2pz{2G(9s+&ytO}G(MpE(8>wvIl>*fn_ zg^~%yf93(mD=Ca1KYq`^2vGP5?;k!U(aMRQIvAdLJd+7 z7ns?K7w4Wt-yEEsULKuZ(s_Az`ornjPp2fn=r9%xmBT12Q3KZ>q@rY zi1Qva;E9FQ&=&5@z-|pWyhwe+F%{`gHU97GVQ`cFuB-7A8-?FAu>yF88mV5fIfWYP zgugQ}fD+^&N1wkW704v7(zZ-(Oog9WOd8YGKL)pNXVWXTo=rLQ9|8J@nHa2Z@$rK2 z3{zeXt@jA1WS-;jv8M~?^0LAU&UI?R(yP0sc+$Qeo_pXG$8n=$WBkp`#7=T_6eo%2 zZj$59(YxiLD5STvqIG2om&eGJUN8}?VM^&?H%*{81V@#I(R;a-23^byYIH{dT-ih5 zrjJ-6JKRUFm2f{EUpJJ2mVHY048Ly(S->pNe_(!-SK4nvS$REdYB*3}t3*k#~^>-(8LP=X~Qg%!O(Ij1$w zLGJ5lG_O}1CJC~C?vHe?IL5E&sV=qg&jQ=)ia-B+SR*vL<8f+m4{N00Id0^`ij5RJ z_kl-JUG3+e`M>i>{&U5VxIfN2|H&gM-d!TCyuJNrR|ysJ!g8Q4B@Bz~ULeJ7J)L>M_70`5Xy_`VpOW|KOta7e`tydWUT>ClAdoAW$ED`HiH z@agDdzK=*IMm|E)ecHQNO3JCnXxT2EYdsIF$GoN!LCG}kO_rfeyt|0KmwXA)&7N;A z)Ic#ym~0-qTLJvcVVHJ<&F<<90GoGQ%vw2hOQUH^q2swpQI-Wjd!CL$%dBHX-r)pM zwEPh}n?~>^CWI3VkMKex)SbjPOcO}13phX8Bl_Wt^X4@@7@Gz=_YhUcMivM`W$CHn zaAU)Uh0>{6%Ae)9O&w~$1{2(J-K?ZzTHq?A4r@<=C4l#)2=bJnU%e!TI?WnETUqQq zi@@l#VHObi$E99Teg|Y;&^k+27O&(e%$cAH`}1y$!U4_O@%5nV9EZZq!oGRha$#eB z&!iE9dXH`6;UTN?Jjv7Ob0L;;*_gYY6}vKJWzmry#+v^>$pzKJ&x!nfScB;wEKKgI z;cFWpML4~-Kd<@4J!?HA(`(6F$udRP3v5&icDI!NeqEKE#jYxZfpV6gtS~K+hY%T% zNi@@|)5gpbg|ZS?rqnPS5MFidQ12H~eaaZ;A<~65lIDIl3TE)8IbQLZd(D zI~|u}$LlD$ChERI0Iz6`l~N0i-mq#Lc>5Q$qVocd0#Lb`mdHzs`if4josUNMrU!bO zzKI6|yfQdNXN161C*k-8zTi(#5&)gmem3OaV26gNU70em)S`WI6i}+SZ3NAFjN~*H zG!;1v&{8QLU|QK|i2ohpe>t5w!HfD{1?Z-xT#?i<;fdom{G*q~C*<;jss}e$JjzC# zNk~X4_Y|Qq!xS!JlkfA#^sv5S587|fsU1eQq%-TjEdv;6%oRV;!J$psSq$vqWfX)O z&4+qim{@Crsnf@u5RNhF78+jPG@hF_$gsBZR=~6?yn7xIf!?GoM!8d%~rcouE~$X$n2D`tvzO=UY$i<)=t zcBQC~4(cjiRC8GJR4|vhSNN#+lbNmow7l^kN#b4_b9~!EN#{TVs zY56`O?e4+>$2rf(c<0qjcdEGy(mSlj9cL2Uko4kMwVK?Dgz9WuW zqN;m{W|f5IU4#gx!egG}P$J?O%^pDx#_Ig!NLnV|gmEi3Qv};Gsff~~f?3bv={=oQ zd!-HN>}3@DsfKYUds5orlKWC6{T6vdB(`eVrI=+aZj>(LvQo%lZjl7OHdeSFotOkm zT+!IOiulh5Hu+iCYIgGOwYqfPORme+XZv~gu5(Ec_V|V8)U@vQcBerJlUuF&(oEwh zNwwmRM6-517$>jF>=I3oF?vqh8&rq|JotEtIFBW2u-@C% zkdTzILudeDP9>l5eSm?YvO2Wjx<}L!$X812bsR6i5alF>n+VfIWWAQhZlq^cDnfC3 zU^}uUvL7CsfmU5yQ+@=x-~u!C28pX%Sde&?>sxnxs3)LnB;it$T^wsyX^Ah;G5nsh z-_iR`bDY6WQ?Y54x?LgQXyD+=k-QX1roB-fnT^2bf8z#uT|^>EU9L8f;cpaJtV6zg zld=UD>y*cs&t2FTZ|#mn-vD!E$E593i_xpuDHsT zyzn#=fuaRGH`R9CPqhURI#*xbmP9O3M{H}+| z5bHjN1yv;)-1}J1Is?+ne|&SvY7GS?`;k&(n^G(2;nI1{I_zvptmeCt>gsDBbxNaV zXyCj|ndM&ZlymWHtjp|F@Mx#JRMzoKzHy*C*#ei2<5f?dzq?bnan?ZHaB{nTr_ z`a->4xKJb|I7>8GxI~Xzp+EToEnJ^T4!2v+G1mEEe69pFOQTSZ<2z*&CwvHr1;D(X zyCx4jH#;fTo;dQ2Ko@I|owE{Ul`WU=qVQ1H@0zu>}{s+Iz;6ZwBjD z+H*2OaxD|7*g`jd_yLCbB{d#bYH3*nc~<%x~K|fqrxQ{ek7b}9hyPXXw;KTR9jI3Q=v$z zM5tQK=@$HTiUA`-JF15)%i=TTngK=Bw>6w2fAkjOFN0fx5~I|5AXP%d+L}i45cE~n zuL*3G+9COv2F>G5!f`?d1lFp$;kZu@30owCr3t}HCv;^!B|MkX2ylI?A7lOx0Ket_NgFQF^w75S|OdSKa9>aexAT0V0qIt zvFX^6pV&8wm*_AcJ0P>Bz8s?%Q(iRc1e^Jg4q5Xws49wBkEvur&}VM(nusr6-{&)L1|T z!Do!!(VL)LVBGXxeu$__IqPVz0}iA2{aYH8w2-xVY)wLy0zVB9w^)3j#dxn8L!8=R z2QH@_#HLPXcKgIoCkX~2M}7&s74jV^XA{q7)p)?6 zh6!H>^is_-755PQbbNAR&L*hM|AbZQamGd-1P=J@$0MKE+i&l zA!0uDbmFPCC%9dRWcB5zk18E;41iC3EX3jg(nqQ31Sv zNU{{-i1NuPhSMUHpOW*>zgT%gD2(JKSfxz zY^DU~i2_h2rm$LI{Rte+q0>bKY+N&5S{^*GOi4r_)I4w&#t{Lsxeb6}unbFask(%; zAvK-8>>PPyq;_cf9!132$i_Ufr;V8--7)>Rw}C@)4V#idSsPJQH6OO3)<3gt3id>N zA59{H~-Fo=avWEB(2seDLn=2&p#wL3Z^aGRJa=RF1H=a%76nd5i^*{*;HCIFA 
zXOb?$ZCL;Pu|S;~jAhW|h-!gS6UNCDbYPnzI}OMHLGTv6cycf4U7XXEvCj&kcukcq zm`pHhfk%V3oc-A>=-uMFbTLK?Lphx+Egi}yh`8=3b5R_D{Rl=UV-SY|hVD%S?ow*? zc`qLJpQ{p(5C;1(#xV>g)KZrSnFFdVR04)T|7dnMu)J zlIm+}mW*SA1f++VT|aJchNHnv2~heSMg9Iz-_}&{0mW22g^5nQ4(!a?01!+ZA^Mv4 z5teu`AJRQ#<|1$bc*4d>^dV&3n}YRnJ2Z*jGSZaVe~s~i`Y^NF)`Q)}Zk0-pY$4I569mD?1G6Q7ndpk-5-u z6{kbol$yN_K?p)FMI*+O!So{2ohET}e)+Avqbm9pG;9_y+k<7Z=PLzTFVvY2P&ROqY(w5{1a6~;^C zLQK$}r~#E6lj6#5j7m=E0pi+Hd#lDL2#8^-lrY6@ipSiYb|i&q5nq^o*;QU~r)+1; zR&>>5#$`l0U{oW`)&vP*!JrFnw=kBm5pi392hNPc#pGGYvvhab*_jqHD+2ymiOJA@UD(js3yLNSYi{(1*L32 zfhRQxT#{Q3cML*vOrw67xIN_zvnK8u*H|HaQAf;p$sV9?D9B92pPU7Xr}{};UU?1| zv_vzI0xB$L=75c(XF_em%M@!Iu=Bu=)|$@fjJ#<1Zps+ zWV{h}2b%EoRmR7vBIzU76w1r08ro-RnX#TBqu`R7f}AFwbthD{e9RVFFGoNdZ#>N7 ze#GFq(j1Q9IlS?ljnOp+-F_w(2IHr=3n+~Nn~Nip$DnNo!mXoWsNiy!TGPxRJ6@GXkF0VM^rooOlGgx~DerjW%XHWHSB>UH2FS{l z2`fZ+9K@Pg8z$KeSv=-ULBc4_nL9F18l}7&d*v`m;hG>p(0O)(KGUg~vR3X+7H4JF z#43!UDfx4J!L>jfEM!2%ENsV%rXk(^#*bbRPyTlTKY{iI#{9}J_CVWG5g06%_bJ1l zM_?LuOGwP{Mn^2)G5MHgYupF}dN=ExHd(D&t?iYpR)?86n2$B6H>=S40UF728*h>A z)5A-lwwC%3jWNn|>m)1qbv$sYing4x?dhRDqJp+)HP{nGNiL?$ycRoX^-Qr#0j?2? zFWjr-ZsA_o<)Xb&V$;m^=Qy=PjLq)h{u^OyK5kgZa0%ie4@#C@%UCm2vd@nW-<=-rpI&k`L~YqW%tjV! zDsFEQ6}RgPGfz3vz!CTbcT4{?GG^$F^Or_8V1*W2d=^)RCJ=xL)@E$erf6J|~DPk*c#?pFeivtCVl6{<$pt#)g#+l4|5s`^2p;mFNut@x@9@5}K=i_j2(HBGGBcUJG`dpG?9? z_T|KR(_|ZEp7aixypcY5**8;Ry7U@Tmzx_T&d4V4QD&o^CbQ8;()YjRnN4|8mmk^B zJF(U?>PCTAWFuNlDaclulvB7#MJ=0~ zjn)e6@G+G6-}HPwoR?bXVraO%4YvvZaivEhMz~Q ztH}4daDB7Y*g+tc=4Pi2|1MX%X7OuGWH^oQ(&0+!;hnU z_0yvPmz3Pw<$fa^pl6wbQbz()hkWZ0XKIxD~--7LF^A6 zOAg)W0+2#64qMlR0((p!`&DDPV2oSp&xc8u3-{lFQfv*(FGeGaH-^7RRqeu}gkR;W zn|7Uw#GQ?Ma;S3zdLylk2)HZtgKUtY%edn*DFOYwi-8rbamgIqh~y zQdW8=NC5rFNL;iis9kVo*UdB-&#}M(HYM=;5_}ncj40BA1D24Cb6U!CvLiN(UWI7P zRMsd{ut`j{VU(gCWKs-CI;ciT{6qrz>7+0~kBRPY14NVw zIsP%xv}!V^s4g~b7zRjD#x=zoZ?I#ZJOkVtk_Co~DQK~Or{jzxh(SuZ;D!|yJmKj|ChEa~ zV5!)plQb%9bvNS(FPsH2YK#W6({TzXI@92=wwHT(ae3ZAe5C|8PoPqb_n$8*0e7c5 zgB2%?rf>`C4V$SIP!g#`veAnMpSb20>Nb>W8q}GIROm=+JyZaLqC$$fTEYPcGaV~n zS}cs4F{_exN9+>jFDaLFO;A*>q;cy)JT&r5u#nv+s%Bu*!t@?X444x>%Vm)=Th9?c zah%w-HXe6M9AY0N_6(ovInMkbUl5$(0N&hlcnxyg4eHf4ISu+D@a38WH@T66W>Pjc z3pSXF+WAfkjQ5(^CY>NeZHw?ZBQCkEiNlLc!eM5SM^YI!kT_c@Wx!5mCIn~2b-bJt z_vh3g;K;Xtpq@umEIl#p5@GeYw2)nIJ4T9LNC8&MP+4zToVsON%q^aDS;&7?avM0H zf`!Xuu=+j0Goi4ZSx5vT+)83;q(u(!&`Q9Jp&wOXW6vcFk1-6T$hsT5n_{j^%USs#=S2R+fQ`@C0`%hA;0M)7D&>P&Z54Jhy7i1jEQ%_0j2srrm{$)i;AFj zqCcXU!Ss=L^XAt~Gy7iU0pZj%%nT?B<8kYUE8OfYsJ`Zqxg}2Y{r=|o^v(X|0Ttb5 zI`i9kL^CL|rmLy=GtxXKg?%2njKy(qBwCuc5zY9sn6=m*g*H?3!l2Y_%@=JwqOycI zYqv;@C3h=FQh=-Evy#^1!*P^wzbWP3!&4QUjed;u$0<@kJ!MUutOa#-vqtUH2`n~p zF^{EN)s=5S3ctnjRhJ^wRh`E*CN=hso4z;f1xm$EPSXu)tF&7kElf;03FtG&>MNOqOGj*Hi?TwAE# z_Crhv^Hk~Ff%)q5$?==|HUN}lL0+F!YDUE=PM3j-s;n_qq7|nx?SZYPs6ygzQ z<_46lHw^`oo-IgLDSjPI6DeZp9mJCuodKM%`6iHjPRr8>{N>jUe!zA`@V2w*%TQX(VSvUaU_&KTC7AY!no%9Ju$_*P>O>- zE2}M-2AFde%PJhVA*1L>OBOKrrD`-gZ(eB?H$XtHWPbf$BvEMfPcI#p%yIPJkV>Yc zkOLMD)Qn_SU>G;8dj|3-F-0XnI`}UOH91b1T@ftk3 z=Hism88^#qOe~3~kZQsRJ*P5ddXRXBS34&rSrTTuvsv5)oQb4avwvykT9MjK?uBbh z74Q>?e!yg~)PQ3RndHgv^)mb+v8x{vava8z-ppK7wU|E66!Ey_Bb&w-e5vQSo<)0?QfHhQL2R+0&!CN+SE7J9Ez$|7OrjS>j zESapGaw8h`Ldq%ty!tOd5}aJ50DX4iG8 zS=Skv6(>~*`qAj6MRJ~Wy{yWg2k^TJlTAN9T z90jlp;len3-3Z}Aeq?J@ z!AIKGptKKII0H4Bvo^`|3Qw2QtNGF^rc{wb@(I;(f^u%s^WHFoo4o%VDO8^!cGgjl zmQg>&8|Gz2V|mR)${-q+iAqs#8eQYNAJLqJ{fxnX$xs+bV8=%kA1W-)CFHIrghx^KoV?)QpLtPG=3u0t_tTn5MXZ3wolykEyXp z6(*4z{TZl1h_#ganD9{J36~U(l4OqJBu(kNX!ia5y7ufTPT$%ZmzajbEFqd(O-ldY z?qHJZ^7zc9L1qf$v@Z~48dMcS|f2A#pfDlviaA;RX3>~HC)VGnvxyc 
z>a-b38Y^TqR|(W!zRQ}5_V7pfeA%9`-1se}=%w7aR=jqvou)F}31>eDKEzXfb?2Sx zn$n1lwG~2JefmpZrbU^Rf5Ne6((-c9K#LWif4hkuEShOfx(!h4OGy*px@`z#jbH8U zk*v8=kzqOY3wx!HVYC=!*vG$CN~@sZ$@pMo{@2=2LUDl=Wm81XPA`v6FDWeP-RTdf zXFr|t(ahU5H{_b`ceE5TvR>XbKMm5lo)I0w=Av**Z4jERTwd5V(*Y9ziQuH!vShG@6m zk`i2zc}~H)ker6&fx!2^x4tNA5hmQHO0qV5A6BLBrLCdBvS`*mvGl| zPNkwcFCZwRwP^)b_Xn@xvI0>~rTLtjh`;3(&dpThCv?7I-s18TnI%?8+C>$?QN9$Z z7L9LWk*9Ig(i@}AhZqHgo{ge?;;rG>v4tD7fq7R20-~RaQ*yO=taMan0k&Q5P@Z9@ zUFST*r`{eMAk~Lfw^UG?oWdWeoNL&(c>&kh#h8)d-OLW!M8cl7Nc?0RBW*Pj?i2yaFW* zs}E-ao5gdN>WQ?cze`<-T~&ww=k&BVac9uH@VD3XRDsgo_?CcC|WVF zfMllY;)EJIoC9GXB&S5H-ftjbF(c)hCpRpX{f~HgEky%(xEzGp|Fp+Hn zY1nSc--`;9OO=Se1$#pZ;gd#jwGywM+{2cRFda*^n$>Icvk==Odb9m}xo#8{gHNp? z+#0uWKOV+6_Z7PyY1@UvyF)k0jwzcl_t{U3c1`SQKG7ioqnMc8%sMw`n%7NZV79|> zV6fyZ&YRMwIzX47vXmIVe zkbmckk-u2yaUJDSzKjpeV#CUFi5rAA9-%jed5V)c398vQ!k)e{K|6VSSTmjF?y%68 z6wc~Wk%F!YMnEj&C!>xlZb>3ktdSX24X=&|3px<&U=wLCQ<;WZa5i<8zX4JI8&w*I z$k2mn1pMjco-6;CUH<KoKbjt|qHQoLO@0G?z94tilZKbIf5=O=Vy{R~HXgF>R(6EWNsGiYL8-nT4q5rSWrVp9s4{a3lqr zl!L;hqu+D4RMtnrAd!dmfGuA~?0bs^BOC3RB;UG$kr{;5M=Vi_D?VneiUClUjHEz5 zUC2^$jS1<4jJt=MN4_Nn3)3f9OHqyifYM2$ah4q{V4HG3tT4fhSH>d8YLL;*dXtcx zb{g%Ka9@i=s%-PC574*Cy6Cn~WUm}WNiQD-W>u5E$!`E6Azt@b!+zeX5M;QH9*-yY zhsLh&u7!&V?ip$1TNJvaw{!I z2}SH6t7U)03V+Zlz4g`s6oozSy!7dpW2512WY{iMI69dpMf^&W&)fxt_&9INgYq)os zqEOhZvR!RHs_Hmpxsvt@uA|KJJ3m*=HOHDfa;utnAm>g~^Yz!C(e)?NERe zs3Lc`Qm{s&P@;2K?BHsD@bgpXM-cIsxlu$qh0ykp4w2y|InvIJud$&uUigdz%y1A5 zjOUp{WLD8I#Y|xWA{0&osTIV5?G+RL&fBb&GqKja_CBPvND}bwCtOKh{qIIoYsj9U zhMC#p@Maj@m`GdVw_IqE`&Olws!^}=`(^3z6@`;fuo7zs;SdzN+?;e-Krg{4jGcgmbS;lHxsW9&lr@?i^}{PW)ni?1GBSz}<@4(T@CgTo zU^tbxImu*5rx{Rw#i>?op4KbG4Ia)o1E!kXxbw+qr0{-*@RBncd0p=?}-J-*wMUPk!Nd z;OT*?R&TKQ&Bpt1Iu3^$Bak4l;F;;7)>GFDzI=uWb|2OC_4ZECYXn>U&Y)J??)Q70 z?dvdXZnyEu-)gs?Rjbv7y*yiAUtio4vBs@!y1BQcuouFHMLm_sit}qEc{&tY5>qL{ z2X@glsMhemm1pWNf6f-f6>c(IGGDWBhm1?ldHs4a8|E;LM>Id|% z*Yg2lbpGWp@8{!DFey!f8=`6bK^H^Dhb7ZHw1j+_`uhNMD_sK5>ymM-i^5z9zT~H1 zn@jv;8UOstH4NkZ3x7k0(KVrgY`g(kYJg>puCt%X_BHE+dZ!25biLiE)!JL_Ub|7h zZf}&P)XZ}2^eyt8bDw#$%w==jj%IbB{3$NoC?&jlh4@Bwo_isSR zRBwmD4E33WpW>{`0n97{Zi6t_UsI+ly+s%X;q(K#)YaGc6=O4iib1j1eGzqN;(aOC zd>zji_zEYS%Oz z3$WjZw#zGkIy%C6I0s{j6~ST6E0m-w7at{PRS^c3&|qnriA{&y&^X1>&;)dt z7&5z!@D+;Y(Few#%4X8Sy?oI9ng-|^gt~LQ$c&ulm-J|wgpaW?XzI<;8@bW41=n(JI;WAB!)QGJxWT~D-r+Z}^=p!DV+{c`k<4N%UQ$eg@*^Z&II+s33gi3CLtSsgJuIENX%#&vwjY;-OW6dQgwP~xi8#A-5yU@LKtUL zxVs;-zDnDhdBCL!`daGQXJjAXYx(d~c7J#-rG|YwZ;RY5cnv48#1o;8SKA$U^ef6U z@OC6`->`j$4)G`Nk(@PxKHTYt!zA?EF&w1iJo-tdEHbe^>)w&YO(SMC!-Os|eaq?@c`5Umi-+@g5uW5GR|M0&Z_^S>7!v9+p|E#X^(}LET z*bcV33ID?X8`~8H&2=8r9F7cLZ?xcF{6GBNz#(o`)b=CVYr=aC7+wQri@!p1I6xTe z&ZjJ!PA^yqx1p>`wCaCV)zSt2iQ57sR{vPv-adG>*=oEim({o5!nl7+@v{f*{YqQ@ zwPKFl?S6Oq?x5R61j}|-WH5gCau<}_XgG_;-QMgU*pzO?XU*Pyw2&~}CXidNG}ROK z?iNzH`g|fk82-Z{@ExLY+~e$y%LXDfTIS`Ea` zayE)~n2*pwd;j3@@ZF!Ly9cZ-H-e1OfG-bmYyfQ``Py7Mv-R82JtGoQ9Lho z@b;Z&A9(o8QYY5ZTM$F;tlCQ&B@b$fVP8~c~q;%6u zx3k$K+1=O($F;lYeKZMSUu*I7X5(&y)ce}xcJjAb1f;xo5KReix|`fagV|r>VL$5c z?d&wRTAQ0YJ6|6=#d^9u?^$hFwaff$9eHYIF7JfdinkFXtSiK|XxwQ@mp)W46_AW9 zD1Y)-SmKYZ*OUZ+_AKB7@6iN|>3Y*KMLyk(38dW+ujbAi)D6TU9sW}hMat(dUxGSQ ze@Dep=ot>ai(!(ZbIk_{z#* zdt2=SCuB}b#pX2q1Un|T(WchDUPBT9G>#ZO*zP>oZT4hrXxJsMuehJ2K z)x^UgR?Dm5^jk=1eMbl)H@Rnz{x(IsbJgN!${M^hxCA?q6kjp#PEU@1 zI6C=-^FN%Y`{DKc#y-kV%YiCa)Zx*qci)|yeTToF@Bc)FE%A+m&@=z7mHBCi9C8*p9}F%2I?2nFxWL+{N)C&yxZjSpf|$btd{Z)ArFf;J}% z4*-0EA46Hcp@UPZ7;jzu%%g(HfXBLwhJ{f4%x2dx7(7qb!L_j4XKevhNx93)FZA*6 z7;=zRds-qSAw=AF2M0KjX#`#MOd6TPustn^*mqg|Cx}EZxNVa9sI`lDFvHS^I`Y_S z%(HBnY2}!=(9eVhuC#?qo48z%gVlbD>|EN=_3#!KnY+;5^@uS#MlWskLbDg!z0{`{ 
zwoTf5MHVUN(+^~%5>r^tr_J0?+qs{1^4r~dk)aKD|CE@MKNOk1s#cAoc0N6;e!^HW z8~UkEFcJx|@DbP_j;)RPEFd5}7o#9~|0~fBN5molnRT_lx9qRm_SgEy*A???<27S2 zG`2ewF0|RIG@2WnbIm(<>y{fi$cnk=QTDc3XCrfwr-E)@Ng>Z9vREznVD(!JV^C-SlZ^P=V7gW|Z@|iH z@T97<=!dCAf01t0TdX~r1PUJ~M;DhR)35v5@9Nbrmq#*eN`AYaVUT7?#)aQH<0}6u z!>TrZd*NOPrh80m#Kc`R?wR)fraG9I;NJEn{bd=mqhGKnNN7cg@|r=2QbrxpQA-c& zHDynEbhoxwHeQ>xB9o+W9jbLu`tIoT==}JA5X|sucoR(D6LXMKk)NL)V(L<|V{#r!i%YO)Zy?^_^)nS_yr#9xxp8(w`s@&Y|A_Vv!>XgV) z+@}JPoO0CKraQm2*~}c=g=?mj$7PBmZ-tqYr*j%)1b93c7**y4>yryr%k?F%*he34@&rUD9Z_kb|&Q7~Oot^)1@pk{_&Z_@HWN zKS-+WH>gPGj+g=~nW$On;kojcq(8Y47R$Vl=S-%Y0%h&`I90mT!bkw)*`}r{5S{_v=kK z*Z~~$L2s+kzAnn=OtrFzPX3%O)c+xs)MkbJP?!JA|jr@pa6fb|Slzu0q5Kd>Q3=jYwE3R|Jr-7W$V3@sOz0AIrIK#=QR?!P)Y zIyw6;<)Iyg0YxBTtw@Z*t7@#q&sQfvX%{ae8rN{^F{|W>eC-eMvzuI&hj`dc z3>SirITqu)TTC(2Sw4-j2@Upa${dos=XsCuXMLU8bUyu*8M%Iyr?z~pA^7;kj|2j0Kbvu7_#6D#w@Rme zT%$gt%UQcVjq3KA%*4!V>Qut7v*jT=KiRlb1I5CtlQ2ltEFkrOJf81fIly zSqI&(jm&Es{(fQ+GVUM&I?*)#zu|Q$to~0`|Jadi(Q-PqCfh=B$nugly-^B{>SmD+ zn`PJ6V>S-h5dv7Q!@+_jX*6GCpTQF-ky+6MvAQB4%IY@7I@5kO(8#&Iv;BwcrykTq z@j&1%Z08L6Q}!dfey)EKRyLZ@B9c+gKb!jJ^LE&8;c0d_HNVt8N^hxmZ1c#On(lKt zN`qnpnt#|vt`RaX7aql8^3d)Ot)-b`M-Ol0(f!B=HmUNJbT$w*ehB~J2eK9qf^&x`Ns|=3*0S~ z@RAbwKoy8UgP%f_M=d`Vh-J9sfC_@~dwRZz$u9%+GUji*Gw63 z2*tQeZo(fwlEE;zN%Tb3jLS!#KxK}@x!YptL5^&)USa3!wt~7(9B$1_C)>~<3Yq&OFFD& zJO0!l%_05OKJwf8x&&RcfeE0d+1$Z@3oZlZWlwY?&{IC+qmNe57?#x-D0q!uK-d4p zBUZ;+w~=;&s1v#me15vStr>e z4WIu_UD?g;$_}LlH5zH-m}~XAKm*TB2|uDj3uFLeI_)8X`rGrP*T+AX#D3ESO=CVm zcQx?nT}{c)wQ&w3Va)f>Uu2u{i<=g-_RfAPKV3>5-#`IIFS*aWeckYQi_>G>gbO9?n z!5TYK&H(?Qq;~c;=jT z4H4qmjX6r=8lTB)3*7|WQIsIExkpG0a1Uzje25`sjC((brpYX?8K&N@1xs|=+P4Ub zoQk*W)>Y&;pTE$i^ad!q?M9{9d;)f%@1*W5dk&4nUw9OEm9uZTlLl>_ZonT=GR*75 z-C}rJ@#Bh~hxScX%+*@IK;S~B^$R9zZrgd*tLK$RrO8anQNcF%DciUUJ95s4#-mZG z!K{m2nip|r_)Q&)I@NFI&Bc}-==L;=a+!_!T9+=PCziC7<~B|%K+wcx7PoK!at}=& zL!Ep+3O6sN+Ta7BavG)xT~n_!2@o0}_l))vDvFY~ZHK@W^;ws7O>J1FH4&vUQ@)t? zOdW=3EH&Q>hQpTnE~@EH3uhDU4u$_A3?y*+^ziaXXdJ(er+2}$|8^SR5S})^_qwR@ z5pZlE6I?oanqnYrVFm=dM97Ib6%GNOg#{dBZLUK!`C%c|)y6>u9geW3;pdNpD@pJO zxJ;t2$PYj`i|^=LH_3P!vJ65zhI5Vsul5x_dK-*y81z0}dd4V{8TsNECEPql!%_tc zz;3zSTw)5t_c*ZQGnYw^)$zueOe5}5h2B1g`%W3QV*}q~sbm3pWe#O7vC~rpgkI2) z#9CzA$Z|Je=;62*zU4|f)9mr4asX8>>;$(QhbrsWbOoNNJWe%eoc+z~Qa5rTZ6FnX z2!@f{=~22PODObc!5x4Fx68Zyl}VV9HsRePU6kACfe%os@`8|3Pw2qlV6#zggq!W= zAgI-Lg7((VPSENEoh89Y>DE?Js{SWL_;bP#DYPPIjp=jQ&Gu$}yWVbHH=CVp4D$|~ zn?bX&(`Yvr?a;rm<(ZS)27rtF6{Ib%_V`0=|9s>O@MZn2Z_xT&|~6Dp1V^C0$fiVyMgw#zm}O z;2o+9NYPzFS|8O}hQs1scU%;3L|ND@-=m0Z2HjMFYF-3RiCDL#f; z6kk4A!~B~HrD~w;WlEY?ID0!QKyktD8aK?vyLCx>ZA~8HEo;!`fpeGvU^7$L2LbD# znmM)#4|)yV36L_V<;Aje>7@QhA!Mg`4e92kO#N_rku(grvdHwEU4t=SwzWS=jmh1M?@OrVexiC*s+)ZmHmw6VA z;3yG5$N9;TS31a)=PK+I*HPCd=KjkSuL%|;5I7Jn?mnF9XqZS*Fnrew;VPGb6oJB0 z2g-4_8=6~OW@!m3#_}ReRZHehJ)N@9xve%!=ZUC+v>Z*ky$he(dl#DA#b%B)pP}Ev zQ{c4Z%<`K;O0_it9a06k=#Ukxa%O!VQjXf)@v^GnQehKoYwCD}YI}gGv}<^Kq!!P@ zG1yPZ3+kN!ENY8nNaHr-{?qNf`Wv)43f^-l5}xP?SBvxfj6ouh7%N#DF?)@Iad1O; zbgcCubE49d!+RLaFub9q5`a7`%LtmlJ%((!%YblW(h^(8u+COM5a&y%)>#FIZXZFo zKItM=8QRq(#>D%uAV`XLyo_TPUFZRl1c!8XI+bQ!t#9$ISa>NaMgZsWi70$0(rwh9XxUamU(AR#y~%vTL?! 
z@7W6}rS9Lt3`vFDKkp*`-U5Sh2pd|r0Vl8Fvr;CH}!Au zoQ%+oyNgFfD_DHrt$25S;hPfA!XJE9KJ<#DI-DNQUCKoaA*lS@j~$0g%ha2b=o5tV zg(iwevl6-#>8gj;U=|N&CXFprhSnR+dM-C{R^@IABD%zB%eq|q@wNP?_|NfF5#|$C z#-nQwk-^NLoIle&xWpxpKsy2xqA)!B6%%$poP^^(X6ZX8i4Ru#v>`9iWzjtaf!h-v z)sN8=qXh!$6f3)bBR~Ln>Sm%JkUWJU8B?^heh9e?D=$C}9oep!Te6u}CR2JCO{AKZ zqVYv>4Clf~Z$G}%F5^=~j;Djh>2E&JCW_+)UskIS_@NMZ)hBQaz+6Dt zg+zR~8WH~&Ze^hEg8Ro4@nMFD`IFLPNmX8V+wRYN6@V!9&`@gCq112sD4suT(Xe0!6c3b}=`$D-A@e-R$snd`tzOreu-T^RAH0X^>Y4Uq#k=sc!kqXD3)& z3P|u$yP`ETig!=xZyzJ!jH2;e@LXS_N76{ssh&}&=FyTl?uy>q+iPA4zN=*InN6ZF zLXGQ_C#5&Fn9j6-3nMO$p=$_8dQ%Gc5F;^%gwDIV1J~{;%Rak9K!7A3};Ti zdv-O4ELbT+-tJ1RO5TS#Bts}(IV8Rcg%=JoPgGh&;j0f<2wMvsg%n=(Iq%`oV|cI3 zp^)(+qg}qc3)wW~uJ8Zcz4+zLtFx2tkNYR@jxe8~tTt4WezBlHUv?smKV&j$*<@Ck z$fMR^O(#j2+EM;x=ZK8Njy_*jbQ#pj7h77eavcFHF`CbVeX@|Nr>dyl0SiIx6; z!Ag0`do|6u(_2~Qngh5H{rhn+ih2suFm&8@O}!1^&TOE~kxH~;K_>Yw;U_^1sN>hs zOo#o}q)6_cXSX;O;3DL8p+xfs%}C7l+D<{SuAFwlmLAON?T+IyEI!?p61Rw^Y~;c) z>km;pPed3Nd*mhogs!)90Dj4)K*j{DMR7pU0=f9$j?%?X;Wr2Z5x$+V=B51@Z-S4f z*B2)4oub=IqN&_CjFezdCRKwzpTW7_))g?Rx#Sc(&d!fSfLUXnYTEUp+;G`nkN+ znPQ$$u{3bWIjgw|Kj}v#oGGaH>s$4m&Fiq!s@2+?y+M0(d(djYiCGrT&?wf38FNxkrj6+Z}jl!?M3(W=;#onBrGiNzKTQu zJ3mPl0t+pw67-oHEbN*z!p&`o&~l}<*Y8dbE|1SnyQlkaj!HjQekn6c-oK%ks_tp4UXig{rOc#Hc@fu*3#-LhKO~qyiPpSpNQ{TD8G& z5b2K-%=?_K-k+^E#Pe|-_Ba-^PjM!?tBPtht8_KMAgS+oY(CNs&*pZeQ3v+e*usC& zi1EY=Q;4bI{0eh8jqlVQ2Dp$$4eSTzLu`f+;ey^YPWVQiIUXeX!GPTm)?f_Bhc3o` zOy2n!#o*q>I3p|}hOzeOY=T|_q91DQ)+urOSxq_fT!Slr8d-mBG)Cz8VH{7a5t=zQ#_?qEI2(ZHku_up$0R^g7JO2J$J2s3V}YR zvAm9Bf{8^;eHc;C{y1n3{{_YpPNN=4-bkmIT@)s>nzv*RZ86Gi(V4T0t>9CS5T%|p zqkysBo(i@!HbqYUazup9fmq^_(hUtfx@cJX$ zOu;|+jjBbBhIe|FR7q2nlzrPEZKMZ*)iBH+ne2O|Or4)br(J2oh27Y0v4iQU4XmF7 z3P>R>)>Vo0UV3H2gAg3y(bCGs!;Ftq)x}c}^xeug(?o70p*H(E$3g0=OFq+-?>$n6 zHeR@Gnrn{nK}^FeC8G{2k$BbH*IqTvJ0cNnt$27~kXJ4xq; z#;e~t)TdwLFU(1H*2a6l90uE@t(C^2%;(vv|3n+nkH&+za3^R@{K9tTXlh&;U5Bgi z;Wt6@UL*C;i&ZnC)Z>iKrJu$3*7cL-nZ)b~X1*X~|GlKT8^9x!iA|B2MqyLe*2EODVxiDS62=hu`={GVGtW=hwvJ zr7ni1`QBbn#K4ohXHrbw8|bH8N?(GYwIJ-EOWum8S8{KNtkKlQnOCGXR=v_hJ+mcc zkDjJ`nN&Qf#!`rjs~!~@!xzU-#GtiDn2fjpEk}Nq+ksbkzeq=NTTd~O^oyF6I83DA z5r>uV6DYShU2eLNNr9KWXc=pkevrNzGLEnFA@5LHlE!T-(k$YS0vXLJ3Qa37+%UA|Kgr zBHdXUgq6mBeub~Fw?p<^km8Y$%~wN0x5>ob-0DyoqmwT?eZ9P2% z#xg70kJi=7ZOabo=2dm?LGSY02j3qZ{Lp<}D%SbjRjSCEF>$5$qvF_V@^GI}yO2m^m{Je{&hyo1~*k>Kgvc1mqVRTNEEG4|(<&&5uFMT?-MX>xEhw@iV zWgUY!59i{|+3DfY>kEdDUshj-FynJ9{`9#x7dGqd=3vm-uD9x)TCEdq@3di4nqhxQ zxfX6C-=o0a{TXwm5%R50{~eL}9;l*1UrcmGjyxyBfA10qF2&2%k@u<;m)fujveGQN-1UD2W7XM_R3tMU#p_Q!e!|8dCxx zc^<0+`@-IB)Q1tFW%j1mqbTg_dz0G)pZ*vn^I%xvijDk3H72BM20T{J#cA?fl}tT9 z{Rk=tdc>H{A(5Q&gAVTznpgRtpPY1$etvs)e(5^E)g9)4xZTnOhE#A2Uf~*o1LXG< zvk7lW>Q>2bw5o;^^0e;pkGte#*{=QR(!U>J*c7qv5Uxn~=spRZQ z>A9@u^D+jld>sxSK0u-d-U}6eWb`DSZ1I2)l>ieqOMN&~A2E2Ku=nFP+1~jx?0w~4 zBFcp2%7qN{#9_A;PJw_eHIZMEq26Q3I`}|2tR{+j64RC{EV9Tc#>e}Ye7rF`yB{HV z5i~lQusHrT7H}vYCO*J`mvV~s3V-zJ!Gn}R3{dX*TG9qRgTqMS1+3-Z1$ar+ixC6T zC;@A$f&}R$28WP;v8s$;IRSt(T{0R3t9!^bC`W|CdRY@yG--VoQ47#H^7|xxKny%hY%v-nDAn zs33o02?R+=qaa_yHcReWl#9+&HmwIvhxcYBMd=SgLU515azb7V0`LF>lv)i1zzMz> zg(I)16A=14j=5N0QkcqZMG8KL;D1ZsXOwk)p)fHbG|N>I_OVq)X`|T*=Uj~51J8Dv(9m( zo}r=gqL#Yvw%V$c)5C_gO~L~xO)jSdS;x<&hbN+nhtkHXEv}Y*=vo7Dk-qOXcRJgx zcC*#C?BWURWJIt(?Vq25V4<{nfiYed4K@+18nR7A7;C247_t@A^`{ZuNg&D+nM4Ar zgDtU)mfT`A);Rn?2)LZG%kYn^V1ATvd7V5V#uyO+wbTiZ-y4+Cn|Uw|;8mSuMn)dN zsFkJ}hgL-CGlc0tnuri~3cel@SZ$KTpsqzRIzM>gTVem3;LtD&oJTh2|l@~peorqHyf4ardr?L#5gASL|g4~CGcfI z$as+5nF5c@W#0E9kRA!tRn1jM+CMi|PKz#3Hxx@REXh_SbU^~?wJ~yx3xYkfYtQk? 
z{oE)V+3ILVi$~Uv=htRri9a;9iK)UdbPo?^ANl$*KKtJ6VfJpbZlUe34HH`d56_`P@_x%L(&7lbqR7`{d;1BgcSO$8GsIfUPX?xROR zVNE4YuJes5p~hiD8%ng)7#}mAqlX^mo-I7WSLOV)PcfNO4VZV{V9Z=w3+8%ABg93==%LXyo;REuDM>6F=8Y88EuGQ5 zmL?%kSY9{r>Fa zkaIY$XK<*dfreE~xmgKtdFE=_l1AF7DT=hxDKJvJ1ha(5tHlYXzy8K?NP_xGC% zWrv?YHzjncC)S53x|~$B?=&l*n5{Qjb^O;a!1)xnEzbzAxbP#LZ2?!cOy2<%O3*kE z4B6DK-I6B1{2K-a`bOG>CQ(TYK5#u6$NQuolm<*aluA;mC91*;0xiVD zU{X`ZW8*?gplx}W;NkEY!f(~@Nf=p)`%S^K?uzac*za;^g-nNaRadyl2 z*c=4YKKkqs{6GC#&wf{W*z#wZdQ!s4N}kWwv#2C?0cQ&-$QFpD*j~Bs6E?kpX64&> z(QiSN%5Wlm@}|-O#P_f|YCKE(RM6^^PYOk`*-uIi?s6r*psqjB#?*XMGckyRqO^n6 zf#>pPvQobbf?YqWa}T^G&nx-Gr|%JS6TBh(dW`r^m4>-P7>cK!%UVMjkY*f%d19q*gO)7;({w{CG_JqVVj)M0DZ ze)Fyhj+gEh!cJXe_8L=|Ob*pF7V#6a>v|8p^}qd=d6B#w_}vAV&yc8fuzzLimtuB@ z|2Y~tW61v&1xNk&q2T!D!*?Wp=Zss=cgC&%e$KefdZXMzC;wKx+}KJvNL|9ekxVa!9WLgRdyeI4W|bug0^N}aD}hafAa8^*Z)qZM z$#O28wf2AOQ%orh_5VIHbf{zfmHk`cG7X3S&b4gcTQ6JIfie%L0D@?}D37Wv6rX4# z$Gb4LQgW1oDW>E=(P^(<4kpsV(Rf{x8t)wIJnf+KOw*Nm-udnM&PsH?OkOZTKK*-0 zZ3#;MonnNGXdyCqd)oT95o0a~gWv58|5oZudoEM^@^5v@c*4R?DCCn9{%lI>)LBh* z;#;=wF3Xkn`-DFK-;A!tZ;vkJ-zpSm9v4K8;*jkH8)QesBgaQi#5#H}BI4Ei+r3_` zx!q{>!fG|#Y&7dzjmAwUYGy~oV;KI3c>K*sti!oZo)8i5Hw#9GX6OM`Lw|%gnteuR z4;iKI`VPYqaS%H4GJu~W9t}Egp^Pvu6%Pw2*_7 zbt(2*?43woF)^>TsCm=q)xvgTvtO-lM*V23H>eK=Q9FBHy&(P5>@CW(-d3~zlsJf< zS^pn6(VRqu%ov(7p^=%i2xGV%ygfNRx&C|i?BMY7;;Q?eWqKMvbw#-Ozz8DkhR#9x z2o%S-L7Q+E?*n6Q7)!ap471?`4jjbMT{xo*9rFjYz&j*VA6%V@5Jm9>mGEaDudd1M ziXe!FlkHZ66H%4}gK;s;R&q$-czO_>9Zcd;qKqDdPU9RzI3s8GXv`*i(w78{d@)oY z+1%5hBIz(Ti_3(h<@LfjS?(M4a7fZapN>(%R8k)d@sY+NvHAVMm4+KX!T`T$noDi` zbU0po30?}!OOvRx~;g3@-q+-MS3EWdQ3wBvKK|AFc0{%xz%?X<`>E~o+K zq^ll!D}34N1jncEf;ooQKzo?5-w&#og|2!#UR0;E$uyeHA5~lg?BmIJ9*u&-<7=RL z0;GpYt0-pg7b*~oQIZ!OH+#_U4>>X4=&?*wt|G*c^BW=(O+wK*tFOWf0(5cZa4T9R z%M=Y(* zaNQN*0?@V#@2jkrh235>uiov`$3O-a5HIPSIZIa01N{>V3iXdv1JQ^`?n>6g;;CQ_ z5P_CNQrXec*t--MmH+nU`m?@^9je*ijGFD6W^MbXU9GlSH{0!-M!VT+h1oll4a>hZ z*=3S@(wZs${uhcNgqs$pMiO!wMHSQ$HFC#saM?{F8($?glCC5^&iPj6A3gI2W@i(B zf_NE6b0;*fCo`bkn2dVMk>YUJt<7?iQm2;5sF_(Lf;mK-(oY!^3*T5Vv4UTX@auLl z5i{#6JHA<9t|(lVz7)?0sYTvyWKECbG-Gx7AUb`|=%T2gI$4nsSNHOI0s5a86^va9 z+!(qojCCio7wcv=GH9y8M=YiYf`7(|0p(GQPFd#X=sr+?*~tsl>ni_+KBv8+zLpmF z@)k2HPp08N7vikvq<}Cp7s}5nAE7mh!)G3anaKr~6E7a^K+A4V+_Ms|AT#q9M+3%? z6TpaI)sN|iT!CnvfyuNe6I*b*K!-)#^|^fk)wgCd|`^FxV2;% zfqb1z{fB6E( znmg!#YeE1M`M1Bl?;?KCdvJ=LhvlF@yu}S8AsJp@9vmLSxf2Jx)x^{&^uGx|!r*3( zN$$$@FY06AUmYQdUyB(DzOw85H2|RS2v&pdE)UL*Df7$K$1}1^k0 zB%uZRAP4ov3wOp~{<}9}JVaf2BxVi&FsF?^us!EjVVQ9}=13W8`t;-^#cGKs5`t&|nlyP=&4d?Kt)urK_QX~gk8g=kaVx?}>55K^Npirpq z?uuHNbF^NxikzLo{InZ(HGJ1>GtaEiaO<@eXQ29a<2^kEhJu5DXIsC%7!1_=EvuXI z_1VGS3iO`Oc9r&8(G<%G5*ux#fQJw zJ8*22fjx&DA$v5cqF3}78100TPvfgNn2J==0cBUVpoMc0ObWAi2v24j_GtJi8a<-s zm)xnt0CVsH0*6!f*mKe^qpcJYV7D8Fv)&yl%#0k66H^GrM+^W1>kOx+5c^gC`EZ8{ zStt|i29s#qXLBbMa%}+@(QCp|Us2kwchLBl(JB+98ASki3FxE4S-Bdm8tt{s&2K%8 z9u%k#Mv`+s3Rt%-ipspijjio+y$+qqxRC+(| zsLdVEa6lZ;88WVu1=N|3-7jGm67cMoI{&F39u&dgHwkc~Lr(dqU)?8PO$Pj`q+b~w zao+Ca?}$}%lx*YP^G^1K(Wz`G&T*IbyoDV>22^O)FmV_New#Lnx2F%5u;2kO1y^&y zu;ygm6^g(l6cu~MjO-&UhysTXi@pqF*nEIvOmKz9lQL$F2M#eK{eXf`0m85>YN2$5 zT1?zNoSzbB%6@)NGoDA8Q*eA-reE7{Yp$HI%4`BUHHQ! 
zCeI;|)$#Bm`X#9m=P{g4fsJ7eu_{HpT#llPCFIQdyXJPRl-{vt?-p*%Vph9E7fB)@ zN@>)~^;%GB);IBAcy)GsCIosOm26o|k5m|5S+5JaE#_?-s&&N;pk&pY=skQ{*}osX z4daMqT9S@-bWTeNqq1+|2b2Gh_be6w0Z13KIZ?5wNIe4}&J9B3IULQqV%?q!#_w!D zVkDY=VH2o*c?XZf=zdk;{Ca*rqW%{$YyG&3&p+fM_@y%O`lxA~P z-3A_p#5d&`#On|LC(#?sYAF?H;XQ>;IvEcp(t$>&8F~c^_%`f4isTk76=*DMn24fx zBJvIo^>F@WBk0Y)NY5MtY=79BQxb4tv#+HQfPOu}eI+g=o0ETkK$V{jv~0pG3(Mid z$!xlafokt2i%}oLAVOa$5{ABs=gWt?XvP^>IMBM(^+xxYcTq&-4w{(z9pR!7&5*7pgZTqy5C9o976&2) zOvIemdAN_Fx3k6YALGN(FF!ULKLPju6_xMX<)9A#4}aC0svQ^z;(X9NF+FR+6J4X6SC)B!*Pe?n=f-+=$10DNoKgRjPkf$$Gq zfk7H?KL`JUSHb?iN_cO-ZFt|dyl=Psa68HS1`|Pr&%o|#Uuv6&t%Jj(qmM;ucJn7W z2W-8}Oqp*UlnQf!zU3$}faHFIv1dVkn1#a;3ShrPGuS*SImFMr0z__R9@5(x!iRD**6x`ncj z&1OTi_E6VHRgregSWSu#i*p=|`&x|i|KKp?5=Z*AZdVXSr-euG0?7x2QTfr$|7mWQ z$@MY+Wi%>KN)~KoM}taUr&#L&kIQnp-#*SG5NUqww0KS6qQE7A>i|g^2_BaQQT%q^ zv?z(E#j6Kb6s{Lscg+^BpLS<$sB+ce`fBkiY}I-Fwwh~0MQ+dpFiq%av&~$gg`fd! zdvk5D9mHaX`37;67BdMDT7q_a{odl>0R)`?gI0sN6UakB3&6Lw*6(eDc_6Z|Cj8w- z&^T+*Tmz%n=`lH%Ve;qA$?i^D3wmlq^IGfq7S>zID*t zZX6yRynTDre23SW#KziNKbb4VU}e0w{EB-E@MWnaOI@GM9P6_{$AqV^PPt>r)k$ke z?o=(NrC@D}N0MoX-3-@IL#!)YwZYmXgi8qNn>JRlx#Gr%1RJbPK@DRC(P~0o)#fcc z*j|Bv@c5<(=t|6z*!DIL-?p3ek40zS!^hqGN;qA=3&~c-(5B7|$1-H*Wi;hZp`lLd zwJY`Hb#D}X2I@>)xjJ9Pb!&e+-0alrji}cQtJO|;vvm`7+RX->a#F8bT}a8RR=31| zu36W+NM}3_;z-?`B#aq-Ma0}jz4lGL7k2uab(q_z)q%e|jrvV%YuVg7;L;}8DP~wm zY(B}V@j23{1$(@XicZ-`Up#JsYV>oVYZ#G}^C~1&l zjI5TYsJM+ypPhfg95Vg$_4`iCypFF{Jti_QQ_Lo9OUd5!F<~}Y@d0#{ z$|q8ZFjLvb0TH7|ICplt;=5^qM+4?4M9LCxGAL{q?@=P)pE^1P24)(eo{RQj z*k*qrWS4_~p#46e`co85gBTWTg~%Yd3#ZdZmBVhkP6o>Ix_@P&F`t`R6n@&V-^+AE zZ<56Yy^Vq2V7H?_o!6t;kMU2zeo!y&B!x1NIY~{yBF%mH1@E&j7MRULk!>Y-dva~8 zF_x};tyJp1XrB4na2E2CMm(aU)yQy6{O-dDq%5J$_o8KY*XU3RSfLi-7pZd?Yt0yMfZ6Z0-zvr77OzU=IE>hI?AoD|3#r8hm&EW9+aAJD&!kh zgX}2q={ePrA(}WIv2pu3u9@skNh9#l9HGM6UM3LtSkexqyR|*rC7A1*87OG zK@o>nH3QhVna;!YGER%e|3Y3RQRw917}w3!tOq!?xQjTt$>^#SH+m;oQP{k=m0kRo zP@7uvw(980z0D?Ss%D+q-gX10HdHfuu#16?Zt6oZS zWbr2WI5O~(7eE&ExDXT_80^LvpO;V)sFq_-=D2t^xPDcosh3uAnbKiy|YP5$nelt*DM8 zTIn8*ITMrGKJ1w9S{Q^mpy1qGekZ-~Sr~tkl8l^ve{p;2WF4$8%6lA9q9$Yubm}zOyN1M$i??H2-s!NU_3P; zRt2g2W+YQy7DQG>N@*9%!=lckmEbx_!K_S{cA~Ntf+#2e?6mEm)M)^>b-1kLb!JfH ztk9~sxq7OTrHE;WVE9;4H$}jkw-+DJkA%!AxN%@cauH?EF3yjR-(9I_FH7{2n8Pzg zAIYDTUv5{`>Wg*e_tF z?^TXdBLzKto`$nI-k^IGlOWCiBkIlnS1Ts-(3SuCfSjjpA}_d9lCz8b?ygc~#&mISQdX399z6u#bFx>IHzcjW2xsB!@pnsWrS~7- zJ>wnAHr=so+a8iHnUB0&>Kmyy;j}Ze6I()GlCUeKA?XU4*3if&l7uaBwbZ~9PM5~F zM0ktLI~>&jYCx60y>EJx)7+-CfUR1&!`EHU0~!NKe4GzQ@rx4tbt&(3`cZEb&O{#Y zXHcAe%N)orN{;RF7*_G|Xx@#b6*H^V;bm`c%a{6i;`{sy%2bze?h$3Pc1d@Qq|_fOha~!jT5PL#!EPqd6g9U7=3G1M^N8#my$%$+N_Re)oT8(y zur_GaTID)Ub;rIVT6e1Dg}Bpfsa|~D5ErqmnZy`%P9tRYO0@nnzxdhUwiHu=uDL&> z04x)=>HBVy(5qbk&!QV|_%;AN$0>pAq1Kc8$LUkfnqy2ggigy=xBPk&QwLi{5FW~) z1!9{VcqfRuZN|=*-EdnMjpE{gddi&YL69~p)~!s;I!&*cgV`PChSL(|FwULgaRdFy z(IrRh97@IB=xmoeTj+WW|F#@5eU_f#Xl1%!&ADfjdqJ9tM+$FK2_EaXmjV?ZLG*BY z5#7}G2#Hc0Z_zDFBqr~LjrG5~=Vbsp_F6<9=*q}H8eq`!h&PHCm5lSFx_uYtM|qd2 z&W|cKf;d0w-|BYFI6u*W9ZWa~fyER! z(8>+ne-mM!Q_N-xK0SNnAPWR2_%lZq&I{|$8W^gcuuCWCDgbrE(TGM8bL3xfr?zkO z6q>z>^luKBg5GOkA8ij~0!OBtlCe!Vqyh&=p`Z%GKB~DU<6$oxS^ebEL1)7GJi4E9 zb3i+vfk0CGW{sWzAZqTXqsgO#8DU3|=(FlC)iydji|#3;_p)9~4c?!DXFCk^8Y34h z?qv`I%!*F_hY`li5mvB!aEkwL3NsPUIaj;o>#d&$^t1?!l2}#F=8q!~dd?@~^TlW+ zj~Z3cEp%>thHh2zc5BEu6)W|#T-2Iu2-9nWDnwr}B_)va?p|O{aZ!`>pJDli_X<05 z(AKixpb@+(rlXK!OUPB_boR;c%zmbh|78A0bRLahZqT4W*u;>R%{@-yU`Fu*$i{aD z{7D%u$eE^28kE2@%@a!Nh9Zwl9wKsgMH`u6l3!F!4C=MdBZl8(qlcD&4`{8Tylhcj zTE?h5y}JZR6NlEKB$zA0N?9J#ffmq$X@M{! 
zsL@$a5;OKF*OKgflJK6t|0=f`AIYT1 zVQqg}+alHwix~#7mZcG!Sk3Af{v>!7gHY#WZAfDA(@b`{ql{LfaE9L5Uxs*{3G;%d zRAZYg^DZKGLEQI7D9%0dK2o*Jfz0Mj9;wbdMgbvSrFOs^YK}gXQu<(F%-;>J%c|&$ z?+fj6@Y0j?9VoV>pF4`i$XcfXPj`Sr{Jq*T+FYB{4_c zcTpWl(KcvEd~qI*4v|&vktvO&Z#~_Zs!|jn6CrEx6$Af5Te!GReYcu#g@FKg9jR`PLJeJ^KaSN(H84OT;31?k`0YyH*i>zVMZ!nZ zO=(Y0vNR)eW;qGI=sK==dLYzrMb&f14<)0D*>%TXV_d0Nfr?CO8pC;KGL7-}*aO^T zHWu2&iEK#~bHvmZ#(YD$SqANj(OE!CC_=fho6>0d%X z>8#|^T0>23I&-u(q0)eDE&=D#$?V>sSrQQKH3v(+@!tw>R{wKXw@y}?y(OA!R|1rb@Y* zO5^gM!t2j!~j85yun=olZQBAiMzy##sL{8QDYRnf{I77L%%BEU4fv z9BJvO>MQJ-C%QwzL0?afBJ?zjBfsyD>X7O8wH%Vq3CIPN=qN1%SOG3L3MsktC>ult@0i(;itJun%}whP^VwLa?@1jOba*^r1-)m89PejE zkSOta7re!ZQmZTe^qs+3D^97RfAy~3g0;*Ojd%y18=rwVH-hv*dl5<@a9}~z@nd3R z!sly}n*abm{}4NJtJb8*(5=?C^jrRxDgbCxQ~*NAux*6qi}D z=uKubky#LrX7Sx*GWW6~sEZ1tAsoDWxY|jtKzOaoUBAGBt)?2U!#)gR6rm!MJ*O$= zI*FJ3w3J=F0jsFsxt&E(qWIH}Fh@JrD&$4)6J0HS4u>PU`bFu2CU^n_RJ5|$TppbN zrTgLH>g4+5;=HS47zFk024kkRg<;)W+pW~F?i;~rG*`{mI~|nwpuQPG07GNZ-5##< z9!CsaOtGNd1I&cSvwWSNbXl-()~!pJ!YHJ35nIph(#=uv3+nTANG(+qGsI6LJ_f@HPtA zU(hnU&>8${8*cEu<8Gb~8*Kbf3{7vLZyY)ljg^*Bma&Jvac40<3})Xjhwq~iDnsIj zaH?+^C6D{(V%Yn11&r=PIGfXK6_jMz-lEW5orWFCg2JgN{^&B{AAgeOJ<8IG`i$?b z07h4j#?=b;fbaHswySF2qoW?)OyrmhIQu1L&0du|L8&yh<+ z$13hiRM8oeg=(9epr9{)U_olX&<6IT|HT8)sfiMv(E*HO;hx}xaO6PVbxgIS9Ms+> zU%8HRTzy75F#3c5hw&8|78JBF<@QXPPL-I%p;96;n990)h3@BJ?3OANIngQan$Z{b zqZp}J_h{eHn_=b!5!{JQ7=oYCvh)fqOMjN>$OE#Ig$l5;S{1ayih~<;9uH?jJm;WC z9hQ<}8FYq4Hu%G^OLd3PCI1PO?aAN0&ePD|vZev4PF23f@I@)IXd<)T$}2crN?@4}mCF5^G(HLGJ%TMAT2 zvez3FHjb7)+qh}V-Z2Z)mv|5ox0P;y2r5Sh4&%I`tHA;!_D%|Q6CNHV2+x!ss?^zP z3nT33z%#Qk^vW3T?ir23ZwyVLR;KrkO}(@}Gim}=j#XvObfBZg3CG~G!%|7(33M8O zLnqK~;Sawoghh`yxyCq8wqHuxiY(O)(`>Pin#0tYtU8;masx!<%|@%-?D+VJu?)}> zlyIFiFqI5P7>4SO4Y?Cs?#J3swtk+ipbaxtchUKbS#3tP!JN!D+s+}+bmzFTog<(n z>B(WZa3q-6Gxo)QGSAV&z%YHcg!J-M*1Q8%BxS~TwNxra%L)p22nxZD>TZ;rCNmM`DUftW0-49+|S~5oSOB?p?p9Z$5j3LkpPTT5A z=UjF1NO`6}=)A`>KT3!xg{dblm08M1-4zj}RrR`TSsi$2c;BYkq#KhN^Gms04ThA1 zVWNX?@!q~wqu_^I9TDyM-NhyFn3MCzd{z2^!reV3pXP&GDo%vI2gFEBZ0!+sy8gXB6?5!b5L6@aoeAzRFS-KQd% zkm)X#WLPp~c0!Z8MU%VTYNWYd_C}L95)Z z2t)ks%No(RkI5r+q%uSb-YE^>a6^lYZ1O!BwG3z#0gRkZKX9l&8N6-Zw!x8=h3n@^ zKnQ@5;I&?BwK2{~9dkdEVU6k1TmPFG%1k-^cSRuU%+O}Gr*Iyync%XYWj<%iQktas z^Xa@VH}}Iv%H{7WUna8x!PcwYj4q6tX{8o%8h1Ds{0SMJ(b#+iz95oP51?s+F5ztZzlc6 zoPVVPpuItKe2$s5tY>t5F5~#^AE9XlFq*a z$7LVU<)^4zD5v4s_s7o#nmg`|qF{5B*8doe@?>6OPbd|&gr_*V#mj$j=OCk4mzu@b zEg`xOhBGMFlqK~Mn*ngZjUmN@Fy0h#=yJQDgdDJ2w8V|jbIiqLiXIIe(aC=-?x)xK zP(dYx+zJJE_ksy9FO^3X{yThseE65{U%T%Lf{5}E;bdCYQX4|7Or}M^F?!@{JUX-< zNNWGhn&tTpijL&V@yY4<@UMRbGFo}3-EOuG*;m)Wbea^h;PklODR4BEGKW3pNc~1x zo(*~}CcR$cAf5Gg>v<+!97CxgW7s?Yne1;h*`>xR)4k2pEzj`(neP8_)7@HOy6am! 
z-JA92W9h3*w)tU5ABNwU3e;s_cPv5$j&H>8u41Sdfv}FNY)jhXKL>1|wpAd(9enbL zlMK06R`K=9_E+Fao-@+wry^$nf%-e0n3gJCo&+=N?9`K|CIxYC&z+@W@TZKk-&(su zPzh*X$RK@{YI0elX)9r!MpJC~ztup+zV{|)tIsr2wfynHeU=T+jxp|bK_)O$**f_ai-N{?w^5FX9BB|QwQ4*|==`C}o)03|&( zw$P|+J3tMLgJ9U7hsw@?jHZTECKx8n6X|9w7&^giX>Q2SaQG9tcfkai?E(Jf9o<*i zjGSX#OvbRaaA2kq(KqV~_y9^IRQ?IXa4_^Sh!& zvV5%`PJ;-ehlxbFXw*ygq?ozG^i}=_vB1?zz!MLP1O3A_jzJ&-T zES~7k%4!fq!oYHH$72{r56nKD&^PG|Qe6Gp9L!^ok_@L~WaKyG;!!bL$+8*@8s*MhEo=(|=9kcHuNQ?KI!_2MGZNlit z`~*jW?TKKjdlbXm0OATTf`&{B9@E5fE=5JJm68FsY;rec9_ciO%m4WDQi&3x@uC@9 zp_Hs5`?zai>eVaj=e~*4qRedVK)Vx!hGX>ys#OOgF>-gvWPYL+UdouWM|~#*sMIlw zbu9WB#YnKwRh6R>KW5GhZj^U|6(sv2!SAb;S7JAagp4VXXukWJ1#(w{^n%k1J;|wz zGQ7I&1?}{}^QY3EQT<)V-jE~Ey88qd){OqE;WDApGc-Vp-7v|2>@va2{BqJn~hBd9CO)m^6a!UBa{i@>zkXbyJ~E2;UA9jN+^ZYm2Q|9s{#d6S$cPw7x`TC;%QK zXe~X5f$N_kEvP+|oVr6ReA)KhLay2%v>TyOz?W*%3y_+NP@DA!Y-Fsx)HVN*!yTP8^)*%dlQ@tkk|x$3}X%*iDBxa zKsXRluqe()YG=7Mmj!OLGxgVo_;YX1Al@#4w#>skLcR&KxDr&gj67=2gsMa6@KEKE zqOr?(EPco_dj{(<6mHfMU#d0A56V)@d^KhrX4=uJ^uu|Gb1v8v9`E`S*jzX~*bFzF z>4;=SGJKFq7+QMH(#fMKp->7tntr_85hq%mL4kMV(AjL{(bE){5uTq{f79-0g5uRJ zf|Oii%ojtiqi`6N7K`#9?rC6q! zJsC3(Yr#Hsa&_G$QL`Hl(YZk6^RDa*`SX>hv9fI|LrZHplI=@Ad@S6R_5!nOLRHwH zZ1TW&ea1H{3P46kU2^^rTMOWJL+^X;$cjJKe%g`jsjhULtRxrb?lNV6HjU2BrgpVq z655|{7E3MDSX(4m)Z879tuO@~G}|JQXgpcm-s#{d9*2=TmBY0k%|kf7X1k1FK0158 zv%N(Q$q|CsH!2xGmWAGZ9!?gq;S?beAhrQ9krG?Q)~`#Hm!A2o47$nU+@>j5xNmkPoA(@ewXOLGwhXqpla;@*($wTB`2(x;n1&zLlgDhma<&6 zvC2-9qB4{ne;O3vq++3=R30mzGxKFpC?_iW^1jAio7UKIN3ibm@+f>#Ag@Fosp)-I zgJPkh>UP(QpG2vmxuftF_fF}aj8A&%Of8RZt41DrGo0IA`@j*TYE0DE7 zdqUAxQmU+f{Y$;7H|W|xZL3Lo_~xujKI^kE`8*>lTL#iv*7zjCsREIFjAdNZ2O?+6 z`5?Dkp7_mnxka+l_V&|u*mNhH=bNqdX2?LH!_UOLuyIhD7X*80XAD23AM4u3ub^W~ zfv`MIL52-dF4LeKxKGXJmN4qc0nDRW&r|LE+7s zUwG+92xx1lTbEK$O1-CRY1h`cYR%@|leMk%JXzVgN{_UnHCy#)%&I)3?FD}2q$ANaH-f>5>4><8B*r*VSw z@8@AZ(wTHoBXS#!BMgIQWiNjmH1O@1LLF8z!5-XLc`X*k*!cMbUt@i4@X1x<_M~$uRpkAwW zTCEqAN+nO5FG{6Su6Buel-s~KD6#d6((Bj3)rX_MRZfS!XdFkClYTUwV;aiYo8bEo zrls+tFjD2)75qT?d2=V+8j-sMOP#BFF#(U_p8GuUv=uIC}LBVGLVtzyYKEYKF zZfWWg=Uo`jS?V#PU!&EexcWOiIsePa`S;z6^V7d`W&8-3RiUF&J2QwQ?2HOpZQRA1 zQ2mvG6F4KWR*>SUnT2A?vf7Ifvz)1$VpcXdG-#sUWIFeO8}(hJI;||G(d-7M{1X<_ zG-M73gAJ)^kQAg0oW$3H&Ri1L3yP9Xsar9gR?Inl?FRV0d)t&N;T}R&U11UXi&WI4cV> zfb*e@S@o(o;{?~h$*DGVdmL4!VH{frAvL$$3gL@$ZPQH`+M-!VC{wZFfRPk&+$@PX ziUkZ5rK+NQgn?5D&t=^ci9qT<&s#1;G71G8Mq^GCa!`LEwR(!!l62AWMWhPycKYqb z$Md6tmi9=PvF=l5(s3cP3S|}{j~FX}oG8_Om`%pFjxXO#oDPaCvGQeIS>t6O;PVV7Sjzh*6xQq*v0@noz_*Ce`oJLKwgLfT3`@fjR=o6PS_!~VdrcMpJ*f6Gr^a^zz^?2VS6p?BIv8+&WcUj zUeUm=j*hO5hP^r74iyTj_;+GT2AE|KZ@j+~`{ctJ1{{Bf!u#wa@Cj9gjM^@(?~nj_aPFlFd(Aq`qYnU41y5b)U6B*WoA)%1S_8I55qBq zwS=A{D=j>XCbwiywOLk(qbPoJ5LAl1)5-qSf(h<-4?~c-h*7#71^8@DtcwZe$TP|1 zu-{j(9D8@&NXR5nJQ;nCj;X7s{riHaAnyDVS=^o^h-JE zL16eYn$qEgTy}i4@tM^tv_+gopQDjAJjR{mOfbLUVm>Ka@`$n1Gk{<>sHdaBq>*eK z*fi!~VJ+@0ezCOQH$o(YM@Ui-0HzBx~4V@YcLMWWi_#~_l%8& z;)SG^=wlJF$}Gl1Ln6EiCX3=LzGP4slK+tZEI4w4cL=KQFC^F7B16Vzy;ZK)T&}lr z<6>XOU7z1qVV~snG#w4+nAd&JS}oSMGgvQ$jJoCEmF)hGuOWLwc3QGOdYZBeSZ$S@ zwmvi0j3gJ0owhQ#Y!(zxOE#BF=awl|JP>1INbe>yhf$@C!RBLm0Hx(NSobm7G-#Sx zrX8|~vj*T;qdQZRM5uUhd% zmCrA_ha5M%#`+hMv_yIkXqlb0G*mv%iq%LA)M-HQOas8ry7tdp{pYFpg#dnbA^gl5 z*gsRD1bk%2viTYA^|K*zV1gT3`@o2b&sjYZ&XeEDT0nG=A>_r1nTEtS0s6KhdnWY8 zEBGHLD4AwXRLN-lMd=C6r>_KKuhG3z{aUtyQt0c~WvgK-_8)gum?;QM?CBf?8~Sxo z1T1d0+O&0SHLSzhvrU=H;w$@4;_;s9z~o(wBsLbRw(b^|temgOQU6|Wyht7|T=uMv z9ZTwY8!WjEKFf~g0pn|H^*zn57 zv#11K1~8ZK&r4qbsBD_fCch=*Qq=oln_^F*)#m1s-LM<>KciNmzX~%Jn;vdBSD1TM z7yP4?QfZeOB)30K6{ok5Q51gW{StPf(PSL4)wVR8NHXz~!vsAcvzbH6X9Q~lixwc)Ez+2qW_ 
zZ?e1v&nmB#A^ArNi{U*SP%%UY-&wV`aeHfTZI>IJ=RtQ09@0Bmayv)$q ze)C)4(Nz)1E|!vAWQu_u;KOXvU-Y8cyKpqR345QGbhNglWA&AKVQv^p8cr&Sx@T72 zS6^>ZHB0%rm%7XRn`e{W3N7B71mQ*)P6VLxU6Nu`-Njf%btCYam0lY z?}~?a;rKRkxunutS~pteyQ-EGVw|FI%-7a1FuB7I#{sm&sZFW$OV@I24S-3A zqQi#YOicP;F?^m64m-Gso~X5NHzEI7D#lVFrpCdB0(2hHLH8atF8}NQr&E^sm4}q`*~?DU^)_(25fPIQOw{M^1A_o z61Fu#_W{*?v?{tmL9V8H(IPtQq>Rk7XM#o}T*^-RvTV?@MLT)s_8*hA1~&P7*U8HvKb4fuut{aVjBw# zC*yGT_{TWl(tO2C-Z}jQUFIf5qh%ir(n3Q!MmT|QA(P*FriGTo0VN(i3%~q8*Z(HL zSSZC(qR5_cVO%RNnM7SL=iHeU(z<&vpNG9Wy5@aj%5;WW0};%{P)_JZWYmvc_kNWO zQw=4_%$V%dKy> zE?GW3ihxv+8{-6vh^#3~G`+%}!CbPJYbuOd%B7Bl6((7p<%TE^Gi;fJM z&ZLwiuqB$l zR-~w(OXSX|wmOb%5NW7-yXbw2NZ`v$K`Y_Q0-qacH&&@kJI?5~vS*z_5*v~h? zmjQgGg(o|-%Hq>?)?0w0e#Rm8P+yHcFQ#1ZOK_FwO*Wc5#q(mwt?EDVvfsjcRBC15 zWgWyApqFe`W+xOnvi5GHLcZPFs^C5NeoGz$nVJr}s2&+5kMDF)rZeR}&~N?ETer^CL}k5^>t#`NQP>PNf)=u3^lKLP zooE6)g4GgX@}|+`0T{DnQhs|lqtEKD6+qHG>Vj^Ek^8F_v>ORK8%w_b?&9)?gUh4t z$vZJ~JY37>TXY>-S1tLgC2vgmw+{i6gf-#i_*efKBfn{S^1JIX;+AxFHiVC(fQT+2M``(5VxraKN}We@VN z?18beo6ohf-#IILy%k~KrIqEV5_f!b&G8HiP+vwFZJBpt&(n~;TGGvoCr8GGliN%l z0?DKggNM45kv@q@?);9jHE@7maXnoIs_0=@AapZ+2;nR|pGXFdR5Y0d7#qC03dfeH z|Bz(dm^Lu_GAC1;f!hO$PSK+bf&+{y4;zSdnhA#S_>Y)L$FRs*)+cxAF+N4-2L8^y z!#s1s6i8v{;cnQwlerD!MXwh{{k)AI`Wp9P#-S0vpfQU2w^9F!^$JN^QPI!HJDmP! zYE{vhvopxXs)6M2hebfis9uWvWe&`x+{6QMG}15=;7DjniyVOwMy_!BaW((ai`izq z9GEs;DG?+NL!=?H^0IqG#ts-?!lo}J9CF)sr4OS;d@)8D)_z~AOWCo}V%f3NO7W=a zC+~5(v*>d)Q&f}333MYcTu)Lx&oI)$h;GG2JWP&<^3O#?TZ3jWY;1piaee%T<8$>V z00NP-5W%6vcrgG9QK??QL-g=|ROvr4fcX7nCZpkynVl6|GwI`Ica<#zM2G#IEve2b zFyzRhyUihL{5_}Yn*Bs(tBH}oF=L3iON;@y9t7s1MM65oq2TO#LaT^K>b2%Jx&hSd zo7?OMum&jkBn(99AJoG1N0t2^%|5Ie4&!22TnZO6VXaBUU=LGuObsgV$WwQI!VxUW z{v$0No=Af|t_a^h1OWhRU6w8V8dqY%|6*B$e^8~?`5YlzCVR_&Z7CE>6DFKz-;4UJ^Y`C)CI9tdY8)G$f9Z=0$Hho^&ZX_Xv3- zNgWnS2n!t9vtW*ddAiGo(nwVy)g+_L8cx~Ls?b?Y_<*OB>3k-FDMpsKu)JAW@->D< z4Eq&QJ5o?#^iwu+m!-lJcSQ2fo%E8%)+H(!Z-f9+Z?O>9*ls4n>(LfM5p7aV_m}wX zXgG^t!aq~M#%OND-U|x!MlmA#WvB#{0EltWd_`jxR=eT2Z&nK|M)v%hG0`?}72GRB zi$N2XaqLQ8ESD}Q8O3rsk;y9;6jjogcmy7)mPA+OZsk*Bd$Cyvu?Y4D_4Dw#W~re0oix-#C&V4*Y0m0HJHB;3D0 zxOz`hh@QxvMxhyDh$eAhKvIDM4Jd)kBvIqL_ExJ?ZZ)2V4k!d2{M5H&ff$CS#e=t3 z7pEVukD)7u{=uoWmr;x50KB5VN9P!fT8aD*Ho?A5JZJlUP-JVVsQn=lA|=uuDw$Zp z1Q;dv8w|G`a%AQ?CVr&^cND$oJ_^UN3alyGlvmTRR;5*LDcC9^;Q&OWC3%@=yq9&1 zqX-?mAAH|^dvbkcY-nmq<>>akAg$KbglaowUmc^Bubu`K-d1T*Rm$5W z-Kr?sIGFQQ0?F=WoaPYZLZFp;t@ei?Ud-rjnqQmR(8&V_QVj;g->Zr!K8}gI4T<|j zU(kJNh!L!X3x1zfKGE!RhyomjwvOY)y@wMiOoSZYfrRr(^jkqBg2#mv2IvP2k`U&YZ7Is})yl1!r*S5@oqK84Q>RFEb*rbA)HX0JlHr zIvfXJ-vh8QHlnW3apv1WLFmDY)+tA??wMqlWJz{>gxV+Wj=r7mnDVK`7@(Uzj&OPL z18o>Sc3{Rx{Z=e`(oyKFF-AWaQC>8cA^kMcC%F{YHBx{1~ujCDMeFt4Z)}k zW;B=$`;jCj8JV;kyz;n@uO1Wg^+sTswgV*z7Ftt+dCs||URem#m=kzfg;bV-D_b^> zNe=9g4Ns<+sZCz@?Y6n@`F!4`P5lbfq^uegSe;%=q7H@mhc%5v#@annF{HGev2`N9ZSBna|QMaW5!lqI-%gzl2W ztD=qo1DOP2k1|rF#-ayV;{4_+`lk#)f1_Oqbf!aPrHK3zNMFXglvdeFV9|0@e~9r3 zP&z2c>U(=l)WCY{|0!q!eYI?MsrnMUmhhaI;pRx97?=E<0(N7i?!vY;)~wd+Y@Tg8 zQg-VoN4M^LL;olX^4#XAIQ8~cLk3rV_6{#mz9o7rCwMU!oQxgKW1_^x!c{kFg`8_K zRa#cWP$i}ikWoT(!is>Z1Wt?bupUKfql9~zIwI8Rnn|h(lQ||lW>u9$6E}vp%-P8~ z?=4msyt2^^2?AGvLWi*sR40rTVUT{ZdAT^DvVSe)>##4{tc71^5J|8`M{KTPu#3d# z%HE~!+d}4(Pto`)nJ&oKuzol>JvzMj@b~V;UymBB8y^wkCQOp{{8w`##X7f>Xd(_-l=WM47X2X5gxqawTVRQFur2=e@f}E zDLx(2^^Dl@)9Aqnk}Rjq-j($1_-HE9)ny+&^r9KVGLa_HCtD(vfK5q?##izn4l{26 zXW(wxwhUt5*Dq1CF_}Kf_dK~AbFkiN)KYEM)?xp!(I|8oUC;s#>*DnnV`Y_$0JU^U zEM%3E=WS5Zuibf4p1M|IUpX2hU-%mlRtWD241_wV35+jtWwe@%yrw5&<1(z75})P0 zE6u`rP-xUBnyv=#Cl6>;N{UbbKN@K>jQPVPs8L$;zQB(%ckYE_1#FH<_IT8smG$82 z;Jm677v}MAg=tswWt@Pgg2`%;BdB|hBb=_goT?Tvby3d`UD|Qe88xxjtB+2U^Q3)Y 
z3a;|0sX`e?y58Cr1mz0>s@mW@&+F5aYabv-IljTPaoDSU#M$@9=f{^Php=Rh<&vQz z6J00b-J$fxW7RfvWE55AcWA{CSxGQR7p`yaG5=l`M$BW5$@K`Y506K^zWS}i+x*7I3kiI)ckP@+t2{yh{e4;=V;rcYx374+yj*42ilHN({MKY z62*bMAqoRHh3Ow6`Pxm0w)((2{`R-`&F*Y=|KUus3md4^PAtM0`n@P*=sQVbL;Zn~Tn%YR!P~QD!~9gn8@&IvKI3c^t@@(BFdID1 zgl5>Zfisk5-X03oMyJza`rR}&{&_*j<-mW)z5+vQ!dT^>I~o(HT)X--oK8`(wa2Fs zy+DRznSkX?w=CRzxU*Yr?(DXV{IYs)Rcn0kK)bkcmo$TM;lZ>)%XfpiRFCZ5qFy?e z?j*Hhs*oU5T1VDYN}CR#;=)TzAo$Y0PqmNS;VngB5W5xWgLkZkf6(rZx6wG7X?l-C zz9pxd`3GT-3EvLMViR+fpy|NqA$*Lr$7CECd={geka43~o2*#P4i5*YQ2ya=Vw-Ck zEHQCY$`O`$Xt)XEk$a-<7Q1_@JkiA*tA zG^jv>PDCNnwBb`K5XM%O#Dui5*wemArImnr5Ly)LiNm>H9*YRl6JtebTXk8o?&3oU1{jMT%1GtGSKh5|Ul{j*s$D zON>653oG$a(C?Euu_cc#<;Ldi-;Q0^9b^^hC)qJsWlQm18npRjd5fWU{br$$N z*ACC(pJ*sawi;=<Ln~nluYT!O_9?o%^kJwPr&~|rLZ0DGjf5~-Rh<3>*}xYao4e5mxZiA`l4vsgZP#qrQAu8#A?w2K07Rm;9ry~jHfZ(u&Xwm;IUgyj92v>vy5nLiAR zRSg%$;$Fvm*;D3T3NTN7|4@JDX=u05$_3_OvsrF+o?{}6Q-@?9QU9$%V8OvB`oVKZ z<)XT<_L7+YG?QE*uTm$>;}`ef%V3@00?8s7t36S+H=BCi(h1}Qs(c}@PfrQ|VN%9b zvI#Vf{&_VA8N4d5@zG>pHYe*EGKW}t$Vj)Nuo`ytWcd3CGBcU6_Rr@H^k^pHf}j6xa1OW0Y5I8SgBTQNv|n2bTxPxOZ=cRLQ8sO6%nYll1jr_29sV=T+_P79I@+b zjZ2#4x5E&|Gn_w4TpR7I1P8>X1hcU%ZSv7e9*dofSrSGWgnVls7sN8^W8{oN5w#2y zsR}P|@DhitV=0;zC)|kzDAON%1AWOJ@HXf(MP)Rw9v4T$Tc2$j#cr#bupC)J zF z%a-0!WCwW{&b-874pxCt!roT13}?Smquy*=@#5Az57?71I^_IJ*}Y1)=}STVw*fmD zJP~Zg3-+6A`xV_-UTMra>SRe`RzZ})?3xJ)%=(=7tVNQS^(G;8B@GSIB(93J>XJf` z`nr}7Wc4Fm2ujyOuO$Ro;C>S!D2eZ<3qjsdJiic>XlAJpRPhWqhY*xLD;^=p&|w>g zFm-{{1nR{BzrmZ2VSIcmt~Vhsn=bSsZg(N zc2sC}jSBMM-{%Tlb_?xthIPWWr801o4VK*SMBv4urMlq#Oa*fk8M}F(!t4&HwrbS2 zjld&%W4d`{<2ckO`2sB(Ta+~$yaEkvgSe0OdK2<%7#qcp>?%!^>zJ{U)0%n+C`Xz+ z)S;>k1HjNU|69p%ZJjojgsMq>1V+7nlunt1t{d>L{!SM6dHjn3B|cHCE{u3b_7Bqe z)fUAvc1~N^;h0A;iRXPa3^lFQJ9Zqz zb?{|g^;z`{jdg})3q#`U!7$j>_=KOOQsV4sg!ZAKlVk(6VM=}+{9aEYx-@Y8&S@6stmbti>g9=OSrc8VK)dGMb-X|m4H1dwfnxZ}Xy!&`b30P3Q~Jpqs&WEd_9eI6J&klB zo>D$H6C4$=v!kP{Beo7KCeAr|NlA|-4=rVu#&6jK^%SBqo6fcWW%oWG2sxxAiK#MQ zEm|WY1KzLCuIg*1^_9&V42nn*kCGF8cn*;_wQx2PSW&tu-oGwWuKgFn;MbSf@M9- z#VVPU*_CD^9f&HDtUXY52Z_sHQN+oe8QG4@w*%lo7h%k%rK7@tG6r<9l zs3Zh>_{I4=q)7eRlAbZ3FKitEuX-;6Mn`iMJsMua@3=P`PAM=4#ZIi~j>XYGuP3Nw zl`c?HONzace}9`q9vg{{ z=3R09C^GW-XD$WOhe?bmI}5*{pWBM<5}!@9dcR7mXO4HBff4D=EiYnW`@MD&pBIX6 z1UnMqL8m4{lSz+A5Dph0^#7_?_}S^4V#?DCAwI-pk8fOTh-tU18nlnfN$CQj~(X@#ZGc1IiT8gwPT9IYj%{AX8d@6GZ`I^frjB+ zj>yMNE$+^DOuXDIhNJ$K!O`ZFkvG=JX;XL*z)rm>LJ4`$vmv8U{O~EB(iVP?cFQ;9 zi9k2ju-(Bn4eyT6uP^c9_=k(jzg&GdI6Us6#Vs(|0?a6~#Zpi&ZWza!7ghqN?6JM5 zWREIkFHxF5VzU`qX+zfn=sskFI0S|UV9MRm^L-qBQSOk-l-|6hk%o!eGxs=neLU_j zg~4c&PCACCiHNC%;D8|2UByy>oXbScvZ^K#=S)B8__C+MV`7%I-MdqiQv0b>lX7;+ zH6UE&gLP>fuUKtwAO-fYX2pm-dlU-nj$b z7HOa~C9SO*-m8=vo6Rlrg5}ARLcRGfVEw~!6kL70`fz-B-TioecyadO^7!iNy2M=W~h)d+5NJ5d8T-Y<}uF% z=jlc2^e$ip$oquYVj6_eYRCnt4Hbc}S^tfOJ`zeQ)9V2dn+6)B=$e(`L^0R>8 zY@2vwQ?H6HIz*j!SI1qR;SIq+yQlJS3;>K7uk6Dx@#M@q3Kvd?!dQ7bB*Y}Zz_}E+ zpk+YL^hwAEg$_!d?mS6%W6ZG04hh6=ANWo@yIBHR9%mC$WPYHp`}Wx znM1!s8*sbUEH}1s1Mbwz&AQrv*WdA|Lmm-Lh;t>AbEFGw^xk_$&C*+OHFssJUtn68(^u5&v0V%4x*<^mtWk?HHyxRN2FO=kSEHr2(3tD>(Xk`~o>#M5DqqU;S|%Tsp>H^UM7PE5ukJq&zRa|kCMJreUiR0Yx&3>sj=(c_~5 zl`Uj#WKn_(%vfj+@3oCl)K%s<2c-G|BMOFA{A3^2V zJXZGa7K3u|%G!0LU) z<=M*5DWjRtlH1+$ZHD-O=uz6^g(>BtcAa&}TXp!O_FQt2!l7l@LJO z55A}9V=^{B-i~2Z@1u}Jx@oB-i2B#S7W&*ms-H?b zM8G+4tGN^CotD1u<-AP{B$TO%q16$2$_QP|GSgzYnZwCRJo0)Y9Y0ip;XyRO*dK(!QaIVb`8sRVD zTub%^8T-@7$ZXCK`ijgV&VP#8qD8a;I7&Xuh7@ph7CsmUP)tLB1{Bh>d?UCTVoWlO zqZ*De{59S9^ZZarWeLER;ihOBPe8mJje?sHib0c0nlf@$7gQ2PmSJH?S2T~4$rUDJ zEf#}URu*bgMlEXpo2S7sokJ|e6%JM4)2I*>P>Dp+SxzgDz5Sp+u@J{7dFj@y|Dt{1<{wVHy=IfOvu-m 
z1{tc)XG6{+H^d9}{%{syxgZ>2a)`%Zuow-7BeZY5!HjZr8pSY$)>Ia8(m7gb5Z+8a zM{aUs`*i8)pOe2u95A)D_hy}ga&2$5%bN|`xReZ-{hTw_Hl_5pYq}i!r)2vUWH{839gk7KKkB#f^<-Gj{9a38^X|2h%B> zcQF4SFOq&V8s4KyfTEBr<^&5C8Q?*ZoTLK7=Wv6tiN?`Lw6+r=^yD_2;knUmLL?x( zr0I=m)Z@kd4YZ9ktI!h;usbI9$QK-1Fb39-QWFm@pD(6ID-_Iz9K2y0Vd7W47RB^F zYl}NCv=^ishE;DV&eZKEfEwy(X9Oa0t5M?Ru^TWb5aHBjVkH5kX|x4@7ybR)YFZZm2U(=q{U#!Wr;v7@33x>sPMb#B#4Df=Y>6c(;vV;^hp=*Z1aKwu>=i z;C8n&;w#bKD*82>FYNixf97R1xpDKjL9YD>`tttb(`J=&X-j#i?CD}pMo+6&~Aa7fsG&U z3bFgY+H!HmUNYB@OrydUo;(5q#MF4TxK|sn+*)FI$4(Nc0>fXDd?~>Tz&ITWbt)zk%z3?BuY=0ZM#-QL4o}t`z|n+s+!0?8p!;~xcVNreoQGhIj;<6p%~w)Q=^fTqE4{o zb}@@S591-Hw(8x5<6Dep2;5~fxrK>_0>D`?6$4G>Fvie3+TCT1A@iWs4vHXB@mx(w zY+?dX6NoYP<4?osB}LaFYJ7`fs5m4R3nTJFSrMS=s4QcQFzI)icyNWekT(|JhW43p zB=BMPw+gZqVA%5^7ChC$hgeS;`33y3aFtC|v>yEyMfii1OJ3%{@j|2plF)4?)HgR7wC-l2+ae7<*TrGdWD{jk?al8xRYxc z#vDpPESRk=lB?>iTDg;!#pRh6%#z1T0&WP@yui38$*mP1u_?K%36`geCM%1S*pwNu zNC1?6XGVg`J?h3xs?TH&c_MF%PWo3$jxgH$-r(G7QW9k9Y1`YW{yJ$n6_(GyT9uYujhob*QUoW1 zlqN`>6tWT7a$kf_DI461@;s_+-Y%;)`|He*Hb^|hSo0|z(KDX>o}?BUojs`^v<5!v ziNbc7Ua43Dmi#4GG zv>X8vO(EC%j!-^~j}dOcGb_ae%aV`9cc31tPT|D|z8JM#okKf8^j0*~?qc{`D~drvoA zQ9-IyU#=%|3p@S-%NGccLR%c6wlD(_KDeE~;(1zPT)YOxmq5(H;>^Zv*2GB`-%Vx` z`Wc{&{O(r3J(LCn1;8g9wN0g-QWPfL4zN%_W^Du z4*&kWiFi|z0$4UTM8$UkU@DhVcLxbdU12n`6{3g9VE%v#FZ1x_8v;s5S|uePUITyu0T(q6z`UMRxMqup36%UQ$cYiC$!7$qgGjY?2Y zro+`O@Lzo?I8>2@Q*FXMO^|ygw2{76Q!1s;q+4O6V-pz3gz=Hm{3UvMl(ZFyeUnH< zzlw?Nv`JCg*xV8>SL@EKUm3JP0!BsXv6aV73} zUUS{Elk>BK>%;dH22b4kb69nwf!k;t;c`tGMmf!tYuO63O}fM>d#N%^TEK6!T(D`tBG@%*;%{6qZ1N&J3lkRvGCm~QWnEpvfPv_t3&Da$2xrhm zbakDmWe?F}YF|Bh;@N~i{%2X$)=i7r?I!Dvo9(8R09;YFH1-JZM`cK}(MJr3l zi_)`#ma}0w(wbRgb@BN<#^t!-1iK=0te_tIr6{FkF0o&4w>mr}uwQR`x%v}FrQ>J* zfQ}$+M#T0=cXYQ-{upMfDeHw zka${e-80|@dx5g`tOtKe-xm}_^qee^w)Hy-g{Qo6SwwW;!lKJO>6T%x6H0*@!Q~B!Q+yl%cE~%to|5%8p}JtZ5X^ zizw|eh`YsLa37AX@Yfu~AI?PZ@hqB$Afts2*I^vGy|bNg^ZTzR+dvv zKJ82vv8)9ll|J%~C(;XQ&kF}T1mk!&3U7^F-wf115M>Ti`cQmuLeja#ITfO= zSW?}R9DcJ|rgOx0E88$~T^dV<*Q5QBV$(ub>_B_&aIABNsw1z1F>2M--_PD&oc{IT z^dozj*>S)mXV^~+C8j`iGpRiYg&{t8{bs z;g>54it$&J&;yRw<&0ePQq@8B!U_Gb@b$sl)8o^N@4H9G?+!kmUW*QTN`uS{Yf`8G z!`i&piY=E9G`RFhQ3r3%bacC^&C!(Nx$lm zHW;E=O2XnoVcMD0|9z)`|gPp#SUX)WyMXPu9C-b5S6Xv>r4+0yDX> z7&~%f&)6--p{JL0NTQ~}6~v`1lPT`5907*%3twBSUOu)xU-TfOcwk>-cJo7}3Br#l zkvQ|sQIc-cdvj-Wg~nl$gufFuS6vK_qF7oviS?ISqL`wO$QoBF?RX)j(uP%9-Moaj zZ^ZH-Abmp>Gf$aMgQb3#;An|7m01I1dFy);jSR9S&(yodz06T*5T4ddX zlcb{GfBj3nt2fZRT8b@R4B(R#N*IY1mXV{`OJD!+F~e3$C5zJ6s2JJeMKKLZy1YaE zNU4=_QH0M|`_5>}e=8|REDPsu>JH${5$>n#|P6BAztV&O0l0u6V zDmG89jWH871!^i_6ZnC*Nqj(!@It(DIA* zrmnIS6Mfp`*F~qSV&SgW-TtQB9-6gI$M1qUbHD!GEbcqFw=H<|o|YKW6HCB zi{L=6#)Vg->l8&iTa3plgg{HZOi>qEh4JK`UM%mk_lIN`-*f-UPPG^svN>vSsbAST zZiJrXE15r9mImq>Tn1%*(#L>5r2+=i!{&9QgPByyYm&01C z6rJm*P?5@Y?zX*6i;m`Cx_6U35k>nlGO-!?AAN;&Hkst6BVAn}$^@o;l%drQ(^hXsbp}tYkn0r&FM- zomC`^D}H%U%`YVd@l!5;@Uj??f_Vw7!K%-lFyH z-cevLvG><4?tZkz>Zsmk6$VoHY_!QZFwvzDf3w=&vRo`x|8NKuRnf{=r-v5U~Y8NX(=A1ckR0W4HZuDCWdy6EznanCm3Zs^!AOPqA8EtAWfL_=>P7S5@)iT`MWFIX)#DfG{swX}GDo z#Dct0)LqPS2T4KKk9<-IS%vx1+62lzQd3$@tTfha?{)k-CV}5%SCz34dQx)KC4}i& zDj;(FNl2+uOFUenLYdDe^D-PbL~hrN1E~P}Uije)x*B0(sxBXIE7#k~ho?;E;o>z9 zmx9}#pZIc5-jzXujKymjWma;X`!=1JwVk*N)hAqy?8~5S?k}AtsyNE6>1Mp5qb&K660a1qZEiebd**080YQMk;8{xBKzkvE z=;W*mvov%S%$R+IodM|0uu7a;QU<+uI#pFY=d2z0!HRmJ;R>pkfv~bOZJD<}=^WX$o+%mLL z)B-2)oB|9unYr+2)H?n?gk5x6P_i`3seRpPUqNmt`_Yy)3#CWt%UyTzdQd|anuy%9iEHiTu9O*>7|f*RXHjk#|^uAJcMVy!LnjYW;@!=aHV6D zWCeLmES!-r-RBCBZa(yk&DZ=J_uezP@2-gD2F8hH11tn&^|2KKn-c=F1x6W-h*5L_ 
zr7_BCv#XI&IwA-{US9Sd@>S*34{Kr&)chL>x-yE}VUq086@pZJVE+X;WXw5a?BVXpmj`ShDhAWaT4Bv4cl3&RMzjs<*NDv;HBY zFh9tYPWGiP&Nn}5EV1w^?pXf8g)lsez~8!*EYfZffPJ zk5MhcitFgjwX3^*9c%>1%k@RpuBB!ZUQ@7S%5!F2M^9t6pd^@f5RDl*-vO|%>d=V#^v{%a(d3-nIg=aV!xHAJ)qbvw+I!2}MjH+6>m(gs;0m&R*8eg_o|P`obj#Mg>vR{;KAa^s#PpkoL)7 zq&KI_fQ*=cMDyM)#=x230Ex@5wBVvhCF90F?V{7v&7oJRL`LL219lb;;UD*Cu#aD& zU-kHPY9i^ltfAyZleCl$K9!Ije!vbE5l2c16HgxLJE$7<=8f}}U}W;W9L>ei?b1w+ zo{3NGYrV#PmgPW#A5}|Rfp9DqG(K)+rC_m#EiRK);Mr?rEZhFKT_BQ4iZ2k^L?aJp z^6?1kK~y|Zq2PNkyH@u-#p)(XtMj>PuVs91HfwuOg1XY&YuAMT>~`7Mh+kG&%h?$) z`VtD5r%#K?Z^rpxhV}o%`{kb7X;k7_^cYdy91de8D=r2@T@1cTTUIz3us@~f)#I;5 zVCldJEUQn{$;{Pd$(zhqpJrh{1V19aKnwbYFTq~QfgO{;tdkuZdF@I=fb)*Mnw1t! zhoUcM7ctBw2e%muhTUb;fCu)N`Q<>5~+(hQ zbz5Isc}l9wCfUtX8h;Z$AdBS|l`{&qRV^E%nLLnj$^BE5ctIbBmEhJT3U*uNJE;n& zNAo@K`Y_XMa6$ix*JglNPi}Tn4uCnV?j3RYq^$^^e$V-Jh7^;m&ntT^>oaRUDwY6r zCso>*C-N454tAR5QtXvE3yBWRs3PPqkFn!JTKW)OTZ5(Ft^18sIfwO@K38E=_Rv}I z?D5)Jx+eSZ!ex~#w+{Tp%W{Fey+#$hW>i+hW1P%7dZDBY{4;O7NHnNSH5OkpJsF@+ z7GpMyzWPLV#Dl%5rc|j+rq@l-#u{8XXjcer)aSN{wxQ^JZZ>!}7S=2teF=F@mizcN zc)r2GaP0{kq#5*I@Z;O_Prd8USHFt7Q}9wwgnGorrlL|+iIZUPzu9oY!4032Bk%71 z=Jp)Hp5C0DmpG_0mQJexm_rm!zj!tFsOkMGWeFD$y#2*p3WN~5AU#w4^7YC`W1Lps z;vKE3DpkWdidD-7R@aTt{dvS4pdoPsR+JL6!%M$Os8XIyf2sY7N{YxhvkmeSLs{e( zcIq_wSzny|Hy=K{Ke;>a-QVc+1916Mpr=R`Fw8kX7&2^Bn*-T-dJb&q55q7sylrP@ zG)?(e#2Z@-LLFF8D2gncOwN@!0wZPOvZDQ!t%|uuy}! z_Ya!AqXo9Pn!INk5nk z*k8{PYd4Vv_DJ}1=H}}zA>k=Oz17up90gw=s%qRf!=etat+o8CX~ovR;GFiWvYGHL zZLjV(VN2`QJASh%^Ac@Q5K`I)`l(l&j3EU3vA^_}tqQ&iMm?ITO4GcLQ=4oiO*!O5 zoo;1GN#9Gq?8}#{+z)3Tp?yujpn)S@HS2rfpLyV1cI;2%Y}Dh5K-0!pb@7Djd~k+B zTO#V>YuE?9pE6A_2ZV`kIyyv5xZMcNMs&Q_XXPv$?i8| zFdab&I8!3+&_RJig1r(SaD{?t3qgOrz$@liI1FdFY<-3YlO5w|VjLy}Ua1~Z=LqWq ztJnifwl-~y!4!$zDYo^stA8!rXZW7z@C&kACSDNa*u|oA^cb#NtpF|GO21TbDw3eaP3|mRt`JCa-yJ};A8v(X zzn=!vpdZbPd7o23R6y6bk3`f&&rcR9W*kgY)<#NpTqyA!+L|?!^ zFqpB!qjX>N5^zrASyY94S;Ds7cjy25`TY9y{I2)@^5*ofy}M7B7x(uU z*MFhGY`J%YSsVjy8DS)1o2E||i*Xom!5us>`M;QlveuacMYfq7flB?@fYLSBaKgHn z;L?1X+jm)ZKAkb0Edp;T8~h?6Rc;W636unUjJ=2OC1%T_rKa!0V#aWtjqy0jExCd$ zxb{p!q7=V6`FrpBy^hk|u6Huee2L|w_d818L{6K`z3?kc30sG7dy4nT13zYqt160Cv zAhFB-=1%pO`e+={ zC9>u&A#S?MZyQiO6$hIx$eOWG4&+jRN~|0b{0cQ$xI^Pgo>p--ZT{cAG;iVAM%gzQ#fNT_<%TXAdR*S!Ikm>h+svl!b zuh~(WdXA1v!LVLWE~;WzX>L515lO;XLmVs^paT!Muv+zS^r-vO@|7vx-yxyC zS5M~d?PLr;V;=o1vg<+)=^rKMa0n_>0#=7 z@{6{I{3`2HMg!$Hqqk)rRVw^Q-fryq^%fk-yEVPm$re>i!zfmTm_#t9sw2EOMGx$uGas z6M4OAsNe)aAbaHfp&IhaM%%_fr7w4(AXH{DAejq{p{IbFAIeKtDTgSp*1IFG<<{A< zl^&OpC5$$1wJlrK&4a}>w*?{in&-*UWemn>4B=h?oNZ?>Xc(h*!Q=(JB_3&cjG(9k zTLlxZrFG(PkElsuO+u%QvZm6i)%L(i&+Qw(*;MOBaq?u-B8!6&-)j@GKFBi8uqVTK zQufz2jU&cAq!<0lq)0epdy9?#=EhCc`ovSQ6&EVaZInpi&Lkvw2WT`67?$BUn!_ZDiDxo0yK(@luYHc zFQXDa5mh-)19P-;VPQhbL_pF;Cc!j8*&&#jMrvI{u^Aq=FYQoP`&D#o1(hEuJ;srF z!PJp;8W?s;y zh~(`w>i!<6dEL6-{eD{B8b2w)yDlJ*!GhfDLrL?pPg&tpVmWP?GH)f+H3WGns0Xk8 zbuM8{squTi>}$(7)41ir=SzbB9O#I>&e*lS1ELbPdq3lMX2woN!N&SNrLCu_xzISj z@1x&ntF~W*U%o5HYwYV)kKDqQtRt0-w>#FccE@2nfFFGmBmamEH&GMPDjOlcie@VV zv@Hq>gI_58?MuiGE?>ggBY4#>X2SJPv+$$0UJ~Qs6m6vtTMhukVULe`N7kPKnuzkc zmCgF*bl6#jgR^fwWpC35+hxbufjszTA8d8yLNIfy>4UkqUTgWCrL1h*8uR7dGArYf z*2^4ZP5nT5Hx)1rYROF8#zAR{w5x6?2bkiAeGn={>~N~klvw?469mw!kSVJu_6 z_9n3qX{j9_zFwYS5vo+~^5PXe#yVjzzv3aVP*#hc^w9WIPEXth@OAM0r?CH<6)+`C zzeo%Ja_ zm}|Z7HT`{H!DhqX&tSoF77dU zMbAdEc)F1MTnh$YTJ-NFbwz`;>wSRRWG>+$o|gT>i1ZYX1`-OW%Md+{KCWOL1+Rt2 zV6HX-P2mnET2kFraIO@_S$>+0dp^!IAz}l{jT^u(Ou>jOw%6Gs{ixCCm~k~`dM%yh z=HOM$lTZSv-a|Zi$>zTu{C`@;$45k__dtLWXPNSW_j5@5b@+A4HgP!B$mB8eIQGVi z{uA4uEubD+)PelEDc#(iIf0~^%}(S_PMi^99xEBZ7T%^bEuu0;WHUVv#|(egJT%& 
z-;|8HWTlQ3ip}0$7ZsaEv+HM7k=$s(^Pg`&Wc_Jp0)$g_T1^r-ltI8*Y8-gk-iAp$ zqhO-gtsSTg*sKSfH4-Vhe-X`~?*;RDIG)Y{7D*LhHJ!wsxpVIl8N1C6k#WCckWs}W z#_a4d9L3M4lw<8;c+G@)I3U8QTN1d_#EjM;(@vB}1<$?VQ3x>XLq0lOM3d=aF5kj( zS&T!rn_&A7Ms5(w?ja+MD09~gBPJlED4amr5APqEQ27TC0Cyh!sDydtv9PD;2KINn z@njoX4DW*x+qniWw0@6VUUaEPBJ3KTI&r-lsDY}815+ZZJW<8WyShRrwdb;eCxrgc|b zt-W_U|BD63yRjUo7^(%s)DX7ALOZYryDqnaN1EDPw0E@mW@ zR5G+xN`ev*TQ#;bH|vaw?N-fiFH^=(nM95zi%|0!a8pyb4CeuHv|Vy}ysp>XlJg_f z?adqPbn!5X`e|$nU$t)LO;>0HLV?S^J5^-ojmU>XbE*))Olx311C)>yOAiWoje%`NJaDL;AS4L!FrweCb8}RwQzPUd?@U9?93T8@qo(0qV`6;7l%$vzor*O@ATh4BO zq|0Drjj9NJ;b@auZcI*S_adsYnHs)buR7hXH-Hq6SbZc5gkU^l5NxkQQ3SbU5^(?} z;RIP+(jrS7l{oA18-iB^6s&%y^uE*=BF?OP!uB*SPK?V4pTa5X-n)0Af9_{=89!RZ zNEnEc3o~|flJrm&M>0WX%e|D*mZmnJ$qF!2?qbQB&Gcd^#4irU&Eywofs9!xxS;9e zmy2ZPr&}lMa)HXNtimjCsCv_d=B)?7zf;kePN*Xw?x4Z7ie({FY%bVGM%Sq6u~ zKSUR(vA5$ruig@7tT&|{2d4x*^Rvr%Ie(U>;3@9xNDf(yOlT;(WHh@9=TGsV7kY#F zXk1>wZ*K{xRa^iO6o!v|h(@DNiy0VJ7@YZVY9|NEx-a!qG==^l{T_u_0RoO5f@mVx zY9h0mvjJQV#=t@ps>LR$dORA8`Q{oGe>7 zpfJB@x?E?B!1;ly*YFrRc<>zliJeg&LO6a>8EA3Hwm_`|M-iH|nsW%}6Y7i@F4rV{ zPy0jJJ+Rt@l+?uEA3t9!Uqcb~`UjzWK1mico{9#YxN&VllHz^|_?wH>Ee$%CJ+0I^Z#3 zz~q*c672WhFrMLusVt&koQKdBjk9qz4`ETvFgqC5D}{CPI(dp`uxQvpB;~5M!&6{G z+4wo#HZxXbT#Go|eY|0GIXyZ3c;34|`AhFT#3{u?4{`*h03oJ~gOVo-=mpP+7l_<} zxaLQQN`8`>7s{YHzX`^LyqkjeGq9Y7JMi2B1Am^C1#&)HOhDDJ)_DpTSUW)iVnQZP za9VM&JV#8;l8KcVvb%Vi{UW(D&o0Sqg^8o*A)KR5YnDJ zusP$FTh+hgmcDwkOW{_XX5DXXSDX8$+|M%toibF?1K`Np_!<41Kj7j!`f&iqC@AnK zWI*a0J;{HejgimK7R0pe#kgEd8*)o$q!lIeH%*n`UsH0^(i_#GeTI6AK8*eFI-cM$ z>~R6v1b5!5G}x$ZF|0zIA1c`;2#-yFM!?=r?Cf#ID-NwlwOJE|w%q_SS%U*p z7$h<1@h@BzjcDoN?w7cQrCNe8MV5{r{Ve3&*L*Qzj|?f0)@{0jd07}F9>(~tQ8+)Xu^ToJ2M4~`VMW=aeT z*HV^^wk8}T#I=M&MMZLop}#xwKAzlt?A@N9oKcZ!J!^#~F0KTo!XX6?b~y}^`GJLoi2{xUyGPT4y}t84_VMVi$(aYX zN=3AQ!eNkw#96#xKvzN~zey%SHcF2(Hl;5IT-Vua;ni@b+swQgUQ^Q%mEQEFC^quU zodrx3!gLWV=CR9NF1vBc?y=l+j_Ke?O`%&G*A^KfuI%lI*nrRV&Qf=|wYqRzfr}sp zh^B`o0xpIl>YZdZo~eCRFi+Kz3y!9SHt`uleHfrB$f^OJ!gyXKaK0S{zrWyCCS3Jf z&BmVJVH`#-S=k)+|FTo0;&Xgm)?Hj1t*XrBlpTwbnh38Xs3yElKvfja zq)^>!V$I)Hx7&hWwlTlb85d6??6did{lY~27=!G@ISnh*nDZj{-9dlen+4DOm8Jsf zS9tEF1}k8cq*sKmxG1F=Z4atcp41q@tqr_mFVC9|9B^6cCRZnaxj5~eU7X%u++3gB z{+nT6T1Fq#YHYR0U=XNS$~}5@0~x6MFdtdLNrmMY{H#OnRokXCQq9Xe<&T*|G04?> zCgIb_HbZMup#2jOPGATNb~p^^`T>F2xi-YKwr@6*Zc$<TbVvGek*%)HGjf8{@g46WzOmrcI)}87uY_L zb6XSyR%z`wy9J`b)aivr@-R<* zOiZ4Bj>-hr2(o=)5g{IR`?K2AGgS!VGVK)HQL;c-4G)%yp#MW=%aShHq;*e47P%Z- z&=Np}XCm)hKpOROh0PIW#`htD0LHh^7jzq}e?%zpZ;q6n2U- z-~4>7Z{tLh>9{wEPv-NW|D<2P{_$T08_%E(5K{IR%~huc3%(JWO9~gBoA4(N?ehF@ z=a;>+^A9IXuh1gVuCe%r6N^Ox&-E2Z#SHfWeV zXYl5MP1C&huoyyR(}DC6EwbFUxyti=J+l`%_xiN7TF2az-)H80q(_n|3aSOs+w zrHuwAe3af5yy{)*$MR!T6F%m$4VsFol}v$CDVlN)i46M-Dqat(0~?(o|X1|!cQtoiafWhb##5h z&*Xh&P4neL?U2ehI`KxGKr_Hjv+V|1<^lJXlsZ^ye6|>kB($Q3)ur%Kp165k7@)U# zaD|`L;Z@$N!3nimx4+lunsJcbuXS+uX*CsS)=GQISRj zB!BI221xt13HJ=bxL}it)+BuhALuYKjR(9kb?(VVh$}?r*@rmm zz_vY_7dw!pNUvd^4!1M+D{I_k;;M`BbQF#mfiaI7_Htd=7jz0Re_$^rxz1`fFl!dGYHm&}DXoqhRk^(*nG78euHtWS9wEam(R|A^$qE1;oSsibLh zeYzTc$Scv-CPIq7-z4{%%uDRndkiPx3{Js3&R8ai5rPbT=^&rjj@fdDZN4B5gULL> zl8bn;N$lxE6(WpR1fOxpKE?pB(
2g3IXfT;j3Y$JXld6GlLVqo8L@q)d_Q=tW-W zOP=Ft#Dtp{Pab1gDSp{4szsFa!dyi+!q)L(P7Ji;v0oW-tAh|MxI?2`9RL$}XAwqT zehGypF*xT31ft>{yh+mS*3dgLE_yunVdGe6P( z7^)B??oHkZ@KH^wIZgBy)1qHyenVw#^gzHBNucb2nwOp;u+SkiLg##ivwDmnX{ez# zm}1%m1$D2%OI~beXViP<+aDm9iy{9`L(wCuRkbhmHTTXG&^n7L=?A2wm1{B%@-AN5 znz^eqCV6LUUpjbA{G{L;f`xZbDLeul6*OJRRfo1Oc!G2fdm`KkTRcBOMaPmZa)4iG zj>#Xsy|-Umdh%RD`T6HRJqI^SohNHWgA}~EmB}|DXNAbENgm{szw(}^uQUQP>F3QG zPiJkWMDt5{XI5mWL<*7PeWe?UyuMRLZ*PFw5ymvK*L>fhwP$2`5I@VWz~G#UB+HM- z$!aUkr>m-*^aF`6VHd2zFCwn38cl6;Dg0!6AHWsdfhV#K8vBfqa!~&%mKYl#D~m7j zX{Uj*s9W3n9(O??EuozPB90JG^jQ9T4=`a*1OpS%{Sy1h8|@4esM@2`muDp+MyA6ssE%>#PWE= z)Qss{;z~+V#_TVfR>uVqOsl$Q;c>yhMSOC#M8B!nXpM9{X_3f?9^}lZ6GWa)<-jiE zu1f}MA0ko=mpL?n+~vU+l@wh!FgSiPCpz`@w^gPApJmQLgjsPgusN5|l1Dd2J?~GL zQ8AZLdkg|XXfjJ|Zr~7wDL2@JJ)+SE7WPAk^E^fRQW~ykFo6BvUQxy|CBchg5puoN(= zIuntQTTjJx?-;;2F!xg)6eO2aTJQ8RYuj&33Lum7CE(hMU@zg^zt6e9)m>UiU=8k< zXRG(FQ>p$V6h%h?0jDHHAxA>*=t$&pa1xO7mxB0Hem+O1QjYWB(^6dGXqTs$n_%*w z7(Y&(Q!NDF`jJeXR-eNW6YT4%u~dV$!lMq`|NWC6Ac-e)N@MlNX+qi z^c)LJX}rewm}afx*KwJ3ntnYiC)b9`C#B(q|!$+^OmDjx2%g z*rlWn>txbA0f31``9^UhGPgLP@Udk`3`%!NRb#M*A(mh>E6v;3LYr5H4D*>9I_Za& z{La*;Hjr$NQ9GyOJIFXXm~K<7Xb^K^qFmTzN*5E!Es8AzS0_Bi75sRCYAT%urn3l2 z_{Aa!6Rh^Uiqts5N5VRbVd=Y0OA-9wz9}u6+2p5VdSu<}rXa}!eUzbT!q0zM5t>OM zP1t2pNSJA-+8W9vlmh5%mumCPt?`h~21*-blp~{s%*08$S(bqyp2`BwV(;j7(ag2q zk|y`eJ;UV|l5wT1-4o+Q|H*I;CAEw*6kNe$8emL@hnG*{;QQ9lJkfDU*0)rWmFNAX z3oxGl67SsYUbyE|HC5=0^-hSNYj$h6>PwE_u9A!9F#Q83%c_ zx7KAmUY}=NwK=>t*NXg!8R2efe{5|ej(O*7EZt)4VOrUi>Gdw$ z3%ydeJM1rZ#N+SyJ-v1hq}~ZX0R39X6jgdA3CMRxSjhYkM7^FcYVna!cCY~^#HNjB z5W$P&)$6i7fXU!yDpP-uaVk-EEZ821J z4J>}QVM6n{Cha(DW4An93<$VnaV@>P%E|&I8wv}LeivBP9R{p2Y9{mE7+_uK^Ym>EFw|vg&GE?M@Lr&ed5@SL(k=r4i#pz9&BxsZw zVH8DUvpE7__?`umg!Kj{alnyGby9dM!EK!iIkAnz2A`Ndc{3pkDz5*ESEs8B2r;Wd z=5e!uJalLW@lXxJ8dA`5KjK(dl@lU<5mYB~I5e*z8DpB)I1N^a#p znG>wyiRCmbfRn2}0Pa4Nsh`_hsGpCv_=^ARlQ&z+XoP0V$Fv}_rqy}_t_pommc2M)9Nj-0u& zTu6I+w2+#e%$vVe7m}rbKxDaaKA+CwFA6ylWM#&E6Sf#wr-_Qn%A%*8pv|O*WGmwg)J+7 z7)6gXDlGjB<{e~G_R-N%ExVU_JP$?}c2`_4SQ>gG=2y(HrL-mu7#Dqdyf zalz(^iLuZoE&bS8H8mr_t1NrXE`{AKJzv`P3-KL}XeK^9@bDOnr5L6$O5D`JHV0E~ zz{tWBt?AoCHRL<*=mj**Xc)0MW*DRpwj7K!pC;nH=OB?@P3S@ZCGFTI#m-y_hDkjK zvjOa0k9g6SVjMMgLvkFZx=p};N?I@9bDssuyFRCp&S&w6Ek(FdfYtIAu(Uf|YM-?G z1>-@`pD&P9a?IpwCWf8F-fv+zg?)yxE!&XUc!=sK*PhYw%Ldkqi`_u4ola z=A}`kQd>%3!&3Dbw(N)qbW%%?7Ba&V=~OCv@oXTd`4FP+t|d$h2Emj^B-^O!(XJnE zPp-~Co}ZkZ-;!*RrIKw)%eol{xFNwoZtjgjDAfv{W8PC3fU3>-h_TFI(Ko51G2=#C zuPGK`_q9*inY*u9nqsxxcjL{M6G6;3@HgCx6wz)-rE1q}m;ty`+ph^f)@+@smqZZL zcrtrTyv#hD%d+b5zwsqFIp^zx8D=mQG-%z`aI+rdCrAfX@36nX`|I1G)>c~d`C`Vl zW^tt*LKWIoGW*ouSupu4`feA)Hav{{(;rjBhK{iLoqtFWFMy*UDhMz@mR?)!!ria5~M39kFW%p0S4?GWycqoYw(R&7;i|~=QGUb zNX*%hMhEeV_j{yKQ;h@pr>!E=Y8pwJ5 zv|u(1a7`n0VKb8J8QL@6y_kqMeDC@slp46MV_*`|5GsBSzS{DDKrkt#p>VwzKY#=s zdJivirpQzM%DdoXpt~i-9YZf6%g}g9Qd&>#5QiJ4s?-!u2_cPV!4IEki$4 z-RJ_tn%)0!=pS{DO$e@>(`EEKV509M8Px zcrhCAzj#S&3_-x7o+uU#>G{18oB{(UKx#g~Vux@Z!cvEfjY6pR4Il|ukB)n6N|>mI z8sczQF1ygnBw{UHP-huDm@Q2>$-X9}-&4FJIFJFKin@zFo%1!AvL6kXUGCx|@8%&v3GZWb9>&q`S9WH{Jx~&Q1-Em_yQ{~^h6;Nw(Z>DH>z;2a-Xm} z(H4`3H6*W2?oU5v!Nq09Cli1t5Rod69tNod;7cebEhttJOBME>CU{7=Rv9B0o1ejelg)ZjhltfhZD#x<4=hPWpC&kpR_Wb zLuN+zBD7Bq*h==fDIQ<1% z+@62xU4Op%m8AD*R8uZ)E~xK=WKF2Lj2FCiWUfI8v#iPZm^x+OrukBDpk*R_#X1CI zsC*3s1KeuA8peDZ4TkmU&DnXct|{@Vd;YerAs}w@$@LpmOuNf^p)N~M+#JZ1*Rwuy z_5D%|8pSYZu82W16N7B(c7wmLw zJ7e84l_46z-(-FVos(>KgOIk@*)D)pYo6`#gkD(oVaWvfzQd&T{wmclAr z%~CH>afju8d!gK#Zo?_vovT!d{Co$-9;9#e7*2b(9*p;D7x)>z#->odQo1l>#v$!V zxkUM!wBb<|Of$z8#Fc3I;P7j{PbF;pRAY4@ZuGSjMM@R(vXccBj*7ew`!fn?TC-oR4@p9`Of)shfFPf17w%ds 
zwy80$F?BHAz@$FaQ8l~J-Z(o^+o4mNOOMyL7avVJ?aQXy0DZ{5bMuL`1z0+Orh>`h z=f+Czu$6qrH#e~#9C+-=aNzy8@V)Vf{vF}Jux4dXhQ&b=Hth&*iABBc^&m=83B2GZ zPiah3QSZ{7IAb{G(?Y(Bld5eus z9q^B|m0R)LO9_75_k<|}n41mlZddij_JsM{9IvQ%u35*Vh0f9%ilgtCguK3`J`&_zXE`dvLR<%C$g#X`dTG8BFsFFSvX)P7Wi`j6>4C4^QZ~I3A+yGGGPIK zVh#$A$Rm3l7T(+j&3zwSwIO|R6}gq4rY)O>vUg`Tcw{427{33cj~214Jc z*d`@3cZM~TcSvsz*3J$AtZZiFp#dFc$?pwrj^ZJHfoYdfg{;ccsJi}8p7&xf8dPn# zY?>OWjhYM;N=?$5R=tMU9uL!v>RFBIIgM)0Mvex$HA5qjF_?PxV5qPEEUYc8IP1^T(mdZd%U3G z*3NG{nBINZkYBk}p1d~S+;BB2&J-6Hhv?WAJkG+9f-)_ulQQ7v^^<2- zU%5Af7?BW7mAc#Ru3PdjUfjBHhFl8q7rOltX~iI;hw|8afE)gYmtF4$B2+=v{T|L@ zG~z|LcZ8T@U@`R)j8}o3RN3-bGhra~!F~rfcN|6I+8Az(<8cW0=#L2d88AWrQMByg zGt4*A#6ak`J!e`Ie|)({$}yw;@qY{Sk-W0@$68dvv&$%_Z!4&XpmR-Y?; zZuk%6S~ChqmJ<$ipg^F4*BIVHFf}ei)@4A zKnOSk8(3wp;~e3!4Fl`M+l(uHR^W---Tau>7egM~4ce@DZW{R=NU@ zXQMzGt8`}g!z*$fVNB+dYbDKe{Ny^%iO{k_W>zI*@_58T?2zfKn1$9RUkW25*$I! zNlE-qIfH1oIY&qOnE+DL*=pLm3>z-itaJ7*B8K+%_DLK4ey86bJQ+_!hL_Tg@#$$i zjlgsKyI7~-=lgp9Ew@rFNi4hytb(ShLm3?8ptN{43j6ar<4wu<`UA!YbUlF4Vn97S zK5&10%`X;8wq8St5VY5(9A~|q9Vpwrv%{t%Le3)f8!+^-v2^kEqWPcDeV-zQ(Q;4l z@1I0A3`CF(X%>ZFP*sPbGoj2$Fow8nyg?2p$pSKV%z2`oM1;{jKr8}fMzGArlAC%V zzh62NqbNDoO7<#oFHyciRmF3N$s#vTQAW^a?DVUI!ix#iu5v{+_V%2iKy{w*-@yju zz2%lD0O@P!TQ}#4te0nhAS%=02l{*c8dTIFC4mdyN{2z)eM=2n-M&!+hv=pkhF)*_ zu1yB1OJeONGS!?H;>yh*Qp&q)Q#{W@9O>@qfUz8dKGp(U0z5z?31D#m2LmZm*+A@C zK3YNsKp%_*74120_n0)+sf^_!f6Eik@Uy+Vj>2UFxr@P7o-Tr{6)v&nl149HjjkYQ zY5^03_j-=C3tsDsIkgZKfZillD=;nc`4gFl%UoBE1pX>z7vtvCKE%=2bEm5Y2kBv% zNXy)aJ^5jdY|cMEEgx!D`|l&B4AJ5kD5H+VOu2|N!J?){^R^l537?W-Prns`nbtE2 z+bm^pghroKN2!Daq3$xkk<1hLf@nImI9;>_nL2r=x&d_=#Q@1lK|NKm92WtaoJ3o- zv=UclW@{M7nfHE$*S=cp(FB|ps8%M^Od-{0pyCOsr$RXCi*iGpBpz6D0Hb}y#WJO; zG7#b%lfp?!O7wcciV&h$4S82RypWrf4~Iu#-&AN|7$K&<$MAIB>!H6GFPH`b=~ILP z1t3IP0q_YxHw@T&`lH2whayEYo_{AP^K=v4S{riZc3Pd5VFTy67Bn&u;Z_PHT9P?N zX|iJ7DE&jm4%&qtY)NH4Rs;}h=+NjviB~O=3Vx6$mmq^o&G+Dc-ws8loo-_fQ`>i1 z-KO8zU!Qj0tg5?;k#${S*#Px#G#0eqfl+>r9v<&s=ApWN)=U!m24?JE`dyo%JpAa& zqrRJA)Q!HSX<;}I);e)bE~L+K>~by}-GeF7SFjdZ==jU`JjYKl;jJ0+buB>~?@@YV zfuEGrf5?#b^Vthe5UNrqE2(eDEv{>;&fl4x%dsKb!d=B&m>+DTl5Wv55?n)(NPfFF z37@$OjVE2i#KpApgl-ssiU8u-5D)BNF&}FsRlSlH@>YgOI6cm-Y=paHEfYEh>8D|y zT8LXhoO}RQHs=VxUoUPuc4k&yeYR7qmW3tw`UL}Z-v*{C!-%aot9q0pc+6Yki=o)M zppdtW%lEyIEo{iS5qOhWRyicr*2^S;dZ4j1c!Kp#F&}0RlGagr6#U>L?u77J#q{mv z{xt{QT^M@J+J0@<`x#6CVE7-{0O@|S2`jDv8Etqm@NklVJGrPwj#r_YG}+PU;nfko za73SlU!nwt%8o_SKNK@O0xVM|gdT#it z1jWFOd~6mEs(MM*opidEl&-yA+w<#4jhm7*Buq7ad7edReutB!%`vY2e%SmSIj_Q?V17Y^`h5%qtj;EksgU zi|3Ga@F>J6dPf9(tyS;VAwZ@XMr%;XLHgTjt|4{O8j@h&5b}pkUrrSboSRtzbM~8D zwYALVs`X`*m<`Xg8Yh%a%2GPEty#O8CM~j9Jlq@BI4y_U6;S z^?ts&{p;PQlhbpOh>*(_EfttiRo;t%Ar`d=Eb6rLws6M}YGBumj4#F7W=a46@6<*}Hjk80vk*F58bq0_!Rk6{sZy;Y5 zWG!a$H0%e9#PSn@WVkrBG0$fSpBU-9O8rrURjA73ay5&e!62ACp$j>8RL5hpZvB64 zcOyKWu$~oM6(q?5^B#M@C-VU)&0p%ft=%SHk9#e@4yVF)vkSj$v>wqn#Uaipp<#;F zu05LMCK(Ng);s}k0Pgy1?6%|Cc6a@Jx0lU#{hxDoVBcI~nR|RbT27)mvIQHcwbw^Y z0@K~5ojg!PW1Ee_C6l+ao!NuMyW#u+h;2`tJ^qzvt_V{0lv%TahyM3U`8iB2+;g)W zENK2R4wc8H3TMXdex2|4?KZFXxuIgGVP;rwDqg9C=9&kUYdaj2-@U8Dxf$QM81vmb zuW75eSLatfltY*2jTW+?-fK!BPI0lNIfC1(!wgo8%WU1TD6?g9$O-@zqv)Hv^8nyh zO6b$sV#2GyfVD$(6F@|!;7s(mOxj=NT8-wcLaldE*)y_$2hap*KE27@T& zR;jWmpe z@Hi~|1nfjhGV?R&aD#*;H0(6&!fGWMu9-@8Ek{w7J62BpKzXz%IXK_J^4tIn1OZP3 z@DviS)+@e1r|cd&6pDF+;}4woGV3h}Qoau#*#S{AsbGqBj9!2Tcwb5Paa@BHTjG9e zpCY*-C0Dcc{a6CoR0!*qCO6X4_yHw`wlx@gg1)~P^I51_mdIg5Ji;HCKki}Aex+wv zA)=!Dd&!ap;+Gyf)0|HRAFyJGwz~Ct;p&FoQNY@pEJjpx14}LdKhdR`tg|3o{pOx1 zenKCYu)zCDg`7vAcPQ^S>Zm$53>qT-24`3C9QbNaIn&3=fcgd>^V0A+#<`{die?`?Hu?>K|4(>6Z 
zF#3K>Y5ldN2>oEx2iJ$twG5G4*=YJv5{_OZ*pzFI)Qb6@jzhxD$)ykwIUOrRFNfoCF{Iunl!nNnbk1uL)vtg{MckDXK67>qQ?n4 zkd>I*`jK(waZ_z41Jp>7sfD_xc_eKRp>$hwAa;FG~)NuM!x%q#L z;FeIu)}W7Y+H+UoNT7)iQ(lyozvk7a#QO6 z5w8M=XsMNTqU6q#Qzmi4Z=^A51oXvl%#<>XSMcv$!hw;Jeo4QVm3;YcsV^ALvLYP$#%FyX z53tH18h(E7gKlAAdz8W#KC^4$IO52lgoSVvOQmxX(eFt(FTHW~iyIn9ws=@_y)T#7 z!bl7kUDpQDaOhR5k8maZb`(9l1+({C!HoEqlz5U^cb~k66+Zrf@{jG+JNtV?RVy?Pn- zq2ginV!#f2a4|7E@c#1Yvf9|KRlJ*9@BQ6bwOKtK;XMTGJZ~wd&S^Y-nMIFJbFb7d zd;bZjT+o6<1w5O$lEFQ!zl7E|9V(|Lm6hC~A34ZVtgeRqHA)JXD>UA`Z)$2_tt;Zgvsxry$9~RMQ zfSScQ-IjuwO5~Bkdk^muk6)$A;1+UcqEFPZLr+wXC1g}RB1}P~smvMuimN}Y&c}cM zG@mUCsnX7N;?DdY&Q}S*ft&7gadllJG%EuRT!j~eqaQ}W1VI(M$;u^e!`H+FRu~-W z03-fJJSqm0_Qq#*DbA+_xK^C=3(S<;R>u>)XU$?xnHs{wMc-fHuU-?W%e#}H8wl5l-X0cYZ=vU;I& z*+1B%|3efUMj7@EQncJjw!q9#sosz_u2ONuD#gj>cBQyth2r!8GNVx3+C}0jnML9% zKX{+7Ki{37^-k{ZZ^cx{lX@V-y97eo~BIumDn-g8J5O2Gz9S!;JCw^1#N z0(g8kG>kbvh440BAL-`j-p7->kG<2A(~smO_3PmhGy*0L`cD|z!8@GCUNnY<5qilq z3{~mt&z%;2a~F)}dE{TxW5Svza)ii{v#<*4EQF>6kDMC1Y!JQjO~KuHey@E{hCzzz zN)e#qJ%*ETrVK=qx;u&m4_yzNWTa^{x~Qi%_x*ae$YRy@F8%cm9<8tB6Fp$WZR+6( zMvOu$h8)`QMuV~V^H57(eM&XXv5Ms|oKpxi2v{hcZK9#C+hmvTgk0Hw2%my45o94v zMA`IFD05kcxEp(8yOp$w04cpm?1k(Y+@Fhek4gTZmE-`hY6wJ6A)g=Z*!qBeeqxdx zY$4!T^7{)<>B+HX0N@4+yuO$$6A4-sR@nGeR%3L?;t)hzWM2_JZVe9{5ufpcF=mYV zEt+~`$Sq2O)x(f<2uI^n3cRjs8Cc0Y;AP_aaWWUa-a;`zUX`6{Gy>%BAR#%MO|Zpe zZ!paUgke>htv$cdvUyfhsCo%7tu&fmaFqlK0aB6o{w!oX0xh^%tBmq$I19~(NJJsI zh@}_FJRV;$BFW#j!J(2Mjsvx|!a!oW6(x?1Yg- z|HhlCQhw@~Wpa8r0nDzH?E1huhrV@OnPL@zx2r-HAi8>lHTDpO6IHQupcwXZf^sAP z&q*w>2n;O1j9%1-7jszS2^O6fZ(%y}t$by3WDYBkJ|+s*;K|p%YT-ndn#W%1!H?rK{j8BO+$KFVk9Xw{b1+cl>%Q>pZaJ$YY;)V1$}a z6sFfX1-m0<*1+VeU*{+RD8zIGmSQ&po_xAMD!UF2y2Tfbfy?Z@>LcY0A}y)-QuuS& z=G8-~*xS}iRP0j%FfjyhwB97b^?nq&aCU3z}?SahRY?q6MQQy3H zGp|DJt8*(I2)}FZO=8IQi|M{mnPo=>o(G(t3Ptx*)jOqZVYEwvXuzYHWX|x11-*m& zXRC6sKuMO5=Z+6@tgv-JI29d=kXUud;aq8T_PE(@3AJQkgIYd38#zC9OF|iI&$Cvd zru!HGoompLa_Kve)Pk^dhj6kOdw)>t!F)9Cfhyzcf3xGRkQf88xn9P>fC-eRc+i8z z?E*F*PT~Ih^i;a_h$ld20TJi-1KWo`EoMvu3wxmhJB@CW7+ZrvM>EtySFV=Go9k+r z`?Dj{yW>UibI6V;zlF-0&5hpZ=K$)DD2dG(1)dk`g^3ZSUiACu2J#GxlQJU-Z(Wt_ z4SG;_i@QajzttQUv|Bqyrl!xo5$0ueee7G;guZ@@%4LwoS{7VyGq^=aCIdg9Q%17@ z6^O*Rb%wRhcnZTG*}Mmf(OhK59fglU|D{k*^00%aRP>-;$^i64pVmTMpcop@#n6>O`GM7M3*VG^Z$57T8npT3y@ zqWnW8)$b1#Cl$8buWrrb9)p_yIyqAioA$*^t5SrI)2w$L|b9K zwChDW+CXIrL2u6t6jHG!%W%MuyN&oh72dP09tFqi<$+^aPI#xxpdRL}iR=}@)Z4rj zfm7bVOqlI%htz@$dXJ;YUxH{-vWG%P6{dF2!$-JEVZXSRFl!EmZZ;(Rewu+K5JGU63)4h~FZ z2}9>@N$cZdK6;pj#-Z|-YIoX@%BIq3H2j8&8c%{@2-zJjA+<^{N{9(O2;?#AO`CtK zs?zGYk?AIyLJmLlGu(-%l?t1TooI@Q=y9Qm6FJ$?&p-99ZvJ+T07~-F_2;WJnIb?{tU z739fCs>;QgSN9u!6SWfyuvQD0kF25-oWFz5sDbzjriC5m#CIHpx&}`?)4PC1R+Gk( zCXIBH=8`7ObQAscGRD(a5Ak3bsL52}a5@KSCW@RDjZju;<)!Qi;0*vs>3sIgY@;&> zlg5lY;a4aeZt`=+64=?u;LKy=;mR)@Pe(Cax%6jo zVoAWiV?TzZDwF$KF;uY@#Bx+CI;u8~CVq}JKo=fqrY#FUD?JhGfy|0}d?bs<5v@+( z+K5u~zzq?)rH}YV2t@+fb4V2ndB-@)R*PzW6P*5!7uWZ_i|fma>vO}E0ra^aEVjsW z2ePzut`}+P-nIN!5}(vlL@VX{q}BD|Op|iH(LTw-MW1g&$23(2fkp4xJCc8}_4xC} z<=H7D9AO*C?dAFPU+zDO09H61K(ZS;B&!5e{pm~PR!~% zBej?yEnP)SAXD{$sIUvyZRnFkw{)6WnHx<55do2ek7J(O3Hvt-!B9$OQc<0qGjehC zoGDHquj(lS^5fpZJNU7=(D(r->BrWB8*_uXkS^@o*J-CX%;b!SXq<+B8!7hjiOE6E zd?~9U2Ji?m+ZSvN#Iyi{e}TGGLnBa{1sELm1cmAQLG?6h=)BppA7W3y=NmtoYIw!8Dm1_Of2mgOmJs- zjz$AG4Y0lB|6$?|L$@*cn^*79)c7K0i;{X^>KjGBK~xSC;Ilb3)*?&z7#vF+L~^}j z#N)5us|G{MndUgpSzr0>P^Q0&$e_UP(kDr*h;;lAmbmU z>3jxG`iwIrbgxWFIZ7o>E!QUYI?SGkVU#X=4o zJP)~;{cNDBY?MQlcZ!lCuS}_f90+Q*I)0szbFWs1e^;QVvf-*S zR>FEqPryH>Ivv3U(x0Kidr3NLvcEzSe~gex85NbZPM0jkRk%{|DR>$q(I<;>IAhzL ztUgBffI(vGo~X)b%9jGR!29_OK*Ez$1B6RWH{zgDQ?-VU{)^%C7hUK6)z@}BOZza3 
zqLsNpY;|}kKq8P7R%-e%YjRx~c4an=c+IBRT82G2P8WQm$!IBsK&mji@y001Ue(`V zL3ZnOp1JAD297h5wITA@B#)n9WSu&r9HfYeo$~T?_Q~Ev1Z`>+Ai6U{t^(saZ*lZ= z7>0}$tuY?;4?__f3eZPKH6PsEc$Q5&6P9M#q&niVCT2+jFiwbIGvp+#LUyJDBryM0 zpF(4T6{<&Gql_g0aDor`CtS|#VS2yH9+Td5n`F6d$oi&usr^m;O&M0T<)_~8MK}qc z#fp)n-*6qS1=6(?1+6&79sy2tJ1j2``-Z=Q{LQNDD}PuPO0Xp*_-mtHpd6W2cCbJ* zqs$<;xGHRAJZiV?x0#-|*Q#mx*0(qVD+D*eSruIiJYFOOFG9oF3L#yh4!R7Fbp%}r z1O1m^#C9r8x&lu|$z?R3k09;2{W^J%XKYo}E< zk0!$C-NTTZc{J(u=f96erPAk4s|>h+862)0Bt#H#CJf_j2-mM?;rrPl`rkORPjT`~ zr}Zml7pVEh|LXX^#(Je0J%G?e2=w0;fb#Y>fYv}z9fa1#-`nd#Bdl5it2TmN8@D|{ zF2ax?ch-g0KxEotj4h7d`nVBdh!P+2+H~9V6?%J|Xx<+9^SHRy<}>2jQi<`c_r|N1 zWE47a@{U*gT5Ht1C(ZrF>DkHq_h-!yV4*-BZ+CyynH-O`vQN3Ovkel8(ns@zZ0P*G zy~>(JRvF#)ZEKA+8~Pe+`o{98tver0oR5Zp)8LG_KD4G_7x}+UaH}hngg{w=SI{aX zc;!ya1#-#W-s$^xv;LWQqp_FA8}zHv{UA$I^yrbS_D(;2PGw%-rnDY`v@Xa*a{rlG zG$OW=uP2PhkA!Z(%{e`P!XwlsJY62_$T;^}<%Nv|)LKRWscDvJwa6WVLp3OQ7x&L8&w^3r2Zy9;f3 z>^{^`%pLv`4DK>e&t8r6TQMTHRd9OGMsn>HTgGH&Xu!%gmvmT=Ra)S8&W)b7^1 z^>(e*Zgm=ax%{-Z5*UqE$>Fq9;6cQq#Ju0r4Ls!Y_~h>22TT8VQu9@#R^ z>8`^$?v99aD%L&Ghnw5ebMBOe-e^ChaL5|t`)Hofuj5gIhgca)K7!ibk57~@fC7sW znNnpCd&y!tjoG;tVQ^=;PRr-{1u!?Ume&E}M`OxQ0bc41Lr4l5jGlv+#0xOtB|$o7 zYOsoW7Yu-}xQYw}fS&~8%u9qh{O}_jts|1eAM)8le2~Rd6%ZO$#t}JC-+x`g?ZVE@fp(qVaSj z69V{vy#^7Ii(pUTzzZfX&rjhj6rQS(43L++NkO&(t)YUNLPVy$MCTWt`_lB@+_C?L z5aD2y#Ef~rph;fAOLaChSd4D%88<`16)1`fX#`T>C-J0;H7*|)@gfnDqgG&R( z*#UU|A9KoJvPGjrCw6Dhgh?pMJB$*gS))vf&1TncHLMUomBe}wJ}fBLTS;2|b|^_1 z-}!>CWz6iteR(OFFF1mFn4Aqa9(;Hm&md^^#i!gtM&hDj4F3FdCp2Wg9gvql`>F3@ zV#vjuXH0|ahg12MZETQffqQ;qptVd<_6z*>tNgUd0D-gwhS-Uw5I?w60)fkf1vV#l zyo-rWd(C;=M-IcZIc75~TsWME6G%gJ*}yEa5bJ%}^dKb(1}q?Gq6%f4i!cpRJe@OU z=H*&w74QsN3(Q4aOixaA54B{|>d;mdzzQ8O+NaS7b zXY@@}-TNdOfaDJ$8)KL_30?vU`hxlHgU2$Fe^-0NN798$IwZ)HF#@ovnyuZ`bd`{D z+}j-Cx~6Uyi$jx~fhcD7;UgqWfvIN1!K})#4w#}Ncn977cl9~HLF4&Q$C%B$=-8)< za2X82T0?rN=LkHHM0H*@pQi*8I65(>mMr#6=!YQTD0>ZM^?-rl>Vfn%PYb9o4$5A_ z6Gk)1hCwX|e@QMnJ#LJgt3Te{-t+W__a}epy}!7>^SVnux;nZ0t5+)3@dhLl3_1ap z`9l5}j;4^-U@_@aogv2Y*m4++Mjyo-VrY%^~Nb2r|)Nyvd#|D{7fUAKfj8cslnKVz~s~*diRDK!EbO;Gcjmuk4e> z3i?_|@vNM42B%KxLW0KzEq{pUn$aaV6Lauc2(+Nj^k#LSdNrbesctGU= zVPcKxYo7$9M0m}qt|SAf?&Pc+b5@5-VTEDNbh&1{p|@@nXw8os)bSC5z**p#)_DP{ zwSTxuu;1}pkX*Ub_1ozG3P=;_Uiy&J=&__F+jVCFIOM+QYUN=XDE6)&rmE+u%v0?B zsmvG+oFx*Z=n2|=$_7JVDTQDx1oSQ?pp=}L^hukX;h8&7S(uRviQYVR0n!1)Gm%+H z#Tu~<4OlmU$|Un>1b>sB4waNi6B~pivuDKI*D_3q90Krh%@MI_iz^DfiDF27AOWW zbT$P{o@epoQE4XTQot;pT6rf}8ggYwTG2SMxr{e~j506ws+f3x=97mXi?zd!u3l*h zIn}}QNPe%7y6LlH7UN?ZGBa8qebb*qDW_cw1f4v?DS7}Z zI`R%r791t3ulnmaXT5J@0e`=>yVYfj3wP7)52pAGn7)s;6V#Vwu49o*aSqF9k)Sp& z)4&SWB88oUz(p6#z=ILhe6aJ1)kr3z@GIkT)|?SGVyFHP7XiXxtP{<=DbtptzV{`V zMFE;fFl{69Fdp_?9bxkgDZx+crjs2{egw5cdl;C8N15J05-iPsdWj17c!x4u)0DZO zpYqdHk4C=c4z{ee?|OVG%B*H0e?A$kjrZCpuZ=CrjojJWEVUb7m)s&G?cFlv??+&= zl`38d6=}C$pw5)~m{+`NHy&z1x7OYd9)jK7X1E^)txnL{s|Bl7ywU&{t9Yd~NLRdi zyL!c||HkF6>J)ibZ~0B6&ngbFb}g$E`Jvzy-zW+~&G`ADvg5IR;$iU!MXCFsEo}yH z2W>cPk&q$y{BP%%7@`Q+npe{7Q2qRo?HZ+Ji_a<^ta~#@Q#*?WYh>mfUhqHR9A8?1 zXZPZr$js+0Diz}r=Jw=gky6q7vDWzeYo@eLe_8xmKBTmK45~K=sKKc=qgl0nFD2-6j$~@!4AU_ zs`^J5(JOM*9XBagp!l;@+i%l39Gd5)&J;sB@ba=}W+%_AEK8rs@ih}>x^iQ_F3Vg4 z{1%&<@-*=szYf*Q8-BB9I}&VRf~1E*I%1Yjix`Q|Csgi~)f9?q*Nl!hsgB|T4Qd9xn8^s1Hbc^s|~UtoM=GlWs9w8!2u0xZsD9{8l?a?#A`pJyF4_Tq z`YYh|AS*TN5%}Nq`~dj2t|TRT3Aq%v+fgW3bSt@k|KkJ+ayjh@8iJI&NqfLJ1&+lp z-s6HD{n#NZ6zS`ak6OO>gvrjZSeF-0(0XBcwdk-g0PfJytI;COo5ljolq&d)>dADP zexu8Dms*osO}9OFmUy8F`h_q7%l%GzPj`rw%k#pEo=1Me2|qp%s`LQ1!i{MI_laJ0~C~GW=L}*Z`@#A8+ zG=ek*fQmF0ZfaV{F@Y?I#X*!{r2s-ytA4l?>XI{KX6t1+xr_D7%ccCBs`_ZZSOQZ; 
zBR`Czyf}UtqnIo}SDSw}>PMAPJ)}K1nuuw?m%BRpSdmqeth?;1h-nR8Fvd~?PzaDW z!p}hN^9P2a$OI8y>pz6TmP*BhcDyS9xERA3XdHlPA5KkBmB`dIB^$8w3KMflC~t8M zRiB&v++)e&r;ndM^mxJ|#;E+))HbTp7DMY}?K;r;!Mh`GUwptw%o8s+Adw?Gx^-z~ zi(PGpZsi(ZMKwjN#}SZ;6)tb0FQJnhvOUy2nm@7EPw{Brby{3<2oVpvI@OKGdaX~< z5H_i+i|f*U)xg_VY2V?;xU6M}&-<{qDJsEznj&8heQ+Q}OoxqA_$qhdS`@o%hZ9UCAq?oGXL(^FY8(%}mRYoGsE(P&RADvBf zF{dYFP6$CKlb6b)hzh5aSob7Qv6h9fCS@qQP=1)nL#z{}%2C5Eu^0oiSx5=$sMH_t zLRKzS%aI>~#j=&lU1W#Y;Sb6@)8Br>Z-JL{yPcy-ucFz}avbK&Sy|8erhK~7RspJ? zVd&`AS)TLrTYg>0_7JMhgKC4gf7Q3Z5|x;Zr}MH*`(PKyyS%tRzdgCU!>V{aw%vK1 zCiU}&L$2_(1?!sApn6Oj+ZE<{%iij;J3KU+yq%`=S<(g7G)LsJQbqT?E& zKD$!8!eXJbThUBugfx0A6#J8>aYDV;XfYtf4cwJyZ^{m#K)V zaEeLIT~J+`f&s(*#Q`bg3ZBeANXxvm18S?h;24(4h#)jE!)Tx$isO_1oAn7Gc6vUV z@8~Cq9XzJ$TC3T+@nh~>(C{jr@Mc3(qNlr$Y@0-=*Q|_JmhQ{U_3)H9cbH?cy>lnZ+ovm9H$PU`C6 zV~V5o4ob_Zz1#u9oQ|cRJEw=C$7l%7oy!3fowTKZawc(kV>!zxSe@za!Db2S@5sLY zBUhf-le#h6k-{Mq*LcDTTSnP8F?DvPaX$C23{f|UC!s5-3>4>ZkfApl+-+K1xxNki zcLcFhjCqD*@Zy8Euk}%<+6rAAjZCb^enM_%DOhIhIPs$&=>$8=m*13WT4o$A2|^tm z)yfclCm+BjE4J&ZI0fS;Od0A$$vHST%CF5dr7IsNw2aN$Q%qedchRG~5xclmA{leP zGNFvtn-YsGS&B1YtZJzB?=YoV7c8qyEMJ}{0yLHY1=Dcd1CDWaQ^L;mV3dUzH*EJ0byn*B!-QSJ z?=dh`YlQLW-eWikfx|WFVqta<;jAE2Y4qT38*qQwpg)hkK-~z6!6Hvfh*qW2BM@0D zZsk<`x?Dk;nX8NIW7$>S_gg9J^Ps+7)QaIU*S4eCwL?g*2ao|9hzJ4+(v-vgBDNIr zts?J-M!n|O_qS$M;TNK$o|su2Fr2n|060QO68wL?M*q>bIkVc%=m`w-G<2wD`AS#0 z%3y$Rpm|d_ioJW+QqP2(dH1f~vgNG>EbUfsb#bjMA0ODsyw$tAzdb*>y3;}BV~qY1 zo1(4rm{q`)ltdr;wLg4dz5SNnm_>>PeIi=)AALQoNDkRcE;$;jjlG*Q_GV+ z(q@>aQ(7crY>D*}MGA*}u#}W|0I3!!7!FD-F^XN-`3EckM^xaH4Aw0St&YGTZw}6r zBQVK>a^SLg1+SXXKLUzZ1GsoTEdTgOg&DX zQ3OAcIr;nV`A8pQPPE@lJWCcJBerB9gShG^Y&UV?Q4Y)?K?ELxOw{Z@40|*IT?q-m zY$xNb+8f0X5JWa;;;lUphcVy)v^ zn;XJ}-gH5r(5w7)VP;>LAeC(fmF9jmY;l)YLLv+gK`6DvlKxhM5$0n~)>k!L;~x^G z(M)13)Cc29E|+l(*tjQ{gC4TEsuPU71To++@E+<1MZ3ulPP<87!?b>oLKm;VLq~g| zS=r0MlG;Rci`%R{ZOy(QyqpIBD8%pUQG4R&Mop4+uS(?3Co>^oV8N!m4CDs&E!GYH zLOCY4AjLO|Q)FKl(_8jCKy0xCh2lFDUHd|f<*j;7BN^*msQZmqT2PM$=}pECjDpk~ z2!3SrYlCe1+$yFO`b4R&OJIf`)Pw;=+4yIJir=AljLT6c3CY0z0#>eh6% z-LE~NGKa3&gJs1|5mgNjN7#@fAb2m|JbQ!NQ?H}`vPLbq4%KP1lS@Z#)wJmJw?6jc zzYoDuMF3qOtcdh7%KmiEgb5WAcE3}Fq`C0FyWNCMCoQK5vod#OJ$e==x)sh>&FWgN zx70E!PhhcaUUNd*oQs0oP}JKwWFKZMVh#RK2VE|~MvYQJ8FZzJP>;JkW%ED^IiM8=9~Pd)0XKomOFazmh^PRe+hpn+4E6GVFsIeQFaO9@Vr$Lh{}@i zEkz(|6K4>18pIm-py*y(T!2_tV^5*hU7}i0>x^@8sqjo9yk3|Jm&4NY9E_4!lf$!L z^kd`9z}_Zx`jEZdkT$m)&xPNuTIE#eaxyT|HaL|2T?mr%&CxA^zQ zO>XPQo;AR4#C&W9c)1tdW}sHExqBc-qn+)*X_Gmp0#I%v_lxj%Z@x)Yhs5x2Mr@l_ zKy9fx)Se9Jk2+;$IGYbdPzJK-p{N(b@JMyfb!DDs-9tD+3Lb?s-%#-E_^NYNT7tnH@dWwIkJ(-V2Pr=Nq2Fcs89(va!D3XRY$rtwqg+v=3Twp_z?Z z@gUwmXtz3Xz21#FS673BxDg#(Z7pgptbfp}{H1`nzXIa+Q?kf}11i9GzS%k$4GyB_ zpxJKM>z#JoZ#S48Y#0 zc-YN~3cFda!_H0TvqzA;hQbm<)XhMekqp+!YCe61pR4+-=gXM3#@8A`frzyXQ9QkU zLe9KCDdc<8{$M%h0Y8Hc)&bVA6@mBh75RL}`DMF8L)PP#Y0`ftWZ z2@=`)e6=2p;$`Y*Zlp#RRod4c#H{YkA)!Hb!L-kO5D$W{?zhDayYhM%v;&S225V@; zf*l;N|7Aa&RUk;0Mqj~s|8BlsVY)j!g&(~18GArldMm5Jue9eQHN@BU;|EHtgH>uz zJ_aKIE^6h&;~Ku^EAUbL=2NCDQ9BqBjH%)qlV?1lZ}?ju7i?ny+xSt#7cs^H2X{T* zGEbBSQI!Z0Xd?szisAV4cqkkk0Z#-N+e&ErAoBC|9BdqUQVp=XpwkOd0Eqp;{+iq! za=BImWW!<9s)i}M&8EI)eKn)ric)Pr!wd(`N5Sn4m`k8b6}k0X#jD#mo&^o;8mXL% z<$zIfkbS8v6n2*w2_0yigq!Jze3t`a9Ykn#zQoXShio{-de$Yhfdw(XI^=mb`RZdW z^vMMJRIKr0yjQW%IWR~TbyGX4|? 
z|EE8>W}k;;>ILFHLSb-C<&CN22)qTzi3oOEz^|d>?3_}xER&Vc2VWYiVNGYimKM?> zLLeL5h+vBMI%M_&Cr)6t^}eNttQKNJYM!5J+m*Ic(stboc^iU^+AIPb_-iSX#$cOb z!qYxm;AG9~5`RmGpwCP4#>8zcy3%(c7_SP?Rp63 z8;CJ|4J<%{#*yqb%>>OnM*Z{Q47^1;AxR!&P!jYu6ci5$W)Kyh%|ysA?Ik9t zwZGtvb3vl-ih2#Z{D!Rs{}L!M3DtPc1}MXdp#(?l&YRC{NdVtHTQ3nrgh>4Wr?B5j zn?iAN&&c0xh+l|Y`pgCg6Yk-FO6t>IaEtK?tn_qy1|`M;{6sE{+kiO&m1m3hkN#|u zZ0>OCc zp+1V@5=tluamA`&KpoV{+!i(`Pa6tN@IH-3%UowCC}{M^Z3UGm>OyvBbFn?3#p!&a z$P-lga7PVLLUJ57LNrM`qEKvUAd*#5a!QF+%lxEgcb!c7AOwe*^*D+i64(ft-6L{B z*T^D7o2)X=KgAQ1a~cyfvJJ`03z9wc!s8ENm0c+xc^)cZiw&0+v`N<-26OOCf)7u95SHeOKb^&*hy$xx*{7a=Yg zu=I@JPi}CrIbmH5hHE6w1j#xZ$|iZA2^$TX;~~|?s1g+7E}%3UoK9Zcd(opk62|}h zkBhgxlk=B9y?%A}`mzUj_Cngc%j{=`b^!@`q|`^^?iT_|VUuCl5g}Y%;DO=i%{v-1 zNySE0;d-?q*2{|gIO0)kHZZ|@)DkRC=6S$X>8<8;CVPw3k~?25KINHrd7jaxyzc{E|l9`J~0XFVYwN5MQa#|Xh*VtC4(JWhznWY7vp$d^-y zqzH9ojy}m=PzA^-2WLW@56*JL0)!39Peg0J-~BD3Q1Dq3*p%f4+V>ROj{jRqU*uYd zw-BZbctqsiOM&}E#<6DxD=pW<~q#EFGYqv2b<~3&e?q{_rydCLzg^wMx}g0aSqhKCs9MdvKD;*y{q7oV^6DP z1TNWdw1WI#Tmnbbb{v(=aXXSlm_eo3=$*PwJS>;4qYb2~Ag>@->#NzVORwdVYtl?J za~;K2nd>Mn=SON4i|@XmCN|rBUdvI6{zjuek=HS4jQJ^i$g4dx81&#h>%^e1>Q2Bh zc-*Vza!wr``>ioD?%i6>eQDC@)$E(Z>D&r&253y%CrxO;=$+Lh;7;EZja{FZuWcL& zVffz3#l_jX%TfjkhWj>i85oM@S19b;0+U>`-HNZ`6-QA)JpAsVy^NMlI~!*CnQ{SEr`)}(Mfa~Drr4-l z4C3hak%`^QtmCXx#<|FWa$V(B7`XLm$zds*5E^yG7)Z`UomPUqa^P>rnS9DoqZm`0 z95kn88`#w-pEwMA)dQ?~aS4LpA&qiVSVsS>Mg&ULX4oWg;qGY|`!R$f$7;_gPZD46 zW!$IJBN#=H_1#GyWc?8rlO;Pb`f)C0XE4^~tR#HuZWjN!FlfEfergy(U7p9ZG#aQB zls4Ta${_$Zk2K=VNcXnh?dDlB6A(jcb>_FTgyU!3Vo>Wq54yoHg~}BnI0NA2L}oMa z)^-I+&{)Dl$m9MVeg58}F#KOobM9Fki$=ZmPjMgpBO0{5t7h zqeTkw*q|j|-AuWbhMJy1xP8oE-pAZNq$gR=^kLE5Rok*93Y-I7;qa3FC}s$ZtX@cK zG*I!MU`D*W&}ofVVqLIpwuqllQN*bUSVEd}Nie&lIbypyj4>SMES~Uf^0D(CICFTJ z9OxB!mt1L2JbcO}IJRK7q{DBfL`C8ZvlZ{5&^z^<4VOu?5<*#mrEE!rZcaHKAr4`< zM4=K>7;dq(KgX0`gd{fnFnr-{%+;bo15c`4nD@#P{}s*-WEO*iGhS)xZ%iq|L$=0W7L)N{ z49DWvc!^2P*m|JV#Cm~!*gItgbY<%r(<0o;9mB94`0f?V29^M)HDG6rbqSHpID$+1 zu)z#jnGs!u(JNb{5{gh6=?|&B1o)Lo@4I_tm+P+u_yMjh6V{D*tv+c1t70Z6=9dk< z*9B_d@S>19)@CO}pPyEP{k!i`yJexJ+^f_gRZ`nS+Agm(PNmpHjgLyQv7Cl+_}bsI z6@|B4wX*RhwpG&e#gQJg96Jy`efh*9EDK2z%_!v@hJe6QdGmfIAv3Doi0{<|OF|KZ z(TQ%k=@!ETfnu~&suv;Sqd?SBMPo42ikwrKa18O3lz+2$?X}WH88Ig`I-oS_%_g(=Db!OM&ULptpHt@>e}JYY?N*d~lA%-QX5tYmNq(%ELI^@v?S`RJlY%snn>|q; z-v+qYka1)|Si0Uqg3lI8irkIh*ML@ZoLrDQ*l{5KJaTr0P7d~g4rT2IkQ$XX!=vP^ zo-WN01Okc3yaEkjej-%1+!i8EAXygL6yv1?oFg{_><7Y5g6kMT6U*4i1xdOl&*uqgvTT_T`WH7)*f~5oi0d<8r zV392&V4QPojpP}bv}i^s+@e?HC%9D+VTHmSw~{9<00yGd&?0AQ1UIr6MG@p{nu%dh|E|1u~WVR}v$N56~obCpwf;g3L5IIJar#2Kkb}oDBtKDn2QTC7~j%BAZJaiVYL{xVjw= z6m2T)2Cz5MU{p1Esyr0jPr_aU%P6siF z9NfTNk1pK>oaeD^9vCDBQXiWcvIepB^Lc;BR{Ul@WQ_N4HJK_grjC4+D+Ic^gPBz# zu1oIu&Zv;@HAi?(4ZUwhq8uY^Zq6Y~JSzzWZ^R5$QPJtm)A1@f2M>GUUXPXM*9@_e zp8my~cbC1>lhYSxy~~rIdQV?oUbxLnCyCz1bUb?n%jX{&rd#Dh8{G8Av#haQ=;`60H>;R3U$21U=Me)Es!XKN>#SbuOEzGa0H8AamQH; zLbxelk*UbBQ^sJlROT3vX~Vu?@}TiTIKds6WyjiY9LN}%%3vHkb~=J+3S&nqW7^J6 zp~A^*c-g-e90sh++tsp!6C77LbXVc%*dY?kP=MD3+d8iXUArZWOik7?_JHMuj52T5 z_;oyd2{&U&p3$l?kt(ev>(K7yYg(re%}XMq55`QMJV`&oAGaAX_E9yKtMG`~ad`vc z!wJLDjG$jF`x=A;JP;~pNV^9PZiZ-G%ox7HxV54Ta6CcTcs?a9H5rP-3tv&Et1oG1 z-|otKHou|FxGdY>P$-RPf>?3?fG0`Y3b`56*<#m>`dPc?tR3IAnzm~an6ypY9%cI; z-G+g)AG2Xb`JDaCtBH6U(A-;cYhl$^qlKbVV8Pf6xnAt=-iQ-T#%ax^uz=h|2t+WG zhki4h-7@uHorHU!x(#*dvQD{=_kN*Bw2j`&7sfalx zMh7`ppnRoO4d~yRTC*|o0+pcUvD0LQD`|GpxI7o}FC`UgQtFS!i&}p+xS6AJ9{-?z zs=>wOn|Egnxx77n^Xv?|=YvfR#0j9H)ykb1O@ zfrYzRxDT;*DpAw~wf()};46=uz@|cc1fpW{^(BSbdZ8J(P@XvnGl<~PS@1I`B*a=RZ(si55W4uDAeNx^s9F-Sk70N)ECb=6--Fj$_1?;5b?})q-Kc(4vKQY4vkSi 
zroo4W0!qFygOJr6WjXlKJoSf0Lq}k>p)C@OAL7^%wFPW4r3|vfzi^3Z_(S&5v)Vs0 zD}`F6NL7mLN|nuf3awhjx{BnD$>&eH7|#7BD=h;9M0wT8O(9#AM~X-QeyonTw=&Wy zQD1M>kl^*oT0d3L(diaRrlVxH|Al;;Nt9(Ls!|0e(uo+wXPu@Uq^7Xbxd<; zY@L1!@vA2y1|>SUH?62YioRG)7jH%*4bYDC`&a<#k$$^U6|zrHZu<*W;qmbkRnTXg zE}8fbmc+o6Jm_Fivth!4BY#sUcBq7J!2Px`s_zsAK;ZUXDUR}Al4%JX zA%n>y(}y9-6@ZX8=X~VU&Jx_sM`Tfr`g=!fYaG<8=yFuEU@Poe?BFF zv;2abOzOTsF|dpXcmtYLn}_={s=~5E6>RibJX!SvQFVcQ6Y)^^8I3;SAd5a~a(v=U zn~lC89fEp}^+lE%h4i`?>2-y~-w~Q`&~agH!KQ=CZrHQ@C$k~3sho9?IFs03eJe1t zi@5`2^QJP5r04u72%f?f3$8qHGih};p|&$Yo?*N(1hj`j5Dfz&nt@^Esz11aI33_c z!Snla>lA|CVA-EtE827SfGsd8Mnx~_kmm)(?K6bd5*tQ$X2}QaA~rMk{yRV<+u5&% z(QZJ_yk4{$+1$>^Ric;*l$3g@)uaxm=B&G5G8@g zduT(bI`_OS6ZFUfe+OCM?<5DzlmQC)U$cb)aa)auXG_}|T)@MHfk1UV;WGl6FpdcT z`H8VXBq$p%KGZ@~Ov_^M0z|`XziPE3^&WrFz3U*VD*puAzd@bc>VlAuV!k&0@* z;I@v8zd>cDfruQTH4FoS#0KU-VB4;9R5tPWH8JBXJyZ2Ap^+r1XEmYb(xLLe730-ELZtf;ikA z>`1{9ia2c;I236x;eog?CKWqrx>Q>wGznm1Cq1<6YY_B%TS+A&{Vge?CuLN2d&aJK z$&sc(k4&0H0ZSW478TkoOdi6MMj|#hq!F7Ak=V2s!^k2fI}t{V+WJ`ajJ4vrZ&o9e zKztA)`fIno#YGHH=4IS0auqkd7jbjrYZ#uqs~DcI<_U3etM=BtmYZILZXj8@kt<;e za)l@LX7xibo>?0_3$^1pA{(Guc>MQtGxMUh!4w|nwr-l&wf%^t7lof_Qdb5>;+E~S z^iswJ%s!F+pQj&fg_t9()s1$^t@xBc&R1oCarD1lQP*lA)7TEfT{De7kfpyF|Hi;9 z(%8pn8~#x|#$Cu-q{WCb_)3kdj10CChLz7!iU-a+X%*QvY@v$;01OJ~Bh#Z9Zm0f} zOXSl4Zq&A43#hm6UAPVOsoZ`n%4n&GHZ`XiuAP(v=SH?2bf0NQtz9XvzS4F}0~!YQ z+2Ly(P)K%5@5_eMiU+PI4uaTGD&Xm?S6+d=e{okG&t0=8Tkh6Ec3 zvy|@e_mpMvh(rI2L|KyRGp7s2hEfnj+n~`Mq>n-(R--Xwd6)P|F+Fter>O!7tNTKL zf#7?9$<;gUgI$9!Jh?Kr$@~_fiXp=th}?o)YCoIovP$XX*Y52!3lL#D=&L9(rau}N zQ&L=H4oGJSv%KJJiYN+5DA2M$LQCOJH$P$T7U25zFMzoZz<4&9YlgM{fRX%kf%2=1 z)sh+{*#quxoBV~s&bQcKcs8T*i(jnb`9?&3>Rl@y+vM;sO8?JB>2DeuAtUdwQH+O1 z=yap#Y8ZFg{cu>XH=;&s6h?7tFxuZZG(t`eVzcO9nTYeFBXr0;2D!UJ!4VLIM518u znv(%dh<)42V=*Ua&&H0XDMx+#7IULDWH~&tb6Dh>(XJK$G6e!T!Ec4A06zmuij^3W z>b-vd>Z9h{s?Rl94pitZvehjYJO6nxQH)?H{rz?ncdp|8RXeKJo6UoRtLDL=J-iwe zI`KuKp!^m7WW(?$QSlQljyJzzsv3b&HSbF+8MxY+j4&zp50J8BKjv>6IR*PH7&+w| z2Tu9(1u-cczT{NoQ^cTVJkcIp@0L?P2pgec2!)2g=>#u%9>){P4UP$0r}NJ-#dIfy z5|2}|jDKKng6sKw$YVTzCmrg9=E;o}z(xRf*Y1L@6pOhcBCB9M46l+MRwYP_tMZt( zyb?q^53AzZ9%O_u;S{kq8cKS|4+NyD)-ZW}mw|8jDs}~)j7SffIfd^3xpg>@-}o{u ztXbebz^P!o)*>aU%)%S;r0^oLZOmLqu@Z9WK4`5KipPRW0};pX;uW4ca@B>Q z7`|`FU-kd8Vx`i%$N(|h~ z7W{Wby>NJsG&h7Dpx%HFdu!Co8{lf{2zI?2@_eV;?79j1lwM_<`*Z_f|JR8vbN{F^ zMdQg(x&oB}bQIjp*Fo~j8VtN{`b$X3J6~VlP;!d#^$gtd8HWe_KvJfp!DOeWvcf=+ z`Tp#V)BF^rrqk84DW!S-Yf??+e#v_4kyI`u>E(xpu*Ek&bK&N<3nK#uqk*aT@Q7f# z>_tPWV6;-9T`H7OKw88F9YmzXF1#S{h#S~Ts5EvMU1rPBG8u7Y^leHD3Zs>&TKj4T zn9+eEzqzn#L6IFICU1Si5iS=cJ$BD@Bqf&3E#yBzsop4mHJ2aGX4d;jc4QOdJ3aT#xQc)Q8txeLa4KaA=xqQ zLVr0K$H0Xx8r}vWXgfP1GVc%!O&b}_@+vmcuF<(^c}Fd9B$Y{cOx!xqqWsGfaqwCq z4Zsg8Jff33Z-WFoI3%$T-kv?|eZiBcSNXc>!9<6HO0yYNqb;uNz2F5S+ZYdskp_}D zo+cb=^)cBagQn4X0`cfT_z#Jff}1gBfCGk({X;+}TVvZatp^u#1_>+1tj^{XZcDMu zKi|Y;Lnz38%rrG$uP`a1NS%m8ByJDr5g*h!EFIU2CaweL5%$t64~))>N-Lhjjz|VU>p+1faJHjD=VBJQlQL$fv6qD}~p6K%jNN)4jOZV_ceC#$`U;+Br568iy5rZ+^# zNAx6{kWI0}F*XObUEDmq1qz*qlal?w`ba}xy)dN@G%S6s>2g$7G&Y_ylA_`9HnN7+ zd->7$m1z^LZ_SU!uT0D6g1tGG(*?WxMwZnPh4F268XF2o;Ni@c)P{|RBCL`l6%mW! 
zd=GUILXBD!o{~1R#*Oo|%HlFqC0)}xvp5Z0GMp`aSx+%P6XNy$;L~k?ITS<%ktuZ< zPppfMr)p58xj*X1FMBRYe+|=M|)X>sDyY73lH%(10;a8BuB;Q37+DoCKm#v zi6U(~gBc>jQIa&B#zRoxi<_;k;?v+D8d?xgod6e&k_uSl4sCQOMn!82;M1Sqe8B)U z7(B`VNDCH4V8E}^K2NU-CVG1LwD^Fu=9U(zQ4MY;^J_OeU?Vj?AP1GDSC#=!s>vQ3 zKEn#E-rl2c5&|%z{Y|3*TA*(~1;YJ-riSrtQ%YfbJs8xwWjzKUwxh)T2z%`4hOMB| z>U6p2D%~(?l-GQXt+%W`EHKc!BN-kt&k-lJ9mEjwI; zVPjQf1hdqo(dw&f%WgD%VgCy8I8F`m81T452m6!{y4%hTd6$2#{srPYYQVT@c*oSB zg<8zQ3l3{$n<7cCFYY5)O6gBp%t%jo#`FOtJZX86L8Zlg%pX)v*hfj9cmg02-)@9K zoW%nMYW?LHv)K1%3F09sc_3EXjFAYiiqZ&%!Tx)VaRh<%8L#RV!lDpdU!K9jfQ*Bo zw3LX6&L)ZIiE?o@hY>-ODdgg=XGVRoOKuxDbguH34b)r^etfJ?OH?d@P_nFjIO4BF zQ~Yjbq;mr7=5ow)E+Wgn!c3=hWAh=DVd;nC1d0tw-a}ykn$@rYiFF~He7k*L^Z~*+ z6!I+D?oTLhIRTC?R_zCnf^@=PSphPg6?$@)m%mK$=>Q&RK3Jk}ZJk6LLb2#_xXkW-%9hlsXC1R#bnl$)cs3*o8;6UgvGF9$#7 zCVAEfB-K>SB5BIOVQxcwx}uFEuaH6bT=Dy0jYiSB-~wRWG^Jojj&3n~cC`=;?nHkW zc(_?6%(MbW26pvN$PZuyJjNlr7?U-b2hl9Y2_S&^=Xg2kFBX9N5%UXz-*U0D{A|S* z_|w(|hLpwLKIxaZbvOZ|>2DQI!06<63nySl&LbGm4&h^$vi7XEH|CiSPY9aW!4>eN z1!>YkJrQJ!UmuUsw_P@o6@bZJc0H7moWIG+1po6t?&;J(Qr0hnFx`!D=+sTY_fEOO zj>VGsciT`#;Qr8FP6bU(kH@u0q^`Me#U)e%7^r5hYRy7;o{{DsvsP7YraEQ(?B<>@ zcv50D$X1^yKB}C8ke zjb-hNj+S~NMK_y7mp0F1>U>#Sd9^oz&eNYcX!pU}w=|TRk*(`;O)BrqmEL{pxP@XzPsHoJg zHlU<(+*v$up|!v{whQlx~@%&hH*`ZCyzi3 zI7ZtpAe7Ucp59qJUwu$!p!YPJyXTz9kY*VswUM%??)4P-qj~1U^2`L8f`p{k11L6RuhFrzkV)`G=6UxU#}Wdn{^xiz?E#*bD(fnb1sdCn^aSg+5(g!GNAj)e>}1u zzuYGa2c*dBERhczqB#4xvQsX~nd)L3pTH(7u;vr_eGS}N^ehcAGxKZwi9SVO!8xdrPe!z;ybFqR(jGa9p zfAPnuU?02`kCfW5W2y^m^fK1%&P}=&HlZVIxk7%N?ayMUQUyqECOpNvGqnP8EqH{a zk(`#*Hy&m-#t2Rxd6Stf+=^tc)JqG)&Q`uR(hJKzMMg{)#oGqE6;DITXjd(lX$xC1 zKc-pA@;RB-vc^|FP*~3jtaAmNhROD$e5Co!p?nFXA1EHNTS+lf)mpX9Rp~AD0I?;c zZkG>&A=e4`9KdMU&eggO0!uZGtvQ`Zv>w&c0LO1+o8DAIwG}w)%?*8Q`(|efnsQ#> zz6|Arh(YPmMpwKoH&WoHP}AwQ#HyMK!Kxx;DsieWSf)>;njg$HJ-&;SYS?_VCJ&8* z!}JC$*{N^L1(eJ9QMN%UW?R6h9U#9-CPiWxc{7fsQTW9hX{3aDwzuh?ad*9Hq~9U# zu9u0~xZlPO1OEQ2hx+xlor*twNS`9;rYIKe2)f>4{u_6z$>zr%_O@T#D?`mVjTN`2 z>@An$)c}u{Z_a)^QPTYkruL_)rz=Qub7BI?!eiwdrqMng8-EoYq#`K5!z~>2~nO%Q<&KF~cq?)>g#k!XGJ- zmk_G=!P&90-{@4M?tOuJ_s%uljr?gUX3^|0T$OD+qr7h;@7ty}9)9~!uf_pN5C3JC z@`wMDU4WaeVw>47r!8Z6!!LLypa!0@#T+9fQ>Eb-P1DcU6Zj_2Jju7$L<%NL?UW{M zIZYbu(Z~G>;0_fgrN4xL6IJ1D2EOD0gko80qtfhWJpBj}it))~OH)E)nvx~_M6(6O zPm)svTX>2)%0qf86ZB*e4`>ePsCau5V-Q4??DhwPcmg-H?hw)5KdA1v*`aIhSEELB z-%|%Hk?TTd+Z&b@QNeG!lwL}DBz7disywW^LP*lweFIDPEJJr?*KS!(3-~W=E3Tu- zjg7!H({XW+Xv*g_1`CTG26Q!f+m*Ias_9%4^S}|eIo9q*;jK*4CGW+jn{UFBKUvc> zh8lgkGc$8hPx`LxSA&NdhxYe%P!$XJ_w_@0NrWQSv1_TmD_eDkbuwo48yM8~4mPCml9_yRoECjk6shWxA3N@*Jj4EWlv)lT zMMFiRov&KJdnevzrH8K$iC6t)J!Y5jp(NWlt>ZNkE@0e?YDXT&P6cJI2D+jgUFI-Q z8-&^V#P%DzgL;y3)WrpY1ZHHfAwrO^oA^57MfqD)jx;gonkxdgl}^T@6*OBHMGM%| z@*xc~47oXZ3^N%Py$pG~!?`{+?ox-5@rCk1D7Jt){}6q2bfuY^fG9=h=IQ~wYJe=O z>U$Cc&Ty8W`xB4CQCznkV`1Z}ZDn7zu6RzrZl6Fsmt5f*nx6fJ6PW zrzekf2glxC3RpZQFPW{3FpNH`a-HtuyhHXNxk*+FS${amRh>cP=^f@m>7eLhzL-1) zp|{oF!!ATTXIGTxD-q-g+)Ky<%%Q@QP(b%XX?){zORb-ZYrfJ z&Qw`y35oCPIp(S8uP9d_>XR1B`4t1EI}uP3thyHyO(u*fFwoMQR8K18{caLNh}KdP zLnwJQg#Q!Wp#>%FS}mOF`it4K~||y!iEyV#HUkCD3YoZ=fSEGQBEPR0njvT z?^J4~qCxV_(d9|W=@xCu$O9Rmuj*14NUL=)~N?qS5gH`Se1y;0H{;8 znd_CFi6W38&OPcVLq>_0ZjB<~tJ^q7a+-;hy&_ZDv?kukY{e_tk&-0is?`c)`$cE@ zL1zWx=fXjKAbK2r6B!qZ#V;%9p1F=}w&4}ddIV6e|E^5;XmymMbBJD9+5ir0Z!n%$1-GBF)do5e&*Nw&xdMox;+*(wZz;d1&~lYBB!>Aa&z(yPeQM9`C52CrR@hV@ zx{3FZUs<|CfuPg&@8SOAbo#qX0-X_TBL&>TJLVqwO1%1-Jm(k-dwT#$Hvtll<;JXs zI5Bu;;;V&GJtnZlY5jEey7&C#;I97LmbGa5!=y&iX3!-F`yiiXXDAZ#=`t=5BDt(Mo<9K-({Pgy`7S$V-;LW?>>BX~JvvxYc zYYRAvM|+I<0U~R0w;W&JKr)$f@E`rzjJ=vL-m@A!?+@au`TSEgU_3XlyLRbU@v0xZ 
zo%C10*(OvBiGluT#BL_GVF_fg-wRG4FDq0_0?1smWc&|)cp%fkCiC@j5F_6KQLiR6 zn;{`^%BKf}Aj~3J-LV^xmk3HEbE2$#v^3 zWgmO(xciqw8pFeHg|K-9OTw=VL-$_4fAz6i9CsWQI;oTXuXpu9`pKd1{G>LbgKobO z_V?S3R=qyzT(u7Ny9~_x`-M&_Z!7bp@-ps8eYE*W{WBj^ym-Q}dVt5Y;6b$t;aaoz z74^Pehy9q&XOFJNtAu@oOF+>}m&^Vb5+3|#yqqVU7Uwws#%_%9EDk=Mvj#6djWKE_ ziXFQ!G}wmPUXh1q`rgOv@Qo005cU4-{nJZ^TV}(j@rq68nT$wh<#lr+=~@x^DpuqG zJ4-wg(yhG`TedQB#zd0TuHrWXhGeVf4$*Eg5V2;UNxoYj5J|LfNTJ3q{YW)8II-*OUuo z`&qgYt&OMZjHuvLi*%jt8ZOSmcu~>1z=ZM``buW=z}!g~b!R?eIBKy50pxh`L96wV zvf{1+p0Ic}4{rM?zsKzJtQ+)Zn}Fx;GGKImGVL?`4C#{lGo_1jwuK0K8pv=tUsRz< zsm#OMahx!Qc5oMzrsE`mtUcouV}b&98ezCv-mL=5JiG`(=`#VQYFEo5htkwG;mc`|LjVVcoJL&~)ZGXL z(T1_*Rp`e>JS9d%am3;~ahXR>KVVY>kSR}N)NJ4uHd_U^(D09Z;FRZ{&uR3mclvURGD+_`oESx$%gRRV+ha{k1u`TRTpg{N&*;Gv97va_2 zY^Ge@n=GS@A1V%#hV{JHrCUKrpI;8Ym`Kk*QN6ffpk8tCe5$qHKY?`cgUvU)4evis zqYKLW;8n{KF_5#8{uV^5+_x(FU}FPtj?G` z9;tzw;F(>WQV_yK^LY>Y!y)S3rPDjOdZL6MeLiP*bX+@OHS9nh?>78eNP$Hb7T!jih(w;oH*(!&Azc|vyC z-SQCyG{g)AprX2*zbE$$3~oMP7 zP%RjwRI|&FL<_Rkx8dI{ki_#h?@rHpFJGU(e9d<1?A6=zlgl%c)OxylZZwr5Da#Hq zm`Pw^Je$-_DNbM&BCk~71d!H4iqm+yKpU~WN2$d#sVgjJ#3b;o+e5L#!i~fZZ3G&6 z3qO>_{x>wduwY1q;kCZR1=t&ThQZF4B(|k>)uUwS(LX#;V%O9G-a?m^?V3|iZ=>I@ zB#a7*38LEUdSpyQyW`RB4C5gttQf+zM9%6cPV139TFC_{>2Nq9(eCWZh(?R%S8xP_ zWIkDofNQb@Y$#WkznTDPJ8^E%=|t(bF?LR6 zsTl%|b?%LF%hwB+N@QWF5|IOfSBU*0%oo@iqFD!AY}3WcOM`LnnofAQssrTxJB{dD z?|HF(FSz%)`@eG-_%?8m2Mb)@v!net;vk|vH{8eFkHT-Y`~JHSzG5fi8IXb}vmv1M z=i_BUqG~C{PoaI`*-kWJ=&Ts*+rynp(k7 ziMtUF1AmA^bs>wK@xCaB2m;LrG%KBE2UROO+_2YY|9;6D>YctgdHK482Bm|WKIra~ zPvyXlA0?|<0gTY(QhsE=4C8UoXRqdqfQALU9@tjjNjq#xmoyf}vcKNZZ4U*fhOHSk z&JLov^gd$>{9E!f9`1v}G4j=IpDS|vV-a{0t@CDACvrf=1e)gqnOTQ}c^CS~m74lRKd!we9Cfoiz+vPsuj~fPS=` zPerPx3J_To_zu>5K0;2roag+dRjz^MrEMOmK4lIyupf;?G85HQMXn!CHyIHJc&M3z zTdNtQ>e~y4dB4#FWWFDIs3reV=4<@V=5^8q-1k+V{ywzpS03kczj=TgAa2uQ8&GhG zeaq|=;0GJm<%ucPA5L{be+TTy^qa6FcB{@5e^+*dwzb^>E!uvkg|<&Q+qxH&E?&NR zRu10HZ_&b*ODgGte2_2e*du$!;1te1yV=PBY$;nqrwBl`!pw&Z(%$^sdvS8{0uv_D zVh3Rd=K;%cry>Z=N7zK12)dI{3us*99~hSJUlZw$h!$OU;_LAY2@Y1Z4 zU9pT1>+%Zp`{+ps2AeP+Xqa_{i=N)ntCNd=uy}o?+ z^6Vn$wD@rPSBU3e=3mCs^)#5Rr&nO-FhVzzFwPUQ_J-M8)>q z?;KQF!@^PN|HK2}Dfq2=I_WRLPL8Ka~FqRoE zF}#>C(HxY%!g}14EPy)PhK}trSHw%Ao=>I+U6k}U0r#Y?X?O@+$hj9&?Qi_-(l!lC zz1~l+-=Fq+SBC7W4kb5=j-SBawyH>VZ=|^5R7OWP-znPQ)^+|J0oO}zcf09T3Q;;KQtqu z)$aAK*5e7=oZevdYsoWKBTJh%yr^)kber>y%gp*#^EaE?J$mscK zbqg@gN(T)00{eCt+^klM!$*($*VoJVnqi*$e0lxo)yYpUPapkFST+7VYJg!Q`v;xp z_-G*P{*W3-SPKk52cHB^uQL|$(JyO;j^UI(iuM}^t!Cp9f_sfld2K$b0WzwMXSF_K zfZ<1NGM`%?U-*XsA^gK|4qEQR1Su4M0Js&bn5J+T%$iiA zB%mt(%*v%e$ zA2y8t!}qLOyCY6@JR6Qj4{Bc_(z38lCFkpRV1C=%E0qt??DrkM1fwsuP7DQ3FiIkz zATsa0x(goSRz1Y)qK=isKF72A?U-?G;EjjCa{CC4pzDj9#g8j+q&YqvFTwSH*-LK5 zqt*BG$#6V8J~#+F&360X;BPM(8ytgk5d*E>z$lwQS8U6?@{b2KiOTh9v6m(IK=e6V z%yle;WNZpHD#Vw;V+tI=(@emd?~h^co*z-bi6!Hs2cITV`OCI$Fu$IS1x;>jCw2*2 z!8>F)lZ~g$-$R`HQsX^?JN_AgMTZy3G)5U~Kw zeFdrMN0WF!j?`4P9-OhQ5bXKjZZKihmAg3-OdlH6;5Uo5`&vftg8Q}{dRxZ3BTt-a zuAk2Bvvqj~E26o+g7Z5FrV;$qdt736TkKI4B!(GC@Zt!P;u9+ceUzaJe&Cx?XG>=^i=@O(E2W@5pfLkPY6YG4OO8u-{V5lOzxykR0c%jM8`j z5KFM`L!RgB@n=TpvnB;i?FS3xvt{%yw|Y-?!T>`cs6E-40WI;wg&#-)1m}GR-NT-3 z+dooPdbAw1b`@SxH=Ce}K5k zW7Jl1Ie*JR3C{YJ782|UpEkoB4TdwgTQa?>nMEs@tHA1EGM@6dDHSk+E18TxWskmi z`JZQ1&sZ$6{}B(r!kRn^Di!S0>l=Ww7tZxLVHaq$Rk=o$6p4kQ*(lcjMSrql2yEkC zsuEhD1zntGoJ{BvyA~O^JyI4%chFJP8gbEuh9eYSz{`CpvXIHzVrY26KEMuqANV#A zgg_s;LYANCZU{`hYVx9QDWR>;*sRoktB zb#E0nqS(64tyWa=pYn`95SUA+5fM$NA}&C9m2~vE5k4!_tA@^Y(RVelF`R?l#9%f+ zaN8p{Gr}0h>4PFzj?mjdqg#z4hTirY)hH~A-iT+B3UH8iZ0sMk5ex&duLJQqi4iyD zm;|L#FvV$?9aAkXL6(|`)H3|-2*cmB4JaRSUXo(v48I(MCojDs4}fFLT4ok9O9-&y 
zq>5RTQ7w8|AYaS^*_4dfGe(4^T{6ZsT@Ad40P(gn34wtn!b8+q039EuQ&=f|+=JNq z-Iu^3%8KB9&w`jF;a1C1h&m*8YD)r9LjZ(`T~1BYKF|p?B?Eiql04;#Jw(OA<;{G3 zeWQ>7gmCCHj5uF2t~){L61<*|0J&5t2^XjwS-JUaauolPsPO_R+`Tpcz%0XpiT+<9*jUrz7+1D1{FU`i+(R=7$|jD?EW1 zQ;UR7(~7;^dsKEWz_xnb22|NW zqd&N0oFfxclv_;&#T_+ zvp28HDrz?S1=^S!x(jaS%THuz!{bqaQnRn*)gX!d4y4QDG2~HeG4>M z?jme`5I@DkZ-R$%&ng4G&&kbl)|>U#VJ9H#0kB+Xan}xb3kZ1DU!vfT?M@dWhQq$2 zQG+7)_kxqrih}B_G*sRqWDd4pF{mGD>maBBtYkg-Ii~iEmcT}h?tm|YctLd_d;?gf z&{ne>j`zVvJDdafoxFaAuwb;*_|ok{?1HPX(P+Sb#*p++5HdCjaRE4nfrOPVFNkOG$jMp}s^y(K_TWH|(ApQkg!ltXi=iz$s3oLyX zsM~Vc-+C7trw|OxoFC%I&anZvwEgf{X~`VcZAS2eE(+#ti$cviH5+#(fu-KKc6& z+xR6icRs?+E0y$8j6f~(7jR?ml~VyjhopuJ6bXcruab6~4K1nvDRk+im65RW&*fw*{S@{i} zMl9@}fwsQ1Oz>@GBd#qVf*^52&uivGygG#1?YFY${v-Z7~14?S?OJT5m#m z;bjy^L`v2^X+lK~?%DOZ6mBXP6tkChKx zUulbl*Lr&tfJ_~^HQZI$OP^2lW-TZr1>?mF9cbI1A!-}2lC|?Qa=i)u7UZz10oM_* zGha~FQw!#hLG5oBdJdcWH^BMct}IfP?UK}skB1|D!T_k z%T9iAnW+8r+RX?FtCffNZbo!Zii?*0jie^f1Feh!by~=3(A{n}372CtD#103O|rk? z%Fk@hxbz+N0Pf#p*~iw{-+~l~x+yJ!(ag;lZ5`NV!$tlJzsce9ui_C*sWzFXS8LeS zQm=bpx!3?6KW1G8|HB%UN78l{O5-Lo;2@jF1J%8cy(<9kMOnN8O%-MHvYV37?NaLv z_w*EQ&86PMf~`qHbI~8rpnjRoKhJuJLuw{;;?)d_v>0yEVwgE&g)I!iwcAU&B^-Qi zWqS*?%3*G0LGrs4e|kMaD2M^pzb+)rpFh%;*;!NaV*F2F>RKsBUPQ#VNq#_J#^eD} zY9xO#JRrgw2(sVjEttk4X8>~+o1A?HQds4j6J3B^&TS!=WzKCOmu=2%A(wTo<=sy? z>~l`-92PpKb_N?=?Kp5H(SJ}R{1!6dt4d}wGdq)Lj-*~9r+UY|rIw*EnQF^uA zpuDwxgHorJ#AyhPVbsRVUG%RNHiQ9NRyY{_G2|j(Nb|?BuGM=d6Ut-LsUsuWUxp#> z_Oae*;ag1z*Mk2N7AUatKIs!6vhcmKYb-m{5gw1ndaW87PnZPr^T^RJYesY@}NV=17$ci5c9c!VXT`UHGY4 zS*u8BtPCxgHDi(DP(lt2#;7J-!H4T2B`6;;-4?> zz@Z=UFlHZzbc_Q=aRc6uc>&BWh<)%(D?i06rI*8O=@{%Eb$C8ENJGfrIAVM&aQQ}# z4T?(Iz!1EULxe0{+#$ldtPt()ScY1#gE+W_Ic=*2e^8am!rB<=1#2bnu6>H?)DNjj zE9CtU&PN&y8>0>~x)E`Q!HGlk z`_W<|$D@)O62?Or$PDO4+g!z~LH=V4O3Y@B_?V9a8Iagz7Bg))MP6aaDmYS*BYy+gKb)NCb-xaG27|+Vs$8D3h`*56z7r= zFmuGKHg3`u;j$6lydkr*w>=|lHUrlVfoL70e`bn7wfrr^v9s83CCm-)$nXZNyC#uO~YeQ)bL32Z%zQuu48$7 z@?0@t)Fq8p6Z1@L&QcV-1#!-Ki4NeoPqG7ecAoISX>?~v55)3i-@I3 zVw`|N=Qzkw2OW9pJ@eOdQ`~WqwH-#N<7^w5-E((POpj|(0Tu=6!$|oWGDm)CDMdj# zM=>P!#cokTgP_eCe<8jV!%v_bR^L-m_uAj|wEyK@ya;3>VcC@xQ(x}a6JDjP?>4J1 z3K(14@V64?wGvi!m}zLIJ!LEwfTRt^*G%YdCd?R(e!3AcB+tHF*osCg;WvYFOpA
IG9hgNaHc?`o^I_$sTFA&l}WUfs+w1q1!=aSf$GZKXtt1gIt*16L|!M-18~ zuu4vow)ll{#uLsNyK20g!K?T-DJ{y45MV^){P}#okfcDnxlcDpo2NR;2M7_p0iN5N zF@rS9h|K%-+B7ivqQ3O=QHZoE*a-N+7y zN74h7qa3sAHq;6j`JM@maBU}$ccwIq7pogaO=8k#zD%76q_1q9x)ErXH##}C)RQ7< z^KNBthE(Y{W76#SJjYDw$H9SkEK*1?ZG?y?k-+Z!(Dc8ke9mn9s=@bKvzB65KWA>% zXm@n5aGv2+?mIl$Zqe}>Ck@H{NhIh=j-A}oseJ`9e8TWNf=qCX!x6BlEtiL0%x{Fl ztv;{!1={v`&FOU!9mYv_Ee_J7gQ-z+b53JR*8AaXjDgJ}`Y!HjAOxl+v z=<52jpo=<&gd)4I##2Md56@okQZoU7mXb_61k_f{*Gr04mrxc2^i!iMPV=HBw~2xd zaQ&)02o;96Lqh~ycUjyWD3~=+l)Axs#b6AojwbOJ6574|;d9J300_mKc@l$%EzgsM z{f<$dm&|8u*moFe#ExLguqcJG8P6Ds|CKBPG`qe4wJwa=OfVJM!`XZ}?N1&C$snHf zm*aWD)i$_}jNVL~_E*dC^>qy4_4uyAkgs9((LbJ|Gc8=Qka-Q=duMk-n&GU41->4t z0vD43>$&#&0>R5@$s%AtM4H*Nm zqjWW136p3fZ=UHg1hFR~F1{<^x<_|%KeU+pHVE;_V)ixMH(H7()Ll6x#H)SVu_ok2 zDFrZ+jMJ~e6j4iBgXzSc*{IRjSNEj2Je7Dfc}Bbtl1H15)}T{x?FXG|2xM~yGEo$y zn|+T?sj?9ufqsURlpN1vG$CR9m6BBkKO)%!1rHI%Qv^*pXQme=@inB;O8Ds!Bfyh{ zbeJ}#sxgkVQ-w>u&B<~re)H3H-!Ah`m;k@+P8mEqKWSNwZgtw=ab%Omd>mFLI!8km-q6RLe-4+8YLqwEhfI=K_JIi1{UZAB-;lTIr~4y}wk;Gvhr&J?Ga z;rZrtv)0MCq@Cf}IsJ@Fnsd`Tp1uBxyF`EYy6FoX`oC^^ciQRnH9T9=*qvs?Y3xpT zFB)5o;akz!@cff#ZQO!wPoZ1+cR7jo?Y78i#6IqIBH{Tar!uSItxqOAp40i&o!^G^ zco6&(XmWV|Idr+(^Dd9XzVAqEL!WQCD?6akx80g;==A$;hK4BibFS?z=yiB@M6;(i zwH@6KPl_qr*Fe(f`ok@L(OG@I;u&$*c=07G(3!9c>GOELP7K>_><={$Y$yH;BX<55s3g|21atkuy_l>gsBppHpp*XcI_Aod&vEcyHL6qB+thdx zaDha%_j+L*c-VJ-SYKWUb1`*RHBuc)^22NvMc8u49*rh=0%fc|!e`T{tCQAppg)14urIaxj?%VFny&mTHo#((TT4QpExq z3uX$nlnx+na2@E&-0YDo|fuqai-hQ#NkuuaQP(U{9@+1OO7hW9+Gtzmu@y@Ij2#(k9; zCxUOm&!}XlTfQ$GwT9yaeJx#k3bJQ_(=eH+SOFMEq3+xO#q9MQ?6fn}H>2gRxh4U+ zIj3!{g+`Xacsh-TV{Uwj^qqbL4UA4iQMZG3z57wLsa>!3d~b^XgoMBq0+!)I%ap(9 z$dI*jHXBAel75(k>!uHNg;U#5k;<^_d6+=T`jcFY_0 z@pgm)8Ms56mNF3Aa9U8Y(6qcg7oL_^B$0u<#6#v=ex#*88nVk%M$ie{Z-mtl*pB_? zel==rdB(oqM8F=U9W(!hW5BWws6!2_HRiuK@PtmXe>0wyq4b z%RdiwuZbg7A=D~FszPK}5JvtlN2*OERpKGz*BNg<9t!se?hEl9dg=i&aTO0B*_BbK z#|sD>#66Lo1AFCtB=z7TMiWH1Uu0AV^XS8Ra>g5122DR<&DjbmipBEwDEY?9Rdm|u z%5p_`!9$}VnMen~G-G=z1-3`;=1A4Z*dKEw%KTsV3+xja9WVQn1c_@2VmykbGVzVk zky?_~h z{T+IsuvW&iv2t$b7T7?VA9M1BSiPfvqQ^?`O`KMOKcL^r0lKa-_-nOX7uG`H_a|^; zL1B*R`&Sd5e|(uD?zeK#X>z}njTcHh;@*)j{?%Mrj6gJL?kq~qzFT*ef~ps5jK(4S zqq?-Hsr~s~T2h@;b}_KOJX&N+e|faXrhXTXmV@R2-KX7yE$>tNSlA|ba8WrgrF=~| zE8A=+cdjON*Ke+nQR<5_jr-G`-d{;=fade=^>cSyb-ibVg8< zzqgs9Y--o0ie|&xxd8?K;5eLC73vQ1xpUCYAHiwCA_f0)T*#-(x7>^EX!F0^7mVjm z=)Q1(=_A+`8+p=5Y<0t?`6hUYqaY|iAc;*&Ba*c5se}?=t8YLonJsTaFsT-HK{R!n z)g}^6&1SXL-Wk!9I>`GIPVoFwh$jS`0tD3mYU}cQ5mWGdGlD9w@$G#;0uNyYPYQ7r zc~RCM#R@%-$b#p8yCr%K!L{Yy?1<>vc7y)1NBoDyxor^XG#y};l_+dCtDOx_6GI&ABxM(ltkT9RPGtmS zNCXQQNh~M)R!hr8O*|{c+1uXh_pd%`DZepa6rR03O4}|{pDzcP*Ze_EW@gq?N*6#B z`k3%=@x1W7<*J2%t3d?2`t`wZJQ@YH+BG}x{YR7W)uUgN)$oz%5PxBZ^_$=-`}>12 zVvx9fH5|2uqwa7R*XsxEM*Aur9yAAyW)Lp`Vb$?Xs2YSng72@x0` zRKOzj?X!QcosS3cEQxC`hmb~SG>(^t!B20`Yf-&X3EsR5o?bkwHEXAn{yHH{{%DV3 zfxX~#zPMXL-tM3@CQ3 zdjZAp&g_rabq04w0)C= zdvQ5!f#p#hi?mL05n@<(OuyCT2u~6$N8c7$9V|q;#KfEU+Ci z-Z+^JA;eG02U9Bl}F82n-llmHh8jLvf>LnMda%?a!4pPyshTmQTpmVS#xYrLYYhyv+V~-@CLqdcyR;(sdtmRnv7OTd8 zuSSa=+mF-Jm5~ugWI3^iIZPYpv-z6!w&iGxR^D#R*ULB_-W}p@w_9jM)owRjdN9diDuBHVeuM zOS3t4JcPxe6b0~M;#Wv}&fC_A^ga0(RODoE9|V+?2jm%}h#BLuWSr6D&P>5Ap4>{> z=#81Yesg(t7(9&!NIA?YPa_*^AJTd;=0#2;Sxm+&v}^YlG}tbW2JHV^ zol$30c$c*%^LAS+`q!@Cd)McSRTfQ+pT$`ocBB4aH0*bxQN13@>d)~Wz@$T&6#hdeI42+@{jg9e$oQn(| z`wO!%-c7IOljH}sdMD5G9ZS-->J>J9{ZH5$eclntxp_-d^j zQnE+b{U;9(ndnD*wY?$_abTTl1mep5YO_s{!3N*pG=2 zwSGQ(`O}L_(4ess$k0GJD3w6GP-cH~zJv|!=NL{CmV$^_1*(_eM_L2H-{J`^28e!_ zeIlbLxCs5hV9nm#ZL$vC-L8Xw8DN}+)*)>f+y-DBl9+=8D|viOa8V7`6UJDxpST$0 
zD*U@I0Q=wINB_`+!_7d0FA4s$X_L}n7O4!Y46BA01zWYxV}{EaxWJ)eC-@(KKS@C9JZKGI3_XhO43aJhv&Cc^tPV_)3`sQND2?1D_lVZ9?ED93|BLT z_=hBjMk@3rBlhI&-jI-7W;_sT}M>tWYHe#go0B_SOtq zX@^AMPjWi_@KH+DXIEzvb_p4SSj|`c2|w30tw|E>&2MKwp zW>En`Z<3qwi0x8kb+?Ea3Jv1fdK&zO#t*&5VopB1#|<)v=!Mto{*s-GI36B{!B=`o zgbf4zUW}1I)W9B8?mM!o(*?c=O{@VbjrCMUDR6^qm?MB}@vykZe_vOr8xdAR46UpW zhcPDF7g{imGq&x?6b=>cHYl6NmR_=B+7KV$h?Ekx5<&9VY>d6BR17meg17_(Z6Q1U zCHb4-5;02^^?`v!u9B3MtdV*ox+vos&r^8mO8mX)_Mp;H9TY7Bv=Iv@(P1`)ZHp4i? z9Xp$*RKwT|wOM|84`bE;sx3}eu5KXz{D=ohhqN=&1W{(LR8_Y z7=VZhjqPerjOhx|0(&jyT~P>7pU@#%N9ZyJY8ts*hJKPMAgb|fv0j-0ItnhId65~b zrPcoXIFJJ|#__P4(z2z^NCO;dFU{&#O*%OK}3-aqfrn;vS-9P4@5g7s)L?F)<#Qcv76p%X>fk5uK|SzBneA;95tyo5^u zt_Wl*iOm2d-r)xQ6k1vBNE+o~m4p;G9-ec6_1f0535Zdckd2TDH2!>QEtAiLRFVm- z4WGfExyf9l>Dyp?%B(1!K`_SUVcsCPTP14jLlmiP@MKz7^U(<0SucoYvED+pK%GP zB$@6+V0wm0av_6lG*dw zC%mfxv(Kp40W=>0;0%SPQ4P9YbKd(kS+y8DdqW*h$KL`kfq#Auo0NQjhR-V)oRJ%! zN3Cku00W>__2Aw|$a8#-CI1ZLc=P-e2ZB?4Ga6MKA67g;=KX;Cq>c!lrubG;66~(z zR~7fTlAByb9SX?l*?P+Zm);VVrotdnovtT{kWHwu;)Z#SlOJK55Kv_I!-xxWqIR3C zwDwyZ(b?oa19;Ir%0*l=esONAnzTtdpr|1W@rnGl*YV3M$@{H?()dNZ6dizB##}8K zS*xgJ1K|?gRKCLqdm?2&Y^jtR&z`l8VCw!BO7Syi*GwtR+L3K)r`1>zEXejCqm`Oh zDjDd_t+nP|ByF_z2LT2j)*nBjD&`!t zNo64VQidn+s@Pw=2)HN73?i}xS26qJx{vv8Y_v-IQ9BMc8;^xakw4`-6Wp^cIJECy zpTGRa+4+BzWTQ{?oiCU5-n0)^Q*v^mh(Di%E;N;*4LsNT5h34$mT7${&$ZlZY`)|> zx(X+Iu(Ziv%hMzngs10kPXBT7@;}c&Bdjyr9x78O;LFbB_?BqH($-&ug8n%U2L0I& zD-30~UXdY~8Wk-k06U0O6F3*{C^>lTVRUA|`(sevLZghpCzlWs*^p!g>=vNOX@6A@ zE`SV3R&jrb(fMRk1LUbzL5RpqMJ#lCXD2_b%P@s#WeL6D1tOcf#j|lmG9>|&buH#H zDgDpEtbj57=8|l>9F?t?Lsqqx1#Z$6L{(-@(Nyrr7hD|K>PryWVLY1wI_AlBN?RX- zCmFzaq}@R>QI!d>4itKHnicOs@(;c$C>tGk@t}4Os!=PbG{dMGwZF|lHRM9qF-5bX z)`W2LEvf$!9uATSa`>CPp%>u($pblvw z!&;LZQJWS1p+D*Ixa^e(r~Yrk$?gz{Q_!ST)p4PCm%`YVoxZ+ko0=tM;EXZt5b9f< zGE}9qIhgjVWzw4ocseBCwZ~C481+BTmkAf>@ZLyarU&{=7BO4Q661o{b^%Eb?yjJ` z91q68a+SHm&ht0#PS1KT*}eAqtao|#>h1Z-!xW)6ZgAFjGNBjgCb*b6jbd9G`7pK!lT-QTqa0ncBZ zT%Nu#McsNChSx=*vR9&-b_sK70cfQY1{G5Vpu;%|8sH*G;dRaXng*>Cf|a0Dg1jYS z1yYR_96{+u!(<25KhR3Sfm*C~(P6__V!_z)Q-|260E>cO1%pu%5aR6z#^W->Is;+R zu%>7U|JG?(eh)RHHWG}@)&cx0*!A4QZC)7xSyW6MII0B0tUl}IlHGOllg6+ zZn?Dk&}Qtn!O(njB$9SUMx#lJ4X3ifa6C~;1S`+0JU-@vEWJ$DnKq&*x)=gJqxnud z!t7D){SHqqZ*TYIGWI&Py6&vjZ_L}oA=7HFhgM><{=^E0vEk@Q5Ur1ie6ZZCEnfyAllN8V3wDAj8 zWzyhFyNV|aS1}yLz_%S_Sof;>#cR7C= zgW+P%5FtMdj&Hh{kijcM(do2rB@gKQ@i069w=;Ol=+S^RUus+j&YVIU!dO}Z=|GoK zAqC(vN)xc{8rHHpG~g_wpby5R%d%RY|Hs~&Kemk{38Ozx{tC9|bw;ud>-Ozw$4BCA zA9=RZ$Cu5gr6kJcMiRB66vsWC_qV?~01zNSiISbn^lWo?Iu;2Ou0o+ur*=yt(uht1 zp`2eqE#XLcsS(e_ogtiwi`=vg-E-(7kQ0SV2W(g)*&Ax~>g_E~ty7YAo-Il+RA{wC zUW6=^Ha_&o3}CD1tTAHCc)s7yoE$9G4jTIKsSAIS1VF6K_p`MHe3}4N-9^(c@`H_>6tTEEKIQ>CR(-Dzior4(4?^p6N+9KY-P$sIyRZPWsMN_SV-tXD5 z64~@2TB)qqn8t+br!^DV&1QpgQf_W;G_wWgJ#w~*LS}R08wcUBPqS~9#T*y}sB}OvW}`2WZvQfvdOODT z*>StmnL8LfQWQrFqoX8kt~_$ZoaQ=SpQ8QFI#Y5a8{I`Z5yg-%(F&XFxNS3S}kJ1~miX@m#L-hGHw6h}EC1!v@ z!?5k`mT5rpjF_jGLfi`L>MNPTJHP+#<-*Mn?@|o=oqvphn%M)z}PYE5X@H*_q7jK`g#mQqY zK8T_f$%^s4YBg2UVF}e^l4vGorSR;tf^9D|E6Fo8K9bC{q97!3O%i__Bn>xg75M;) zFO@cR*(y;q-l&V(-pT9g7B&T|R$3H>vs&5OWHpZ^ZX_&_isd>l0-sz>f9MW?LZYvd z4C%IOmY`!oP_A8>k(@z$LqM3^OT99M1x90}pr13_ASO)t3<0pR4BUk*1cuY@4Q22! 
z=kLyX{MC;0@p3j8Tt^|sb`5AIt=(6R#7M3BAQVL8Dqsv4b)}cyXJ&(e-)2#&Y&7>$ z9+)a&wNGv2)f?zZ$sbFFY_+nz#Tl&86HjwOSbpKma~^euBs%7OhK@rjzBnvJKsz!n zIWpB+L-+H=x#ZvgTjONd!p<3$uT(^BD}(?KwDcQ8OEKP(GNAW&9|7|w+(+h{PUz29 z>au_|qYDd)f9@ZMi1rl7#>4P=^R5#3L}wR|zNn2uTT?xpxQ!o;PlImQVV#Eb`^S>g zAg&&Zr(v#MI&B)5wx&&kC1(;}L=cUV5~q|zZ92Uu5Bw=pVg4f}{fbd#$Q7v7l!Qk{ zd8WJ3GN+`BNv#v3()wd8qP`n*-_H)K4MK`6PlJ>ZZ^?8o~pH4v5KL$ zs%QbyeBwC~f1uubIIBODrICvnEJ_%H;|dkUiW2H_=?BdE(D8(&Pj5Eiq&SlYcY=mL z?r3BqS@?WOz9Nfv>h)!ZhVRH9dl7c4mli8{`6P^Cw0$gUr|scH^%xm2t6L{Ds_sz= zg+$f#O+ho@>>}@kRt8v~eiRDM+!RYOW4yQeN}6^;Ds>4!Eh@{rxcO@;p;@XSraW^S zN^3_~mx&FmS>}_)4F4`@9hn18?dna~)yPin2qNoEm9raTG%&U0T7S`*BOe-^7MjH9 zto4}50)?`7B|xQCS36jcQTTgK`a@Nk31Tppg4!$0A)iKtY$t(HMOoSC{05qgKVXxZt%47*<1?&debeLMvonG6QolSkC=|4m3neG1w0cOJM7E|K71|+Orkow z4r|2r<%;l&Qc1e${xRwn(EqV2*r=)SUi4 z8TF#Ke1xUqg*4Eif#VL5_2kZ4cmNG;b%*wAfLSv#k25PTcWPSdF6gn_`$SUteI9oq zO{t8J!Kz_(zsIikKsOjM$gF?reE*v-5m(`ICIl9FYDx}Y2#t6q#1KWlhm696XY~qN zZ33Rvn@_&h#~*8&Q|c?L3q_X;Jb%I*H!X{3swF9%(>z3(5h#$tD>2e^fVQk>1}HW# zj03b{)sj6F$unnMv9ioLzb1ZV%=;}G^CiT6=zudKeH z_iT~OL4ebtpa{K4FrfKK@GuvY6Sr4?VV~2Kjmk^c+Tx|#Zfb{1MhK23uu9O0S}cdt zbU40Frg1VXa~(6V2QMtJQ~}B5048}pNHvbiz2J#uq~nt#f5g&=Whz}{-1z8uW4Oai zC&Bnp3reg8ibmQ~C54mYllu%i%84h+6qBN~FUOiJ5T&Lo2+f}=%4@S-qey?%Ms*wi zSg^5ZscN}|Y6x|aDzT|%Ux}@ulG5z497r-Olf6AHK+|s&p-GF&q?W2y!hP$=+$uY2 zRrrIae`4w)Hi>*q1R+!ufuLc?nt1385!OiahEcArxRr@spkb;rK|$BwjF>UB{rzcK z)i|Vl9#zp^G1{@fIl;9tbkL>zky7fiza0@A?4^p-5c!z;BPW>jINWu%ZWOY1L*Z{) zALNuC!Q#{9WIA{(O26PyL=sVmhV;*!x|k3X$PFj2qhJ+KW)QeNucuB0a@q8bS2LVQC@4IM~Z=NiUW8A5%hwy@wh!WwlC0;>foRgY^K8s`xwOVJZRoiNA zb==Kzx$nB0^?tw6snmO!lQk?z#lc1XOu?~23XaGHd*goAZ%04PR2*}PiB;a(?$lb_ zZmUu4!YcQwn>DZA?00(GZptdp0~uTGc}02Ekp<?M!$Rh7J_tWV(+}YRw%(|a-%CI&YN008PvAtQ};E~cm#W;Dv zwmBF~g4=5=5b2o!z$AIWvHz zct7;0AV28#5-7F-E?+>rC+t5i^KM`$MGnW^K0mws@$B+-`|9%iS37i!#F*Amx|+4e z6aN#;Lux4^WzsDXOBK=v9SuWXgOwv^QS$b)(e!rW`j}Sb|9F!iY&C@6_meUG1)>D9 zf>U=i4QV|ab;|64L0m;<55A4A9r4{1P_fJ5V??4j{PP#~3i+ibH%TZnyoGSS5(o9& znwIRKgy}+PbSIaGug*`}hc_4PU$$E9R-^su;;e>e?D4OchZnG3_#1OMXe${j-9Eg% zy}ohi&)dW6*C)61=IqtG+Y>rEe#Yn(Iy@x4-Uu1jUXgOoX#Jje%mrh39gGNqbvr{J z>(CYrXX9}&L4Pk8;sn;lM-CzkFdrUnKeyLoB985_GZd8MgexBJa_SF-ypeh7p&JiE z840XE{1z=&!isBNt4-Fu4qrF=ne^%+~4Q&F$iO;&dHZCVD=2ZIvB;_nrK5 zdvbl*J~H281IH&v=dmJ;AjXQJ&E5ix5iwtLZSGE+Qg+1W#xXj7d2({hrBB~o9^IZ@ zT{=}c6$?vN;u!ti3Oy_(TLD(`@ZI^X-ldXlb?k*JSeSZMdM*laLKNV_9l3WDApFIX z4_MY`Y*o;B3_S~~L~5R=w=~&5xf35VYy3QMfqkEZ&(UR-y+DXwa_hJX$BKn{hj-ay zN{4zn%2SRUXT66hP(=m!n?zmNl&aCgk{X8By#YplSpcGatowux&}eTtf-e8d_H)TS zW^VH1*C&^`{wTj(o6+;1!f35p+(uKwCWZVIH=I>rCYpFAKnA$b$V%KF5lXB(Hk2^Z zUeg*I;Oxrq__FFNBtY@6A_XPFQ!G#5?90C-xc$G&K6Vy<7t^L=oYB`lG-8l>p^vZPlXkLPirRca06)bD**YPVbozG%VXWx$&xX z+IZDXvGJaJchf(A?v<71P7Rh*vSGJ(=havMP7Rj9QMXrymx7I*p!c{mDn{T?e=Jmu zY*bj^D%SK6LtOdT3%j%^z)44Ma`BT)q!%$0AoZp_85(B6MFn`Ql8Lzp8gI(SH*}xP zp?*bTgg>UB<@O4gr#`(BbuW5NW+bm3r)X#3kKb>85bA7+mK;JG?w5GEqmUl^QI)?t zTO12m3mPOpVye1|&^#{zEEa+&2F*!8cc?@5+f>NY;&Z_x7WB zKB`^k5NnoT$+9JrLDG@$kh|%J1+nXX}2~|xK?J_Rm z4@lvll@gM24j>dxVZ?On#_aLN^x;4{9>(xu7-E9Z%2b1fKZRUE%#;u5F6Dh7QBw;RH^P7lrs2q;%p_bf=;-J$|FYlIO6&@H%CO zvCdX^Ld<{0mar6f81}D7K!bA7aA5vi3)1O@7$z`v-Fq%Z2JBHKtIU){l=<90tf zl-psbqO*yh;vEu*2~0}uN0N4M1=W$N-VpMAavEI^WnvtR^lXJ!%?Y`s%>@>RPh_mB>vhmKr= zBebO!?Ez-rIr|nKML;bR&4fZ?AH)7dHhgQ-bees(^^!I`dc>`OJpgiSB?6@|`~pvy zZq$L)lH(a7T0_;Ias~!$4GG&mW9VC<%ZOC;NCB~bxIu;MFm@D1hVw=S5M-Qi`07mQ zgz+BtT@G0XKRkzHZGc9pzB|g1i|%*$pvfcBhWH2i2E75MQkNmV8$rpwAFf$*b;1oC^D#XK7-cy+S*R8=|8zVh?|Dh~> z7HU+wBO)2tK`fXLR|>uqgk^(v@V4L!0-yt)d&?6G9y(6V zly6kgT}ne6U=L~f@ZK9?@x{$6&fz3(o98#L$|Bd*rpR@*C8B44J@T^dP>4ghJB|{n 
z=Nu1_(tk)!Q?E{Xc&Oyu15yp~0!Mev2k&p6Jndeq?;x6Fo_+#e`5>4go%{%@g}YT+ z?rT-5bE{NsRcdpq)NEDibF0)pD8WR^SacF-jwfENy0BW+R;{+MTFq9izOWjNEkg6N zK2rxpHjCnN8>NgzMChRQzYoMT&@p71)=ta21$W&aNr^;JFzumthxfq~Bwi_E#uXZucc^7r*P_V(B#rHfrT`0w%pacQ zc=|}Wl${te{114vX|OrD+!jeSYMaud{JG~kA-tY*nrfSatZ7KDLPj8ZYh^|5pjFy@ zNAh?uM4_!lwDrUcNOB}A>Y~^bE1{f}iE3Ltg0@(MT3FoCn_F8)3rluX4L8^ZO%| zWRXz}1^31~$i>_1lbf6NFEx>psm`e|Dl&IkTo}jFwNd94OvWz%z{}mPBcy02gXu3- z!M>rr0bEfzjj*`QMv2>74Xs8S4)zA?6}Wg&Z%Anvu+niuvsbs>T@6qE;|5Nh=)xZI zvM|-6`&!s0Dh1H-Bx7^0`u}RQ3P8J3lvd3p7_>M3$3nx(3#p)#4m6D?T*yXjb~Ohqj*Y$vW}Zi&t0Y zKOLUGONC%1e@2hBvD;+{3>-2M#hd8m9-ve%DXo%nEI#4Nd1^bh0wv$o9&aoMQp>!rb#K#RmdqoLjUhEqX;_hr%OxP`AWL`@3} zC+RnzVO?u%gglfoK`lrWz8-aM2Po#y0R`$U(-HI8CQ|balcu$YH#aBOxAHuR zB${-lh__KAYPU(tR}s7`+8BD|uIMe)Llog#DvJz99?Hwf)yM z$KSjO$rT6aNmMQ~yCI~BBplmU6T^NMw6ypiO3beMld@A8i%SQ<{eA>?r|7&?3j%-3 zHA`#)szcr%Kf|ADsIm9?BIBV9y<&Z(o(|N0c|hC|b%pIy@fwY%~c51NhvqejW*@FAh@@ zQIC?WSTKUQQ+$XKrt%%_Q4NVA1z<%+90h`m$1##Sus4lbmGg3cwbkRKtWNmgW55se zR_!?fuZ)P23^!2BT;K0|q@wmm$i0#z=;7W+jc%xbQ9wXwq9^op?v(sH{O)YvPAC!R zoHfNYd5EnkvmpD%CL=D@D5WrPUtehrlRg(kM zC3aI&W+=xLIYHM_jZZMx0shT>f(52@Ho)7B`8Fz_NHZYFBH&^r2aO51`S{Hq>zBqH z48^$P%X>slD=5P-YN=Lr%+VCG2{UP$sjV%XsaBYx3DhuCmD9iHw6X(qq^b}#fQ?c(9sr5Qq?IUaA*XJ= z??-P=j(%*P=4hYkBF&e$7l1|JkTLLwP;2BJJ%Y&m>NQ!tCac$$sa}`W>#};Cs#Dq* z;A`t!oII%7sKrj1EU1CU&RjbH+DoQuN2IsRkv>nvo2M?EpS^0oIXgZ+xdbx4a-*o0PHWV( zG_kb$q9A6bpk%W)|Ea>#9qMN2&Wpl7M6%#kgN5%6$vjvr$<%vgNyc@nErpN5hJ~;U zB`@kzUP{UhForovBxAR9L8N*c2nJOM;)Fq3l#L!eW!bb#x6>R1U3cJoLgychN}=eg zykB<5k3?bcniB~f)lCL}sGx^>9p0w-+)E#?H!r1hP*vpE%E_kCfZ?SSXRFcymo_9( zRzaI7r!_5G6aZrGc6Z6NSkeTsc5>F#(V}GbRt8gn6mxt??K0C0NY#zF1mSi_^3F?= z0Ynu=FgZGfCa7fzNS8q ziYmS6muNp~Lq3A-Gn;y1>Wp(a)?rMCK$f8x&6D)v<4Mq)VOWl)GCUR6D$_E@0ArX% z2M8=AfvVsxX~-qET+KThPIGrrFNBsgbY^4R5!EWIVAr8n`%bgTzxJF>5-W!>_l9+? ztL6B&c|G?NJ!jL3lAL+y`)0E5@*3y%y}=`T(>e@C4JVW)8Ssrl@pdo}Lg;kwz3#_w zL7zmIFg+p9Y*F+xjxt8IP)X7lz&cHOYS+;U^O5_*S4TD?*b^v(lDu$=s{WwQr8v;Y zATjgsJ(x|z0FhQ;3^8^pMvEz*HA(>rCFCri_whhz*I|MZAmfu+6n9^8aK1;+NSI)< zwdPL4J`0p(^}MwYBzfj~1vzQ9i8}d=);dXhyFiQ&U7fpw0Oc)g`hW)6i58jF=EiCj zWVRWvlKwdzK;Dn39hvG@>C_KQ;+e2C$CyWC+wGD0S8|((P)INePh|JB*>JOMlWS1V z1S}XfEyP<1CfjEqb~+| z&3Z&+?XR7%oBg#D_~>OXu+Z^il0X1~lw02)vDLqDBWQ^JCFu_`g@*~57MkSpA*%gyIXkxQu%y(Y6J5n71!_~TTL=8nd;swX-l@s#79%so&Gjg zpKVjN+aU=Rg{Zl#hi0w_@ zOWLJ@oGVa#419apeudEQ4lkFyLdeSlUL-PHA)?#E%j6Z}i{`!Pl;LiQ2LY50V;!*cF8KV$1=lt_gpDCEG>EA&++{|~_%R@Y8}1he5G0w<3zgukX&MQ#VX z^&Jp?yUq17GQ^A>Pg!$q8kO#6;ryAdPRmVTVYvxZE*NSW3-M{(r)ivLqppH|%i!Kf zQkBs?|B`fGVcXKuKbi;GLEc2_U1XVeC3$RRrW*0ux(@(*BN(QN3}aolSfo1xoK+v# zO1rh)z=-Kv)y?J96pE-(3+#38v(>5UIGkO6cu)4^lBt1P2i)KJ@?p_Qn)g{YofKvR zQngU1H;n0aY=MxH&wqTMBB<>B_+@kF35|LHCRwI1f&1cxzU0$C34Z8WVoPZ zA=pp7rMqAa805T&!iHqidgifC(!6&uIxMGLv5qmba>Xzhdj?36?U&{JFd|JU=IwMO z&Wr5or_|HQ;7GEA)$|ee&``>OQ-s}#+$2*Y^Jrp&6$VNXQ81RwcyA~;#O zn7=w4$!+!Kmf}0)?0#pNUlDr{c(Q!rzPr-G%LCaE!obvPVs>TU8+W9N);6=Qk>ShG zc=$^Tf@8!XTpVpO5t%Px-}+UMA?o^aEiD((DcWSqpuuyXK%_@!YhJ^iHxM=o`n8%1Vee0 zhF&xaR(o3*AO;%_Q6@QiQA$vur&TvMP&I;JZ|*0Q?h)bG7#? 
zoc$^8mF5@~p-Y+?_qq$HoyC1J1^9AsGf=;8MMW20SyqADmD#b}vBo-hc-1b(Jl5td8X6O!OpNQTvV!PBRAF?2^=x|z#q#5kC zhH>!8r-)}5UTRVuD@ZFaRa*l&m!=AFCPnOLFBfzs;NRyyy8X{vPt}~}FI04d6GVF| z|GJv^cm7E4-O&9ke6Z${*17PHxUX5-SUdylC5Sp$%m_Jv%dX*ADD^?2F(8OWv*c>R zQMenGVzq(cfNPC3KUc#RS|h5pRFBEj0W9*QT(BFT>d9LB>Yl8%`Myx8=tw>=;zGL9H!bu*7>Ihyt~ z2+ol-rFgfpmmpCd&AT;vE4^F4k#nowoea*RRDUUVQA(rYS^Pz#56e2Z{)_Z^H78er z5^OIDXFVo>Q|Y}Mln|p@ugVyY@k@Mdp;whf)8%ONO;=|XtnOm-nn<{dJ(suG;&JK3 zCf!@CTHC5{=IU0hXjL3$Ye}O$($ir91yWU$5k8e*fns@TarU0mC(Qu*ReaJ`?dd$2 zt9JBH-?^21=c*VDqK+h{(bz6lH{+*dmW}dlz2{PT@h|8-H;=IV^StNgtm4=6o=a); zDc*A#j6PEh3Kz^l?n#b$^Tztk9P^?O?a{mF_MGOkmndO#-Frs2yoq-Ql*yZ<8P)!4 z`Q-&1S&V&Ls&ZBX5o(v-){M8c57Hk-f2#%E6%LPoy8UU7)$NZaF2#KQ8+-TBBp@H) zG)2f4c=K^K=8*nYCy|7+4nxvwr#a~XC~`w$w;}0In1IKXYV5(seq-ovxX4Q=a#|D3 z3qYF@AmIdssWu|F#&nNBgy#;z!1*`=MvROmwQLz$)(+*!ApIU;yQCK6}(=(xR4E;_)$ltn>mCX~__=VulPOS=xcO|(F@{pb?#CK=ooG%fxCqtCZW5sGy(yeG z1FQ4q7n`eJif68i;c3vYIqsrMlTPlGi5X{y6m|v-l~*SqF(JJc&(!QO0$E?mG z?5Xot+BB-^BtGFD>tjH63gx}D<7ZAe$>w+G`CM8YK!Lw~;P6`0AXw4`wECwD=)9f# zrwgd;K~@*gVBrDZ_a}hD^>6A6TK^`#pz(`@{Iqo~5dj+Hz|VfCvfgQnv0v*Tb&@P@fgqL$UD&sm6-7a_I)^-Oaw zyOU|k1<_qF)7EKi#5a|7USH~A;h)`amwV!7Sl~F3#Yd$4A?gmt0D?(K2>~$emS6zL!;%x5=&BwJeUE3lKOjcHjh`6|40Wz|cQR8Fiv91r z5&yt!jH)M5t2Qtj$(O6pnt9lb&vTNQY*>!ZQkafUHf+b|ixt-lH+0E&bJ8h7wJ6h~ z%t^yIL{78u2UxxSR3t@e!m5fh3p8*Z58LQZ{(|;N!OIkS9=ryWy45s4aXfI#eQ@?c@w|y3)agY zk-&s6%u+LJ?Kf@_sr_u!TE*=urozS8__bxs15Cf-C5$Yx`wTC1a<%vEPbb$!TH=+R zTo>#1zO|F<9Q1q+C)Y*T|IALV&rRo^&B-;30{h?I$@STgVWE@j0wa%qij(USC=i*u z6f$*UC6BFneD5E|V=F!KgME1UIex6Qe;-G~>PjBNDeSb*+8!T*SMUP=8!}HA? zZmj&ul2D%F;YMYRjh^CqR-5C3ymTNIyUTKxxmW<@59aWeal1U-|M`D8SxUX7tnUB( z-0K;0GF))w`iefpX>|2ZAL6G|*!i^8>ejmiZG9?L{o~WqjQe~=2jZ1sXL2B3dEfn! z9EcN?Yh?#w`}w!fm$<6?5?7nmtzrY+MmKBqVr#D3!Ky?Ndh%}MfAS`w%gJyVB!#i{ z23~g>Oh~JY1=klp8*}a+#u=aN)6o5Vyj1YMXF{6gID?FiP2|8KB?l=>o!2E308wxV zGPc}evK83Hl+|if42xHQ4GDH5G;BkrbHAl z;MN}kw~9Ym16OOd_|+H39oz-&Dg0oyUKs{Y=F;UEL)^IR2%>5%e$L>A#Z_(z z`ic+JCvHR2{7Z$jXj`MP_$-e6vntQNEO}jK2-OTjs6E>dEJ{St6&q>I(CU0NpWmf9 z)b^__B2!pw;>&y?{a2JFJr5)FEE4*a#7AkQi9Rk*n)qaC){2{GO0v04f23V#pJ8Fl zovB`1iHrK0db4`+9wCfafVc}+GHWcoj8tstxvFNEt7o7m-+HcqT*gP0LN342@OGYQ zcsuEI>W`1&%vj>{MX1R$37*nIaTu)pLeiaXzN$<1pDYJ^dQW0ReT4`;nRq$^uH853 z_diRJwNO<4^d?p!wOQxqYYWLT%+FJsh>_QCDS%_sz;7#fv&`t%9wyJQGykagifA9* z>mBK=nu7gVV(KriCeA_{mr}zn?&LqIe}9ICcK-D>yDMc1$f((U0&HEg8wD3j6T|&0 zu}RBlCjK|FNlzC{KQScvzc`!pbfv))EiS$)oAgYh`^Kt}rP-wCn&G9`q-RBYP5qHZ z=5*!9zbYTJjE?4;aYA21Lb+LM6{}6BP~B>^Fk*YU$<4~^w#-=((Zg(#%q7$$Pqc5+ z_j0xdS8wZ^2sW2c&wQJydj@@w{e;ol%%T>0j?vCLXudg)K1~;vZN}%FIA42@#3ZAo z7Z@tfDV6Tf*Lq{%cQ$?tr@f7+3-r6)>-pVjx$ATm7J0#8OnRM8cXPYC-Se9Da=Bjd z`mLr{bvw|25jmC>osg$9`7llG0ORGeni%6Ww7V<@5W5c<5d;Cl3-0xyQx6)b9 z8%@#Od9vfYetTZ3l`943>e_jAb6l#Ijt1@wMg1G=fQuY+b%G)B;R2m^esxD9cr_TH zgZe4VZ6^pm7M-(E7bAtlOJ8_X*Lgc|r~P0u47nHwEbI6Eflu)WUtk!L!@h(@(6r*p(5mj!EDm?C*kk&c+>8LxH!3fvN9)u4Z5WPw% zL+Ujn#gX7}Xm%;U`Z3gr*k^iA?|>shaGJJOkAWg9yqaNvPoRTXl>SSCoG9)BJ_M+A zAirW*B=M&>(M16aN(5WD46{HC8pkNsIJSH)_h!~V5AO2omCr!F;Hio@{9O#1&s%Xo zA%|(JclY%;PcF^8i4%+qQ4_e_g@Nan5W?W_UdjU_Sl@sgT>eQ>7o89fkX0=)_H@%u$ z_xhFkX1$hmy>*z27F{p^8EpJ8h*4IZA3>vb^b9M_W)<*ubzMB;T`XnU*5DD^QTwi%9ss>75exV4ry;P z1*~buCvy&0r!5iQIIR^0pQn|?)A2Yu6&Ex|-80*YzP`ebsz**5-eOceqYoIB6JFL; zrcL&b+pJ;2ufk@7^M2uu&$e5f2y>2)&yH@}Zw_zX+??EEA~fK@4v*WXhex+p*X^UL zcbD+?-TAH4h!AFeotv}&bHY{XJPEh=9>(Z?2v`#8j)+v@gy3|l;u~6H6brNOqW2v| z?!qwsB$IYrxakM(w7hgo1Ac(a&L$HIT3+OU*+b98@b^xoyxG*-UA={ypoq*Eg~P-K zqhW1`z+cgzDT)gA1Ou>ofBS^d*}I;HVLexE9%;Egek}G2!7nA*;;-JzGB)Ua}4V6?j4 z+1)6Zct@m5nGI!?URx+y)EA@X#Oq(o22+1L@aVJ%WxdI;2mS5e0LqF_;s(X_L}Vwp 
zcOQlvo?2KGB}Pu-1RQ-#nxG9EXRuML7OOCYjasAFsKl?LeVns5o(o(b-sW-(lELPQ zgH%BkM}tR_ymE+p$m>>+v()L zkRwHp9Ip$z(ZNj4Ua!b4%jW=#i%wB_O#Bd{)~d)cZt>j%5ES5PqjK&L_g;}0@#ulB zyCcU#8yaGWix_6Ndr$dVIzXO)D?!efhz!rXFu2JR>05t3SjN)8ef%_9owi8x(#yry zZQbE%GnI;##S`o%FH_qS>y_1tDkK{!d<@g3p+7pqWEVox#PSVH8VDn#59bv0Iwu-a z7DIX_TiO38i|snU%FFzen7uZfPJBX$SZs5#F;oKEUO>g^CWB{6kI6YhM6@8Zy$eL7 zb&8q|WRczic@_w%kO)8q*e^`60|fJt(u~x!R68i7aR;W0nwW8C&zK)2e$ihFNzC=h z?YrwsmOZu4P7wmjY>JMEmQ!@r{1MR{lx8uhTa)4!%87A)^*YytV)M{vC%$6e|29); zYx(C|(I}(7Mrm2}As(O}S&Kjy#ax|w-TRS`nQl?sHXHPCt`Tee9uivIC_?L)mlNzy z{}xix8Zl6g>#heOUu~YCH(Ei$8t(o6CpAs=x=deVLc^v7L$o-K>7SQ2~)#BDZP#o zd3NvJiE|HUGNvJnLiV4Z)N46>Eoj-r;p?-b%Xb&APOekZyFo*^A|(BcmCyz343mf^ zcQsxZ*IPw>SKY{~)z*6Aq~(r4vgA656%NsFz{-J#7|%JJ{l36|C;aG-$H)eVa<(Y^ z^XH^}^W)jux5RqEHU9e6=4Z0h_~jhrEOYSE-tfiY&5yAON^~nNnSj2{b8sQ(`=8sh z5$4Gn#kYawS>p^lydel#n6e+%^nrho|4Zo0O$si9cbdWLpZ#dV++L6K4z~wj zm7euLi%>tekh)3D6;MJGNdT*?`y}tEko9)#qKb2oYkv`Pk$|#MDK@s9LbJ75Y}S&> zIZ6#LcO0FW#0u*nuTxlIhp8Mohwq!jA*W?LMb^nFa7RzC4=+y6j`5-+wRZ6nEeGK8 zISE(PQ$u@ma(H}lP4(e;5~Wg{Z;5!QYZ2@ph4MmC#qRAXVfGaf5}bkh9bFfoccKSk z5B%eaK&<o9JL37TBL2rq>hH zJ5HbR92Qyf;6ms+cm5|h);o`kH@HiQQil%aj0@0;#o_BFOZg-CQY#6U^Ta8z08k{} z_cXqTw&zcn*aM&vd0=LPbAlLOiI8AA7{rnR#m7hHgmQ<_DDfLM4?X1Dm|ynF4C9h> zNS5a8#@vib<#mc|}htZvjOyNSfe6Rr| zPmY82XT~kkg4R~E)FXH7cKs<-j==f-ciWH`92)c6 z)hG18+%IvnX#uZbg{Ll-X`N3w(bSUkya7fg1QP#9(p*F8VDD{(crrXIV_{gC2OV;1)I zwV^t($pZqlZzWo>>ouZFtHMTMt$GFjeSU<|AKWPZXy$1n{31CfeEwlabUjCop)t`2 z+QQ|>>IeHNQ?6DdN`AAjKmBgFM1KUoRq8W@k8}Mk7Y7qNIH~WXvy(=7C5OF^7X}iclqPx)z6nP(me0^yJG{M zUCgwjro1{7O!Z_^wh< zpTX@B^GcJW4yBtmwV5KBb($f8W&i0^Q@)FnO|>f*T*yVL^7y8ix2H59spq$C7cJOl zyS6d2egtvPxSuB6lq|L$xtCB&D08D*y?J|1&~S64dAyukhLXxzSabf(i>+KR`=UsJ zjARncgw1T?u-DTh_xC(kaPS7RiA2`;IWArU?Zvk4mHKHn6?j)#{4E1mRyrNHcR=DA zXxV0OqAmpcV|2K{Jv0Y$9mtUVWH4q@Ku6ADHkB%#&jLDKF_TRg`}gATmz@5pm>_

S_{2F_ZSv8jX5u9@Z8QjDJ<4;YEp_{T(Pu5P6k$3PuV)aCIR7Ep zkJP3#=EGX%i$*c!;A4x1`7v?O{K*`2G+2Gz6jn%c1u6w^gdg6-nAO}Tre&L%-6^_ z{SEoG$#EDcw|eoTeU4$MGPFkXP!ie1@9xX$)Z}BV z9Tvnj&~1I3?<-rsI63e07)~WxAo!hMo1^MfoMK+<%6wZi!%z?y|HN^&ihjZvUUS~? z0K=@E&d2UZ&n$PdnZLzpx#R}WI1?|<{?xcF%jdbJcK9UNC%@?*d-;*5XT% z38XYijZMQ^d?oN%S~^D)l~qD#t0~W@Skc7HMZ&hk45&|$WF(3!ANK zrBy9%0Y$3T8^x`-*Fr>z78x_7-=R!nd+ey7nYTeCk4(5!06~(T?nJkkc6hyAsqr8^ z!i_)l2QD2ebD`(i%#ZABTyf&FI!{<8@acu#4+!?bb6h&LrtU}fcp4);bqAuV8xo5^ z5XzB16I%jK+y}U|Xh&6t@3+trDJGu3&(L3j{B`<_Jc!m>WI}TqOk_DSX-ag9405Pb zp)*6z5m?gRjIAM^0o+(e-A8D4>JM?VlX$`|!%_2T3P2+{z_?m=7nltRWkF3zd`=1$d z-l7L&W)@Hxm^Wgh>^j(08Vj?u#uS4k8ysn+QYRnRYJIaMM0P4x$FT_3!80$Sj&OvL zusvOO#2d|Ea)V6FfIstxfRy*Met+nWioDkz+($$a&-tb5d}5txXb>{vY?>eCSF~I@ zkRedAW#2U(q>yAzjDMWKg}kI*E$_QYj$0yRmi*JER(J|5V!gU}^y%BgG|Jr6R|shobpXl;YKI}7_!<>@ z(W^>g($0<(uRAQBaXQns3>6|*yVa!CZq~PAI{&nAK{~U=5t8H?CxZpC$Z zZ==ZcA;4o92|sNA5WqP*T%|;I&Enn=cx+884-Z!l?35b@EY}@k08}^h^5rB5G-dA0 z{K1rsG{oHaq=g{cP-H$ljM336OK;Ija=+G_LN>y=_j^4LJ=XXN3!m6)COzLr!KehYPjE(4twlBVG-p`~AA`Mu z)rSMPXo^&Mk#Md%Lee8REv=90tOq7uwj#~G$lW0_W5R#{|5X;~wb@|wYHU6;df7zc zdFhBtHt@jHee!#VIKyCqES}f(VSXdG-Lh$OW9bn~KQh||TLk;)Vfzns4C4QfO2{`A z=gY3t7NQ0P!qlLCl2>G;%$~IV6YbU!( zeWl*%u7-AXO%$_$YX}>7_}va!PuIrlkD;`g3#lJ(UazV5+a`d6T1x)58?)R?87JEA ziS^tr)+)H5tp-QfTx$L)ECtY`K3wuRV6{6!VuNCqgg<;rUre|nm&Tm9cqw9CJ9Nbr zrGy96R+3vzTS`Y~vE~C4iz%YsvQR{?y=XlRfvQ;0l3Y;gnp_fTf~iarr8I-)9nOZ2 zZhA9$J{ae&UrAp-qv_eN#v*be_0v#nqVW#E^*iL%!K7ADUVV+k{2t3Cs6BE7m9 z!XySkw>${7iw^NBJcM=>&IGU3hhgXqJA=nqAZXi2;!0CncdM~7@eXMh8X- zo;VAIv$4(aU4_E0ipAWjCC@LylshdDO%hw;3rmerApdoC(2csR&thUFVp$B=*|}FN zU`hZQ`|4GmvT(L`jQ}?G1*+^WS{c**dyXptuS$a~Y^qz;Ei~n_Elar6vhbgrU?H|F zS|`;IQUm_v1dWP)X?uBs`W05>=*`K|kL}}ESMM&5Z*p^MiaAH=u8csfT{g0X zIV6W*f@ssleG#iuuTfO43XsEU_1i3u{?K9!?xjht(t6X;Zkdy)`>CHu0Z}@D%PeZw z`A3*E57M8=ewd<7^t}h6-Jj=5nqY3`3kWUuvnUeOOJ?1PSizjqFUA)cCtRB-@k8)B z3p{AoPGdt-x>|oEB6F=d^MsolimSXwBFYLPX@Qj5th7@=GbAaXwaXFCE{%Ijk?}Y0 zl=LCzrAHO1gQAE7yRw4B?Y2!p$qTfJn7Czn-mfQTpGiY%o}EYEhTHbTuK_Mww(5Hp;Sf`EjZm26*@iv zS%D`9XYE6PVBtT1r)tRs+=2J3bazKi(CyAB?m)t98xC(L=MBfxM@JY|rlB?MA$1wt zH%|9NIaaLROx-(d0$zo5nNF#Ks=bR~;{6;EslS)i|S)f>d+ z)vERSw_0^PjhGIy8#Eg+3KFQjO5@i=4^h{gN|4aHaLSUAaexiN%m7WA4SE=fN=VX$ zO+Jb5`K;jpEFSG*-c;p;|CP43`kE<&tQ4z_pZ)2*idV&$h3HLgA$c1PJx?3PUJ{L^ z@vPe1r}C=Ntcyr;)8HIl#TqK{m=d*>xiaHkbCZtZYNPf~;bG;o#+U(H5S4$d-Lcw) zB_@E49{iy{#ei+h4?(lg8#pAyueU*n5#4x}{BhQqN+KQ82_5YdMWQC-MXizKw6T1* zAM^dtpACveijmJ2g=$Im(OPVYex{reTG#i)?Bc1AINJoVBYQUy>WyOqF+*H*9$bG) zI#AL>MHy>i;}=H_H!GNFD9m`=+QOuafXL03@U{K=h%AVB+FX&?f;fTxU>4rT65S=- zAg948787Vq%L83tqYU|ri~(XRWI>o!;U-3C0HRT(?Yk7_ST>;VN7-Ic@0!g%mDxkB z*@T;iD5dj-dM}A3)Fq(uG5V2cBvG7viuKX&8(kNy5S1XSu7Nid0SjzT8o!9PK69=L zLM|gFQ*1l^?wjx;%%_>~U~t0N6qt~7Ul-@Vi8q|L2lsql@M+NFCf|I~!1&n6Mo3&E zKD2{2ICQto4ZQwz=X+l=k2~Mb(8N8p4f&8rH#zaKos-W33FTF(vmo3R zEnYAwqBJ{im{`uSvBc+_+F3qFgG<<2I+}Gh$w|;ML>)NQN{tz(?fN{+?57?#bQkl- z`}@M1hxS$gzlG2#jo>KJZ?c?pUAeq! 
z%lq5g>#Nt4SNG!ZmxOP9+ysw$O!v2x;6rn3)A*_O+*nBS8%_+WjR6c#XJ)9f#a&3k zAx`83B?@s1LUTK5mj4pPbXL6DATe^GR;yPturTe=$Aji@au+(e`Q>xi!C|0>~(* z!CDwsU&2B6tRRe8{U}Tqh)#w(GMjDGmv!z0+UrDhnVpBKg6X5Wx@At0eZsVQDxsB> za@~0&Z{l}zdA5CGk+ljJ7WBpF$r4P!lEltng1RS@Nm5oQwmIw3UdpZflDaFgTMlhs z%s?4Pmi#1I6f$@1i;(|etGX3bd^YuOvDp%zM4K3yYahpIz7CnE)0V>g&I$oB0`ZBv z%I-Fl7D0c@@IvW-r+pHjc*JP|b61*WwSnz39euXVc090`HsfEGNZ=Sxca<^0pDZoX z{B^6_)p}>k+w5*_m&@DzW^;4XYj)uE(rNw_@MMSF2D~Wy~zYS(X1y z^487CGxsl;PHx3SaU1LHmHocg9p}RBPJ)oofI0T}FWi`LK)y zWOuMev06uOuqNiYvha?*2m^8Fq`0ihhxN^e=jXR?uCLy`eq*@Wl3)uh4kHcR&U_9& zGKg;$9ZM(_R98mc;TLC@?W0%cS4Tf`vb6nur@rgldvI%e@aO{X%qYTGlca{!o%%u+ z{>go)W$J@5jT&Pk?GFSF&>u%B@+NeKXO%6pJo`4)W8^VExE~yG8aJV#MJJqt1E+?0 zBkBvPcH>U=ej-Aae8fm#Ki$l_n4%-SOcMK9RY7<~(<(h7j0=D@-^wTwK%(2%zEiP_ zo_=>U8`(QR&(6@V<=A2Q?{p>-0D6r9X;O6E0LEthr9%*3@b!*Uu};;4KkeS<#L8IW zfm!5RghQ1b{N>e09D6L*Oogh5PkK)?PWcg=oe8q0$E!}C@r7`cRAWyuGpVAZ1k`zK zm+&V@$Q{(gs7N`*Bn>OlUT0(579Y1;4OBa=w4G9vYUs|`MR&ygeLP|(-q>@~O)V*| zijK0jD3|hy2)FPqj@aqyFTxIpd^VYKrlV12Knny@G0sO@20nxe+M!7Zx4e4l#ZS{jf3Y&n8Kc&9r6Ms3)JT@|nNWXo4szK$f+NyJ{EH*Kj_#v;`LMBG>)-D~_`i^mDOF3zs;h znzq^;TwygbSdAw2NmQ`<@3}au@60UAX2x;QpQjR9_KnX%4rDocUurR8&xy_RCt!?Czzs)*S7p+6>pScc&>e4GwZ9Qcr zz7B3So_$UlPs2_trF-Vy$7PS1dY`^S7Iya;geH`%rqUvl$Yyo3SZ(4ijFb`>^`&=u zb0TgYK~IDV3%&ojc^r0v0qn!6Wf!98A?L%uw{{&LF6SfsrvN`wFQ_U(bE}aH#}rkz zfWcDWzdqcyKV>w$9kZG0XpWKg!G6VdQi+<-&_%v=^Wn!k?1z@F8Gd0m_Ccjon>Z8iIE3X=8W_ zC~chU9JdY4qewIFk4E0a83bsw$pK-ysH?>|?o$fO+nKl{csulAZb`KoK@#(V=J}Hu zDnlnEqXJx{{zp@UF@-Q`I%b)U!PDpJXV009CV{AqeS(4cT_w|`aiOjU|MiArwBjI@ z4RRV74ABdXB<#yW^sNxJG3&`oOzaWonjEy2DXM|`z(TVrGDSUY>+!bRufT5G!!1`R zSoa%Y;xQes?`4-hlsj+|{3JZoZ4!H}^Evf*0WixOqyB(^Ubx{$3$Yaej#aG#2i2^% z_rNOO2ZLTdJ8Va)#>bPOgSog}t9`p@@ZIJ4*^ejZzoxhR=}ZgA8&?Dc#O$A)#}+`-{nYc?14D^{5j=-AA`iMN2v!HC-foR-~{W#Pi#et?nSAAIJr z9gavr#R)S0O1Y-duBDf2w_SJAy>B(z?YzntfN>PPDeSgTbk31MZrBQZ=;B8{ABDjH z9d@L*#UOzg?KUzuczbNsI6d#pA&fLwJ`-W6B0YAN%6ktQDc8UY@a*>;S>fB2z3lw^i-&YeKO*l;TZPWF^9&6M#*Rxtqa*RZ3b*JiUl!-<)Am&fd6&yKlcir z>a&WvjbSU{CC8)$($t#-V}VX0BsVea$aX%8K$h6NImw(b7cO|+kE-t*>6DS21fmbG zFW()Bx8Fr?y90N02e0#*>2S1|g8>nYo1!yRe+7O1Yu8pony!h8aMHY#Fy@vYM&*ZD z%8O25d0T%(YGKiIQDHqgT%k7jyi7+VGJ=UOA#V>M&NQ1>LQuVxVM;3(sjXbB(25vl zq7~SxowRo7+rpN#Ak-R7n6soy&*tu8=)*MVi)S$d^ax#6n!VV`59P|l5^&%=%pA?V zFCD~rVJr%Vyec?ID`@6+3Hq;4aJMfzZj|R^0gnUJHr8s@Vm%6__l%@pKAM@>K=I9L zApB<6R(u#0A7&^X^i^>+Y*{>+p=!Xa?A|``OP|lriILt-lp5-#F49WDbVsFN0(5?Q zW8IK@9EQ$aw+l2F*@G#b>q4kO(WyC$1Mt+3zj>3e$o^zLCb0HN)_`n?A@)FC!WI5% zLwE8KIKq$K;BkanCt^&IJd>sBo&O2IxDzrzua_F%!z3B<4m&76D=2-C+DR3cfEdCt z#c8J`c4c-(-0sX{N}fB(+^ynHFV^G$I6Gy{5(}3vYiLEIl6EB`;k>YkKfJj)xxUSb zZP50^bA|qRpRcAG6`A$Vuh6dk6mQ?;1=l9~>NarFH^`a-q^8ZQIUV&9;torr>8$X9 zGrvPmZlJWF0TQ2Un=LVixMU~~uGf$g%n?VTQ6Bbf6id3F&ZpZ``Qj6i6yp07O`$(Z zWPr*)iVlq@qWj|NQ^t-RQK~Y|&uD9f(xy>T47yiPQ^ebfq$(dhZyXBQ;~ixWLr1+d ztGsn)07F4eyii0V=a+7`>!X*Ah{ev=hzehG(g3E!g|JV~{3T6MZivg)zC5l9P7d#n z*gph&>W0(Y%f#dsqm@uS$fIy1`jyfi1E6L8Q}~6J0OtJDGZ{cg+f3 z?B-}n_ZH2m^h?HjGzs$D5s3xGJu!9tp3w6Itoepc$>m*QbvM=zDk!?)LBed(>#WYB00ZRF42QZUnJo484R;WgK#^L7Q&)=Y^aggRwDie*w!m4%5V1gs zm8Xwm52%Ka&dSg|M)r0li#PW`{4Ck+FJoFG1k5AgdU$6i8in*p2^ll3CLJID$ta(> zCLSUAD4)ZfH;t}vAq|4kIux`5QAg<$GTxDDY6q8+bf_YDqsOr!sMrrGGsIempw~!& zqe<l!`9o#9ScnIaVlDuN9_$7y@}Vqi?CNdByT|gys9oTs ztBJNT&H27lRa)W*D3+NO1xX{*l9YqNf(1K~W_zEU!W3{kUl@=Sl|cu9PtQ>oqNNt; z*zhY_mtnRnB6KP)if&5)rIZzc!HiI=R$cUgQ>*30;0jS1ix@IcUN8*!f@z$-C?%A0 z)|V&PK4y5DqGk;-P)#+a3>yi5aWu0K30vVja6d&Vr%@1{XehPg1Ar8Lp@>$B#Gs<9 zUF6WX{Y2Epd1$8!JrC-|+RTQ2mOfZf&?q<4em@=ml0w}Ta8Q9r!$ad1jc8Q1oMdQT zdiYB&?X&2%4T0n0p)A-=&BTsd9r^M3VwoO=Brr$O?CAN`ZGiW1MxzTxNm~Mz;&xq@ 
z8b@WcOtS`!Nb3NV;XS-sYdR<66BkiXZHw0ge%H?K$pc4lVnXRJB83V?GF{%#Xks>@ zV@TXignc5xj8ycb=%vGoBfU67ztPaVAh^H~Dn2k2j37Z&%pfxqs0L<c|{pY)f)?D5;e`%orBX9S!=l=_s3njGBK|U5U<#l}_#21BmL7Q6Pyqf}hYS&TO0G}dwlCjZ;0GX`2Ir)) zUbt9kGiyZ48j=E`BW|a1+Okc+?p&BG)6(FVi~%GSYB3W!3VIn;&*N!r?~!HwRTuO) zTqP-ouPA4gGUcuBr>(F3JpFysvstxbl>Gqhjw@&orox#NXE z%(PM}2{f0Q8&!hqJn{?eT3q|T_GOPi{fsGX(GG{*?%93@Hb{pFC;79)1a7MC*+^($ z@paLlPd+Ef{WuC9Mnw^yUvA2dM-$~uKEZ5%(%|JV*v1YqjA*4Kxj>IJt!RTpc7@&9 zWa5pcr9tp;j&YEs7*6uXN%^A;xv%RRom`?^J}F-PjuX=Uu7Z=uAhJAG;V+V_qTS_Q znj|qF#hsrgDKasi$uMPI%FDD%T0&GFYFexv88&Zla*S#BsW+zmuSzeigW#;lB3DFgC|^ImB!qgfwq z6TT)Dh1N>g#^L(JM@udwG`!@5E4awDcP4>Iusj?CsE$9tV4P$*hzs8H#)IGy*AFJ+ zF`RmM!jsrejz7vWm5PCx5ipW;9*aQpGVDs|b%g`h#9ps z8HXey_?o`azFA`p7LMDf15^^7dp~%Btp-dps;#$kv0HF8WRCmn9hB4AC(T4 zJRgOV)Zj6F?AS?dUD-nt)->udxq`6g2hS2ti!#ceN`pF>GqaOo^uP=P96jp1G!{>v zy7Z@^dK_%CV`7O?i z2IRiF(q(OBy%hClNxhUB2j1~uUfv1^rDMUg$(XGa1NSUE3I;u8jjdCr8;|yWH8K_I z(!t|LG9;3Y!`L1q-4HH1F$$&J4yT46i+?u?ggVBCq^aGnTB5Zm1&n@h>y#ZB$TvwQ z2`X?(%23AjCLo-qDY|Dc8dRHP`dq74>%~UxDVKKUHDs6UyoUBQ;>F^8rskcG2*Wyo z15`<5bk#4UK{i0c#ZN!*$?No)+tX!<2wM0J2x$^FDc+E1g9(sSJ)=EWN&Uo*??oR; z5LK(>EnQ4nx~+*L0LwC0{*86Y3PUjE&UZ=l*UbQzxtZ_n)f)MHK3<&`@Zj+Hr$gw( zF?Q#M(%*1mUAnHJUosp=k0eBPB}c(4PYIY3X<0PaT~q|<=iq+2#+39MeNLcY-V-fRNL5(dI1RT>B0@WL^=Kms+Zl{f3((*dwq-l~3| z7=V@P}6=+7x5kpL0S-{H>;zcd~aIr`fo1k>WB+R#`#M?pk$I zDw2MZSMoe^BKAYOEm?Y|csl-h8B1u%eSL-!5-Da=Wbj#)D*4CES>UKA`wmPdO ziMn4pNz}b1N!0rCNuo&1{%@Nns@1^xX*EUSg_Ph;*#hu6nq9WEuadcgFe0r&cKneZ zQx=6a7~qvI*tle8Qrk<61RF&Mefu8HNAf7SOR)HZJHb?|VR;b|Se>|k9eO|+f)ic{ zaIUk?qWuO3wDi`F(qU!XQBei4!MC@nm73SUHH@Ewp+SX>hxBLdzfM?(1 zTBSd?`ND0!*`%PioEaj@1orOo-Ob5y`|$SmT3{mHZZ6K-Xb|#-8SK3;GhluGjJ$_i00a1?R2N+?#i&r^gKB=&2_9bI~d6Pfq&ShuE#+tf1 zozqlg3Hw3#yJFxV)c8_>F?5RX;7&imfI%uSwg_TK#9yQ*W9w{@u8i{FIo*lp8b$}P zeqGF)f{Vly2&vPA?LwrM)15j4&xOA+%DHxt`T6P^miO?8!Ntt}sVw(ccKtoSi#qUT z)|btr7}Fi0BRgj1!?0wDnCj|6o(1$g99pJ9np8yai8p?)8#R8|jlQWxd%t+6U_Id! 
zB|^UQP{Q_9xFvINy=qGaCubB_M_@bDRvJYF??R~TIr2~aWOzc;Hzk}O&IVI|jQEDNVu%k2av{D`6WD&@-L8ZSy_!g~EU&kN(gKX1QDo&R zE0UFQ5;@YU#u-xCITrb>P^01GL}IO8K611eq1&XEh)XxrQPwi%AbRY8dV{fXMH%{g zci}1C=JOy-JG7(k;*54Ht*%?$+H~Dcvs~VC8=GFI<~Ewu#=7n7J6&H~zo?LQlMW)fc8cd?kYT5GH2)mxiht6wfRTCJ_xmRE0XHft%1 zX@wPAOlz6sVm@DL-9O+;lK7)RGO-3th;#DG+pFu_wm8~g269Nl(F6pY%tOk)ftQzL zIoR>uopT z=GoYSGi|e1uQsb&-gd9)&RbO*Jlne3%Kf>r$=l>6M#7-~#>rt$Waaj!?OrJ3-Y9<< zvX5?K2i0e;AbvROvJyE;|3w#qQY{%?CxTrt3K6SHC1sJKRvWG;WWV0Vh%Drf)%1Fu%|^4<0hX&%?RNV8YNuA; zbS~hQbmNVk>ZVhv?9l%^Xc z81??1<#Ks$ zoi-a*gj^TuH5yHaT5LKvu26U<_KHru0gL6MHxI?%%@qK@=@RX6dQ>J|8ArT&)%%sJ zMq_UX=F&1&DI>x-Z>Oi zMw_C)Kb;+)T(xiCU7lROQ12S&wf29!J$c=Jb8`L`Km28iI(?W(lu+xX&abZCzB)Ym z5y+A(;;i8pN6w@J8wv>P8kD&A24ncgacl)p?)z7~#8U8OF(5YUATvrD)P1ehD1Qvh zp1y&XdKWXTm)eZ!lP8Oa6>9)vRqgbum2STV7m}V=-*mScjsA8s+lnc0^OsB&!!@hd zaLwSql*uya{CDa9$sJu?o}RsK#}_G&Hk1VBzsKHP8RjTi{Nm)IhL#)0CqG5Shu*LT z|FhTl>Fst@^3!-5OT8#0%Sk*?h1DJmaAC~~h;pw`l5}y{hIhATueqAv^McN-U+N9r z5*kALmSO01Z!Gn~Idxv2T$&Yd!l8I3go#n%^aYF&uaWma)KMovl4IANa=8ZQXn(T> z=+$WwDs8~V*ci>wbZIWe#2Q4rsT3>7ztIUH6a=O9lG80=Png}=an=U@X!d!HzMzCh zxb(-58|VqoScA&z8<#qSmExS-Q63NdCK`)xJ3qN0hT=JgZ9YOjgwXxy{%|*R{XrSH z&%Cv6wSpGL+s@e)94RA=`R@ES47|YzHhoV1^@9&+>rPW3RGpit*LO$GvF|;&gPxNM z>36xuzke7$PG`e%&r7Laa}GyS;ASp^PyWZ?lareO9~unH7sue+c?lHJ#H4AO-wu=LH+6UynXxY+Y>%z zqMxs?4`00k)=6(fR)GUko|k5OOFjPytW>OfR>&8J|8aF)w6}V3c8T8*)L#TxYq?kN zUZ0-*lE+cl#9JmmN83%ZeRyLPRSw_XzPY-F(%N}Ybk>L?#AnS$?Dt~C{-CH;FbTZ= z#US0vJ%hW4abJT6#Tpqr?d2@9u$Evz3C$)`O=#TDzI_Khrwag9iYHMqbUNe1Z z^3Gb>EDPODU#(>Ec`K%+ODe=AmqK7!P$xg~=RmC`As6V{8$(0WkGcMET6EmWT?qHY z{Bmg83&>4-)rZiNk`H^G*&Rhk%?yv~{zVJ{w@dr%^7M+yR!O4ldviiXCAlNoZOZY} zhME6RbjE%!_kAzF1IXmil!(%z@YD@IwxLXWG|7pW6#1g_*T3S#<#%KK`m*|be39w* zDtVo@TpV8im|J3XQlLL^!6X=C(W==OP1Uz7-1p(m_r3SuhtX2}=V+}6i*$Mh$lWfQ z%OTee3-o8BF8*a(B3zoMEQ#!K%Cd+(GYf#GX_N18Zfi-oC+fpOHsh4~}Q^g(KJU`N})Bjxb#b#_}R$`5Sx`WO5) z@xs|)%2M5sRPRns6lVjX@38`YC*%JSCnm{>!g~}<8YV&fhP$mOEn(~%Es)$;y&(Hy zv*%<-NE{udou{NJQ+GJd3G)d2)b3E`cW1pL#sTy85leCv^6r*FO}xk|5ygURi(`~- zHPBE&x!TA~S{d2Ls^G@{BKk)i9(!GfqT>EVZz%VYqXdf6I$i_%k*w2dM71Or_m@uT zK+Q{e`U%+2E@tBl#oxP@A~P$0D|wsoZ6 z*&PN>gw>4phngF_&s@E`HRndWbaJpLu=wy_V3^PzZsZIB_c%_Ei8StKhqtx4Btwk1 z1RK}wV!#fVA`GQL7=^+FOsLBxM7-j*gdzWpclXu1(^J^JW_QI)r(Uz8@)%usQC}09 zv=%+*@GK**tpXQrQ(->jh~5^RoA%lDyVqCF@A%ix?{2QOE6Ll#>kGQd$Qo<(?;RmN zkotj>!}xh9>EI}|`}-V}682Fi2)(>upy#51KE|5Se|^gK7gmlBQmbta)jB}EG+p=B z?t=&D0VV_&U5G;Ll30c|<9pE&JgZ}O6o9d)%Xr+YJBFhgtmi zCmB*Gpu%vfOk#Jn)b+S}dvb~RSFI!vP>yvD0*cx;1QfNG+G~J2dR6!t;UW%-vvJ~Q zl{8*EH0Gk@q9JGqY0wcyi+zW}o5n~9eG>z$q!15LCK`tX8DWq(-jc18yaagN_u9Ee zHt5d#3MyrD`gw;Y-5;o<&X-tM+wNrIKISAi(PIod0sU??8rH6>(o7R{+0Fmo-j{Z@ zkz@B-Dy-PukSRblc8v42bB_7YONF{lUAi@4+wnQqLo%- z+>U#VX410;6$V%rE@AbN0vZmk5YT3g=kO)MFO*>qbG;kVCk0w_IPB^3J{D|1>Y-d< zAqVO6a03~$V)t7@VBEp#z3gS((A^HicP-fXSSQ$C#ZLuHCVz|XtdP8oWte9dE;e(9x`A8)<14U84!dV?$-;J zy$NcB<8FIw;D^f|UtZL{*ri*OUfiEflJ9!AA>hc@+(4Mj%zgIp>>PtmXDv~DU;To& z25wshB|F^DX?O2%KmJfXuSF=2#;@XHTc>BgM{D0ZK+Sb!DDc->?F>BkkE<6^A>7*Q z$gOmKd45q9`~PFTdR(hR()G-$>rPd5G0Lj25v~v)9#;>S9Tg3#Ylt#|{^HVyqD%5_o6Y8}@XHtF-Y)|8N0{JtUl2c!+tCYjG z%uk1%4nQD)S=xzS2BCz#R_V6IduP(|#Xu}v@#;DB#Aa8|@db!HmWU|Cdlz+9tbI5= zJ*rhtutdK*Sb~fr__^_Y3EW)}9g}3FzKH3Rg*?~?55i446&1S!+2eo<0dzpE@XC)Sk8qt& zlL-WaOexBkePt!Fs5vkxMgNBuMVrEP`m?QH_PXf=lMm@n+l&h_Qn9qAgNee0*`Pup zgW`6`kLBlW=QS=ZidZ;~%r9m^yhF;N-f$}dSEKItq({ghb9Y_@fDWq{QIJW0q(6F~ zG)AyU1l!k-_%rc^W+Bw|fuZFns z+ct^0L8L~;iIyZb> z8^v6@hURXJLs!M|kwpzi#|k4*B$9fN+=#g)tOYI)NJ&KGPUTZ|J6enSNn?;ohv>FO zuY0z-b=8TyEzXwj6Db4+KW$SDz7lE7qs-TAR_|i#@is*&9YKC6IKI&LyO~IO*(SOr 
z2S0*DL233Dvj^gxNyZGwl4=G_JW$q-q^+m-ZqPC@-n4?}3Ux_p+m0Yz$afHF)^H252ExKI zMR#U02@>fPqtF8dqjzxVjVGz?&ChMHR52EvpxDtIyyzKBhS4~al{(^jTCfF>IdnqL z1Ro3FvWAKm?zJtGo(Orop=VcW(7k^|9F zM8UQp*TxtV@Zz@7oy7k=O{NJF<(L*pjK;LMSp*@)NK1lpi$5mA3j8VZi%bE;&~JfP zZD^VRDty3idq|lk6lV#rCj-c{h9zTYE9c&Dd2&)aIK4O&ti`_M&Fi)5eyzSF+4jHH z4=?EJ#!SvVlM6;L^oSgOUUQ@T9su@60l&754VQaXvfYA`-N9)IhU>gdR-EG&Wcjd? zc(vb(NkWbvlH>qYvQL7*@e$(IS0q32D9ldq2*IUhcHOy-I@fA3NugEeI-U#}xdjLv zjlw%|HVWJhI|Y1HigIS8B$|-cP)M1?e_~nRfW}v!m)swT?=o!EjI>2`ng@Z%aDqMi8UrOU%{QzOSukl#sD|YWq-X!?LHh z4Cb|);d4`mhT;Qj5mNK#(N0vY*Q;MC`VLnP=-&v78v^YGs~LJuYBpG z{>Du>DWTnh9x9K{LPD2%!c4VYv>u7N(R+zv@r5(ilD#A&`Pc5Z3PZiKqZ?mZk`T*f z^BUVUOC6vD@Lz z@WWA$q-Ka6jLetY}_=uJ&Xu< z(`|sYQzw0A8CI=wYOht}?558vN(M^Ma(oQvBL=pY$%YRV(-7-}+?3R42km24842rX#H3iv7)ZEPf96TB-kU41CoLHEF z2BV-$-R?!#_zL8R{DD#6&CQsS{^*1TLF8USnWBw6xm$?|oIy2bp`TpzY&63$iv4bKsI16X|!aY4>i)uQSGY$uJ-D+>O~DuHS6cUaJ4{V zj8q0?22!`NVi$FpDIUl(dop`$icWr$oA<`vy?FH-dh^6X%9tj8n>9=PI)9S1@2aK) zFuL6x<~I!Z1TF;EVIk=Gi)5r#{n7v`M(UWFO%HRRaalw*EIXFRf0nlhg z-N`Fy8&GCFMggy$uQp$y*hjlpY!)+(p~Q#ZrlYvkjIm3(%S@Z7nTMeGyKUL5c{Z!6 z7Zj&L9%!TlE0`RRK0^0%9=h;%ClYlX;Iam_(rV)09Ym`Nw zkMYKbrzqCPEkLN^3QP72MPIf9FGVf!zT4h5nfi77_QPi6SwxeM;DR-EmVP%Er;Jn| z{aF}gbmDj`=1}a;D+&eZoB|_~*FC!GblYPTJAj`}!L~zGM(icWKv98WjqfF{A33+{ zFhPl8O!OSm?MW{ps8uoMN2blvk9iMRcPhza{di1?nT>qw2!zVS4o-%#`AV{ad1j8V z$)X(?Tp_nuks1bX_4fD%Fi_xr)g2wDSTtuWUyJL)=@b)}J(Yv8$LG(DIRP1un(JUF zyp_OhCIbf&t30~S(~OJUm2Sb4tev!ajj`daWU@i#!N59yJTY>(pl!Paj1=r#t}}_I z<0~V*a_>_gJBNsL_pUq{-^KTBz=Gb=Z7&Q4rtu>?qlG{Sh+xH11O`h^w%tl+I7;(s zW2iRenW5{c=aNbIPgo{0wvyxz(C$kH7D3Y#=`>F&Y>Ii^hHQUn#22>|)2N_dptyti zVMh=>7|D4!u<+V*>=d-hS9~e&(qC4Kzubxb4{}=&FcY*^@qb>{E^GVSSrvX~b8%IE z|MaA`ogG~S0K}csemoGl%Q*p51Ihw%IshQC_8*4E%t1-|gfZ4|agk{iED)6DgVkxT zr&bn`ztk|pQZAop(+-7+8U1{?e|WkFfYrb5RnOR!+;&VB9l~sl??gKe2^YT=ODIUT zI@ukQZH|ZtKEb1V%CPhy0-dT8z~EuOErIt-&wkr)K2?Z7u+dS;h}C#dmA zrD#r@rv(~lP9ce0XJ|JfpBC96a>TLs-Dzpq)5)CT~{O zD{CcXoP&{UIevmt0i+~hQbs%+s1)zN2)umac#fLTY+JE=lyuFrWn`|?n(g0>>83c@ z(vkXi_XXKGBGYN8)Kz!BvQFMR=IT#fEO(~|m0W3zujVDC zO3{Y-=2Yku(7FjGK}nNfSwXAiUF44!k#C2H^Q>jBqXS%nY`KT!1Ylq~t&Fdc9%1@8 zFq)*)0y1jVSTIC_1EeCWa%0d4P+u*rke=;$`dCAsZRzE#fi6*7XpYK@=41UBy}Z#S zZvXInuU?aMo=dm8X-hPgRHXegC`KF8XBE=Gf8s2rb?pWf}G1xn#CDPqLDu7{u ziXsFDrXWzm-V|;ZVXm)sa(PVNX5bN97qfVt$pC8NrQ!?BMZkPxt-fH!dJ7Y=*(B#N zY1~Zer5mC-4bT{R@%!(re~PF$PjH|-zi}%(At^f)?>$oH(j~dTbFGIwj>p8!``jQe z?s%!rt`Ptw1BF@5a!8eoTDxYn5nbjf$)85EMgBq*%rZ*}QVA$z+oyygcVy74Ihq%R zW;K6!l108|w~wXl4kV=u)4H-nwD9<%L@qYcvxA&b*uP2L z&2_MM$(_%joBc_gdwG@wGa&RAb=yhW3d&2f*u5nR0lfj?Hy+3Uzs5ALlS>$9lSLAd z(WO~M49THbylvDa70vYo-RWYM3B_*PkKw-it2CVd_M9=tdRE(Z-hBGjvXHkb7}oRV zW7~9one3en=x2`2B8y?;;*K;EXM-{|b9o29v}UD9ZPZTy`Hse zz#p)n=dhEtC^*^e_i-Zq>76-2D40%xF{iK*r+)<;^SxyvrwVN8w<&@uIoe}2C=XIw z@T@{-=@_N$glQ}dl3*$e_aAg{HCH3`$6+XDtHG;D3yOzxzkdaIje za)}r9uTQavjKR&jj2>elDX*uWzAe6*)__IGC4kD=ha-VRJVd;isR-BNO7>rv1lC-I zA1=?o>Z4FQkV@jK#z95%SD~$SSuoPJygr z1OqpA2^a~hg$@gkHVRaPHGj)NIoiXBWCggTL~FQfsCQc6OgzO_qqr}$Rn_FMv@xAF z$H^6HX26Xe`!R#dgg49iQ*4#=ap^73L|`9Mi`W({TvIg~;Il}McwaV*2;fmwTs+@# zW_pMv{Ul5pK{}g96YKDg-Ci)Bp7-54j|0*D0*M=$_Z-*O$~a-{ z-_Jl+#kv$HhxV6atd<^;Q=dWDu4YrUe+5PzQco@E!vH78#--uHC#cL#qt|V#YY)`C zqEICe@`+t(A!P*~|0pEyrfQw=f-iwZdzY(EAbN@*k?5Vzp<_spC@RK-VI1gk?^A8> zuSTi~h~to^)BZ@NiMbsQ2UjI3JRGZ%f=|JO4iILmciXtbbdj)V#lH7?S)KDz@(5QE zz0@(>om$|EpTdgWjC&?u5*~N?O4ek71^mxj!gl0A9sEzMM?xTG%V-W2pPS0~KoH-B zxlUd#{l;B0;d$fWAYy?upwKu;F)gABtv&1~5P(dHw{zKm*(=FBlZaioRdk+jB3dg# z1+Lu!T4BW{#6v#KGj257nn%fr?hs-JsA+iu0|IWNT}OjM2i|R1vjbJG%_=Gofv96? 
zgDmxqx*3T@iY|-j0*~IJct5%=Mb|b|@7WC&0htj#+^Bf1E9S=nx3pWnN{^aQh*W3% znmpV?p&wqF6IN0eG;?s7msHuY!5~P;?3-oC=(K)*R^3y>D;mQ3 z%<8*-?Y+bJY?DRynZ0_|&g%ys40uVBe^@)(kEK`YlbAo3zSOXM}jklH{`7 z4PF{iBn6mf5MQx+ba-&$!C?)}*|cUn6Sq|CDJ0Rt>o zaFw(GfNL?EutHp}+s1eRWJY2|z0SM{Q9JVxgr&JeXgS7S>67-T4eYFSqh)S^#c@`x zSC7-#;yOrY1zaW3{SFA`Of3S=w6^KRm=7p`g{>)L&x+eJ&U7Nawm{U%iUzc7&AR_W zijJJCEW~`CiiIqF3Oz4h4Li$Sxz}F8EKH()0W&O&@hi=fS%ZykqfcP^X?X1T97q z2f_{pCV`U>nQCv|EYlfnFfmtYCngSzTJTJ*>!?6AVFlBH*n*(0Il6{-iWSd@U7~N) zZSO)x882Er92(@yiX@LSz8m~$lRIf zq@Ak?kz27H!vpxh+t7S2PJQ@Dp@9@5A|GiTh*<}0@u>hg!(WIi7vx7942^os5@NT~ zxI%V?z~C&e@DpxQPb(x{O*Ene{UVcC5Wd^Jf_e~U0rGt+BSLp11qt&@Ibvl2Eur4A zho7mT4byH|Cmje4NV09YUb4>7yqUdPaApTXMPO+>_*LK*J~@-*Z-aY)kDf?*BS252 zKXMQbujOuLJEonV^+`}MFYi+@wqXR4vaXJ;^kxg>!SZxtI^S&@^xa8Tp$I5g1GD-E zKJH5Sk#B)S_a-9=2AqM79=`H?J+TeE<^VGAx=*3IL?NAZpc6ea+aqa(eFg-RUEBwH z2&)}qT>6(9|;Ha&xYSfDD;4lIJpQQu%sJL z5|+#TN0#B6tEy*?T%|I&_X2byT^o~>dsN$|ba|IDYfI)PVw>9>-3qFWSQpjHyPO&) zBfvH_QL?h=nC3BI+JWSOnui!1^oeKN)q|q#mx{noLO#q~D?UT;`ZUq1Evd85wzinZ za)SDp*Koo_3yUCE;4dSd6b7WS-a+tL2faPttjyxzO`czil{{BK2rA-AoY=|ET7V~~ z7Z+c3YinSdmG4>r@|j-J7^Afd-p2|Q7MyJQFt-7^l|?8%PALS8^hg63Oh+_A9goMr zR>WX1*3bi$UW10c6g^i8s_sC$pXw51vqj%cb9Pz}#JpEesz;{>x?DqyVVI(HY)wDK zz&LE`4Uh8~W3J?LfObuQgFSF{rJ$RiQE=Nl%e}WGQ=03n53y;KT#OgAo$&+HZe(ix zXR7e$*j{!d?Un8$`dB~S7Qwc<#CUb(8VM^X7>_0N<1vC_ix#B%95`}N)DO_+h&jt~ zD+A5vv$X{aqxoMG%NCyJykdlp4m5e3b#f5C;hH4&cruO-L3a(8QJkRcbLH@4=COkP z&v~j?K{?P=b4C0K9x39vaiX}#!IKkk(Y-~#0ZiHdAdhYEdW0fEY*TuHAllDU?HLa3G2zCCF zy_#da{>zZ*pclk_=roS`G0Aj3lGBBe{4b4TkPBG~PZQNn1ZZEUV@8AFpfuy8hc;Uj z3?@g%qwKq6O`sL5n-e>s-*1czJ-(7%g=ei^gZzTouEXQxzac~6Jfz&5Ce|6M^oz{} z1pUD>G94BDmHL40{WJmG*p8iTsUyu|l%jwKj}bN}FWNh-jBg6YI1!uU4nJaWC#{6H zQ%9-AM}7FWWy2*|H4DcW63^q>@oD{Qe0*Fz%e;e22$a*Nxg6OY#SB>{MFLVNrFwOB z4@T#BZ&zwJl(qsdQBr~<)WeZ#O-dq6gK-7p`)FMSHntI^4W$hDz52VMs#MLuL!ivI zGTR|iHzmq8u`+N)A1|*%TZV5-Ntw_f-9$5E85CnQ98GCsL&^;& z)R>ruv32->~FVSP13CTj?GM(u=h7q&6Fg|Zh0 zs{uxb=%+mFl*eefYR!iUK?zlzo$hxlC92AP8eD=Na1w{XP+_9cQnbRcx*2scdZyrY zLC~5uI!OV`4N3tlNh~hu#ILeP1WnXbvBZ?Qzl4`(jQVXSVz#ijZV9JA&H+wBXF`_B z(@`5Oz}(@MoGi0?{p$p#pc)@TqQ>C>6;s%VlZfwi^y}KTgtNTIuV+11p!_>Cfri60Ny3V#ZTl2khcO3%r*TvQn) zLrZM=wS?B%7CTY64Udw~l%TrzL0-R&lvCnucS|!ja9)Hu!Di&^g+vqi%%RB`s7QXySYS*N+x*bXLOHh- za()KH;XL@l)}*T&^-H(Grg3F`6#fMD?A(#!6~d+hMZw*Q8fbVsqXr3C^0R3%C1omw z(KyTno)wKHllj4WN>fsbP`2WVPFgIJ2OJAcjsH^}fRa*vG1q}n)rb5qaRw9~#T zsRKv%5+I1Ub0e8oPO<%ltEjpfLOTnZr5U7CT3sO_!}e9G3KG&HlVgMvZdySV$i`?6 zvooVhR^eceV#^`Lwt8j)65N>vP-?CK>tYS}N0rG2}^KsneQI?q_!lqSLA|Cdt z$g?>Kl6dlb2;Jj$)g8pj)lNQJk+j)#&J(ke@mLCXB(=I0cMVajJ@b$WxLp`%g>59Q zHfbgMN66y|i|3dUkIIX>ddq5 z3DO~6<;B@6TlvU3!{5mme$fl#JbMYhjy8yb2jA`l@K|Qa>+9E{D)>qO1jeS}IeHzC zmz5#m@e4^LqVO?p`uk=;s}o%Yx4!IPZopjBn8m~7MPi3gpqS%e6B(Hv@$TuaLig5a zlA7QPiH%%3WK&9uuNR-b$wlaG3BAyyFC2@gTwh~!1IL|%w#iT*??J!5Xn&Kgek{~e zI}Dv@rL2`Dt&{8F^y=CWw+{#9cG7PQ#I3;M711qgoFwGuwM^30?!e9^Qp7MYXt@pn z7^LC)?QlH4V-K4y#LrN%hXsCg*Xgv~P6nLQVhOb6p!7xz5(uyD`??sIyp~t`m0_pD z-t$%lUTYBHn{$@Go_kkmCpov3{oLzH`(Nn3k|oHpFE5XBUztZ|1%sSZbQ4fYJ>-%h zFFFCXK#h*Hv@)8GrCQQ*-@t8Uf9B;OO#I4_BEh*rJSzn5SaRX>A+MLc&9xp^&(3Q5 z#e0(x1C)^35DAHyCnaCbX^bU&x*Y%;vhSE+lTjDm6a-Qjs>~E_Smj1xZlP_I zZMc(8hNC&R$j59pg7-)#pOsLA?T^|l=62CX4AVR8@zEHH`|qOgf{J$Kh6ukem+!xZ z?PV#~rQEj8qNR-#TCfbl+$;9SV=Y^@)7(2f`FME1V-(|FkM~ZG&#D)J1$xz=o6W^6 zTu?k(Ss;nceca~DyOeMz)1(KA(Cq<2A<4Ng9EhZl#{qhde>lf6i=-D_jJ=jlbg z(T$T)uhB|;bPHnMQ*Nj5<0rc0a)acSN4hf4w25ZRyVD}+Cg-XlI)EB;ji;ZCuaZJ0 ztk#$wJ{E>-fwA!D zr9fVb7Oc%gHijhbrHeKxK+0qO@FX-c%)04>Z~DmPD2(>whM;LNe_grh2ePYM1`F=V zNLNQuPY{ZJZ0ZxX=`9LJNxF;tuzYF>6+9If2@^s)Q}oj64b$YGGX&(2mJQvwdIQP{ 
z;_v+7`FgSXE-!FC_+=Yhw}wLG~g1pTE%^6oT~>|iGvD<}WI4fVMdiq<^%!@K zpgN8h8bY+|3zVHC5Mn8Eh{?$qJenp$a;=yQ#fNE2kd{uP+w)UFcm{Jb#HbX|1T5&4 zfi#;~Mj@%HVbiRTIJy!HZ}k2>h0QzvbolY27(K(&PKncvk%}uUgf$Sm=VPsY?nxNC z0}5q~qa{w7GSj(^{{3M)QRB}NGO~_pHoQ_76@a!7VE<7=w#b}y8smAUZ27ee?)in{ zd$Mc>4n7&;a|j{DLGgob5YKe{UKzDjt3HZyvVtNWPc%Kh=_HL@ZBg5J&0H`zRG=?G zS^f61lmAV6)r36?oGTdm6!7|SKv9z7Z(5F3Xxf>xrS1pvHJM9cXGcjwtR=$&Fcnrp zT5OX;)^6~cn3wR)K#J5;<*re^L@0!!BqdVW$5eELOyr2L3(rRsky11bivZ{pJSjl` z(gX)1Z-C#dSiuu|EsB@u2qJ4#=1E zY1?>V(p9oPQueiyA%fG*l`myncr2Gc`PT0{T&67{$ z9)g+YX&h5e7%d9w@`IxOdn`;GLsQ24rqOiJcD7MEpMRt4kB=<^$F}Tut`OY;5UdFM3X$MEIHA++GJ{>@F93rhD_6m zXEx+ANH&x^uAcw3=n2BHDShNvVtZ3uXWULmL@uuo7A&Wi7t!zd>2LLu)A}(sY)8-` ziJ%sGhYeb!oS#Ja*hdcNKBhgp8HH(;OFZ0h5XFZlJgWM>8r3{AmOEd;5I^eSk1z=^ zhAnSe95GLZ)9-cr-HD2WBY;WK$Bt9OO6L`pAHjDx`LQ-Rs!()(a!kawDD6!@X;4Zi znc<70_*1ZNS9I@@i+dZLEx?Hz^7zGabwd+$cdFoT(d@1f0H1nrERAK_GBE+aVI(lY zf_om(Wk~_*zdLsLwOCq zMHYhrrUJ}PcW48U?P?3u*wncNLuSd%F%H}EP!&!bU~q`QUnXtlFtHAgvz=s)b7={2 z&v4t^^w=S4!GUYzeirGFW2v!;OIkV&2MMhn?8!f6;LO)G?RG)PYSIQ=rYe{?Sy_ob z-n|%+hYUg}zM(z|DHJT~znCo7J6a~&Av%#24NUJJOLj5ew!GiDk9=_&Grho|nB9~G z*njAmPUn_s@yMD%v0e@M*5z)XlPasc8o;`>E3;N>F1111cNx)rE(e@>J#f(c&y8rP zH>zBn=pO5=PWdkRZtn=L>g_zdN=Q5wmva>8>qc=xqZ!UVs0o}?5%^=Cy^sPej@?yj z+Mb(2D9mmU$9PJDTu1TH+zJITh-+x9oZdyf|oiq&>4XcBul^oFaDk)ZB6FF~UgwW|R*(^NaL;(2 zW;&|*eKyE_M4*=^C$)pqi^J+gZQnZ@Se7Bp@O=a6kW~hUHDgAJM&#Ou(M89*9O$w$HYrhY(kutZGa9E*SS`Bl%8toaxGvqW#x={0wKzk7OED_IBkUI@pNq7RVP{8>w8~NDZ zOh369be`8?9-&_aHw1C1?@R|RtzHGKtKi903?7U|A%p4I!G%mcro|f0BGOFq1Ez1# zjLHc2i@3&s6r64}@(vTazTgYBpPo?{a*{2+nUn&w0B?}_G#=~CFOwKa&D#&N4eS2r zw`IT9ZQ5QTcH6pB*4ODE@cwK}u9G{ItPF;z%@cR``lt%;MO0G{EUuJ8Z{@f8Yc}K;Zcw+9s;T%Amb7PBh zY(oYhTqwpA6NtKR3TB-~y`(dd(HGTnGr^xDfLGSBS%~AK(-Y9ue~II0HK2bD>X=*s z()romwhf=T4b(=KG8wU#rAuAoO58r*MK1)A7XK@Uj)1-~07faYw+O8z(iDQ05WE$@ zG^stDLN*~oB!p2E!5^~M>t127%4dcScfQ_jouWFM-B@X>E4)4JIIk+xx0vqHcqriO ztJGhu)^zNG6TaJXc((ASQl9KtI#rLrI0gYe9_Y-@E@p>z3r+kBOyDSf%e9Qd+h4fP;hSI$#5J(q1Gi90;|&A7?vQ`YSzRWH-|?n zpiQ%ifex&=yP?>E*0RLzKrIklITl}Tu?1FKsggc)B*6yj1B(8JMnUijC@W{WG9nT{ zNks(Q6Uu^r0jk@Tmp4Z;fRD`Eyki<=94kS=g2ChA`%vOTYHUIMeDDSu(AJwiU1n?~ z`M`=hVTGIkU#nzrHMv$gVV!poLIq_eR9(`MgjAUM=;-j|@=N@=RzE*HJ&E^D_iJK| zq-u~)3!A0rO^FE;!m3?cqzyN|P<94l92mvS1C9f4`f)d90fWh2lh6F%AQ|tb?#paH zFm^>?f}VXJP)u9UM#%Bj!!*I!#a&liQQfc)@)4$%v@4s>AanO(`h3>IiMQ)97$GK? zX`dmOb-Cc1zZ-qh8zbA zs#m)7IO#zEQ7mhqCFbY4V81)S{1uoRE`ze{?zp67s}%jYL<^3Z;pQ=7w9GkOJ&X5i zpAYvSF8kSe^t*m+ZRY5ADSvPakX*~G27Nf3=kakWlxFzct#6g!Q4wi{Yzk!H1KCCt54Hr=JdaPW5E-t1x7Bvi6IY(t?K7cb8zARom0fjU%F!kOAC|@ zr{$Ca)4ak)R!&iTGwh9H##5S?N1Wjbt3*RHt`R8TAQh=H&IxEqu}z4rf?q>LN|9{! 
zbrkB%K6w4qYgusETHPQVRuL(Sb)dnH)WBDw&1f~+;0?k@*{AB~8m29)$LF<+%QJhD ztWM%quPJAW9$zvDmd`G z(`}F%*lVWk+Jt#ze#2E5Y=LzjT3G0gKeR7xA`J#>@v9tz5TzP62Jt zPD*dOy5TpihFLak0c}THDp{WMmf~MRrvWrG#e_56m z0)>o%Vuh$O7Ho1bwJ&=|m*+d}=v2H(xS*;%P%$5S6s3ybpvwO@N2^9-i2q+VQbR;f75ntdzrXw%= z@(u}x0C(cjsS+?%q^F$O?BU7BQ>)$PQ0#HpMqh9zlv4)W0aL++M63`h2p;U&j$p%z zmFHGKEN9due>S&msg>idr`FM2hqNt_t9>~1Qkmm)pF=i`erlx;lHgX^Xvfl)Z z&EZlNv^010>^9Xjw6Tzw8&bl9QPe+@BHqomdCz**APE`7XX zEOic`hG5LZ4RTilM3P`0)|s<(2BGvmJ3CUhNldUj1tdbPWw{s(Yxtkju{0+zbxx=o z0_FzwGq}9uNo3xv9%Lw$3MF(jJF#9-KV(d#7XFL1 z&9-V>Ayw8R0Vc_Y3uIXG(rQ?(eNitkYu$Dz1R9wZlR=&ql+xm{xo9e93C-PEH@ch- zB&^$ta+sWrvypQ+qVWDU+IkR3IBE=tJ|_1EHP%w3`p~^%@vBhr;==&Z0ySqr;qSm| zQqRpD5xL8G+eW++RpZ_rNFzF>OE3nn$*$~7t}~?=ln=IL8@534B?Dth4>hewS#c$$ zZKma!Ri|Q>%3k9(QrnKzkCY>32(XleExr$NRz6#$nlssN&5K*F75i*3(^dh^ADZl< zD&ksv1cE~ESPZK%nxI*dz2a0HfbSd_r+- zmJkOU@)$I08flY=Vl!AW-RbgcWiN=@NJ-U2={2cP7X{8SHM1b(jb6kD8(v1*jyaG# zjs#tG)$}NjkwOR*`fP{WP-oVxMo6%e(Tez(^wb$lAHe9O(iu+2t=4v+N^95~a-Tn*vm3<{`e z-6@+C5H;|1sJx3__i%*=b&7bpitO=mRNKw`8DR*r+p}D*+DuWyi+%m zwq>|t_v}vWJBx@)T7SV^r`5MP^~hElg5}9W<7coc2mZ_|gX*5q z1S3?*@&Ki}Ez1Wg#}>Kq^`{fD67lNvTesJP5JLr*Tj|AE%A^mr=dsnIA#;^MY6Pl? zAUYwRS-f!m6;&2SLqo@jW28ZmO zfB0y=LKc@8B8-|c91EN~_^t#J39bj+Rx%`BWQwpY9@BAKBzw_Q`f zhs7(7mg_G_5i5r7?490;kzE5@)a;F%AA<`7u%o04MwgM%q?k0_t-!rto^C`5F*z7N zvS>!C%wGFyEWGv&(x3-ygC|RkQyGCR>Mk^B3f$TOTop}Z~T@-DeP|42jPAn zC!9n|Y2YL|0P9h~ltQP)tffPv*Yovk$DNrXGW+gyk;+MHX%T_X!WeYwf<&e`6M=#$ zzAj~<(EucfN3C7_C!?e#8$vD7;FLCiXVOz(InQnVSU??M&9{RTZvh5xE>z2udfQ@fyNnVeIKpsUDkN?X z%i-;owd6Y>Be#oc@wzK<*X5*TI{tMLl^J)1Pi7+74w3$cb^PjEryZo!>c>1B?X%HU z6A|kvt;TYA&QOHCZEaeHX6$UX&XOi(@vV68z``4`vLmm z2w*G2TY>PUrJd;F)8To1e7b*mRKs}MFct88AR11R-!M?83bsunOxkp>SlY?CbWm{a zVSm(^%v-wGgC42#;V7oBrO+4#PLOZLOTU}8m|9s~!Sj4{pzjIve0+Gm7w>;PsU9Eh zfu+=G9YF)#9=x_>9k{V8fNOZ=PdmJs3w%GAe`tnncbo@KuOB{#GaR67!z&;|t3!!K z+izgZ%AN`;xcv9{7z;q`O#eMRrQ8Yq#>jsSPcs%E-pKLW#yxSZ-I+3cnetK1!E=Cx zy=xKhFPqa0+6B4sBKsMIX(yb9OCguUW)PlFZJ+Y(YJt2!MmUWfG9?CdH~^eRv; z4U|iDIk$iG`t-WTr*`jL(CXbQmIjKYs+fnm{v;OP;o$P{)<<2r4T4$Tf+J8m%_^OS zN@JvraC!NPYGe&T;5Rgcp^F}~ya&|K#d${Qt59ik2u^wVYRI!-Iyc7ReqBGRehot@ zZX~_NoewBI&j8k|bmdgio?J8BEHy;wr0py!@x<=mEpp`+!Htil9AZG_=5*TB9Rg$H z^eCk^8gQ>rDhnCK#tG!fO|5eL%o~7s=na5_G^uh}#w_F%XnQ5vT*QQ(1CLq2^Rtp) zuG0w=>~zcpmz_~fHin7ROu+DgwPbX^cJ3Ricl(tI?R4PQlkTz}GyU*JxNCOrjn#OE zErrai!@5N_xf*tq;$61dO;z~t$ zFw1LnSVfx;3+SqEG4f73lI@7A#x*LH8v`Gd4bKgN5Toz%8$N_2k;wINF>oy97umKA zfDbZ~2#wYq;}rLgZrK}??OpT30G!1n`OY&2=t``T4?EjtECxKc9faQS+))W}RCstI zcaaRgfaD=y;|IG(e0lvk5Prtf-gU0{F0_E6lYDRW1U_#lCWPpzXtc(AQ8p)Ch;#v7;j)1A4Wa@lox6lE#I~ zNQ$T0I^bDxWXHWS1K&#n9A1B2aoxr=%sSS7{^F0?<1VNt#Q!PAC2d#mzZZg94jb)6 zZUo*3;dRI_3)Y6T!$a&w>u-wD%V_O8vX1jriQX<&1%Hex%$}Z~RL{;oonFYox~=C? 
z;kjU8#4Z#Bpxe1CxJ8T6^Wt-D$0@5lH>@l98O~H_G1}am3j2k8PCJ>!>CCp^Fr-FF zV=x^BI4>&%qPCInO`6(*d<9LK{6z_x_CXtUVq?&Kbhwhawy^4)19ew2F~o8mjg2gEKB1BsE)C`%GW^t!blFMNr{2X+OqGGx`QMr62@YGnPZIiC{lXka( zqb|3GQQn%|ADm%jGhm zeK76ymS4YqJ%5M4{1TPdN^8;U_0rF8qF;VNiTL^1{+IHRnB+m4lniX)&T1A!L8;0J>9(aY*)eGt|0P+!A1m~0XIQL%YS3w$eT>X#J zdbBpH6s+NBePc$k50?iY55GizW?hEA`A|67X>b57pc~n&1`1qPRt!Dkia{L8(z#NT zh4+K;ZqX{9VJTyjkjaH`dc=8#YSU@*9vlz5y`J_<6fAM$rqM;4A_&xJwIWVS(f7`p zRKZFDCSxIsjhs#>Fk*x2yZDq%Rs23-~UXROI`q*q%KA zb5Js!HmQ|F*>IH)_fvv)oTLy(sXK_s)XKCaS;?PGY1UzsVbcfHV`mNtU3Gmew_JcF zt3@!K5bM2wMnoK?wMaWU=~Ghz;pO!#uvb~MU>R|s$(`xV99Zj(XaG|RdMC8Hxf@Q$ zj6HYB6jlrdaA!v>URbWv$*_;dGByD=0V&z^x!_?c27^=F-1rs-NwB}&{npkAY8(bR*FLyWx# zTrCKq<QL6 za+CBJX9~7Z+fpANXjM=W5+I0K;CmQ*-LFcIdlioiyn58N%o(@?1#Cen3(@3xdyr&}<^9-YRcJNyFerl1D(ZLF+;o3E4%E*cSNFXf(uAk4~4o zC?`c73{SKq=UHrfK`7H}s4oD42;pQUid@8GSZ*d|CifCwDbnLeMwkZSE}(;)lqn1! zmlRKBU9`7z*BGx7~}oj^JQT)=G5w?m0T zJR$g4QfChGE9wXqba~k7>RIqkB)uKQ@)X@B-y3Kn2?vvV61`uWg$%?lp<{#55#XyB zWVvxgubkN>2(B9~A{PKMv?U1EkwaD5Fo%7Gb{4+ku=`Fr_f={;i_Ds_ zP(4_nw|rP;p`!X&aBK$Pcq_puZ;F2#GnbeP%iCN~q>X{ogp?+PfZP%^&joV*jf65P zd>21s@e^H%pQiXJ&VpIAXv4hzW?`U38+kzg^wUo{K$m}lf3v_8Z9EuE(MB$qZ)Sk` zW)3jlJOY?+^1=M+r!OEDc{ z^!dK*6YifN0mvXdU@yBXL#pb^g^k2s3%A@W%a`eFlE@>B^f$ z0CPynf|T#;n{2?=*L;e%wg_nJYtG7BTM)SQwH)Ytj%w%XIfv;3e(z5FL{sr|BYtLW;|H^Q)JFE4U4G7C_kiBaX7{L#2ZNc% z?!oJ9c8|cE#qLoX4+oRkJ(Tt|8+Xarx><*yKd&Dge0b|Y{{bt*yXbARJIMu}nt0G{ zg__6)@}U0VAPY!%HwTc|#KVBZCUSuMP|pTZyqgC|XySoDLKE3QemFR&X94-4`gVN| zAhC&u0f|i*A(kAWP_Hp)jPry-TkqE2t(CZ-;9tvo_nZUr$>(fN@ayVcb8j5s+KzI;hE^UMY-m%T-Zn)9|JreqrjDpQJjCPwIIA6a zV>}*$My(9*a)*Ys^Y3TY*udYWUOPXp?avq=Hk>&xL74yJCd5w#a4x+2pFL>z zKd*28yp;>;{^zFvbpLZEn8(4TARd<2S2tF-qT^|A(jE1Z?@2pY*ag+xkuNZYLl+#g6tou2=c}}AkPp6 z-XG*_?H9cDDGP$U1)g@3XfDW~gsNb}nUH_^e7Nrc{^h2dPraemQ@{%CW`g>8Z#}M_ zoYcO={t0w`e0EWbwl>y}qv~Lge2)&0#p!h$12lq28vT5^xNiF?#$_}=$MBA35AW!{ z!#jG~;T>fU@64AHh-dj=pS^t&aL=;k8EE+8f%)?cFn@j;Fn`WIfoh?Hvo`C{tUb|z vS>wa<$M$l$94&8Dw$>_Z%gg`k-#`EU`S;JifByaRlt2F;d_)pJ0B9Zn63mW^ literal 0 HcmV?d00001 diff --git a/6.3/0001-bbr2.patch b/6.3/0001-bbr2.patch index ad903369..db245253 100644 --- a/6.3/0001-bbr2.patch +++ b/6.3/0001-bbr2.patch @@ -1,7 +1,7 @@ -From 47998571042386996a5fb55be493e9927f594f76 Mon Sep 17 00:00:00 2001 +From 9b7414fdd16442c2efd67e1e34588caf4c780f1e Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 6 Mar 2023 18:43:03 +0100 -Subject: [PATCH 01/12] bbr2 +Subject: [PATCH 01/13] bbr2 Signed-off-by: Peter Jung --- diff --git a/6.3/0002-bfq.patch b/6.3/0002-bfq.patch index 1b63e4ba..5bf90cc5 100644 --- a/6.3/0002-bfq.patch +++ b/6.3/0002-bfq.patch @@ -1,7 +1,7 @@ -From efd83b97f8daac5950de449458be133718d6a8c7 Mon Sep 17 00:00:00 2001 +From 3598b17e783ddfa1ab4dbe8c1867f1be9e3a1566 Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 17 Apr 2023 18:21:50 +0200 -Subject: [PATCH 02/12] bfq +Subject: [PATCH 02/13] bfq Signed-off-by: Peter Jung --- diff --git a/6.3/0003-cachy.patch b/6.3/0003-cachy.patch index 33786e29..7f85c349 100644 --- a/6.3/0003-cachy.patch +++ b/6.3/0003-cachy.patch @@ -1,68 +1,70 @@ -From d9ae45f25bc334f2258284f5a509bd2c71cca8a1 Mon Sep 17 00:00:00 2001 +From 1b79bdbcce300d92a09023d487cc7445c9665a17 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:22:10 +0200 -Subject: [PATCH 03/12] cachy +Date: Sat, 22 Apr 2023 11:43:07 +0200 +Subject: [PATCH 03/13] cachy Signed-off-by: Peter Jung --- - .gitignore | 1 + - .../admin-guide/kernel-parameters.txt | 9 + - Documentation/dontdiff | 1 + - Makefile | 8 +- - arch/arc/configs/axs101_defconfig | 1 + - 
arch/arc/configs/axs103_defconfig | 1 + - arch/arc/configs/axs103_smp_defconfig | 1 + - arch/arc/configs/haps_hs_defconfig | 1 + - arch/arc/configs/haps_hs_smp_defconfig | 1 + - arch/arc/configs/hsdk_defconfig | 1 + - arch/arc/configs/nsim_700_defconfig | 1 + - arch/arc/configs/nsimosci_defconfig | 1 + - arch/arc/configs/nsimosci_hs_defconfig | 1 + - arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + - arch/arc/configs/tb10x_defconfig | 1 + - arch/arc/configs/vdk_hs38_defconfig | 1 + - arch/arc/configs/vdk_hs38_smp_defconfig | 1 + - arch/x86/Kconfig.cpu | 416 ++++++++++- - arch/x86/Makefile | 45 +- - arch/x86/Makefile.postlink | 41 ++ - arch/x86/boot/compressed/.gitignore | 1 - - arch/x86/boot/compressed/Makefile | 10 +- - arch/x86/include/asm/vermagic.h | 72 ++ - drivers/Makefile | 15 +- - drivers/i2c/busses/Kconfig | 9 + - drivers/i2c/busses/Makefile | 1 + - drivers/i2c/busses/i2c-nct6775.c | 647 ++++++++++++++++++ - drivers/i2c/busses/i2c-piix4.c | 4 +- - drivers/md/dm-crypt.c | 5 + - drivers/pci/quirks.c | 101 +++ - drivers/platform/x86/Kconfig | 14 + - drivers/platform/x86/Makefile | 3 + - drivers/platform/x86/steamdeck.c | 523 ++++++++++++++ - include/linux/pagemap.h | 2 +- - include/linux/user_namespace.h | 4 + - include/net/netns/ipv4.h | 1 + - include/trace/events/tcp.h | 7 + - init/Kconfig | 39 ++ - kernel/Kconfig.hz | 24 + - kernel/fork.c | 14 + - kernel/module/Kconfig | 25 + - kernel/sched/fair.c | 20 +- - kernel/sysctl.c | 12 + - kernel/user_namespace.c | 7 + - mm/Kconfig | 2 +- - mm/compaction.c | 4 + - mm/page-writeback.c | 8 + - mm/swap.c | 5 + - mm/vmpressure.c | 4 + - mm/vmscan.c | 8 + - net/ipv4/sysctl_net_ipv4.c | 7 + - net/ipv4/tcp_input.c | 36 + - net/ipv4/tcp_ipv4.c | 2 + - scripts/Makefile.lib | 13 +- - scripts/Makefile.modinst | 7 +- - 55 files changed, 2144 insertions(+), 46 deletions(-) + .gitignore | 1 + + .../admin-guide/kernel-parameters.txt | 9 + + Documentation/dontdiff | 1 + + Makefile | 8 +- + arch/arc/configs/axs101_defconfig | 1 + + arch/arc/configs/axs103_defconfig | 1 + + arch/arc/configs/axs103_smp_defconfig | 1 + + arch/arc/configs/haps_hs_defconfig | 1 + + arch/arc/configs/haps_hs_smp_defconfig | 1 + + arch/arc/configs/hsdk_defconfig | 1 + + arch/arc/configs/nsim_700_defconfig | 1 + + arch/arc/configs/nsimosci_defconfig | 1 + + arch/arc/configs/nsimosci_hs_defconfig | 1 + + arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + + arch/arc/configs/tb10x_defconfig | 1 + + arch/arc/configs/vdk_hs38_defconfig | 1 + + arch/arc/configs/vdk_hs38_smp_defconfig | 1 + + arch/x86/Kconfig.cpu | 416 ++- + arch/x86/Makefile | 45 +- + arch/x86/Makefile.postlink | 41 + + arch/x86/boot/compressed/.gitignore | 1 - + arch/x86/boot/compressed/Makefile | 10 +- + arch/x86/include/asm/vermagic.h | 72 + + drivers/Makefile | 15 +- + drivers/i2c/busses/Kconfig | 9 + + drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/i2c-nct6775.c | 647 ++++ + drivers/i2c/busses/i2c-piix4.c | 4 +- + drivers/md/dm-crypt.c | 5 + + drivers/pci/quirks.c | 101 + + drivers/platform/x86/Kconfig | 24 + + drivers/platform/x86/Makefile | 4 + + drivers/platform/x86/legion-laptop.c | 2783 +++++++++++++++++ + drivers/platform/x86/steamdeck.c | 523 ++++ + include/linux/pagemap.h | 2 +- + include/linux/user_namespace.h | 4 + + include/net/netns/ipv4.h | 1 + + include/trace/events/tcp.h | 7 + + init/Kconfig | 39 + + kernel/Kconfig.hz | 24 + + kernel/fork.c | 14 + + kernel/module/Kconfig | 25 + + kernel/sched/fair.c | 20 +- + kernel/sysctl.c | 12 + + kernel/user_namespace.c | 7 + + mm/Kconfig | 2 +- + 
mm/compaction.c | 4 +
+ mm/page-writeback.c | 8 +
+ mm/swap.c | 5 +
+ mm/vmpressure.c | 4 +
+ mm/vmscan.c | 8 +
+ net/ipv4/sysctl_net_ipv4.c | 7 +
+ net/ipv4/tcp_input.c | 36 +
+ net/ipv4/tcp_ipv4.c | 2 +
+ scripts/Makefile.lib | 13 +-
+ scripts/Makefile.modinst | 7 +-
+ 56 files changed, 4938 insertions(+), 46 deletions(-)
 create mode 100644 arch/x86/Makefile.postlink
 create mode 100644 drivers/i2c/busses/i2c-nct6775.c
+ create mode 100644 drivers/platform/x86/legion-laptop.c
 create mode 100644 drivers/platform/x86/steamdeck.c
 
diff --git a/.gitignore b/.gitignore
@@ -1922,10 +1924,27 @@ index 44cab813bf95..25edf55de985 100644
 };
 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
-index 4a01b315e0a9..e9ddf76b8b57 100644
+index 4a01b315e0a9..e4a6c31a80df 100644
 --- a/drivers/platform/x86/Kconfig
 +++ b/drivers/platform/x86/Kconfig
-@@ -1099,6 +1099,20 @@ config WINMATE_FM07_KEYS
+@@ -641,6 +641,16 @@ config THINKPAD_LMI
+ To compile this driver as a module, choose M here: the module will
+ be called think-lmi.
+ 
++config LEGION_LAPTOP
++ tristate "Lenovo Legion Laptop Extras"
++ depends on ACPI
++ depends on ACPI_WMI || ACPI_WMI = n
++ depends on HWMON || HWMON = n
++ select ACPI_PLATFORM_PROFILE
++ help
++ This is a driver for Lenovo Legion laptops and contains drivers for
++ hotkey, fan control, and power mode.
++
+ source "drivers/platform/x86/intel/Kconfig"
+ 
+ config MSI_LAPTOP
+@@ -1099,6 +1109,20 @@ config WINMATE_FM07_KEYS
 buttons below the display. This module adds an input device
 that delivers key events when these buttons are pressed.
 
@@ -1947,16 +1966,2813 @@ index 4a01b315e0a9..e9ddf76b8b57 100644
 
 config P2SB
 diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
-index 1d3d1b02541b..75b30a3face9 100644
+index 1d3d1b02541b..fde9a683103e 100644
 --- a/drivers/platform/x86/Makefile
 +++ b/drivers/platform/x86/Makefile
+@@ -66,6 +66,7 @@ obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
+ obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
+ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+ obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
++obj-$(CONFIG_LEGION_LAPTOP) += legion-laptop.o
+ 
+ # Intel
+ obj-y += intel/
+@@ -134,3 +135,6 @@ obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
 # Winmate
 obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o
 
+
+# Steam Deck
+obj-$(CONFIG_STEAMDECK) += steamdeck.o
+diff --git a/drivers/platform/x86/legion-laptop.c b/drivers/platform/x86/legion-laptop.c
+new file mode 100644
+index 000000000000..d1268d239cc5
+--- /dev/null
++++ b/drivers/platform/x86/legion-laptop.c
+@@ -0,0 +1,2783 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * legion-laptop.c - Extra Lenovo Legion laptop support, in
++ * particular for fan curve control and power mode.
++ *
++ * Copyright (C) 2022 johnfan
++ *
++ *
++ * This driver might work on other Lenovo Legion models. If you
++ * want to try it you can pass force=1 as an argument
++ * to the module which will force it to load even when the DMI
++ * data doesn't match the model AND FIRMWARE.
++ *
++ * Support for other hardware of this model is already partially
++ * provided by the module ideapad-laptop.
++ *
++ * The development page for this driver is located at
++ * https://github.com/johnfanv2/LenovoLegionLinux
++ *
++ * This driver exports the files:
++ * - /sys/kernel/debug/legion/fancurve (ro)
++ * The fan curve in the form stored in the firmware, as a
++ * human-readable table.
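++ * Example (assuming debugfs is mounted at /sys/kernel/debug):
++ * cat /sys/kernel/debug/legion/fancurve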
++ *
++ * - /sys/module/legion_laptop/drivers/platform\:legion/PNP0C09\:00/powermode (rw)
++ * 0: balanced mode (white)
++ * 1: performance mode (red)
++ * 2: quiet mode (blue)
++ * ?: custom mode (pink)
++ *
++ * NOTE: Writing to this will load the default fan curve from
++ * the firmware for this mode, so the fan curve might
++ * have to be reconfigured if needed.
++ *
++ * It implements the usual hwmon interface to monitor fan speed and temperature
++ * and allows setting the fan curve inside the firmware.
++ *
++ * - /sys/class/hwmon/X/fan1_input or /sys/class/hwmon/X/fan2_input (ro)
++ * Current fan speed of fan1/fan2.
++ * - /sys/class/hwmon/X/temp1_input (ro)
++ * - /sys/class/hwmon/X/temp2_input (ro)
++ * - /sys/class/hwmon/X/temp3_input (ro)
++ * Temperature (Celsius) of CPU, GPU, and IC used for fan control.
++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_pwm (rw)
++ * PWM (0-255) of the fan at the Y-level in the fan curve
++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_temp (rw)
++ * upper temperature of tempZ (CPU, GPU, or IC) at the Y-level in the fan curve
++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_temp_hyst (rw)
++ * hysteresis (CPU, GPU, or IC) at the Y-level in the fan curve. The lower
++ * temperature of the level is the upper temperature minus the hysteresis
++ *
++ *
++ * Credits for reverse engineering the firmware to:
++ * - David Woodhouse: heavily inspired by lenovo_laptop.c
++ * - Luke Cama: Windows version "LegionFanControl"
++ * - SmokelessCPU: reverse engineering of custom registers in EC
++ * and communication method with EC via ports
++ * - 0x1F9F1: additional reverse engineering for complete fan curve
++ */
++
++#include <linux/acpi.h>
++#include <linux/debugfs.h>
++#include <linux/delay.h>
++#include <linux/dmi.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/leds.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/platform_profile.h>
++#include <linux/seq_file.h>
++#include <linux/wmi.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("johnfan");
++MODULE_DESCRIPTION("Lenovo Legion laptop extras");
++
++static bool force;
++module_param(force, bool, 0440);
++MODULE_PARM_DESC(
++ force,
++ "Force loading this module even if model or BIOS does not match.");
++
++static bool ec_readonly;
++module_param(ec_readonly, bool, 0440);
++MODULE_PARM_DESC(
++ ec_readonly,
++ "Only read from embedded controller but do not write or change settings.");
++
++#define LEGIONFEATURES \
++ "fancurve powermode platformprofile platformprofilenotify minifancurve"
++
++// Size of the fan curve stored in the embedded controller
++#define MAXFANCURVESIZE 10
++
++#define LEGION_DRVR_SHORTNAME "legion"
++#define LEGION_HWMON_NAME LEGION_DRVR_SHORTNAME "_hwmon"
++
++/* =============================== */
++/* Embedded Controller Description */
++/* =============================== */
++
++/* The configuration and registers to access the embedded controller
++ * depend on the version of the software on the embedded controller
++ * and on the BIOS/UEFI firmware.
++ *
++ * To control the fan curve in the embedded controller (EC) one has to
++ * write to its "RAM". There are different possibilities:
++ * - EC RAM is memory mapped (write to it with ioremap)
++ * - access EC RAM via port-mapped IO (outb/inb)
++ * - access EC RAM via ACPI methods. It is only possible to write
++ * to part of it (first 0xFF bytes?)
++ *
++ * In later models the firmware exposes ACPI methods to set the fan
++ * curve directly, without writing to EC RAM. This is done inside the
++ * ACPI method.
++ */
++
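++/* This driver accesses the EC RAM through the port-mapped IO method
++ * (see struct ecram_portio below); the ACPI interface is used for
++ * rapid charge (QCHO/SBMC).
++ */
++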
++/**
++ * Offsets for interesting values inside the EC RAM (0 = start of
++ * EC RAM). These might change depending on the software inside of
++ * the EC, which can be updated by a BIOS update from Lenovo.
++ */
++// TODO: same order as in initialization
++struct ec_register_offsets {
++ // Super I/O Configuration Registers
++ // 7.15 General Control (GCTRL)
++ // General Control (GCTRL)
++ // (see EC Interface Registers and 6.2 Plug and Play Configuration (PNPCFG) in datasheet)
++ // note: these are saved in two places,
++ // in the EC Interface Registers and in the Super I/O configuration registers
++ // Chip ID
++ u16 ECHIPID1;
++ u16 ECHIPID2;
++ // Chip Version
++ u16 ECHIPVER;
++ u16 ECDEBUG;
++
++ // Lenovo Custom OEM extension
++ // The ITE firmware can be extended by a
++ // custom program using its own "variables"
++ // These are the offsets to these "variables"
++ u16 EXT_FAN_CUR_POINT;
++ u16 EXT_FAN_POINTS_SIZE;
++ u16 EXT_FAN1_BASE;
++ u16 EXT_FAN2_BASE;
++ u16 EXT_FAN_ACC_BASE;
++ u16 EXT_FAN_DEC_BASE;
++ u16 EXT_CPU_TEMP;
++ u16 EXT_CPU_TEMP_HYST;
++ u16 EXT_GPU_TEMP;
++ u16 EXT_GPU_TEMP_HYST;
++ u16 EXT_VRM_TEMP;
++ u16 EXT_VRM_TEMP_HYST;
++ u16 EXT_FAN1_RPM_LSB;
++ u16 EXT_FAN1_RPM_MSB;
++ u16 EXT_FAN2_RPM_LSB;
++ u16 EXT_FAN2_RPM_MSB;
++ u16 EXT_FAN1_TARGET_RPM;
++ u16 EXT_FAN2_TARGET_RPM;
++ u16 EXT_POWERMODE;
++ u16 EXT_MINIFANCURVE_ON_COOL;
++ // values
++ // 0x04: enable mini fan curve if very long on cool level
++ // - this might be due to potential temp failure
++ // - or just because it is really so cool
++ // 0xA0: disable it
++ u16 EXT_LOCKFANCONTROLLER;
++ u16 EXT_MAXIMUMFANSPEED;
++ u16 EXT_WHITE_KEYBOARD_BACKLIGHT;
++ u16 EXT_IC_TEMP_INPUT;
++ u16 EXT_CPU_TEMP_INPUT;
++ u16 EXT_GPU_TEMP_INPUT;
++};
++
++struct model_config {
++ const struct ec_register_offsets *registers;
++ bool check_embedded_controller_id;
++ u16 embedded_controller_id;
++
++ // first addr in EC we access/scan
++ phys_addr_t memoryio_physical_ec_start;
++ size_t memoryio_size;
++
++ // TODO: maybe use bitfield
++ bool has_minifancurve;
++};
++
++/* =================================== */
++/* Configuration for different models */
++/* =================================== */
++
++// Idea by SmokelessCPU (modified)
++// - all default names and register addresses are as documented in the datasheet
++// - register addresses for custom firmware by SmokelessCPU
++static const struct ec_register_offsets ec_register_offsets_v0 = {
++ .ECHIPID1 = 0x2000,
++ .ECHIPID2 = 0x2001,
++ .ECHIPVER = 0x2002,
++ .ECDEBUG = 0x2003,
++ .EXT_FAN_CUR_POINT = 0xC534,
++ .EXT_FAN_POINTS_SIZE = 0xC535,
++ .EXT_FAN1_BASE = 0xC540,
++ .EXT_FAN2_BASE = 0xC550,
++ .EXT_FAN_ACC_BASE = 0xC560,
++ .EXT_FAN_DEC_BASE = 0xC570,
++ .EXT_CPU_TEMP = 0xC580,
++ .EXT_CPU_TEMP_HYST = 0xC590,
++ .EXT_GPU_TEMP = 0xC5A0,
++ .EXT_GPU_TEMP_HYST = 0xC5B0,
++ .EXT_VRM_TEMP = 0xC5C0,
++ .EXT_VRM_TEMP_HYST = 0xC5D0,
++ .EXT_FAN1_RPM_LSB = 0xC5E0,
++ .EXT_FAN1_RPM_MSB = 0xC5E1,
++ .EXT_FAN2_RPM_LSB = 0xC5E2,
++ .EXT_FAN2_RPM_MSB = 0xC5E3,
++ .EXT_MINIFANCURVE_ON_COOL = 0xC536,
++ .EXT_LOCKFANCONTROLLER = 0xc4AB,
++ .EXT_CPU_TEMP_INPUT = 0xc538,
++ .EXT_GPU_TEMP_INPUT = 0xc539,
++ .EXT_IC_TEMP_INPUT = 0xC5E8,
++ .EXT_POWERMODE = 0xc420,
++ .EXT_FAN1_TARGET_RPM = 0xc600,
++ .EXT_FAN2_TARGET_RPM = 0xc601,
++ .EXT_MAXIMUMFANSPEED = 0xBD,
++ .EXT_WHITE_KEYBOARD_BACKLIGHT = (0x3B + 0xC400)
++};
++
++static const struct model_config model_v0 = {
++ .registers = &ec_register_offsets_v0,
++ .check_embedded_controller_id = true,
++ .embedded_controller_id = 0x8227,
++ .memoryio_physical_ec_start = 0xC400,
++ .memoryio_size = 0x300,
++ .has_minifancurve = true
++}; ++ ++static const struct model_config model_kfcn = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = true, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; ++ ++static const struct model_config model_hacn = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = false, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; ++ ++ ++static const struct model_config model_k9cn = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = false, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, // or replace 0xC400 by 0x0400 ? ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; ++ ++ ++ ++static const struct dmi_system_id denylist[] = { {} }; ++ ++static const struct dmi_system_id optimistic_allowlist[] = { ++ { ++ // modelyear: 2021 ++ // generation: 6 ++ // name: Legion 5, Legion 5 pro, Legion 7 ++ // Family: Legion 5 15ACH6H, ... ++ .ident = "GKCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "GKCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "EUCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "EUCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "EFCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "EFCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "FSCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "FSCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "HHCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "HHCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "H1CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "H1CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "J2CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "J2CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "JUCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "JUCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "KFCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "KFCN"), ++ }, ++ .driver_data = (void *)&model_kfcn ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "HACN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "HACN"), ++ }, ++ .driver_data = (void *)&model_hacn ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "G9CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "G9CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "K9CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "K9CN"), ++ }, ++ .driver_data = (void *)&model_k9cn ++ }, ++ {} ++}; ++ ++/* ================================= */ ++/* ACPI access */ ++/* ================================= */ ++ ++// function from ideapad-laptop.c 
++static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
++{
++ unsigned long long result;
++ acpi_status status;
++
++ status = acpi_evaluate_integer(handle, (char *)name, NULL, &result);
++ if (ACPI_FAILURE(status))
++ return -EIO;
++
++ *res = result;
++
++ return 0;
++}
++
++// function from ideapad-laptop.c
++static int exec_simple_method(acpi_handle handle, const char *name,
++ unsigned long arg)
++{
++ acpi_status status =
++ acpi_execute_simple_method(handle, (char *)name, arg);
++
++ return ACPI_FAILURE(status) ? -EIO : 0;
++}
++
++// function from ideapad-laptop.c
++static int exec_sbmc(acpi_handle handle, unsigned long arg)
++{
++ // \_SB.PCI0.LPC0.EC0.VPC0.SBMC
++ return exec_simple_method(handle, "SBMC", arg);
++}
++
++static int eval_qcho(acpi_handle handle, unsigned long *res)
++{
++ // \_SB.PCI0.LPC0.EC0.QCHO
++ return eval_int(handle, "QCHO", res);
++}
++
++/* ================================= */
++/* EC RAM Access with port-mapped IO */
++/* ================================= */
++
++/*
++ * See the datasheet of e.g. the IT8502E/F/G, section
++ * 6.2 Plug and Play Configuration (PNPCFG).
++ *
++ * Depending on the configured BARDSEL register,
++ * the ports
++ * ECRAM_PORTIO_ADDR_PORT and
++ * ECRAM_PORTIO_DATA_PORT
++ * are selected.
++ *
++ * By performing IO on these ports one can
++ * read/write registers in the EC.
++ *
++ * "To access a register of PNPCFG, write target index to
++ * address port and access this PNPCFG register via
++ * data port" [datasheet, 6.2 Plug and Play Configuration]
++ */
++
++// IO ports used to communicate with the embedded controller
++// Start of used ports
++#define ECRAM_PORTIO_START_PORT 0x4E
++// Number of used ports
++#define ECRAM_PORTIO_PORTS_SIZE 2
++// Port used to specify the address in EC RAM to read/write
++// 0x4E/0x4F is the usual port pair for the Super I/O controller
++// 0x2E/0x2F is also common (the ITE EC can also be configured to use these)
++#define ECRAM_PORTIO_ADDR_PORT 0x4E
++// Port to send/receive the value to write/read
++#define ECRAM_PORTIO_DATA_PORT 0x4F
++// Name used to request ports
++#define ECRAM_PORTIO_NAME "legion"
++
++struct ecram_portio {
++ /* protects read/write to EC RAM performed
++ * as a certain sequence of outb, inb
++ * commands on the IO ports. There can
++ * be at most one such sequence at a time.
++ */
++ struct mutex io_port_mutex;
++};
++
++ssize_t ecram_portio_init(struct ecram_portio *ec_portio)
++{
++ if (!request_region(ECRAM_PORTIO_START_PORT, ECRAM_PORTIO_PORTS_SIZE,
++ ECRAM_PORTIO_NAME)) {
++ pr_info("Cannot init ecram_portio: cannot request the %x ports starting at %x\n",
++ ECRAM_PORTIO_PORTS_SIZE, ECRAM_PORTIO_START_PORT);
++ return -ENODEV;
++ }
++ //pr_info("Reserved %x ports starting at %x\n", ECRAM_PORTIO_PORTS_SIZE, ECRAM_PORTIO_START_PORT);
++ mutex_init(&ec_portio->io_port_mutex);
++ return 0;
++}
++
++void ecram_portio_exit(struct ecram_portio *ec_portio)
++{
++ release_region(ECRAM_PORTIO_START_PORT, ECRAM_PORTIO_PORTS_SIZE);
++}
++
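++/* Access pattern used below (reverse engineered, see credits in the
++ * header): write the index of a PNPCFG register to the address port,
++ * then access it through the data port. Registers 0x11/0x10 latch the
++ * high/low byte of the 16-bit EC RAM address, and register 0x12
++ * transfers the data byte itself.
++ */
++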
++/* Read a byte from the EC RAM.
++ *
++ * Returns a status code because all methods to access
++ * EC RAM share a common signature.
++ */
++ssize_t ecram_portio_read(struct ecram_portio *ec_portio, u16 offset, u8 *value)
++{
++ mutex_lock(&ec_portio->io_port_mutex);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x11, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ // TODO: casts between types are sometimes explicit
++ // and sometimes not
++ outb((u8)((offset >> 8) & 0xFF), ECRAM_PORTIO_DATA_PORT);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x10, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ outb((u8)(offset & 0xFF), ECRAM_PORTIO_DATA_PORT);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x12, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ *value = inb(ECRAM_PORTIO_DATA_PORT);
++
++ mutex_unlock(&ec_portio->io_port_mutex);
++ return 0;
++}
++
++/* Write a byte to the EC RAM.
++ *
++ * Returns a status code because all methods to access
++ * EC RAM share a common signature.
++ */
++ssize_t ecram_portio_write(struct ecram_portio *ec_portio, u16 offset, u8 value)
++{
++ mutex_lock(&ec_portio->io_port_mutex);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x11, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ // TODO: casts between types are sometimes explicit
++ // and sometimes not
++ outb((u8)((offset >> 8) & 0xFF), ECRAM_PORTIO_DATA_PORT);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x10, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ outb((u8)(offset & 0xFF), ECRAM_PORTIO_DATA_PORT);
++
++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT);
++ outb(0x12, ECRAM_PORTIO_DATA_PORT);
++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT);
++ outb(value, ECRAM_PORTIO_DATA_PORT);
++
++ mutex_unlock(&ec_portio->io_port_mutex);
++ return 0;
++}
++
++/* =================================== */
++/* EC RAM Access */
++/* =================================== */
++
++struct ecram {
++ struct ecram_portio portio;
++};
++
++ssize_t ecram_init(struct ecram *ecram, phys_addr_t memoryio_ec_physical_start,
++ size_t region_size)
++{
++ ssize_t err;
++
++ err = ecram_portio_init(&ecram->portio);
++ if (err) {
++ pr_info("Failed ecram_portio_init\n");
++ goto err_ecram_portio_init;
++ }
++
++ return 0;
++
++err_ecram_portio_init:
++ return err;
++}
++
++void ecram_exit(struct ecram *ecram)
++{
++ pr_info("Unloading legion ecram\n");
++ ecram_portio_exit(&ecram->portio);
++ pr_info("Unloading legion ecram done\n");
++}
++
++/**
++ * ecram_read - read the byte at offset ecram_offset in the EC RAM
++ */
++static u8 ecram_read(struct ecram *ecram, u16 ecram_offset)
++{
++ u8 value;
++ int err;
++
++ err = ecram_portio_read(&ecram->portio, ecram_offset, &value);
++ if (err)
++ pr_info("Error reading EC RAM at 0x%x\n", ecram_offset);
++ return value;
++}
++
++static void ecram_write(struct ecram *ecram, u16 ecram_offset, u8 value)
++{
++ int err;
++
++ if (ec_readonly) {
++ pr_info("Skipping writing EC RAM at 0x%x because readonly.\n",
++ ecram_offset);
++ return;
++ }
++ err = ecram_portio_write(&ecram->portio, ecram_offset, value);
++ if (err)
++ pr_info("Error writing EC RAM at 0x%x\n", ecram_offset);
++}
++
++/* =============================== */
++/* Reads from EC */
++/* =============================== */
++
++u16 read_ec_id(struct ecram *ecram, const struct model_config *model)
++{
++ u8 id1 = ecram_read(ecram, model->registers->ECHIPID1);
++ u8 id2 = ecram_read(ecram, model->registers->ECHIPID2);
++
++ return (id1 << 8) + id2;
++}
++
++u16 read_ec_version(struct ecram *ecram, const struct model_config *model)
++{
++ u8 vers = ecram_read(ecram, model->registers->ECHIPVER);
++ u8 debug = ecram_read(ecram, model->registers->ECDEBUG);
++
++ return (vers << 8) + debug;
++}
++
++/* ============================= */
++/* Data model for sensor values */
++/* ============================= */
++
++struct sensor_values {
++ u16 fan1_rpm; // current speed in rpm of fan 1
++ u16 fan2_rpm; // current speed in rpm of fan 2
++ u16 fan1_target_rpm; // target speed in rpm of fan 1
++ u16 fan2_target_rpm; // target speed in rpm of fan 2
++ u8 cpu_temp_celsius; // cpu temperature in celsius
++ u8 gpu_temp_celsius; // gpu temperature in celsius
++ u8 ic_temp_celsius; // ic temperature in celsius
++};
++
++enum SENSOR_ATTR {
++ SENSOR_CPU_TEMP_ID = 1,
++ SENSOR_GPU_TEMP_ID = 2,
++ SENSOR_IC_TEMP_ID = 3,
++ SENSOR_FAN1_RPM_ID = 4,
++ SENSOR_FAN2_RPM_ID = 5,
++ SENSOR_FAN1_TARGET_RPM_ID = 6,
++ SENSOR_FAN2_TARGET_RPM_ID = 7
++};
++
++static int read_sensor_values(struct ecram *ecram,
++ const struct model_config *model,
++ struct sensor_values *values)
++{
++ values->fan1_target_rpm =
++ 100 * ecram_read(ecram, model->registers->EXT_FAN1_TARGET_RPM);
++ values->fan2_target_rpm =
++ 100 * ecram_read(ecram, model->registers->EXT_FAN2_TARGET_RPM);
++
++ values->fan1_rpm =
++ ecram_read(ecram, model->registers->EXT_FAN1_RPM_LSB) +
++ (((int)ecram_read(ecram, model->registers->EXT_FAN1_RPM_MSB))
++ << 8);
++ values->fan2_rpm =
++ ecram_read(ecram, model->registers->EXT_FAN2_RPM_LSB) +
++ (((int)ecram_read(ecram, model->registers->EXT_FAN2_RPM_MSB))
++ << 8);
++
++ values->cpu_temp_celsius =
++ ecram_read(ecram, model->registers->EXT_CPU_TEMP_INPUT);
++ values->gpu_temp_celsius =
++ ecram_read(ecram, model->registers->EXT_GPU_TEMP_INPUT);
++ values->ic_temp_celsius =
++ ecram_read(ecram, model->registers->EXT_IC_TEMP_INPUT);
++
++ // Note: the reads from fixed addresses below overwrite the values
++ // just read through the model-specific offsets.
++ values->cpu_temp_celsius = ecram_read(ecram, 0xC5E6);
++ values->gpu_temp_celsius = ecram_read(ecram, 0xC5E7);
++ values->ic_temp_celsius = ecram_read(ecram, 0xC5E8);
++
++ return 0;
++}
++
++/* =============================== */
++/* Behaviour changing functions */
++/* =============================== */
++
++int read_powermode(struct ecram *ecram, const struct model_config *model)
++{
++ return ecram_read(ecram, model->registers->EXT_POWERMODE);
++}
++
++ssize_t write_powermode(struct ecram *ecram, const struct model_config *model,
++ u8 value)
++{
++ if (value > 2) {
++ pr_info("Unexpected power mode value ignored: %d\n", value);
++ return -EINVAL;
++ }
++ ecram_write(ecram, model->registers->EXT_POWERMODE, value);
++ return 0;
++}
++
++/**
++ * Briefly toggle the power mode to a different mode
++ * and switch back, e.g. to reset the fan curve.
++ */
++void toggle_powermode(struct ecram *ecram, const struct model_config *model)
++{
++ int old_powermode = read_powermode(ecram, model);
++ int next_powermode = old_powermode == 0 ? 1 : 0;
++
++ write_powermode(ecram, model, next_powermode);
++ mdelay(1500);
++ write_powermode(ecram, model, old_powermode);
++}
++
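++/* Note: as described in the header comment, writing the power mode (also
++ * through the powermode sysfs file) makes the firmware load its default
++ * fan curve for that mode; toggle_powermode() above relies on this to
++ * reset the fan curve.
++ */
++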
++#define lockfancontroller_ON 8
++#define lockfancontroller_OFF 0
++
++ssize_t write_lockfancontroller(struct ecram *ecram,
++ const struct model_config *model, bool state)
++{
++ u8 val = state ? lockfancontroller_ON : lockfancontroller_OFF;
++
++ ecram_write(ecram, model->registers->EXT_LOCKFANCONTROLLER, val);
++ return 0;
++}
++
++int read_lockfancontroller(struct ecram *ecram,
++ const struct model_config *model, bool *state)
++{
++ int value = ecram_read(ecram, model->registers->EXT_LOCKFANCONTROLLER);
++
++ switch (value) {
++ case lockfancontroller_ON:
++ *state = true;
++ break;
++ case lockfancontroller_OFF:
++ *state = false;
++ break;
++ default:
++ pr_info("Unexpected value in lockfancontroller register:%d\n",
++ value);
++ return -1;
++ }
++ return 0;
++}
++
++#define MAXIMUMFANSPEED_ON 0x40
++#define MAXIMUMFANSPEED_OFF 0x00
++
++int read_maximumfanspeed(struct ecram *ecram, const struct model_config *model,
++ bool *state)
++{
++ int value = ecram_read(ecram, model->registers->EXT_MAXIMUMFANSPEED);
++
++ switch (value) {
++ case MAXIMUMFANSPEED_ON:
++ *state = true;
++ break;
++ case MAXIMUMFANSPEED_OFF:
++ *state = false;
++ break;
++ default:
++ pr_info("Unexpected value in maximumfanspeed register:%d\n",
++ value);
++ return -1;
++ }
++ return 0;
++}
++
++ssize_t write_maximumfanspeed(struct ecram *ecram,
++ const struct model_config *model, bool state)
++{
++ u8 val = state ? MAXIMUMFANSPEED_ON : MAXIMUMFANSPEED_OFF;
++
++ ecram_write(ecram, model->registers->EXT_MAXIMUMFANSPEED, val);
++ return 0;
++}
++
++#define MINIFANCUVE_ON_COOL_ON 0x04
++#define MINIFANCUVE_ON_COOL_OFF 0xA0
++
++int read_minifancurve(struct ecram *ecram, const struct model_config *model,
++ bool *state)
++{
++ int value =
++ ecram_read(ecram, model->registers->EXT_MINIFANCURVE_ON_COOL);
++
++ switch (value) {
++ case MINIFANCUVE_ON_COOL_ON:
++ *state = true;
++ break;
++ case MINIFANCUVE_ON_COOL_OFF:
++ *state = false;
++ break;
++ default:
++ pr_info("Unexpected value in MINIFANCURVE register:%d\n",
++ value);
++ return -1;
++ }
++ return 0;
++}
++
++ssize_t write_minifancurve(struct ecram *ecram,
++ const struct model_config *model, bool state)
++{
++ u8 val = state ? MINIFANCUVE_ON_COOL_ON : MINIFANCUVE_ON_COOL_OFF;
++
++ ecram_write(ecram, model->registers->EXT_MINIFANCURVE_ON_COOL, val);
++ return 0;
++}
++
++#define KEYBOARD_BACKLIGHT_OFF 18
++#define KEYBOARD_BACKLIGHT_ON1 21
++#define KEYBOARD_BACKLIGHT_ON2 23
++
++int read_keyboard_backlight(struct ecram *ecram,
++ const struct model_config *model, int *state)
++{
++ int value = ecram_read(ecram,
++ model->registers->EXT_WHITE_KEYBOARD_BACKLIGHT);
++
++ //switch (value) {
++ //case MINIFANCUVE_ON_COOL_ON:
++ // *state = true;
++ // break;
++ //case MINIFANCUVE_ON_COOL_OFF:
++ // *state = false;
++ // break;
++ //default:
++ // pr_info("Unexpected value in MINIFANCURVE register:%d\n",
++ // value);
++ // return -1;
++ //}
++ *state = value;
++ return 0;
++}
++
++int write_keyboard_backlight(struct ecram *ecram,
++ const struct model_config *model, int state)
++{
++ u8 val = state > 0 ? KEYBOARD_BACKLIGHT_ON1 : KEYBOARD_BACKLIGHT_OFF;
++
++ ecram_write(ecram, model->registers->EXT_WHITE_KEYBOARD_BACKLIGHT, val);
++ return 0;
++}
++
++#define FCT_RAPID_CHARGE_ON 0x07
++#define FCT_RAPID_CHARGE_OFF 0x08
++#define RAPID_CHARGE_ON 0x0
++#define RAPID_CHARGE_OFF 0x1
++
++int read_rapidcharge(acpi_handle acpihandle, int *state)
++{
++ unsigned long result;
++ int err;
++
++ err = eval_qcho(acpihandle, &result);
++ if (err)
++ return err;
++
++ *state = result;
++ return 0;
++}
++
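++/* write_rapidcharge() below drives the ACPI method
++ * \_SB.PCI0.LPC0.EC0.VPC0.SBMC (see exec_sbmc() above) with argument
++ * 0x07 to enable or 0x08 to disable rapid charging.
++ */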
++int write_rapidcharge(acpi_handle acpihandle, bool state)
++{
++	unsigned long fct_nr = state ? FCT_RAPID_CHARGE_ON :
++				       FCT_RAPID_CHARGE_OFF;
++	return exec_sbmc(acpihandle, fct_nr);
++}
++
++/* ============================= */
++/* Data model for fan curve      */
++/* ============================= */
++
++struct fancurve_point {
++	// rpm1 divided by 100
++	u8 rpm1_raw;
++	// rpm2 divided by 100
++	u8 rpm2_raw;
++	// >= 2, <= 5 (lower is faster); must be increasing by level
++	u8 accel;
++	// >= 2, <= 5 (lower is faster); must be increasing by level
++	u8 decel;
++
++	// min must be less than or equal to max
++	// last level max must be 127
++	// <= 127 cpu max temp for this level; must be increasing by level
++	u8 cpu_max_temp_celsius;
++	// <= 127 cpu min temp for this level; must be increasing by level
++	u8 cpu_min_temp_celsius;
++	// <= 127 gpu max temp for this level; must be increasing by level
++	u8 gpu_max_temp_celsius;
++	// <= 127 gpu min temp for this level; must be increasing by level
++	u8 gpu_min_temp_celsius;
++	// <= 127 ic max temp for this level; must be increasing by level
++	u8 ic_max_temp_celsius;
++	// <= 127 ic min temp for this level; must be increasing by level
++	u8 ic_min_temp_celsius;
++};
++
++enum FANCURVE_ATTR {
++	FANCURVE_ATTR_PWM1 = 1,
++	FANCURVE_ATTR_PWM2 = 2,
++	FANCURVE_ATTR_CPU_TEMP = 3,
++	FANCURVE_ATTR_CPU_HYST = 4,
++	FANCURVE_ATTR_GPU_TEMP = 5,
++	FANCURVE_ATTR_GPU_HYST = 6,
++	FANCURVE_ATTR_IC_TEMP = 7,
++	FANCURVE_ATTR_IC_HYST = 8,
++	FANCURVE_ATTR_ACCEL = 9,
++	FANCURVE_ATTR_DECEL = 10,
++	FANCURVE_SIZE = 11,
++	FANCURVE_MINIFANCURVE_ON_COOL = 12
++};
++
++// used for clearing table entries
++static const struct fancurve_point fancurve_point_zero = { 0, 0, 0, 0, 0,
++							   0, 0, 0, 0, 0 };
++
++struct fancurve {
++	struct fancurve_point points[MAXFANCURVESIZE];
++	// number of points used; must be <= MAXFANCURVESIZE
++	size_t size;
++	// the point at which the fans currently run
++	size_t current_point_i;
++};
++
++// calculate derived values
++
++int fancurve_get_cpu_deltahyst(struct fancurve_point *point)
++{
++	return ((int)point->cpu_max_temp_celsius) -
++	       ((int)point->cpu_min_temp_celsius);
++}
++
++int fancurve_get_gpu_deltahyst(struct fancurve_point *point)
++{
++	return ((int)point->gpu_max_temp_celsius) -
++	       ((int)point->gpu_min_temp_celsius);
++}
++
++int fancurve_get_ic_deltahyst(struct fancurve_point *point)
++{
++	return ((int)point->ic_max_temp_celsius) -
++	       ((int)point->ic_min_temp_celsius);
++}
++
++// validation functions
++
++bool fancurve_is_valid_min_temp(int min_temp)
++{
++	return min_temp >= 0 && min_temp <= 127;
++}
++
++bool fancurve_is_valid_max_temp(int max_temp)
++{
++	return max_temp >= 0 && max_temp <= 127;
++}
++
++// setters with validation
++// - make hwmon implementation easier
++// - keep fancurve valid, otherwise EC will not properly control fan
++
++bool fancurve_set_rpm1(struct fancurve *fancurve, int point_id, int rpm)
++{
++	bool valid = point_id == 0 ? rpm == 0 : (rpm >= 0 && rpm <= 4500);
++
++	if (valid)
++		fancurve->points[point_id].rpm1_raw = rpm / 100;
++	return valid;
++}
++
++bool fancurve_set_rpm2(struct fancurve *fancurve, int point_id, int rpm)
++{
++	bool valid = point_id == 0 ? rpm == 0 : (rpm >= 0 && rpm <= 4500);
++
++	if (valid)
++		fancurve->points[point_id].rpm2_raw = rpm / 100;
++	return valid;
++}
++
++// TODO: remove { ... } from single-line if bodies
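++
++/* accel/decel are the EC's ramp-up/ramp-down time constants per level;
++ * only values in the 2..5 range appear to be accepted (lower is faster). */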
++bool fancurve_set_accel(struct fancurve *fancurve, int point_id, int accel)
++{
++	bool valid = accel >= 2 && accel <= 5;
++
++	if (valid)
++		fancurve->points[point_id].accel = accel;
++	return valid;
++}
++
++bool fancurve_set_decel(struct fancurve *fancurve, int point_id, int decel)
++{
++	bool valid = decel >= 2 && decel <= 5;
++
++	if (valid)
++		fancurve->points[point_id].decel = decel;
++	return valid;
++}
++
++bool fancurve_set_cpu_temp_max(struct fancurve *fancurve, int point_id,
++			       int value)
++{
++	bool valid = fancurve_is_valid_max_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].cpu_max_temp_celsius = value;
++
++	return valid;
++}
++
++bool fancurve_set_gpu_temp_max(struct fancurve *fancurve, int point_id,
++			       int value)
++{
++	bool valid = fancurve_is_valid_max_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].gpu_max_temp_celsius = value;
++	return valid;
++}
++
++bool fancurve_set_ic_temp_max(struct fancurve *fancurve, int point_id,
++			      int value)
++{
++	bool valid = fancurve_is_valid_max_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].ic_max_temp_celsius = value;
++	return valid;
++}
++
++bool fancurve_set_cpu_temp_min(struct fancurve *fancurve, int point_id,
++			       int value)
++{
++	bool valid = fancurve_is_valid_min_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].cpu_min_temp_celsius = value;
++	return valid;
++}
++
++bool fancurve_set_gpu_temp_min(struct fancurve *fancurve, int point_id,
++			       int value)
++{
++	bool valid = fancurve_is_valid_min_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].gpu_min_temp_celsius = value;
++	return valid;
++}
++
++bool fancurve_set_ic_temp_min(struct fancurve *fancurve, int point_id,
++			      int value)
++{
++	bool valid = fancurve_is_valid_min_temp(value);
++
++	if (valid)
++		fancurve->points[point_id].ic_min_temp_celsius = value;
++	return valid;
++}
++
++bool fancurve_set_size(struct fancurve *fancurve, int size, bool init_values)
++{
++	bool valid = size >= 1 && size <= MAXFANCURVESIZE;
++
++	if (!valid)
++		return false;
++	if (init_values && size < fancurve->size) {
++		// fan curve size is decreased, but the last entry always
++		// needs max temperatures of 127
++		// Note: size >= 1
++		fancurve->points[size - 1].cpu_max_temp_celsius = 127;
++		fancurve->points[size - 1].ic_max_temp_celsius = 127;
++		fancurve->points[size - 1].gpu_max_temp_celsius = 127;
++	}
++	if (init_values && size > fancurve->size) {
++		// fan curve increased, so new entries need valid values
++		int i;
++		int last = fancurve->size > 0 ? fancurve->size - 1 : 0;
++
++		for (i = fancurve->size; i < size; ++i)
++			fancurve->points[i] = fancurve->points[last];
++	}
++	return true;
++}
++
++/* Read the fan curve from the EC.
++ *
++ * In newer models (>=2022) there is an ACPI/WMI method to read the fan curve
++ * as a whole. So read/write the fan table as a whole to use the
++ * same interface for both cases.
++ *
++ * It reads all points from EC memory, even if the stored fan curve is
++ * smaller, so it can contain 0 entries.
++ */
++static int read_fancurve(struct ecram *ecram, const struct model_config *model,
++			 struct fancurve *fancurve)
++{
++	size_t i = 0;
++
++	for (i = 0; i < MAXFANCURVESIZE; ++i) {
++		struct fancurve_point *point = &fancurve->points[i];
++
++		point->rpm1_raw =
++			ecram_read(ecram, model->registers->EXT_FAN1_BASE + i);
++		point->rpm2_raw =
++			ecram_read(ecram, model->registers->EXT_FAN2_BASE + i);
++
++		point->accel = ecram_read(
++			ecram, model->registers->EXT_FAN_ACC_BASE + i);
++		point->decel = ecram_read(
++			ecram, model->registers->EXT_FAN_DEC_BASE + i);
++		point->cpu_max_temp_celsius =
++			ecram_read(ecram, model->registers->EXT_CPU_TEMP + i);
++		point->cpu_min_temp_celsius = ecram_read(
++			ecram, model->registers->EXT_CPU_TEMP_HYST + i);
++		point->gpu_max_temp_celsius =
++			ecram_read(ecram, model->registers->EXT_GPU_TEMP + i);
++		point->gpu_min_temp_celsius = ecram_read(
++			ecram, model->registers->EXT_GPU_TEMP_HYST + i);
++		point->ic_max_temp_celsius =
++			ecram_read(ecram, model->registers->EXT_VRM_TEMP + i);
++		point->ic_min_temp_celsius = ecram_read(
++			ecram, model->registers->EXT_VRM_TEMP_HYST + i);
++	}
++
++	// Do not trust the hardware; it might suddenly report
++	// a larger size, so clamp it.
++	fancurve->size =
++		ecram_read(ecram, model->registers->EXT_FAN_POINTS_SIZE);
++	fancurve->size =
++		min(fancurve->size, (typeof(fancurve->size))(MAXFANCURVESIZE));
++	fancurve->current_point_i =
++		ecram_read(ecram, model->registers->EXT_FAN_CUR_POINT);
++	fancurve->current_point_i =
++		min(fancurve->current_point_i, fancurve->size);
++	return 0;
++}
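++
++/* Write the fan curve to the EC. Points beyond the used size are zeroed,
++ * and the current point and internal fan levels are reset afterwards so the
++ * EC re-evaluates the curve without stale hysteresis state. */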
++static int write_fancurve(struct ecram *ecram, const struct model_config *model,
++			  const struct fancurve *fancurve, bool write_size)
++{
++	size_t i;
++	// Reset fan update counters (try to avoid any race conditions)
++	ecram_write(ecram, 0xC5FE, 0);
++	ecram_write(ecram, 0xC5FF, 0);
++	for (i = 0; i < MAXFANCURVESIZE; ++i) {
++		// Entries for points beyond the fan curve size are cleared
++		// to 0
++		const struct fancurve_point *point =
++			i < fancurve->size ? &fancurve->points[i] :
++					     &fancurve_point_zero;
++
++		ecram_write(ecram, model->registers->EXT_FAN1_BASE + i,
++			    point->rpm1_raw);
++		ecram_write(ecram, model->registers->EXT_FAN2_BASE + i,
++			    point->rpm2_raw);
++
++		ecram_write(ecram, model->registers->EXT_FAN_ACC_BASE + i,
++			    point->accel);
++		ecram_write(ecram, model->registers->EXT_FAN_DEC_BASE + i,
++			    point->decel);
++
++		ecram_write(ecram, model->registers->EXT_CPU_TEMP + i,
++			    point->cpu_max_temp_celsius);
++		ecram_write(ecram, model->registers->EXT_CPU_TEMP_HYST + i,
++			    point->cpu_min_temp_celsius);
++		ecram_write(ecram, model->registers->EXT_GPU_TEMP + i,
++			    point->gpu_max_temp_celsius);
++		ecram_write(ecram, model->registers->EXT_GPU_TEMP_HYST + i,
++			    point->gpu_min_temp_celsius);
++		ecram_write(ecram, model->registers->EXT_VRM_TEMP + i,
++			    point->ic_max_temp_celsius);
++		ecram_write(ecram, model->registers->EXT_VRM_TEMP_HYST + i,
++			    point->ic_min_temp_celsius);
++	}
++
++	if (write_size) {
++		ecram_write(ecram, model->registers->EXT_FAN_POINTS_SIZE,
++			    fancurve->size);
++	}
++
++	// Reset the current fan level to 0 so the EC algorithm selects the
++	// fan curve point again, resetting hysteresis effects
++	ecram_write(ecram, model->registers->EXT_FAN_CUR_POINT, 0);
++
++	// Reset internal fan levels
++	ecram_write(ecram, 0xC634, 0); // CPU
++	ecram_write(ecram, 0xC635, 0); // GPU
++	ecram_write(ecram, 0xC636, 0); // SENSOR
++
++	return 0;
++}
++
++static ssize_t fancurve_print_seqfile(const struct fancurve *fancurve,
++				      struct seq_file *s)
++{
++	int i;
++
++	seq_printf(
++		s,
++		"rpm1|rpm2|acceleration|deceleration|cpu_min_temp|cpu_max_temp|gpu_min_temp|gpu_max_temp|ic_min_temp|ic_max_temp\n");
++	for (i = 0; i < fancurve->size; ++i) {
++		const struct fancurve_point *point = &fancurve->points[i];
++
++		seq_printf(
++			s, "%d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\n",
++			point->rpm1_raw * 100, point->rpm2_raw * 100,
++			point->accel, point->decel, point->cpu_min_temp_celsius,
++			point->cpu_max_temp_celsius,
++			point->gpu_min_temp_celsius,
++			point->gpu_max_temp_celsius, point->ic_min_temp_celsius,
++			point->ic_max_temp_celsius);
++	}
++	return 0;
++}
++
++/* ============================== */
++/* Global and shared data between */
++/* all calls to this module       */
++/* ============================== */
++// Implemented like ideapad-laptop.c, but currently still
++// without dynamic memory allocation (instead a global _priv)
++
++struct legion_private {
++	struct platform_device *platform_device;
++	// TODO: remove or keep? init?
++	// struct acpi_device *adev;
++
++	// Method to access ECRAM
++	struct ecram ecram;
++	// Configuration with registers and ECRAM access method
++	const struct model_config *conf;
++
++	// TODO: maybe refactor and keep only local to each function
++	// last known fan curve
++	struct fancurve fancurve;
++	// configured fan curve from user space
++	struct fancurve fancurve_configured;
++
++	// update lock, when partial values of fancurve are changed
++	struct mutex fancurve_mutex;
++
++	// interfaces
++	struct dentry *debugfs_dir;
++	struct device *hwmon_dev;
++	struct platform_profile_handler platform_profile_handler;
++
++	// TODO: remove?
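++	// true while the driver is fully initialized;
++	// guarded by legion_shared_mutex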
++	bool loaded;
++};
++
++// shared between different drivers: WMI, platform driver; protected by mutex
++static struct legion_private *legion_shared;
++static struct legion_private _priv;
++static DEFINE_MUTEX(legion_shared_mutex);
++
++static int legion_shared_init(struct legion_private *priv)
++{
++	int ret;
++
++	mutex_lock(&legion_shared_mutex);
++
++	if (!legion_shared) {
++		legion_shared = priv;
++		mutex_init(&legion_shared->fancurve_mutex);
++		ret = 0;
++	} else {
++		pr_warn("Found multiple platform devices\n");
++		ret = -EINVAL;
++	}
++
++	priv->loaded = true;
++	mutex_unlock(&legion_shared_mutex);
++
++	return ret;
++}
++
++static void legion_shared_exit(struct legion_private *priv)
++{
++	pr_info("Unloading legion shared\n");
++	mutex_lock(&legion_shared_mutex);
++
++	if (legion_shared == priv)
++		legion_shared = NULL;
++
++	mutex_unlock(&legion_shared_mutex);
++	pr_info("Unloading legion shared done\n");
++}
++
++/* ============================= */
++/* debugfs interface             */
++/* ============================= */
++
++static int debugfs_ecmemory_show(struct seq_file *s, void *unused)
++{
++	struct legion_private *priv = s->private;
++	size_t offset;
++
++	for (offset = 0; offset < priv->conf->memoryio_size; ++offset) {
++		char value = ecram_read(&priv->ecram,
++					priv->conf->memoryio_physical_ec_start +
++						offset);
++
++		seq_write(s, &value, 1);
++	}
++	return 0;
++}
++
++DEFINE_SHOW_ATTRIBUTE(debugfs_ecmemory);
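++
++// Dump EC id/version, feature flags, and the fan curve that is currently
++// active in the embedded controller.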
"true" : "false")); ++ seq_printf(s, "enable maximumfanspeed status: %d\n", err); ++ ++ seq_printf(s, "fan curve current point id: %ld\n", ++ priv->fancurve.current_point_i); ++ seq_printf(s, "fan curve points size: %ld\n", priv->fancurve.size); ++ ++ seq_puts(s, "Current fan curve in hardware (embedded controller):\n"); ++ fancurve_print_seqfile(&priv->fancurve, s); ++ seq_puts(s, "=====================\n"); ++ return 0; ++} ++ ++DEFINE_SHOW_ATTRIBUTE(debugfs_fancurve); ++ ++static void legion_debugfs_init(struct legion_private *priv) ++{ ++ struct dentry *dir; ++ ++ // TODO: remove this note ++ // Note: as other kernel modules, do not catch errors here ++ // because if kernel is build without debugfs this ++ // will return an error but module still has to ++ // work, just without debugfs ++ // TODO: what permissions; some modules do 400 ++ // other do 444 ++ dir = debugfs_create_dir(LEGION_DRVR_SHORTNAME, NULL); ++ debugfs_create_file("fancurve", 0444, dir, priv, ++ &debugfs_fancurve_fops); ++ debugfs_create_file("ecmemory", 0444, dir, priv, ++ &debugfs_ecmemory_fops); ++ ++ priv->debugfs_dir = dir; ++} ++ ++static void legion_debugfs_exit(struct legion_private *priv) ++{ ++ pr_info("Unloading legion dubugfs\n"); ++ // The following is does nothing if pointer is NULL ++ debugfs_remove_recursive(priv->debugfs_dir); ++ priv->debugfs_dir = NULL; ++ pr_info("Unloading legion dubugfs done\n"); ++} ++ ++/* ============================= */ ++/* sysfs interface */ ++/* ============================ */ ++ ++static ssize_t powermode_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int power_mode = read_powermode(&priv->ecram, priv->conf); ++ ++ return sysfs_emit(buf, "%d\n", power_mode); ++} ++ ++static ssize_t powermode_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, ++ size_t count) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int powermode; ++ int err; ++ ++ err = kstrtouint(buf, 0, &powermode); ++ if (err) ++ return err; ++ ++ err = write_powermode(&priv->ecram, priv->conf, powermode); ++ if (err) ++ return -EINVAL; ++ ++ // TODO: better? 
++	// TODO: better?
++	// We have to wait a bit until the change has reached the hardware, so
++	// that a readback after notifying returns the new value; otherwise the
++	// notified reader would still see the old value.
++	msleep(500);
++	platform_profile_notify();
++
++	return count;
++}
++
++static DEVICE_ATTR_RW(powermode);
++
++static ssize_t lockfancontroller_show(struct device *dev,
++				      struct device_attribute *attr, char *buf)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	bool is_lockfancontroller;
++	int err;
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_lockfancontroller(&priv->ecram, priv->conf,
++				     &is_lockfancontroller);
++	mutex_unlock(&priv->fancurve_mutex);
++	if (err)
++		return -EINVAL;
++
++	return sysfs_emit(buf, "%d\n", is_lockfancontroller);
++}
++
++static ssize_t lockfancontroller_store(struct device *dev,
++				       struct device_attribute *attr,
++				       const char *buf, size_t count)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	bool is_lockfancontroller;
++	int err;
++
++	err = kstrtobool(buf, &is_lockfancontroller);
++	if (err)
++		return err;
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = write_lockfancontroller(&priv->ecram, priv->conf,
++				      is_lockfancontroller);
++	mutex_unlock(&priv->fancurve_mutex);
++	if (err)
++		return -EINVAL;
++
++	return count;
++}
++
++static DEVICE_ATTR_RW(lockfancontroller);
++
++static ssize_t keyboard_backlight_show(struct device *dev,
++				       struct device_attribute *attr, char *buf)
++{
++	int state;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	read_keyboard_backlight(&priv->ecram, priv->conf, &state);
++	return sysfs_emit(buf, "%d\n", state);
++}
++
++static ssize_t keyboard_backlight_store(struct device *dev,
++					struct device_attribute *attr,
++					const char *buf, size_t count)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int state;
++	int err;
++
++	err = kstrtoint(buf, 0, &state);
++	if (err)
++		return err;
++
++	err = write_keyboard_backlight(&priv->ecram, priv->conf, state);
++	if (err)
++		return -EINVAL;
++
++	return count;
++}
++
++static DEVICE_ATTR_RW(keyboard_backlight);
++
++static struct attribute *legion_sysfs_attributes[] = {
++	&dev_attr_powermode.attr, &dev_attr_lockfancontroller.attr,
++	&dev_attr_keyboard_backlight.attr, NULL
++};
++
++static const struct attribute_group legion_attribute_group = {
++	.attrs = legion_sysfs_attributes
++};
++
++static int legion_sysfs_init(struct legion_private *priv)
++{
++	return device_add_group(&priv->platform_device->dev,
++				&legion_attribute_group);
++}
++
++static void legion_sysfs_exit(struct legion_private *priv)
++{
++	pr_info("Unloading legion sysfs\n");
++	device_remove_group(&priv->platform_device->dev,
++			    &legion_attribute_group);
++	pr_info("Unloading legion sysfs done\n");
++}
++
++/* ============================= */
++/* WMI + ACPI                    */
++/* ============================= */
++// heavily based on ideapad_laptop.c
++
++// TODO: use proper names once the meaning of all events is clear
++enum LEGION_WMI_EVENT {
++	LEGION_WMI_EVENT_GAMEZONE = 1,
++	LEGION_EVENT_A,
++	LEGION_EVENT_B,
++	LEGION_EVENT_C,
++	LEGION_EVENT_D,
++	LEGION_EVENT_E,
++	LEGION_EVENT_F,
++	LEGION_EVENT_G
++};
++
++struct legion_wmi_private {
++	enum LEGION_WMI_EVENT event;
++};
++
++//static void legion_wmi_notify2(u32 value, void *context)
++// {
++//	pr_info("WMI notify\n" );
++// }
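++
++// WMI event handler: logs the event; the platform profile change is
++// propagated to userspace with a delay (see the note at the end).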
++static void legion_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
++{
++	struct legion_wmi_private *wpriv;
++	struct legion_private *priv;
++
++	mutex_lock(&legion_shared_mutex);
++	priv = legion_shared;
++	if (!priv || !priv->loaded) {
++		pr_info("Received WMI event while not initialized!\n");
++		goto unlock;
++	}
++
++	wpriv = dev_get_drvdata(&wdev->dev);
++	switch (wpriv->event) {
++	case LEGION_EVENT_A:
++		pr_info("Fan event: legion type: %d; acpi type: %d (%d=integer)",
++			wpriv->event, data->type, ACPI_TYPE_INTEGER);
++		// TODO: here it is too early (first unlock the mutex, then wait a bit)
++		//platform_profile_notify();
++		break;
++	default:
++		pr_info("Event: legion type: %d; acpi type: %d (%d=integer)",
++			wpriv->event, data->type, ACPI_TYPE_INTEGER);
++		break;
++	}
++
++unlock:
++	mutex_unlock(&legion_shared_mutex);
++	// TODO: fix this!
++	// Problem: we get an event just before the powermode change (from the
++	// key?), so if we notify too early, userspace will read the old power
++	// mode/platform profile.
++	msleep(500);
++	platform_profile_notify();
++}
++
++static int legion_wmi_probe(struct wmi_device *wdev, const void *context)
++{
++	struct legion_wmi_private *wpriv;
++
++	wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL);
++	if (!wpriv)
++		return -ENOMEM;
++
++	*wpriv = *(const struct legion_wmi_private *)context;
++
++	dev_set_drvdata(&wdev->dev, wpriv);
++	dev_info(&wdev->dev, "Registered after probing for WMI.\n");
++	return 0;
++}
++
++static const struct legion_wmi_private legion_wmi_context_gamezone = {
++	.event = LEGION_WMI_EVENT_GAMEZONE
++};
++static const struct legion_wmi_private legion_wmi_context_a = {
++	.event = LEGION_EVENT_A
++};
++static const struct legion_wmi_private legion_wmi_context_b = {
++	.event = LEGION_EVENT_B
++};
++static const struct legion_wmi_private legion_wmi_context_c = {
++	.event = LEGION_EVENT_C
++};
++static const struct legion_wmi_private legion_wmi_context_d = {
++	.event = LEGION_EVENT_D
++};
++static const struct legion_wmi_private legion_wmi_context_e = {
++	.event = LEGION_EVENT_E
++};
++static const struct legion_wmi_private legion_wmi_context_f = {
++	.event = LEGION_EVENT_F
++};
++
++// TODO: check if this is really a method
++#define LEGION_WMI_GAMEZONE_GUID "887B54E3-DDDC-4B2C-8B88-68A26A8835D0"
++
++#define LEGION_WMI_GUID_FAN_EVENT "D320289E-8FEA-41E0-86F9-611D83151B5F"
++#define LEGION_WMI_GUID_FAN2_EVENT "bc72a435-e8c1-4275-b3e2-d8b8074aba59"
++#define LEGION_WMI_GUID_GAMEZONE_KEY_EVENT \
++	"10afc6d9-ea8b-4590-a2e7-1cd3c84bb4b1"
++#define LEGION_WMI_GUID_GAMEZONE_GPU_EVENT \
++	"bfd42481-aee3-4502-a107-afb68425c5f8"
++#define LEGION_WMI_GUID_GAMEZONE_OC_EVENT "d062906b-12d4-4510-999d-4831ee80e985"
++#define LEGION_WMI_GUID_GAMEZONE_TEMP_EVENT \
++	"bfd42481-aee3-4501-a107-afb68425c5f8"
++//#define LEGION_WMI_GUID_GAMEZONE_DATA_EVENT "887b54e3-dddc-4b2c-8b88-68a26a8835d0"
++
++static const struct wmi_device_id legion_wmi_ids[] = {
++	{ LEGION_WMI_GAMEZONE_GUID, &legion_wmi_context_gamezone },
++	{ LEGION_WMI_GUID_FAN_EVENT, &legion_wmi_context_a },
++	{ LEGION_WMI_GUID_FAN2_EVENT, &legion_wmi_context_b },
++	{ LEGION_WMI_GUID_GAMEZONE_KEY_EVENT, &legion_wmi_context_c },
++	{ LEGION_WMI_GUID_GAMEZONE_GPU_EVENT, &legion_wmi_context_d },
++	{ LEGION_WMI_GUID_GAMEZONE_OC_EVENT, &legion_wmi_context_e },
++	{ LEGION_WMI_GUID_GAMEZONE_TEMP_EVENT, &legion_wmi_context_f },
++	{ "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294",
++	  &legion_wmi_context_gamezone }, /* Legion 5 */
++	{},
++};
++MODULE_DEVICE_TABLE(wmi, legion_wmi_ids);
++
++static struct wmi_driver legion_wmi_driver = {
++	.driver = {
++		.name = "legion_wmi",
++	},
++	.id_table = legion_wmi_ids,
++	.probe = legion_wmi_probe,
++	.notify = legion_wmi_notify,
++};
++
++//acpi_status status = wmi_install_notify_handler(LEGION_WMI_GAMEZONE_GUID,
++//				legion_wmi_notify2, NULL);
++//if (ACPI_FAILURE(status)) {
++//	return -ENODEV;
++//}
++//return 0;
++
++static int legion_wmi_init(void)
++{
++	return wmi_driver_register(&legion_wmi_driver);
++}
++
++static void legion_wmi_exit(void)
++{
++	// TODO: remove this
++	pr_info("Unloading legion WMI\n");
++
++	//wmi_remove_notify_handler(LEGION_WMI_GAMEZONE_GUID);
++	wmi_driver_unregister(&legion_wmi_driver);
++	pr_info("Unloading legion WMI done\n");
++}
++
++/* ============================= */
++/* Platform profile              */
++/* ============================= */
++
++enum LEGION_POWERMODE {
++	LEGION_POWERMODE_BALANCED = 0,
++	LEGION_POWERMODE_PERFORMANCE = 1,
++	LEGION_POWERMODE_QUIET = 2,
++};
++
++static int legion_platform_profile_get(struct platform_profile_handler *pprof,
++				       enum platform_profile_option *profile)
++{
++	int powermode;
++	struct legion_private *priv;
++
++	priv = container_of(pprof, struct legion_private,
++			    platform_profile_handler);
++	powermode = read_powermode(&priv->ecram, priv->conf);
++
++	switch (powermode) {
++	case LEGION_POWERMODE_BALANCED:
++		*profile = PLATFORM_PROFILE_BALANCED;
++		break;
++	case LEGION_POWERMODE_PERFORMANCE:
++		*profile = PLATFORM_PROFILE_PERFORMANCE;
++		break;
++	case LEGION_POWERMODE_QUIET:
++		*profile = PLATFORM_PROFILE_QUIET;
++		break;
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int legion_platform_profile_set(struct platform_profile_handler *pprof,
++				       enum platform_profile_option profile)
++{
++	int powermode;
++	struct legion_private *priv;
++
++	priv = container_of(pprof, struct legion_private,
++			    platform_profile_handler);
++
++	switch (profile) {
++	case PLATFORM_PROFILE_BALANCED:
++		powermode = LEGION_POWERMODE_BALANCED;
++		break;
++	case PLATFORM_PROFILE_PERFORMANCE:
++		powermode = LEGION_POWERMODE_PERFORMANCE;
++		break;
++	case PLATFORM_PROFILE_QUIET:
++		powermode = LEGION_POWERMODE_QUIET;
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	return write_powermode(&priv->ecram, priv->conf, powermode);
++}
++
++static int legion_platform_profile_init(struct legion_private *priv)
++{
++	int err;
++
++	priv->platform_profile_handler.profile_get =
++		legion_platform_profile_get;
++	priv->platform_profile_handler.profile_set =
++		legion_platform_profile_set;
++
++	set_bit(PLATFORM_PROFILE_QUIET, priv->platform_profile_handler.choices);
++	set_bit(PLATFORM_PROFILE_BALANCED,
++		priv->platform_profile_handler.choices);
++	set_bit(PLATFORM_PROFILE_PERFORMANCE,
++		priv->platform_profile_handler.choices);
++
++	err = platform_profile_register(&priv->platform_profile_handler);
++	if (err)
++		return err;
++
++	return 0;
++}
++
++static void legion_platform_profile_exit(struct legion_private *priv)
++{
++	pr_info("Unloading legion platform profile\n");
++	platform_profile_remove();
++	pr_info("Unloading legion platform profile done\n");
++}
++
++/* ============================= */
++/* hwmon interface               */
++/* ============================= */
++
++// TODO: register_group or register_info?
++
++// TODO: use one common function (like here) or one function per attribute?
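++
++// hwmon sensor attributes: temperatures are reported in millidegrees
++// Celsius and fan speeds in RPM, as the hwmon ABI expects.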
++static ssize_t sensor_label_show(struct device *dev,
++				 struct device_attribute *attr, char *buf)
++{
++	int sensor_id = (to_sensor_dev_attr(attr))->index;
++	const char *label;
++
++	switch (sensor_id) {
++	case SENSOR_CPU_TEMP_ID:
++		label = "CPU Temperature\n";
++		break;
++	case SENSOR_GPU_TEMP_ID:
++		label = "GPU Temperature\n";
++		break;
++	case SENSOR_IC_TEMP_ID:
++		label = "IC Temperature\n";
++		break;
++	case SENSOR_FAN1_RPM_ID:
++		label = "Fan 1\n";
++		break;
++	case SENSOR_FAN2_RPM_ID:
++		label = "Fan 2\n";
++		break;
++	case SENSOR_FAN1_TARGET_RPM_ID:
++		label = "Fan 1 Target\n";
++		break;
++	case SENSOR_FAN2_TARGET_RPM_ID:
++		label = "Fan 2 Target\n";
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	return sprintf(buf, "%s", label);
++}
++
++// TODO: use one common function (like here) or one function per attribute?
++static ssize_t sensor_show(struct device *dev, struct device_attribute *devattr,
++			   char *buf)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int sensor_id = (to_sensor_dev_attr(devattr))->index;
++	struct sensor_values values;
++	int outval;
++
++	read_sensor_values(&priv->ecram, priv->conf, &values);
++
++	switch (sensor_id) {
++	case SENSOR_CPU_TEMP_ID:
++		outval = 1000 * values.cpu_temp_celsius;
++		break;
++	case SENSOR_GPU_TEMP_ID:
++		outval = 1000 * values.gpu_temp_celsius;
++		break;
++	case SENSOR_IC_TEMP_ID:
++		outval = 1000 * values.ic_temp_celsius;
++		break;
++	case SENSOR_FAN1_RPM_ID:
++		outval = values.fan1_rpm;
++		break;
++	case SENSOR_FAN2_RPM_ID:
++		outval = values.fan2_rpm;
++		break;
++	case SENSOR_FAN1_TARGET_RPM_ID:
++		outval = values.fan1_target_rpm;
++		break;
++	case SENSOR_FAN2_TARGET_RPM_ID:
++		outval = values.fan2_target_rpm;
++		break;
++	default:
++		pr_info("Error reading sensor value with id %d\n", sensor_id);
++		return -EOPNOTSUPP;
++	}
++
++	return sprintf(buf, "%d\n", outval);
++}
++
++static SENSOR_DEVICE_ATTR_RO(temp1_input, sensor, SENSOR_CPU_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(temp1_label, sensor_label, SENSOR_CPU_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(temp2_input, sensor, SENSOR_GPU_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(temp2_label, sensor_label, SENSOR_GPU_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(temp3_input, sensor, SENSOR_IC_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(temp3_label, sensor_label, SENSOR_IC_TEMP_ID);
++static SENSOR_DEVICE_ATTR_RO(fan1_input, sensor, SENSOR_FAN1_RPM_ID);
++static SENSOR_DEVICE_ATTR_RO(fan1_label, sensor_label, SENSOR_FAN1_RPM_ID);
++static SENSOR_DEVICE_ATTR_RO(fan2_input, sensor, SENSOR_FAN2_RPM_ID);
++static SENSOR_DEVICE_ATTR_RO(fan2_label, sensor_label, SENSOR_FAN2_RPM_ID);
++static SENSOR_DEVICE_ATTR_RO(fan1_target, sensor, SENSOR_FAN1_TARGET_RPM_ID);
++static SENSOR_DEVICE_ATTR_RO(fan2_target, sensor, SENSOR_FAN2_TARGET_RPM_ID);
++
++static struct attribute *sensor_hwmon_attributes[] = {
++	&sensor_dev_attr_temp1_input.dev_attr.attr,
++	&sensor_dev_attr_temp1_label.dev_attr.attr,
++	&sensor_dev_attr_temp2_input.dev_attr.attr,
++	&sensor_dev_attr_temp2_label.dev_attr.attr,
++	&sensor_dev_attr_temp3_input.dev_attr.attr,
++	&sensor_dev_attr_temp3_label.dev_attr.attr,
++	&sensor_dev_attr_fan1_input.dev_attr.attr,
++	&sensor_dev_attr_fan1_label.dev_attr.attr,
++	&sensor_dev_attr_fan2_input.dev_attr.attr,
++	&sensor_dev_attr_fan2_label.dev_attr.attr,
++	&sensor_dev_attr_fan1_target.dev_attr.attr,
++	&sensor_dev_attr_fan2_target.dev_attr.attr,
++	NULL
++};
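++
++// Each fan curve field of each point is exposed as one pwmX_auto_pointY_*
++// attribute; nr selects the field (FANCURVE_ATTR_*), index selects the point.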
++static ssize_t autopoint_show(struct device *dev,
++			      struct device_attribute *devattr, char *buf)
++{
++	struct fancurve fancurve;
++	int err;
++	int value;
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int fancurve_attr_id = to_sensor_dev_attr_2(devattr)->nr;
++	int point_id = to_sensor_dev_attr_2(devattr)->index;
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_fancurve(&priv->ecram, priv->conf, &fancurve);
++	mutex_unlock(&priv->fancurve_mutex);
++
++	if (err) {
++		pr_info("Reading fancurve failed\n");
++		return -EOPNOTSUPP;
++	}
++	if (!(point_id >= 0 && point_id < MAXFANCURVESIZE)) {
++		pr_info("Reading fancurve failed due to wrong point id: %d\n",
++			point_id);
++		return -EOPNOTSUPP;
++	}
++
++	switch (fancurve_attr_id) {
++	case FANCURVE_ATTR_PWM1:
++		value = fancurve.points[point_id].rpm1_raw * 100;
++		break;
++	case FANCURVE_ATTR_PWM2:
++		value = fancurve.points[point_id].rpm2_raw * 100;
++		break;
++	case FANCURVE_ATTR_CPU_TEMP:
++		value = fancurve.points[point_id].cpu_max_temp_celsius;
++		break;
++	case FANCURVE_ATTR_CPU_HYST:
++		value = fancurve.points[point_id].cpu_min_temp_celsius;
++		break;
++	case FANCURVE_ATTR_GPU_TEMP:
++		value = fancurve.points[point_id].gpu_max_temp_celsius;
++		break;
++	case FANCURVE_ATTR_GPU_HYST:
++		value = fancurve.points[point_id].gpu_min_temp_celsius;
++		break;
++	case FANCURVE_ATTR_IC_TEMP:
++		value = fancurve.points[point_id].ic_max_temp_celsius;
++		break;
++	case FANCURVE_ATTR_IC_HYST:
++		value = fancurve.points[point_id].ic_min_temp_celsius;
++		break;
++	case FANCURVE_ATTR_ACCEL:
++		value = fancurve.points[point_id].accel;
++		break;
++	case FANCURVE_ATTR_DECEL:
++		value = fancurve.points[point_id].decel;
++		break;
++	case FANCURVE_SIZE:
++		value = fancurve.size;
++		break;
++	default:
++		pr_info("Reading fancurve failed due to wrong attribute id: %d\n",
++			fancurve_attr_id);
++		return -EOPNOTSUPP;
++	}
++
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t autopoint_store(struct device *dev,
++			       struct device_attribute *devattr,
++			       const char *buf, size_t count)
++{
++	struct fancurve fancurve;
++	int err;
++	int value;
++	bool valid;
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int fancurve_attr_id = to_sensor_dev_attr_2(devattr)->nr;
++	int point_id = to_sensor_dev_attr_2(devattr)->index;
++
++	if (!(point_id >= 0 && point_id < MAXFANCURVESIZE)) {
++		pr_info("Writing fancurve failed due to wrong point id: %d\n",
++			point_id);
++		err = -EOPNOTSUPP;
++		goto error;
++	}
++
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parsing hwmon store value failed: error: %d; point_id: %d; fancurve_attr_id: %d\n",
++			err, point_id, fancurve_attr_id);
++		goto error;
++	}
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_fancurve(&priv->ecram, priv->conf, &fancurve);
++
++	if (err) {
++		pr_info("Reading fancurve failed\n");
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
++
++	switch (fancurve_attr_id) {
++	case FANCURVE_ATTR_PWM1:
++		valid = fancurve_set_rpm1(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_PWM2:
++		valid = fancurve_set_rpm2(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_CPU_TEMP:
++		valid = fancurve_set_cpu_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_CPU_HYST:
++		valid = fancurve_set_cpu_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_GPU_TEMP:
++		valid = fancurve_set_gpu_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_GPU_HYST:
++		valid = fancurve_set_gpu_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_IC_TEMP:
++		valid = fancurve_set_ic_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_IC_HYST:
++		valid = fancurve_set_ic_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_ACCEL:
++		valid = fancurve_set_accel(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_DECEL:
++		valid = fancurve_set_decel(&fancurve, point_id, value);
++		break;
++	case FANCURVE_SIZE:
++		valid = fancurve_set_size(&fancurve, value, true);
++		break;
++	default:
++		pr_info("Writing fancurve failed due to wrong attribute id: %d\n",
++			fancurve_attr_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
++
++	if (!valid) {
++		pr_info("Ignoring invalid fancurve value %d for attribute %d at point %d\n",
++			value, fancurve_attr_id, point_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
++
++	err = write_fancurve(&priv->ecram, priv->conf, &fancurve, false);
++	if (err) {
++		pr_info("Writing fancurve failed for accessing hwmon at point_id: %d\n",
++			point_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
++
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
++
++error_mutex:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
++
++// rpm1
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 1);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 2);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 4);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 9);
++// rpm2
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 1);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 2);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 4);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_pwm, autopoint,
++			       FANCURVE_ATTR_PWM2, 9);
++// CPU temp
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp, autopoint,
++			       FANCURVE_ATTR_CPU_TEMP, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp, autopoint,
++			       FANCURVE_ATTR_CPU_TEMP, 1);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp, autopoint,
++			       FANCURVE_ATTR_CPU_TEMP, 2);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp, autopoint,
++			       FANCURVE_ATTR_CPU_TEMP, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp, autopoint,
++			       FANCURVE_ATTR_CPU_TEMP, 4);
++static 
SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 9); ++// CPU temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 9); ++// GPU temp ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 9); ++// GPU temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_temp_hyst, autopoint, ++ 
FANCURVE_ATTR_GPU_HYST, 9); ++// IC temp ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 9); ++// IC temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point9_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point10_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 9); ++// accel ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_accel, autopoint, ++ FANCURVE_ATTR_ACCEL, 9); ++// decel ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_decel, autopoint, ++ FANCURVE_ATTR_DECEL, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_decel, autopoint, ++ FANCURVE_ATTR_DECEL, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_decel, autopoint, ++ FANCURVE_ATTR_DECEL, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_decel, autopoint, ++ FANCURVE_ATTR_DECEL, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_decel, autopoint, ++ FANCURVE_ATTR_DECEL, 4); ++static 
SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_decel, autopoint,
++			       FANCURVE_ATTR_DECEL, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_decel, autopoint,
++			       FANCURVE_ATTR_DECEL, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_decel, autopoint,
++			       FANCURVE_ATTR_DECEL, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_decel, autopoint,
++			       FANCURVE_ATTR_DECEL, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_decel, autopoint,
++			       FANCURVE_ATTR_DECEL, 9);
++// size
++static SENSOR_DEVICE_ATTR_2_RW(auto_points_size, autopoint, FANCURVE_SIZE, 0);
++
++static ssize_t minifancurve_show(struct device *dev,
++				 struct device_attribute *devattr, char *buf)
++{
++	bool value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_minifancurve(&priv->ecram, priv->conf, &value);
++	if (err) {
++		pr_info("Reading minifancurve was not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return sprintf(buf, "%d\n", value);
++
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++	return err;
++}
++
++static ssize_t minifancurve_store(struct device *dev,
++				  struct device_attribute *devattr,
++				  const char *buf, size_t count)
++{
++	int value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parsing hwmon store value failed: error: %d\n", err);
++		goto error;
++	}
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = write_minifancurve(&priv->ecram, priv->conf, value);
++	if (err) {
++		pr_info("Writing minifancurve was not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
++
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
++
++static SENSOR_DEVICE_ATTR_RW(minifancurve, minifancurve, 0);
++
++// pwm1_mode follows the hwmon convention: 0 = full speed (maximum fan speed
++// override enabled), 2 = automatic fan speed control via the fan curve.
++static ssize_t pwm1_mode_show(struct device *dev,
++			      struct device_attribute *devattr, char *buf)
++{
++	bool value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_maximumfanspeed(&priv->ecram, priv->conf, &value);
++	if (err) {
++		pr_info("Reading pwm1_mode/maximumfanspeed was not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return sprintf(buf, "%d\n", value ? 0 : 2);
++
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++	return err;
++}
++
++static ssize_t pwm1_mode_store(struct device *dev,
++			       struct device_attribute *devattr,
++			       const char *buf, size_t count)
++{
++	int value;
++	int is_maximumfanspeed;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parsing hwmon store value failed: error: %d\n", err);
++		goto error;
++	}
++	is_maximumfanspeed = value == 0;
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = write_maximumfanspeed(&priv->ecram, priv->conf,
++				    is_maximumfanspeed);
++	if (err) {
++		pr_info("Writing pwm1_mode/maximumfanspeed was not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
++
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
++
++static SENSOR_DEVICE_ATTR_RW(pwm1_mode, pwm1_mode, 0);
++
++static struct attribute *fancurve_hwmon_attributes[] = {
++	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point9_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point10_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
++	
&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point6_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point7_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point8_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point9_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point10_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point1_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point2_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point3_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point4_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point5_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point6_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point7_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point8_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point9_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point10_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point6_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point7_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point8_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point9_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point10_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point1_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point2_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point3_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point4_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point5_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point6_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point7_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point8_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point9_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point10_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point1_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point2_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point3_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point4_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point5_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point6_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point7_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point8_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point9_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point10_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point1_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point2_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point3_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point4_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point5_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point6_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point7_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point8_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point9_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point10_decel.dev_attr.attr, ++ // ++ &sensor_dev_attr_auto_points_size.dev_attr.attr, ++ &sensor_dev_attr_minifancurve.dev_attr.attr, ++ &sensor_dev_attr_pwm1_mode.dev_attr.attr, NULL ++}; ++ ++static umode_t 
legion_is_visible(struct kobject *kobj, struct attribute *attr,
++				 int idx)
++{
++	bool supported = true;
++	struct device *dev = kobj_to_dev(kobj);
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	if (attr == &sensor_dev_attr_minifancurve.dev_attr.attr)
++		supported = priv->conf->has_minifancurve;
++
++	return supported ? attr->mode : 0;
++}
++
++static const struct attribute_group legion_hwmon_sensor_group = {
++	.attrs = sensor_hwmon_attributes,
++	.is_visible = NULL
++};
++
++static const struct attribute_group legion_hwmon_fancurve_group = {
++	.attrs = fancurve_hwmon_attributes,
++	.is_visible = legion_is_visible,
++};
++
++static const struct attribute_group *legion_hwmon_groups[] = {
++	&legion_hwmon_sensor_group, &legion_hwmon_fancurve_group, NULL
++};
++
++ssize_t legion_hwmon_init(struct legion_private *priv)
++{
++	// TODO: use hwmon_device_register_with_groups or
++	// hwmon_device_register_with_info (the latter means all hwmon
++	// functions have to be changed);
++	// some laptop drivers do it one way, some the other
++	// TODO: use devm_hwmon_device_register_with_groups?
++	// some laptop drivers use this, some do not
++	struct device *hwmon_dev = hwmon_device_register_with_groups(
++		&priv->platform_device->dev, "legion_hwmon", priv,
++		legion_hwmon_groups);
++	if (IS_ERR_OR_NULL(hwmon_dev)) {
++		pr_err("hwmon_device_register failed!\n");
++		return PTR_ERR(hwmon_dev);
++	}
++	dev_set_drvdata(hwmon_dev, priv);
++	priv->hwmon_dev = hwmon_dev;
++	return 0;
++}
++
++void legion_hwmon_exit(struct legion_private *priv)
++{
++	pr_info("Unloading legion hwmon\n");
++	if (priv->hwmon_dev) {
++		hwmon_device_unregister(priv->hwmon_dev);
++		priv->hwmon_dev = NULL;
++	}
++	pr_info("Unloading legion hwmon done\n");
++}
++
++/* ============================= */
++/* Platform driver               */
++/* ============================= */
++
++int legion_add(struct platform_device *pdev)
++{
++	struct legion_private *priv;
++	const struct dmi_system_id *dmi_sys;
++	int err;
++	u16 ec_read_id;
++	bool is_denied = true;
++	bool is_allowed = false;
++	bool do_load_by_list = false;
++	bool do_load = false;
++	//struct legion_private *priv = dev_get_drvdata(&pdev->dev);
++	dev_info(&pdev->dev, "legion_laptop platform driver probing\n");
++
++	dev_info(&pdev->dev, "Read identifying information: DMI_SYS_VENDOR: %s; DMI_PRODUCT_NAME: %s; DMI_BIOS_VERSION:%s\n",
++		 dmi_get_system_info(DMI_SYS_VENDOR),
++		 dmi_get_system_info(DMI_PRODUCT_NAME),
++		 dmi_get_system_info(DMI_BIOS_VERSION));
++
++	// TODO: allocate?
++	priv = &_priv;
++	priv->platform_device = pdev;
++	err = legion_shared_init(priv);
++	if (err) {
++		dev_info(&pdev->dev, "Initializing shared state failed.\n");
++		goto err_legion_shared_init;
++	}
++	dev_set_drvdata(&pdev->dev, priv);
++
++	dmi_sys = dmi_first_match(optimistic_allowlist);
++	is_allowed = dmi_sys != NULL;
++	is_denied = dmi_check_system(denylist);
++	do_load_by_list = is_allowed && !is_denied;
++	do_load = do_load_by_list || force;
++
++	dev_info(
++		&pdev->dev,
++		"is_denied: %d; is_allowed: %d; do_load_by_list: %d; do_load: %d\n",
++		is_denied, is_allowed, do_load_by_list, do_load);
++
++	if (!do_load) {
++		dev_info(
++			&pdev->dev,
++			"Module not usable for this laptop because it is not in the allowlist. Notify the maintainer if you want your device added, or force loading with the module parameter force.\n");
++		err = -ENODEV;
++		goto err_model_mismatch;
++	}
++
++	if (force)
++		dev_info(&pdev->dev, "legion_laptop is forced to load.\n");
++
++	if (!do_load_by_list && do_load) {
++		dev_info(
++			&pdev->dev,
++			"legion_laptop is forced to load and would otherwise not be loaded\n");
++	}
++
++	// if forced and no model found, use the config for the first model
++	if (dmi_sys == NULL)
++		dmi_sys = &optimistic_allowlist[0];
++	dev_info(&pdev->dev, "Using configuration for system: %s\n",
++		 dmi_sys->ident);
++
++	priv->conf = dmi_sys->driver_data;
++
++	err = ecram_init(&priv->ecram, priv->conf->memoryio_physical_ec_start,
++			 priv->conf->memoryio_size);
++	if (err) {
++		dev_info(&pdev->dev,
++			 "Could not init access to embedded controller\n");
++		goto err_ecram_init;
++	}
++
++	ec_read_id = read_ec_id(&priv->ecram, priv->conf);
++	dev_info(&pdev->dev, "Read embedded controller ID 0x%x\n", ec_read_id);
++	if (priv->conf->check_embedded_controller_id &&
++	    ec_read_id != priv->conf->embedded_controller_id) {
++		err = -ENODEV;
++		dev_info(&pdev->dev, "Expected EC chip id 0x%x but read 0x%x\n",
++			 priv->conf->embedded_controller_id, ec_read_id);
++		goto err_ecram_id;
++	}
++	if (!priv->conf->check_embedded_controller_id) {
++		dev_info(&pdev->dev,
++			 "Skipped checking embedded controller id\n");
++	}
++
++	dev_info(&pdev->dev, "Creating debugfs interface\n");
++	legion_debugfs_init(priv);
++
++	pr_info("Creating sysfs interface\n");
++	err = legion_sysfs_init(priv);
++	if (err) {
++		dev_info(&pdev->dev, "Creating sysfs interface failed\n");
++		goto err_sysfs_init;
++	}
++
++	pr_info("Creating hwmon interface\n");
++	err = legion_hwmon_init(priv);
++	if (err)
++		goto err_hwmon_init;
++
++	pr_info("Creating platform profile support\n");
++	err = legion_platform_profile_init(priv);
++	if (err) {
++		dev_info(&pdev->dev, "Creating platform profile failed\n");
++		goto err_platform_profile;
++	}
++
++	pr_info("Init WMI driver support\n");
++	err = legion_wmi_init();
++	if (err) {
++		dev_info(&pdev->dev, "Init WMI driver failed\n");
++		goto err_wmi;
++	}
++
++	dev_info(&pdev->dev, "legion_laptop loaded for this device\n");
++	return 0;
++
++	// TODO: remove eventually
++	legion_wmi_exit();
++err_wmi:
++	legion_platform_profile_exit(priv);
++err_platform_profile:
++	legion_hwmon_exit(priv);
++err_hwmon_init:
++	legion_sysfs_exit(priv);
++err_sysfs_init:
++	legion_debugfs_exit(priv);
++err_ecram_id:
++	ecram_exit(&priv->ecram);
++err_ecram_init:
++	legion_shared_exit(priv);
++err_legion_shared_init:
++err_model_mismatch:
++	dev_info(&pdev->dev, "legion_laptop not loaded for this device\n");
++	return err;
++}
++
++int legion_remove(struct platform_device *pdev)
++{
++	struct legion_private *priv = dev_get_drvdata(&pdev->dev);
++
++	mutex_lock(&legion_shared_mutex);
++	priv->loaded = false;
++	mutex_unlock(&legion_shared_mutex);
++
++	// first unregister wmi, so toggling powermode does not
++	// generate events anymore, which might even be delayed
++	legion_wmi_exit();
++	legion_platform_profile_exit(priv);
++
++	// toggle power mode to load the default setting from the embedded
++	// controller again
++	toggle_powermode(&priv->ecram, priv->conf);
++
++	legion_hwmon_exit(priv);
++	legion_sysfs_exit(priv);
++	legion_debugfs_exit(priv);
++	ecram_exit(&priv->ecram);
++	legion_shared_exit(priv);
++
++	pr_info("Legion platform unloaded\n");
++	return 0;
++}
++
++int legion_resume(struct platform_device *pdev)
++{
++	//struct legion_private *priv = 
dev_get_drvdata(&pdev->dev); ++ dev_info(&pdev->dev, "Resumed in legion-laptop\n"); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int legion_pm_resume(struct device *dev) ++{ ++ //struct legion_private *priv = dev_get_drvdata(dev); ++ dev_info(dev, "Resumed PM in legion-laptop\n"); ++ ++ return 0; ++} ++#endif ++static SIMPLE_DEV_PM_OPS(legion_pm, NULL, legion_pm_resume); ++ ++// same as ideapad ++static const struct acpi_device_id legion_device_ids[] = { ++ { "PNP0C09", 0 }, // todo: change to "VPC2004" ++ { "", 0 }, ++}; ++MODULE_DEVICE_TABLE(acpi, legion_device_ids); ++ ++static struct platform_driver legion_driver = { ++ .probe = legion_add, ++ .remove = legion_remove, ++ .resume = legion_resume, ++ .driver = { ++ .name = "legion", ++ .pm = &legion_pm, ++ .acpi_match_table = ACPI_PTR(legion_device_ids), ++ }, ++}; ++ ++int __init legion_init(void) ++{ ++ int err; ++ ++ pr_info("legion_laptop starts loading\n"); ++ err = platform_driver_register(&legion_driver); ++ if (err) { ++ pr_info("legion_laptop: platform_driver_register failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++module_init(legion_init); ++ ++void __exit legion_exit(void) ++{ ++ platform_driver_unregister(&legion_driver); ++ pr_info("legion_laptop exit\n"); ++} ++ ++module_exit(legion_exit); diff --git a/drivers/platform/x86/steamdeck.c b/drivers/platform/x86/steamdeck.c new file mode 100644 index 000000000000..77a6677ec19e diff --git a/6.3/0004-fixes.patch b/6.3/0004-fixes.patch index ccba872a..9a5732df 100644 --- a/6.3/0004-fixes.patch +++ b/6.3/0004-fixes.patch @@ -1,7 +1,7 @@ -From c10af555acefbb465dbe1834391511fb21569a68 Mon Sep 17 00:00:00 2001 +From 5157ccf4ab5b2429eb11e33b006ff0110baf0625 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:24:53 +0200 -Subject: [PATCH 04/12] fixes +Date: Sat, 22 Apr 2023 11:43:21 +0200 +Subject: [PATCH 04/13] fixes Signed-off-by: Peter Jung --- @@ -17,6 +17,11 @@ Signed-off-by: Peter Jung arch/x86/kernel/cpu/hygon.c | 1 + arch/x86/net/bpf_jit_comp.c | 5 +- drivers/bluetooth/btusb.c | 2 +- + .../drm/amd/display/dc/bios/bios_parser2.c | 7 +- + .../drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +- + .../drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +- + .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +- + drivers/gpu/drm/scheduler/sched_main.c | 3 +- drivers/leds/trigger/Kconfig | 9 + drivers/leds/trigger/Makefile | 1 + drivers/leds/trigger/ledtrig-blkdev.c | 1221 +++++++++++++++++ @@ -55,7 +60,7 @@ Signed-off-by: Peter Jung scripts/atomic/fallbacks/add_negative | 11 +- sound/pci/hda/cs35l41_hda.c | 2 +- .../selftests/mm/ksm_functional_tests.c | 96 +- - 50 files changed, 2796 insertions(+), 270 deletions(-) + 55 files changed, 2804 insertions(+), 279 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-class-led-trigger-blkdev create mode 100644 Documentation/leds/ledtrig-blkdev.rst create mode 100644 drivers/leds/trigger/ledtrig-blkdev.c @@ -532,6 +537,78 @@ index 5c536151ef83..5a80379253a7 100644 gpiod_set_value_cansleep(reset_gpio, 1); return; +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +index e381de2429fa..ae3783a7d7f4 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +@@ -515,11 +515,8 @@ static enum bp_result get_gpio_i2c_info( + info->i2c_slave_address = record->i2c_slave_addr; + + /* TODO: check how to get register offset for en, Y, etc. 
*/ +- info->gpio_info.clk_a_register_index = +- le16_to_cpu( +- header->gpio_pin[table_index].data_a_reg_index); +- info->gpio_info.clk_a_shift = +- header->gpio_pin[table_index].gpio_bitshift; ++ info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index); ++ info->gpio_info.clk_a_shift = pin->gpio_bitshift; + + return BP_RESULT_OK; + } +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +index 3af24ef9cb2d..51838bef7fb0 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +@@ -714,7 +714,7 @@ static const struct dc_debug_options debug_defaults_drv = { + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, +- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, ++ .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index 8f9244fe5c86..c10ff621cb1d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -642,7 +642,7 @@ static const struct dc_debug_options debug_defaults_drv = { + .clock_trace = true, + .disable_pplib_clock_request = true, + .min_disp_clk_khz = 100000, +- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, ++ .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +index 95da6dd1cc65..c4000518dc56 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +@@ -304,7 +304,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, + | FEATURE_MASK(FEATURE_GFX_SS_BIT) + | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) + | FEATURE_MASK(FEATURE_FW_CTF_BIT) +- | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); ++ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT) ++ | FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c +index 0e4378420271..1e08cc5a1702 100644 +--- a/drivers/gpu/drm/scheduler/sched_main.c ++++ b/drivers/gpu/drm/scheduler/sched_main.c +@@ -308,7 +308,8 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) + */ + void drm_sched_fault(struct drm_gpu_scheduler *sched) + { +- mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); ++ if (sched->ready) ++ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); + } + EXPORT_SYMBOL(drm_sched_fault); + diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig index dc6816d36d06..bda249068182 100644 --- a/drivers/leds/trigger/Kconfig diff --git a/6.3/0005-fs-patches.patch b/6.3/0005-fs-patches.patch index e538c1f3..2a67b730 100644 --- a/6.3/0005-fs-patches.patch +++ b/6.3/0005-fs-patches.patch @@ -1,7 +1,7 @@ -From cbcd9e3198b7d93339f80452d765183b6fe5f30b Mon Sep 17 00:00:00 2001 +From 44c369d8ceca091040f847df8e2e9e15df9dc300 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:25:39 +0200 -Subject: [PATCH 05/12] fs-patches +Date: Sat, 22 Apr 2023 11:43:38 +0200 +Subject: [PATCH 
05/13] fs-patches Signed-off-by: Peter Jung --- diff --git a/6.3/0006-Implement-amd-pstate-guided-driver.patch b/6.3/0006-Implement-amd-pstate-guided-driver.patch index 1c85bda2..1d7e1ce2 100644 --- a/6.3/0006-Implement-amd-pstate-guided-driver.patch +++ b/6.3/0006-Implement-amd-pstate-guided-driver.patch @@ -1,7 +1,7 @@ -From a22a87b1984afabbd1f3f9963647e5a4198efedc Mon Sep 17 00:00:00 2001 +From f20814462752f28649096c4de6604e56ca1091dc Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 17 Apr 2023 18:32:06 +0200 -Subject: [PATCH 06/12] Implement amd-pstate guided driver +Subject: [PATCH 06/13] Implement amd-pstate guided driver Signed-off-by: Peter Jung --- diff --git a/6.3/0007-ksm.patch b/6.3/0007-ksm.patch index 3064932d..ba5621ef 100644 --- a/6.3/0007-ksm.patch +++ b/6.3/0007-ksm.patch @@ -1,7 +1,7 @@ -From 0f6a1f135f27479417b7e57a2cdd75e1d736b83a Mon Sep 17 00:00:00 2001 +From a4c03062611ae066405d1ec08eed628e8d1640f2 Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 17 Apr 2023 18:28:52 +0200 -Subject: [PATCH 07/12] ksm +Subject: [PATCH 07/13] ksm Signed-off-by: Peter Jung --- diff --git a/6.3/0008-maple-lru.patch b/6.3/0008-maple-lru.patch index 36a7a465..ac668dcb 100644 --- a/6.3/0008-maple-lru.patch +++ b/6.3/0008-maple-lru.patch @@ -1,17 +1,21 @@ -From 1300046c6d7d6110fa0da7db293f028382ee4c16 Mon Sep 17 00:00:00 2001 +From 94175b03e3ecc1d0bb22dc4ecea1457bc64eb5cd Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Sun, 9 Apr 2023 21:24:33 +0200 -Subject: [PATCH 08/12] maple-lru +Date: Sat, 22 Apr 2023 11:43:59 +0200 +Subject: [PATCH 08/13] maple-lru Signed-off-by: Peter Jung --- - Documentation/mm/multigen_lru.rst | 44 ++++++++-- - include/linux/mmzone.h | 10 +-- - lib/maple_tree.c | 103 +++++++++------------- - mm/mmap.c | 48 +++++++++-- - mm/vmscan.c | 136 +++++++++++------------------- - tools/testing/radix-tree/maple.c | 24 ++++++ - 6 files changed, 197 insertions(+), 168 deletions(-) + Documentation/mm/multigen_lru.rst | 44 ++++++++- + arch/s390/mm/hugetlbpage.c | 2 +- + arch/s390/mm/mmap.c | 2 +- + fs/hugetlbfs/inode.c | 2 +- + include/linux/mmzone.h | 10 +- + lib/maple_tree.c | 158 +++++++++++++----------------- + lib/test_maple_tree.c | 27 +++-- + mm/mmap.c | 57 +++++++++-- + mm/vmscan.c | 136 ++++++++++--------------- + tools/testing/radix-tree/maple.c | 24 +++++ + 10 files changed, 258 insertions(+), 204 deletions(-) diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst index 5f1f6ecbb79b..52ed5092022f 100644 @@ -96,6 +100,45 @@ index 5f1f6ecbb79b..52ed5092022f 100644 The aging and the eviction form a producer-consumer model; specifically, the latter drives the former by the sliding window over +diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c +index c299a18273ff..c718f2a0de94 100644 +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -273,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index 3327c47bc181..fc9a7dc26c5e 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -136,7 +136,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- 
info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + if (filp || (flags & MAP_SHARED)) + info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 9062da6da567..4bff10704e7f 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -208,7 +208,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9fb1b03b83b2..cabe7f51ea66 100644 --- a/include/linux/mmzone.h @@ -131,7 +174,7 @@ index 9fb1b03b83b2..cabe7f51ea66 100644 struct lru_gen_memcg memcg_lru; #endif diff --git a/lib/maple_tree.c b/lib/maple_tree.c -index db60edb55f2f..9172bcee94b4 100644 +index db60edb55f2f..44f34a51afc0 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1303,26 +1303,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) @@ -284,27 +327,99 @@ index db60edb55f2f..9172bcee94b4 100644 /* Not within lower bounds */ if (mas->index > pivot) -@@ -5312,6 +5286,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, +@@ -5276,25 +5250,28 @@ static inline void mas_fill_gap(struct ma_state *mas, void *entry, + * @size: The size of the gap + * @fwd: Searching forward or back + */ +-static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, ++static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size, bool fwd) + { +- unsigned long start = 0; +- +- if (!unlikely(mas_is_none(mas))) +- start++; ++ if (!unlikely(mas_is_none(mas)) && min == 0) { ++ min++; ++ /* ++ * At this time, min is increased, we need to recheck whether ++ * the size is satisfied. 
++ */ ++ if (min > max || max - min + 1 < size) ++ return -EBUSY; ++ } + /* mas_is_ptr */ + +- if (start < min) +- start = min; +- + if (fwd) { +- mas->index = start; +- mas->last = start + size - 1; +- return; ++ mas->index = min; ++ mas->last = min + size - 1; ++ } else { ++ mas->last = max; ++ mas->index = max - size + 1; + } +- +- mas->index = max; ++ return 0; + } + + /* +@@ -5312,6 +5289,12 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long *pivots; enum maple_type mt; -+ if (min >= max) ++ if (min > max) ++ return -EINVAL; ++ ++ if (size == 0 || max - min < size - 1) + return -EINVAL; + if (mas_is_start(mas)) mas_start(mas); else if (mas->offset >= 2) -@@ -5366,6 +5343,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, +@@ -5320,10 +5303,8 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, + return -EBUSY; + + /* Empty set */ +- if (mas_is_none(mas) || mas_is_ptr(mas)) { +- mas_sparse_area(mas, min, max, size, true); +- return 0; +- } ++ if (mas_is_none(mas) || mas_is_ptr(mas)) ++ return mas_sparse_area(mas, min, max, size, true); + + /* The start of the window can only be within these values */ + mas->index = min; +@@ -5366,6 +5347,12 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, { struct maple_enode *last = mas->node; -+ if (min >= max) ++ if (min > max) ++ return -EINVAL; ++ ++ if (size == 0 || max - min < size - 1) + return -EINVAL; + if (mas_is_start(mas)) { mas_start(mas); mas->offset = mas_data_end(mas); -@@ -5385,7 +5365,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, +@@ -5376,16 +5363,14 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, + } + + /* Empty set. */ +- if (mas_is_none(mas) || mas_is_ptr(mas)) { +- mas_sparse_area(mas, min, max, size, false); +- return 0; +- } ++ if (mas_is_none(mas) || mas_is_ptr(mas)) ++ return mas_sparse_area(mas, min, max, size, false); + + /* The start of the window can only be within these values. */ mas->index = min; mas->last = max; @@ -313,7 +428,7 @@ index db60edb55f2f..9172bcee94b4 100644 if (last == mas->node) { if (!mas_rewind_node(mas)) return -EBUSY; -@@ -5400,17 +5380,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, +@@ -5400,17 +5385,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) return -EBUSY; @@ -328,12 +443,12 @@ index db60edb55f2f..9172bcee94b4 100644 /* Trim the upper limit to the max. */ - if (mas->max <= mas->last) - mas->last = mas->max; -+ if (max <= mas->last) ++ if (max < mas->last) + mas->last = max; mas->index = mas->last - size + 1; return 0; -@@ -5819,6 +5791,7 @@ int mas_preallocate(struct ma_state *mas, gfp_t gfp) +@@ -5819,6 +5796,7 @@ int mas_preallocate(struct ma_state *mas, gfp_t gfp) mas_reset(mas); return ret; } @@ -341,29 +456,145 @@ index db60edb55f2f..9172bcee94b4 100644 /* * mas_destroy() - destroy a maple state. 
+@@ -6391,7 +6369,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, + { + int ret = 0; + +- MA_STATE(mas, mt, min, max - size); ++ MA_STATE(mas, mt, min, min); + if (!mt_is_alloc(mt)) + return -EINVAL; + +@@ -6411,7 +6389,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, + retry: + mas.offset = 0; + mas.index = min; +- mas.last = max - size; ++ mas.last = max - size + 1; + ret = mas_alloc(&mas, entry, size, startp); + if (mas_nomem(&mas, gfp)) + goto retry; +@@ -6427,14 +6405,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, + { + int ret = 0; + +- MA_STATE(mas, mt, min, max - size); ++ MA_STATE(mas, mt, min, max - size + 1); + if (!mt_is_alloc(mt)) + return -EINVAL; + + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + +- if (min >= max) ++ if (min > max) + return -EINVAL; + + if (max < size - 1) +diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c +index f1db333270e9..4d85d04b26f8 100644 +--- a/lib/test_maple_tree.c ++++ b/lib/test_maple_tree.c +@@ -102,7 +102,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, + unsigned long result = expected + 1; + int ret; + +- ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1, ++ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end, + GFP_KERNEL); + MT_BUG_ON(mt, ret != eret); + if (ret) +@@ -680,7 +680,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0, /* Return value success. */ + + 0x0, /* Min */ +- 0x565234AF1 << 12, /* Max */ ++ 0x565234AF0 << 12, /* Max */ + 0x3000, /* Size */ + 0x565234AEE << 12, /* max - 3. */ + 0, /* Return value success. */ +@@ -692,14 +692,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0, /* Return value success. */ + + 0x0, /* Min */ +- 0x7F36D510A << 12, /* Max */ ++ 0x7F36D5109 << 12, /* Max */ + 0x4000, /* Size */ + 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */ + 0, /* Return value success. */ + + /* Ascend test. */ + 0x0, +- 34148798629 << 12, ++ 34148798628 << 12, + 19 << 12, + 34148797418 << 12, + 0x0, +@@ -711,6 +711,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0x0, + -EBUSY, + ++ /* Single space test. */ ++ 34148798725 << 12, ++ 34148798725 << 12, ++ 1 << 12, ++ 34148798725 << 12, ++ 0, + }; + + int i, range_count = ARRAY_SIZE(range); +@@ -759,9 +765,9 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + mas_unlock(&mas); + for (i = 0; i < req_range_count; i += 5) { + #if DEBUG_REV_RANGE +- pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n", +- req_range[i] >> 12, +- (req_range[i + 1] >> 12) - 1, ++ pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n", ++ i, req_range[i] >> 12, ++ (req_range[i + 1] >> 12), + req_range[i+2] >> 12, + req_range[i+3] >> 12); + #endif +@@ -880,6 +886,13 @@ static noinline void check_alloc_range(struct maple_tree *mt) + 4503599618982063UL << 12, /* Size */ + 34359052178 << 12, /* Expected location */ + -EBUSY, /* Return failure. 
*/ ++ ++ /* Test a single entry */ ++ 34148798648 << 12, /* Min */ ++ 34148798648 << 12, /* Max */ ++ 4096, /* Size of 1 */ ++ 34148798648 << 12, /* Location is the same as min/max */ ++ 0, /* Success */ + }; + int i, range_count = ARRAY_SIZE(range); + int req_range_count = ARRAY_SIZE(req_range); diff --git a/mm/mmap.c b/mm/mmap.c -index ff68a67a2a7c..d5475fbf5729 100644 +index ff68a67a2a7c..6819eb2b77d7 100644 --- a/mm/mmap.c +++ b/mm/mmap.c -@@ -1518,7 +1518,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) - */ +@@ -1519,6 +1519,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) static unsigned long unmapped_area(struct vm_unmapped_area_info *info) { -- unsigned long length, gap; -+ unsigned long length, gap, low_limit; + unsigned long length, gap; ++ unsigned long low_limit, high_limit; + struct vm_area_struct *tmp; MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); -@@ -1527,12 +1528,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) +@@ -1527,12 +1529,32 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) if (length < info->length) return -ENOMEM; - if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1, - length)) + low_limit = info->low_limit; ++ if (low_limit < mmap_min_addr) ++ low_limit = mmap_min_addr; ++ high_limit = info->high_limit; +retry: -+ if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length)) ++ if (mas_empty_area(&mas, low_limit, high_limit - 1, length)) return -ENOMEM; gap = mas.index; @@ -387,25 +618,29 @@ index ff68a67a2a7c..d5475fbf5729 100644 return gap; } -@@ -1548,7 +1566,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) +@@ -1548,7 +1570,9 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) */ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { - unsigned long length, gap; -+ unsigned long length, gap, high_limit, gap_end; ++ unsigned long length, gap, gap_end; ++ unsigned long low_limit, high_limit; + struct vm_area_struct *tmp; MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ -@@ -1556,12 +1575,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) +@@ -1556,12 +1580,33 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) if (length < info->length) return -ENOMEM; - if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, +- length)) ++ low_limit = info->low_limit; ++ if (low_limit < mmap_min_addr) ++ low_limit = mmap_min_addr; + high_limit = info->high_limit; +retry: -+ if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1, - length)) ++ if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length)) return -ENOMEM; gap = mas.last + 1 - info->length; @@ -430,6 +665,15 @@ index ff68a67a2a7c..d5475fbf5729 100644 return gap; } +@@ -1675,7 +1720,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); + info.align_mask = 0; + info.align_offset = 0; diff --git a/mm/vmscan.c b/mm/vmscan.c index 71a7f4517e5a..ae60ddff831a 100644 --- a/mm/vmscan.c diff --git a/6.3/0009-Per-VMA-locks.patch b/6.3/0009-Per-VMA-locks.patch index 886c5e0a..d23244c4 100644 --- a/6.3/0009-Per-VMA-locks.patch +++ 
b/6.3/0009-Per-VMA-locks.patch @@ -1,7 +1,7 @@ -From bd89875ebbc1edad43e0af8a2bb9824ff0483cf1 Mon Sep 17 00:00:00 2001 +From 1a6be495a542f35f888b756b3fe71baec0acd980 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:33:32 +0200 -Subject: [PATCH 09/12] Per-VMA locks +Date: Sat, 22 Apr 2023 11:44:23 +0200 +Subject: [PATCH 09/13] Per-VMA locks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @@ -217,14 +217,14 @@ Signed-off-by: Peter Jung mm/init-mm.c | 3 + mm/internal.h | 2 +- mm/khugepaged.c | 10 +- - mm/memory.c | 186 +++++++++++++++---- + mm/memory.c | 187 +++++++++++++++---- mm/mmap.c | 48 +++-- mm/mprotect.c | 51 ++++- mm/mremap.c | 1 + - mm/rmap.c | 31 ++-- + mm/rmap.c | 31 +-- mm/vmstat.c | 6 + tools/testing/selftests/mm/userfaultfd.c | 45 ++++- - 34 files changed, 810 insertions(+), 113 deletions(-) + 34 files changed, 811 insertions(+), 113 deletions(-) diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst index 7dc823b56ca4..bd2226299583 100644 @@ -1312,7 +1312,7 @@ index 92e6f56a932d..042007f0bfa1 100644 * Re-check whether we have an ->anon_vma, because * collapse_and_free_pmd() requires that either no diff --git a/mm/memory.c b/mm/memory.c -index 01a23ad48a04..7c8278e8b5df 100644 +index 01a23ad48a04..9deb0d0f3f7f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -104,6 +104,20 @@ EXPORT_SYMBOL(mem_map); @@ -1541,7 +1541,7 @@ index 01a23ad48a04..7c8278e8b5df 100644 */ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) -@@ -5080,24 +5126,30 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, +@@ -5080,24 +5126,31 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should * still be in per-arch page fault handlers at the entry of page fault. */ @@ -1552,6 +1552,10 @@ index 01a23ad48a04..7c8278e8b5df 100644 { bool major; ++ /* Incomplete faults will be accounted upon completion. */ ++ if (ret & VM_FAULT_RETRY) ++ return; ++ /* - * We don't do accounting for some specific faults: - * @@ -1562,13 +1566,10 @@ index 01a23ad48a04..7c8278e8b5df 100644 - * - * - Incomplete faults (VM_FAULT_RETRY). They will only be counted - * once they're completed. -+ * Do not account for incomplete faults (VM_FAULT_RETRY). They will be -+ * counted upon completion. ++ * To preserve the behavior of older kernels, PGFAULT counters record ++ * both successful and failed faults, as opposed to perf counters, ++ * which ignore failed cases. + */ -+ if (ret & VM_FAULT_RETRY) -+ return; -+ -+ /* Register both successful and failed faults in PGFAULT counters. */ + count_vm_event(PGFAULT); + count_memcg_event_mm(mm, PGFAULT); + @@ -1583,11 +1584,11 @@ index 01a23ad48a04..7c8278e8b5df 100644 return; /* -@@ -5180,21 +5232,22 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, +@@ -5180,21 +5233,22 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) { -+ /* Copy vma->vm_mm in case mmap_lock is dropped and vma becomes unstable. 
*/ ++ /* If the fault handler drops the mmap_lock, vma may be freed */ + struct mm_struct *mm = vma->vm_mm; vm_fault_t ret; @@ -1612,7 +1613,7 @@ index 01a23ad48a04..7c8278e8b5df 100644 /* * Enable the memcg OOM handling for faults triggered in user -@@ -5223,13 +5276,70 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, +@@ -5223,13 +5277,70 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) mem_cgroup_oom_synchronize(false); } @@ -1686,7 +1687,7 @@ index 01a23ad48a04..7c8278e8b5df 100644 /* * Allocate p4d page table. diff --git a/mm/mmap.c b/mm/mmap.c -index d5475fbf5729..cbac45aa39ae 100644 +index 6819eb2b77d7..bba9e996dfdc 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -133,7 +133,7 @@ void unlink_file_vma(struct vm_area_struct *vma) @@ -1772,7 +1773,7 @@ index d5475fbf5729..cbac45aa39ae 100644 if (vma_start < vma->vm_start || vma_end > vma->vm_end) vma_expanded = true; -@@ -2157,7 +2170,7 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) +@@ -2164,7 +2177,7 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); @@ -1781,7 +1782,7 @@ index d5475fbf5729..cbac45aa39ae 100644 } vm_unacct_memory(nr_accounted); validate_mm(mm); -@@ -2180,7 +2193,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, +@@ -2187,7 +2200,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, update_hiwater_rss(mm); unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked); free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, @@ -1791,7 +1792,7 @@ index d5475fbf5729..cbac45aa39ae 100644 tlb_finish_mmu(&tlb); } -@@ -2236,10 +2250,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, +@@ -2243,10 +2257,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); @@ -1803,7 +1804,7 @@ index d5475fbf5729..cbac45aa39ae 100644 if (new_below) { vma->vm_start = addr; -@@ -2283,10 +2297,12 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, +@@ -2290,10 +2304,12 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, static inline int munmap_sidetree(struct vm_area_struct *vma, struct ma_state *mas_detach) { @@ -1816,7 +1817,7 @@ index d5475fbf5729..cbac45aa39ae 100644 if (vma->vm_flags & VM_LOCKED) vma->vm_mm->locked_vm -= vma_pages(vma); -@@ -2942,9 +2958,9 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, +@@ -2949,9 +2965,9 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, if (vma_iter_prealloc(vmi)) goto unacct_fail; @@ -1827,7 +1828,7 @@ index d5475fbf5729..cbac45aa39ae 100644 vma->vm_end = addr + len; vm_flags_set(vma, VM_SOFTDIRTY); vma_iter_store(vmi, vma); -@@ -3077,7 +3093,7 @@ void exit_mmap(struct mm_struct *mm) +@@ -3084,7 +3100,7 @@ void exit_mmap(struct mm_struct *mm) mmap_write_lock(mm); mt_clear_in_rcu(&mm->mm_mt); free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, @@ -1836,7 +1837,7 @@ index d5475fbf5729..cbac45aa39ae 100644 tlb_finish_mmu(&tlb); /* -@@ -3088,7 +3104,7 @@ void exit_mmap(struct mm_struct *mm) +@@ -3095,7 +3111,7 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); @@ -1845,7 +1846,7 @@ index d5475fbf5729..cbac45aa39ae 100644 
count++; cond_resched(); } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); -@@ -3211,6 +3227,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -3218,6 +3234,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); @@ -1853,7 +1854,7 @@ index d5475fbf5729..cbac45aa39ae 100644 if (vma_link(mm, new_vma)) goto out_vma_link; *need_rmap_locks = false; -@@ -3505,6 +3522,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) +@@ -3512,6 +3529,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) * of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); @@ -1861,7 +1862,7 @@ index d5475fbf5729..cbac45aa39ae 100644 * - all i_mmap_rwsem locks; * - all anon_vma->rwseml * -@@ -3527,6 +3545,13 @@ int mm_take_all_locks(struct mm_struct *mm) +@@ -3534,6 +3552,13 @@ int mm_take_all_locks(struct mm_struct *mm) mutex_lock(&mm_all_locks_mutex); @@ -1875,7 +1876,7 @@ index d5475fbf5729..cbac45aa39ae 100644 mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; -@@ -3616,6 +3641,7 @@ void mm_drop_all_locks(struct mm_struct *mm) +@@ -3623,6 +3648,7 @@ void mm_drop_all_locks(struct mm_struct *mm) if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } diff --git a/6.3/0010-sched.patch b/6.3/0010-sched.patch index d01ecd5e..b834aebe 100644 --- a/6.3/0010-sched.patch +++ b/6.3/0010-sched.patch @@ -1,7 +1,7 @@ -From d89736ae49cf981dff598347d19170c0e8dba99d Mon Sep 17 00:00:00 2001 +From ecf6bb05c11fa452ce4e97b36fc5933186812273 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:37:51 +0200 -Subject: [PATCH 10/12] sched +Date: Sat, 22 Apr 2023 11:46:01 +0200 +Subject: [PATCH 10/13] sched Signed-off-by: Peter Jung --- @@ -9,14 +9,17 @@ Signed-off-by: Peter Jung arch/x86/kernel/smpboot.c | 4 +- include/linux/sched.h | 3 + include/linux/sched/sd_flags.h | 5 +- + kernel/sched/clock.c | 3 + kernel/sched/core.c | 4 +- + kernel/sched/deadline.c | 1 + kernel/sched/debug.c | 1 + kernel/sched/fair.c | 265 ++++++++++++++++++++------------- kernel/sched/features.h | 1 + kernel/sched/pelt.c | 60 ++++++++ kernel/sched/pelt.h | 42 +++++- + kernel/sched/rt.c | 4 + kernel/sched/sched.h | 23 ++- - 11 files changed, 294 insertions(+), 137 deletions(-) + 14 files changed, 302 insertions(+), 137 deletions(-) diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c index 9ff480e94511..6510883c5e81 100644 @@ -114,6 +117,20 @@ index 57bde66d95f7..fad77b5172e2 100644 /* * Prefer to place tasks in a sibling domain +diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c +index 5732fa75ebab..b5cc2b53464d 100644 +--- a/kernel/sched/clock.c ++++ b/kernel/sched/clock.c +@@ -300,6 +300,9 @@ noinstr u64 local_clock(void) + if (static_branch_likely(&__sched_clock_stable)) + return sched_clock() + __sched_clock_offset; + ++ if (!static_branch_likely(&sched_clock_running)) ++ return sched_clock(); ++ + preempt_disable_notrace(); + clock = sched_clock_local(this_scd()); + preempt_enable_notrace(); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0d18c3969f90..17bb9637f314 100644 --- a/kernel/sched/core.c @@ -136,6 +153,18 @@ index 0d18c3969f90..17bb9637f314 100644 INIT_LIST_HEAD(&p->se.group_node); #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 
71b24371a6f7..ac8010f6f3a2 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) + !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || + task_on_cpu(rq, task) || + !dl_task(task) || ++ is_migration_disabled(task) || + !task_on_rq_queued(task))) { + double_unlock_balance(rq, later_rq); + later_rq = NULL; diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 1637b65ba07a..8d64fba16cfe 100644 --- a/kernel/sched/debug.c @@ -709,6 +738,26 @@ index 3a0e0dc28721..9b35b5072bae 100644 static inline void update_idle_rq_clock_pelt(struct rq *rq) { } +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 0a11f44adee5..4f5796dd26a5 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -2000,11 +2000,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) + * the mean time, task could have + * migrated already or had its affinity changed. + * Also make sure that it wasn't scheduled on its rq. ++ * It is possible the task was scheduled, set ++ * "migrate_disabled" and then got preempted, so we must ++ * check the task migration disable flag here too. + */ + if (unlikely(task_rq(task) != rq || + !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || + task_on_cpu(rq, task) || + !rt_task(task) || ++ is_migration_disabled(task) || + !task_on_rq_queued(task))) { + + double_unlock_balance(rq, lowest_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3e8df6d31c1e..7331d436ebc4 100644 --- a/kernel/sched/sched.h diff --git a/6.3/0011-Surface.patch b/6.3/0011-Surface.patch new file mode 100644 index 00000000..fb96a147 --- /dev/null +++ b/6.3/0011-Surface.patch @@ -0,0 +1,5691 @@ +From f7077e2986dd0ae576dcffaa91900578923d270c Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:46:19 +0200 +Subject: [PATCH 11/13] Surface + +Signed-off-by: Peter Jung +--- + arch/x86/kernel/acpi/boot.c | 24 + + drivers/acpi/acpi_tad.c | 35 +- + drivers/bluetooth/btusb.c | 15 + + drivers/hid/Kconfig | 4 + + drivers/hid/Makefile | 3 + + drivers/hid/hid-multitouch.c | 196 ++++++- + drivers/hid/ipts/Kconfig | 14 + + drivers/hid/ipts/Makefile | 14 + + drivers/hid/ipts/cmd.c | 62 ++ + drivers/hid/ipts/cmd.h | 61 ++ + drivers/hid/ipts/context.h | 51 ++ + drivers/hid/ipts/control.c | 495 ++++++++++++++++ + drivers/hid/ipts/control.h | 127 +++++ + drivers/hid/ipts/desc.h | 81 +++ + drivers/hid/ipts/hid.c | 348 ++++++++++++ + drivers/hid/ipts/hid.h | 22 + + drivers/hid/ipts/main.c | 127 +++++ + drivers/hid/ipts/mei.c | 189 +++++++ + drivers/hid/ipts/mei.h | 67 +++ + drivers/hid/ipts/receiver.c | 249 ++++++++ + drivers/hid/ipts/receiver.h | 17 + + drivers/hid/ipts/resources.c | 108 ++++ + drivers/hid/ipts/resources.h | 39 ++ + drivers/hid/ipts/spec-data.h | 100 ++++ + drivers/hid/ipts/spec-device.h | 285 ++++++++++ + drivers/hid/ipts/spec-hid.h | 35 ++ + drivers/hid/ipts/thread.c | 85 +++ + drivers/hid/ipts/thread.h | 60 ++ + drivers/hid/ithc/Kbuild | 6 + + drivers/hid/ithc/Kconfig | 12 + + drivers/hid/ithc/ithc-debug.c | 96 ++++ + drivers/hid/ithc/ithc-dma.c | 258 +++++++++ + drivers/hid/ithc/ithc-dma.h | 67 +++ + drivers/hid/ithc/ithc-main.c | 534 ++++++++++++++++++ + drivers/hid/ithc/ithc-regs.c | 64 +++ + drivers/hid/ithc/ithc-regs.h | 186 ++++++ + drivers/hid/ithc/ithc.h | 60 ++ + drivers/i2c/i2c-core-acpi.c | 35 ++ + drivers/input/misc/soc_button_array.c | 33 +- + drivers/iommu/intel/iommu.c | 24 + + drivers/iommu/intel/irq_remapping.c | 16 
+ + drivers/misc/mei/hw-me-regs.h | 1 + + drivers/misc/mei/pci-me.c | 1 + + drivers/net/wireless/ath/ath10k/core.c | 58 ++ + drivers/net/wireless/marvell/mwifiex/pcie.c | 19 + + .../wireless/marvell/mwifiex/pcie_quirks.c | 37 +- + .../wireless/marvell/mwifiex/pcie_quirks.h | 2 + + drivers/pci/pci-driver.c | 3 + + drivers/pci/quirks.c | 36 ++ + drivers/platform/surface/Kconfig | 7 + + drivers/platform/surface/Makefile | 1 + + drivers/platform/surface/surface3-wmi.c | 7 + + drivers/platform/surface/surface_gpe.c | 17 + + .../surface/surfacebook1_dgpu_switch.c | 162 ++++++ + drivers/platform/surface/surfacepro3_button.c | 30 +- + drivers/usb/core/quirks.c | 3 + + include/linux/pci.h | 1 + + sound/soc/codecs/rt5645.c | 9 + + .../intel/common/soc-acpi-intel-cht-match.c | 8 + + 59 files changed, 4636 insertions(+), 70 deletions(-) + create mode 100644 drivers/hid/ipts/Kconfig + create mode 100644 drivers/hid/ipts/Makefile + create mode 100644 drivers/hid/ipts/cmd.c + create mode 100644 drivers/hid/ipts/cmd.h + create mode 100644 drivers/hid/ipts/context.h + create mode 100644 drivers/hid/ipts/control.c + create mode 100644 drivers/hid/ipts/control.h + create mode 100644 drivers/hid/ipts/desc.h + create mode 100644 drivers/hid/ipts/hid.c + create mode 100644 drivers/hid/ipts/hid.h + create mode 100644 drivers/hid/ipts/main.c + create mode 100644 drivers/hid/ipts/mei.c + create mode 100644 drivers/hid/ipts/mei.h + create mode 100644 drivers/hid/ipts/receiver.c + create mode 100644 drivers/hid/ipts/receiver.h + create mode 100644 drivers/hid/ipts/resources.c + create mode 100644 drivers/hid/ipts/resources.h + create mode 100644 drivers/hid/ipts/spec-data.h + create mode 100644 drivers/hid/ipts/spec-device.h + create mode 100644 drivers/hid/ipts/spec-hid.h + create mode 100644 drivers/hid/ipts/thread.c + create mode 100644 drivers/hid/ipts/thread.h + create mode 100644 drivers/hid/ithc/Kbuild + create mode 100644 drivers/hid/ithc/Kconfig + create mode 100644 drivers/hid/ithc/ithc-debug.c + create mode 100644 drivers/hid/ithc/ithc-dma.c + create mode 100644 drivers/hid/ithc/ithc-dma.h + create mode 100644 drivers/hid/ithc/ithc-main.c + create mode 100644 drivers/hid/ithc/ithc-regs.c + create mode 100644 drivers/hid/ithc/ithc-regs.h + create mode 100644 drivers/hid/ithc/ithc.h + create mode 100644 drivers/platform/surface/surfacebook1_dgpu_switch.c + +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 0dac4ab5b55b..623d94a9cb86 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1252,6 +1253,24 @@ static void __init mp_config_acpi_legacy_irqs(void) + } + } + ++static const struct dmi_system_id surface_quirk[] __initconst = { ++ { ++ .ident = "Microsoft Surface Laptop 4 (AMD 15\")", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1952:1953") ++ }, ++ }, ++ { ++ .ident = "Microsoft Surface Laptop 4 (AMD 13\")", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1958:1959") ++ }, ++ }, ++ {} ++}; ++ + /* + * Parse IOAPIC related entries in MADT + * returns 0 on success, < 0 on error +@@ -1307,6 +1326,11 @@ static int __init acpi_parse_madt_ioapic_entries(void) + acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, + acpi_gbl_FADT.sci_interrupt); + ++ if (dmi_check_system(surface_quirk)) { ++ pr_warn("Surface hack: Override 
irq 7\n"); ++ mp_override_legacy_irq(7, 3, 3, 7); ++ } ++ + /* Fill in identity legacy mappings where no override */ + mp_config_acpi_legacy_irqs(); + +diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c +index e9b8e8305e23..944276934e7e 100644 +--- a/drivers/acpi/acpi_tad.c ++++ b/drivers/acpi/acpi_tad.c +@@ -432,6 +432,14 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, + + static DEVICE_ATTR_RO(caps); + ++static struct attribute *acpi_tad_attrs[] = { ++ &dev_attr_caps.attr, ++ NULL, ++}; ++static const struct attribute_group acpi_tad_attr_group = { ++ .attrs = acpi_tad_attrs, ++}; ++ + static ssize_t ac_alarm_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +@@ -480,15 +488,14 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr, + + static DEVICE_ATTR_RW(ac_status); + +-static struct attribute *acpi_tad_attrs[] = { +- &dev_attr_caps.attr, ++static struct attribute *acpi_tad_ac_attrs[] = { + &dev_attr_ac_alarm.attr, + &dev_attr_ac_policy.attr, + &dev_attr_ac_status.attr, + NULL, + }; +-static const struct attribute_group acpi_tad_attr_group = { +- .attrs = acpi_tad_attrs, ++static const struct attribute_group acpi_tad_ac_attr_group = { ++ .attrs = acpi_tad_ac_attrs, + }; + + static ssize_t dc_alarm_store(struct device *dev, struct device_attribute *attr, +@@ -563,13 +570,18 @@ static int acpi_tad_remove(struct platform_device *pdev) + + pm_runtime_get_sync(dev); + ++ if (dd->capabilities & ACPI_TAD_AC_WAKE) ++ sysfs_remove_group(&dev->kobj, &acpi_tad_ac_attr_group); ++ + if (dd->capabilities & ACPI_TAD_DC_WAKE) + sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); + + sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group); + +- acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); +- acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); ++ if (dd->capabilities & ACPI_TAD_AC_WAKE) { ++ acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); ++ acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); ++ } + if (dd->capabilities & ACPI_TAD_DC_WAKE) { + acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER); + acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER); +@@ -604,11 +616,6 @@ static int acpi_tad_probe(struct platform_device *pdev) + return -ENODEV; + } + +- if (!acpi_has_method(handle, "_PRW")) { +- dev_info(dev, "Missing _PRW\n"); +- return -ENODEV; +- } +- + dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; +@@ -637,6 +644,12 @@ static int acpi_tad_probe(struct platform_device *pdev) + if (ret) + goto fail; + ++ if (caps & ACPI_TAD_AC_WAKE) { ++ ret = sysfs_create_group(&dev->kobj, &acpi_tad_ac_attr_group); ++ if (ret) ++ goto fail; ++ } ++ + if (caps & ACPI_TAD_DC_WAKE) { + ret = sysfs_create_group(&dev->kobj, &acpi_tad_dc_attr_group); + if (ret) +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 5a80379253a7..5651b4bfe72c 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -65,6 +65,7 @@ static struct usb_driver btusb_driver; + #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) + #define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) + #define BTUSB_ACTIONS_SEMI BIT(27) ++#define BTUSB_LOWER_LESCAN_INTERVAL BIT(28) + + static const struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -468,6 +469,7 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x204e), 
.driver_info = BTUSB_MARVELL }, ++ { USB_DEVICE(0x1286, 0x204c), .driver_info = BTUSB_LOWER_LESCAN_INTERVAL }, + + /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_COMBINED }, +@@ -4033,6 +4035,19 @@ static int btusb_probe(struct usb_interface *intf, + if (id->driver_info & BTUSB_MARVELL) + hdev->set_bdaddr = btusb_set_bdaddr_marvell; + ++ /* The Marvell 88W8897 combined wifi and bluetooth card is known for ++ * very bad bt+wifi coexisting performance. ++ * ++ * Decrease the passive BT Low Energy scan interval a bit ++ * (0x0190 * 0.625 msec = 250 msec) and make the scan window shorter ++ * (0x000a * 0,625 msec = 6.25 msec). This allows for significantly ++ * higher wifi throughput while passively scanning for BT LE devices. ++ */ ++ if (id->driver_info & BTUSB_LOWER_LESCAN_INTERVAL) { ++ hdev->le_scan_interval = 0x0190; ++ hdev->le_scan_window = 0x000a; ++ } ++ + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) && + (id->driver_info & BTUSB_MEDIATEK)) { + hdev->setup = btusb_mtk_setup; +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 4ce012f83253..aebb62488cf1 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -1300,6 +1300,10 @@ config HID_KUNIT_TEST + + If in doubt, say "N". + ++source "drivers/hid/ipts/Kconfig" ++ ++source "drivers/hid/ithc/Kconfig" ++ + endmenu + + source "drivers/hid/bpf/Kconfig" +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile +index 5d37cacbde33..a3ff62e922f1 100644 +--- a/drivers/hid/Makefile ++++ b/drivers/hid/Makefile +@@ -167,3 +167,6 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/ + obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ + + obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ ++ ++obj-$(CONFIG_HID_IPTS) += ipts/ ++obj-$(CONFIG_HID_ITHC) += ithc/ +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index e31be0cb8b85..508a250ff4bf 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -34,7 +34,10 @@ + #include + #include + #include ++#include + #include ++#include ++#include + #include + #include + #include +@@ -47,6 +50,7 @@ MODULE_DESCRIPTION("HID multitouch panels"); + MODULE_LICENSE("GPL"); + + #include "hid-ids.h" ++#include "usbhid/usbhid.h" + + /* quirks to control the device */ + #define MT_QUIRK_NOT_SEEN_MEANS_UP BIT(0) +@@ -72,12 +76,18 @@ MODULE_LICENSE("GPL"); + #define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) + #define MT_QUIRK_DISABLE_WAKEUP BIT(21) + #define MT_QUIRK_ORIENTATION_INVERT BIT(22) ++#define MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT BIT(23) ++#define MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH BIT(24) + + #define MT_INPUTMODE_TOUCHSCREEN 0x02 + #define MT_INPUTMODE_TOUCHPAD 0x03 + + #define MT_BUTTONTYPE_CLICKPAD 0 + ++#define MS_TYPE_COVER_FEATURE_REPORT_USAGE 0xff050086 ++#define MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE 0xff050072 ++#define MS_TYPE_COVER_APPLICATION 0xff050050 ++ + enum latency_mode { + HID_LATENCY_NORMAL = 0, + HID_LATENCY_HIGH = 1, +@@ -169,6 +179,8 @@ struct mt_device { + + struct list_head applications; + struct list_head reports; ++ ++ struct notifier_block pm_notifier; + }; + + static void mt_post_parse_default_settings(struct mt_device *td, +@@ -213,6 +225,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); + #define MT_CLS_GOOGLE 0x0111 + #define MT_CLS_RAZER_BLADE_STEALTH 0x0112 + #define MT_CLS_SMART_TECH 0x0113 ++#define MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER 0x0114 + + #define MT_DEFAULT_MAXCONTACT 10 + #define MT_MAX_MAXCONTACT 250 +@@ -397,6 +410,17 @@ static const 
struct mt_class mt_classes[] = { + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_SEPARATE_APP_REPORT, + }, ++ { .name = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER, ++ .quirks = MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT | ++ MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH | ++ MT_QUIRK_ALWAYS_VALID | ++ MT_QUIRK_IGNORE_DUPLICATES | ++ MT_QUIRK_HOVERING | ++ MT_QUIRK_CONTACT_CNT_ACCURATE | ++ MT_QUIRK_STICKY_FINGERS | ++ MT_QUIRK_WIN8_PTP_BUTTONS, ++ .export_all_inputs = true ++ }, + { } + }; + +@@ -1370,6 +1394,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, + field->application != HID_CP_CONSUMER_CONTROL && + field->application != HID_GD_WIRELESS_RADIO_CTLS && + field->application != HID_GD_SYSTEM_MULTIAXIS && ++ !(field->application == MS_TYPE_COVER_APPLICATION && ++ application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) && + !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && + application->quirks & MT_QUIRK_ASUS_CUSTOM_UP)) + return -1; +@@ -1397,6 +1424,21 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, + return 1; + } + ++ /* ++ * The Microsoft Surface Pro Typecover has a non-standard HID ++ * tablet mode switch on a vendor specific usage page with vendor ++ * specific usage. ++ */ ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ usage->type = EV_SW; ++ usage->code = SW_TABLET_MODE; ++ *max = SW_MAX; ++ *bit = hi->input->swbit; ++ return 1; ++ } ++ + if (rdata->is_mt_collection) + return mt_touch_input_mapping(hdev, hi, field, usage, bit, max, + application); +@@ -1418,6 +1460,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, + { + struct mt_device *td = hid_get_drvdata(hdev); + struct mt_report_data *rdata; ++ struct input_dev *input; + + rdata = mt_find_report_data(td, field->report); + if (rdata && rdata->is_mt_collection) { +@@ -1425,6 +1468,19 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, + return -1; + } + ++ /* ++ * We own an input device which acts as a tablet mode switch for ++ * the Surface Pro Typecover. 
++ */ ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ rdata->application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ input = hi->input; ++ input_set_capability(input, EV_SW, SW_TABLET_MODE); ++ input_report_switch(input, SW_TABLET_MODE, 0); ++ return -1; ++ } ++ + /* let hid-core decide for the others */ + return 0; + } +@@ -1434,11 +1490,21 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, + { + struct mt_device *td = hid_get_drvdata(hid); + struct mt_report_data *rdata; ++ struct input_dev *input; + + rdata = mt_find_report_data(td, field->report); + if (rdata && rdata->is_mt_collection) + return mt_touch_event(hid, field, usage, value); + ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ rdata->application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ input = field->hidinput->input; ++ input_report_switch(input, SW_TABLET_MODE, (value & 0xFF) != 0x22); ++ input_sync(input); ++ return 1; ++ } ++ + return 0; + } + +@@ -1591,6 +1657,42 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app) + app->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; + } + ++static int get_type_cover_field(struct hid_report_enum *rep_enum, ++ struct hid_field **field, int usage) ++{ ++ struct hid_report *rep; ++ struct hid_field *cur_field; ++ int i, j; ++ ++ list_for_each_entry(rep, &rep_enum->report_list, list) { ++ for (i = 0; i < rep->maxfield; i++) { ++ cur_field = rep->field[i]; ++ if (cur_field->application != MS_TYPE_COVER_APPLICATION) ++ continue; ++ for (j = 0; j < cur_field->maxusage; j++) { ++ if (cur_field->usage[j].hid == usage) { ++ *field = cur_field; ++ return true; ++ } ++ } ++ } ++ } ++ return false; ++} ++ ++static void request_type_cover_tablet_mode_switch(struct hid_device *hdev) ++{ ++ struct hid_field *field; ++ ++ if (get_type_cover_field(&hdev->report_enum[HID_INPUT_REPORT], ++ &field, ++ MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE)) { ++ hid_hw_request(hdev, field->report, HID_REQ_GET_REPORT); ++ } else { ++ hid_err(hdev, "couldn't find tablet mode field\n"); ++ } ++} ++ + static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) + { + struct mt_device *td = hid_get_drvdata(hdev); +@@ -1640,6 +1742,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) + /* force BTN_STYLUS to allow tablet matching in udev */ + __set_bit(BTN_STYLUS, hi->input->keybit); + break; ++ case MS_TYPE_COVER_APPLICATION: ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) { ++ suffix = "Tablet Mode Switch"; ++ request_type_cover_tablet_mode_switch(hdev); ++ break; ++ } ++ fallthrough; + default: + suffix = "UNKNOWN"; + break; +@@ -1728,6 +1837,46 @@ static void mt_expired_timeout(struct timer_list *t) + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + } + ++static void update_keyboard_backlight(struct hid_device *hdev, bool enabled) ++{ ++ struct usb_device *udev = hid_to_usb_dev(hdev); ++ struct hid_field *field = NULL; ++ ++ /* Wake up the device in case it's already suspended */ ++ pm_runtime_get_sync(&udev->dev); ++ ++ if (!get_type_cover_field(&hdev->report_enum[HID_FEATURE_REPORT], ++ &field, ++ MS_TYPE_COVER_FEATURE_REPORT_USAGE)) { ++ hid_err(hdev, "couldn't find backlight field\n"); ++ goto out; ++ } ++ ++ field->value[field->index] = enabled ? 
0x01ff00ff : 0x00ff00ff; ++ hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT); ++ ++out: ++ pm_runtime_put_sync(&udev->dev); ++} ++ ++static int mt_pm_notifier(struct notifier_block *notifier, ++ unsigned long pm_event, ++ void *unused) ++{ ++ struct mt_device *td = ++ container_of(notifier, struct mt_device, pm_notifier); ++ struct hid_device *hdev = td->hdev; ++ ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT) { ++ if (pm_event == PM_SUSPEND_PREPARE) ++ update_keyboard_backlight(hdev, 0); ++ else if (pm_event == PM_POST_SUSPEND) ++ update_keyboard_backlight(hdev, 1); ++ } ++ ++ return NOTIFY_DONE; ++} ++ + static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + { + int ret, i; +@@ -1751,6 +1900,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN; + hid_set_drvdata(hdev, td); + ++ td->pm_notifier.notifier_call = mt_pm_notifier; ++ register_pm_notifier(&td->pm_notifier); ++ + INIT_LIST_HEAD(&td->applications); + INIT_LIST_HEAD(&td->reports); + +@@ -1789,15 +1941,19 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + timer_setup(&td->release_timer, mt_expired_timeout, 0); + + ret = hid_parse(hdev); +- if (ret != 0) ++ if (ret != 0) { ++ unregister_pm_notifier(&td->pm_notifier); + return ret; ++ } + + if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID) + mt_fix_const_fields(hdev, HID_DG_CONTACTID); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); +- if (ret) ++ if (ret) { ++ unregister_pm_notifier(&td->pm_notifier); + return ret; ++ } + + ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group); + if (ret) +@@ -1826,13 +1982,24 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state) + + static int mt_reset_resume(struct hid_device *hdev) + { ++ struct mt_device *td = hid_get_drvdata(hdev); ++ + mt_release_contacts(hdev); + mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true); ++ ++ /* Request an update on the typecover folding state on resume ++ * after reset. ++ */ ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) ++ request_type_cover_tablet_mode_switch(hdev); ++ + return 0; + } + + static int mt_resume(struct hid_device *hdev) + { ++ struct mt_device *td = hid_get_drvdata(hdev); ++ + /* Some Elan legacy devices require SET_IDLE to be set on resume. + * It should be safe to send it to other devices too. + * Tested on 3M, Stantum, Cypress, Zytronic, eGalax, and Elan panels. */ +@@ -1841,6 +2008,10 @@ static int mt_resume(struct hid_device *hdev) + + mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true); + ++ /* Request an update on the typecover folding state on resume. */ ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) ++ request_type_cover_tablet_mode_switch(hdev); ++ + return 0; + } + #endif +@@ -1848,7 +2019,23 @@ static int mt_resume(struct hid_device *hdev) + static void mt_remove(struct hid_device *hdev) + { + struct mt_device *td = hid_get_drvdata(hdev); ++ struct hid_field *field; ++ struct input_dev *input; + ++ /* Reset tablet mode switch on disconnect. 
*/
++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) {
++ if (get_type_cover_field(&hdev->report_enum[HID_INPUT_REPORT],
++ &field,
++ MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE)) {
++ input = field->hidinput->input;
++ input_report_switch(input, SW_TABLET_MODE, 0);
++ input_sync(input);
++ } else {
++ hid_err(hdev, "couldn't find tablet mode field\n");
++ }
++ }
++
++ unregister_pm_notifier(&td->pm_notifier);
+ del_timer_sync(&td->release_timer);
+
+ sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
+@@ -2226,6 +2413,11 @@ static const struct hid_device_id mt_devices[] = {
+ MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
++ /* Microsoft Surface type cover */
++ { .driver_data = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER,
++ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
++ USB_VENDOR_ID_MICROSOFT, 0x09c0) },
++
+ /* Google MT devices */
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+diff --git a/drivers/hid/ipts/Kconfig b/drivers/hid/ipts/Kconfig
+new file mode 100644
+index 000000000000..297401bd388d
+--- /dev/null
++++ b/drivers/hid/ipts/Kconfig
+@@ -0,0 +1,14 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++
++config HID_IPTS
++ tristate "Intel Precise Touch & Stylus"
++ depends on INTEL_MEI
++ depends on HID
++ help
++ Say Y here if your system has a touchscreen using Intel's
++ Precise Touch & Stylus (IPTS) technology.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ipts.
+diff --git a/drivers/hid/ipts/Makefile b/drivers/hid/ipts/Makefile
+new file mode 100644
+index 000000000000..0fe655bccdc0
+--- /dev/null
++++ b/drivers/hid/ipts/Makefile
+@@ -0,0 +1,14 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++#
++# Makefile for the IPTS touchscreen driver
++#
++
++obj-$(CONFIG_HID_IPTS) += ipts.o
++ipts-objs := cmd.o
++ipts-objs += control.o
++ipts-objs += hid.o
++ipts-objs += main.o
++ipts-objs += mei.o
++ipts-objs += receiver.o
++ipts-objs += resources.o
++ipts-objs += thread.o
+diff --git a/drivers/hid/ipts/cmd.c b/drivers/hid/ipts/cmd.c
+new file mode 100644
+index 000000000000..7fd69271ccd5
+--- /dev/null
++++ b/drivers/hid/ipts/cmd.c
+@@ -0,0 +1,62 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020-2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include
++#include
++
++#include "cmd.h"
++#include "context.h"
++#include "mei.h"
++#include "spec-device.h"
++
++int ipts_cmd_recv_timeout(struct ipts_context *ipts, enum ipts_command_code code,
++ struct ipts_response *rsp, u64 timeout)
++{
++ int ret = 0;
++
++ if (!ipts)
++ return -EFAULT;
++
++ if (!rsp)
++ return -EFAULT;
++
++ /*
++ * In a response, the command code will have the most significant bit flipped to 1.
++ * If code is passed to ipts_mei_recv as is, no messages will be received.
++ */
++ ret = ipts_mei_recv(&ipts->mei, code | IPTS_RSP_BIT, rsp, timeout);
++ if (ret < 0)
++ return ret;
++
++ dev_dbg(ipts->dev, "Received 0x%02X with status 0x%02X\n", code, rsp->status);
++
++ /*
++ * Some devices will always return this error.
++ * It is allowed to ignore this error and to continue.
++ */ ++ if (rsp->status == IPTS_STATUS_COMPAT_CHECK_FAIL) ++ rsp->status = IPTS_STATUS_SUCCESS; ++ ++ return 0; ++} ++ ++int ipts_cmd_send(struct ipts_context *ipts, enum ipts_command_code code, void *data, size_t size) ++{ ++ struct ipts_command cmd = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.cmd = code; ++ ++ if (data && size > 0) ++ memcpy(cmd.payload, data, size); ++ ++ dev_dbg(ipts->dev, "Sending 0x%02X with %ld bytes payload\n", code, size); ++ return ipts_mei_send(&ipts->mei, &cmd, sizeof(cmd.cmd) + size); ++} +diff --git a/drivers/hid/ipts/cmd.h b/drivers/hid/ipts/cmd.h +new file mode 100644 +index 000000000000..924758ffee67 +--- /dev/null ++++ b/drivers/hid/ipts/cmd.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_CMD_H ++#define IPTS_CMD_H ++ ++#include ++ ++#include "context.h" ++#include "spec-device.h" ++ ++/* ++ * The default timeout for receiving responses ++ */ ++#define IPTS_CMD_DEFAULT_TIMEOUT 1000 ++ ++/* ++ * ipts_cmd_recv_timeout() - Receives a response to a command. ++ * @ipts: The IPTS driver context. ++ * @code: The type of the command / response. ++ * @rsp: The address that the received response will be copied to. ++ * @timeout: How many milliseconds the function will wait at most. ++ * ++ * A negative timeout means to wait forever. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++int ipts_cmd_recv_timeout(struct ipts_context *ipts, enum ipts_command_code code, ++ struct ipts_response *rsp, u64 timeout); ++ ++/* ++ * ipts_cmd_recv() - Receives a response to a command. ++ * @ipts: The IPTS driver context. ++ * @code: The type of the command / response. ++ * @rsp: The address that the received response will be copied to. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++static inline int ipts_cmd_recv(struct ipts_context *ipts, enum ipts_command_code code, ++ struct ipts_response *rsp) ++{ ++ return ipts_cmd_recv_timeout(ipts, code, rsp, IPTS_CMD_DEFAULT_TIMEOUT); ++} ++ ++/* ++ * ipts_cmd_send() - Executes a command on the device. ++ * @ipts: The IPTS driver context. ++ * @code: The type of the command to execute. ++ * @data: The payload containing parameters for the command. ++ * @size: The size of the payload. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_cmd_send(struct ipts_context *ipts, enum ipts_command_code code, void *data, size_t size); ++ ++#endif /* IPTS_CMD_H */ +diff --git a/drivers/hid/ipts/context.h b/drivers/hid/ipts/context.h +new file mode 100644 +index 000000000000..3450a95e66ee +--- /dev/null ++++ b/drivers/hid/ipts/context.h +@@ -0,0 +1,51 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_CONTEXT_H ++#define IPTS_CONTEXT_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mei.h" ++#include "resources.h" ++#include "spec-device.h" ++#include "thread.h" ++ ++struct ipts_context { ++ struct device *dev; ++ struct ipts_mei mei; ++ ++ enum ipts_mode mode; ++ ++ /* ++ * Prevents concurrent GET_FEATURE reports. 
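++ * Responses are delivered through a single shared buffer and completion,
++ * so only one request can be in flight at a time.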
++ */ ++ struct mutex feature_lock; ++ struct completion feature_event; ++ ++ /* ++ * These are not inside of struct ipts_resources ++ * because they don't own the memory they point to. ++ */ ++ struct ipts_buffer feature_report; ++ struct ipts_buffer descriptor; ++ ++ struct hid_device *hid; ++ struct ipts_device_info info; ++ struct ipts_resources resources; ++ ++ struct ipts_thread receiver_loop; ++}; ++ ++#endif /* IPTS_CONTEXT_H */ +diff --git a/drivers/hid/ipts/control.c b/drivers/hid/ipts/control.c +new file mode 100644 +index 000000000000..2f61500b5119 +--- /dev/null ++++ b/drivers/hid/ipts/control.c +@@ -0,0 +1,495 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "context.h" ++#include "control.h" ++#include "desc.h" ++#include "hid.h" ++#include "receiver.h" ++#include "resources.h" ++#include "spec-data.h" ++#include "spec-device.h" ++ ++static int ipts_control_get_device_info(struct ipts_context *ipts, struct ipts_device_info *info) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!info) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_GET_DEVICE_INFO, NULL, 0); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_GET_DEVICE_INFO, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ memcpy(info, rsp.payload, sizeof(*info)); ++ return 0; ++} ++ ++static int ipts_control_set_mode(struct ipts_context *ipts, enum ipts_mode mode) ++{ ++ int ret = 0; ++ struct ipts_set_mode cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.mode = mode; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_SET_MODE, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MODE: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_SET_MODE, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MODE: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "SET_MODE: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++static int ipts_control_set_mem_window(struct ipts_context *ipts, struct ipts_resources *res) ++{ ++ int ret = 0; ++ struct ipts_mem_window cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ cmd.data_addr_lower[i] = lower_32_bits(res->data[i].dma_address); ++ cmd.data_addr_upper[i] = upper_32_bits(res->data[i].dma_address); ++ cmd.feedback_addr_lower[i] = lower_32_bits(res->feedback[i].dma_address); ++ cmd.feedback_addr_upper[i] = upper_32_bits(res->feedback[i].dma_address); ++ } ++ ++ cmd.workqueue_addr_lower = lower_32_bits(res->workqueue.dma_address); ++ cmd.workqueue_addr_upper = upper_32_bits(res->workqueue.dma_address); ++ ++ cmd.doorbell_addr_lower = lower_32_bits(res->doorbell.dma_address); ++ cmd.doorbell_addr_upper = upper_32_bits(res->doorbell.dma_address); ++ ++ cmd.hid2me_addr_lower = 
lower_32_bits(res->hid2me.dma_address); ++ cmd.hid2me_addr_upper = upper_32_bits(res->hid2me.dma_address); ++ ++ cmd.workqueue_size = IPTS_WORKQUEUE_SIZE; ++ cmd.workqueue_item_size = IPTS_WORKQUEUE_ITEM_SIZE; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_SET_MEM_WINDOW, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_SET_MEM_WINDOW, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++static int ipts_control_get_descriptor(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_data_header *header = NULL; ++ struct ipts_get_descriptor cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->resources.descriptor.address) ++ return -EFAULT; ++ ++ memset(ipts->resources.descriptor.address, 0, ipts->resources.descriptor.size); ++ ++ cmd.addr_lower = lower_32_bits(ipts->resources.descriptor.dma_address); ++ cmd.addr_upper = upper_32_bits(ipts->resources.descriptor.dma_address); ++ cmd.magic = 8; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_GET_DESCRIPTOR, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_GET_DESCRIPTOR, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ header = (struct ipts_data_header *)ipts->resources.descriptor.address; ++ ++ if (header->type == IPTS_DATA_TYPE_DESCRIPTOR) { ++ ipts->descriptor.address = &header->data[8]; ++ ipts->descriptor.size = header->size - 8; ++ ++ return 0; ++ } ++ ++ return -ENODATA; ++} ++ ++int ipts_control_request_flush(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_quiesce_io cmd = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_QUIESCE_IO, &cmd, sizeof(cmd)); ++ if (ret) ++ dev_err(ipts->dev, "QUIESCE_IO: send failed: %d\n", ret); ++ ++ return ret; ++} ++ ++int ipts_control_wait_flush(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_QUIESCE_IO, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "QUIESCE_IO: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status == IPTS_STATUS_TIMEOUT) ++ return -EAGAIN; ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "QUIESCE_IO: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_request_data(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_READY_FOR_DATA, NULL, 0); ++ if (ret) ++ dev_err(ipts->dev, "READY_FOR_DATA: send failed: %d\n", ret); ++ ++ return ret; ++} ++ ++int ipts_control_wait_data(struct ipts_context *ipts, bool shutdown) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!shutdown) ++ ret = ipts_cmd_recv_timeout(ipts, IPTS_CMD_READY_FOR_DATA, &rsp, 0); ++ else ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_READY_FOR_DATA, &rsp); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ 
dev_err(ipts->dev, "READY_FOR_DATA: recv failed: %d\n", ret); ++ ++ return ret; ++ } ++ ++ /* ++ * During shutdown, it is possible that the sensor has already been disabled. ++ */ ++ if (rsp.status == IPTS_STATUS_SENSOR_DISABLED) ++ return 0; ++ ++ if (rsp.status == IPTS_STATUS_TIMEOUT) ++ return -EAGAIN; ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "READY_FOR_DATA: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_send_feedback(struct ipts_context *ipts, u32 buffer) ++{ ++ int ret = 0; ++ struct ipts_feedback cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.buffer = buffer; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_FEEDBACK, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "FEEDBACK: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_FEEDBACK, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "FEEDBACK: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ /* ++ * We don't know what feedback data looks like so we are sending zeros. ++ * See also ipts_control_refill_buffer. ++ */ ++ if (rsp.status == IPTS_STATUS_INVALID_PARAMS) ++ return 0; ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "FEEDBACK: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_hid2me_feedback(struct ipts_context *ipts, enum ipts_feedback_cmd_type cmd, ++ enum ipts_feedback_data_type type, void *data, size_t size) ++{ ++ struct ipts_feedback_header *header = NULL; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->resources.hid2me.address) ++ return -EFAULT; ++ ++ memset(ipts->resources.hid2me.address, 0, ipts->resources.hid2me.size); ++ header = (struct ipts_feedback_header *)ipts->resources.hid2me.address; ++ ++ header->cmd_type = cmd; ++ header->data_type = type; ++ header->size = size; ++ header->buffer = IPTS_HID2ME_BUFFER; ++ ++ if (size + sizeof(*header) > ipts->resources.hid2me.size) ++ return -EINVAL; ++ ++ if (data && size > 0) ++ memcpy(header->payload, data, size); ++ ++ return ipts_control_send_feedback(ipts, IPTS_HID2ME_BUFFER); ++} ++ ++static inline int ipts_control_reset_sensor(struct ipts_context *ipts) ++{ ++ return ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_SOFT_RESET, ++ IPTS_FEEDBACK_DATA_TYPE_VENDOR, NULL, 0); ++} ++ ++int ipts_control_start(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_device_info info = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "Starting IPTS\n"); ++ ++ ret = ipts_control_get_device_info(ipts, &info); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to get device info: %d\n", ret); ++ return ret; ++ } ++ ++ ipts->info = info; ++ ++ ret = ipts_resources_init(&ipts->resources, ipts->dev, info.data_size, info.feedback_size); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to allocate buffers: %d", ret); ++ return ret; ++ } ++ ++ dev_info(ipts->dev, "IPTS EDS Version: %d\n", info.intf_eds); ++ ++ /* ++ * Handle newer devices ++ */ ++ if (info.intf_eds > 1) { ++ /* ++ * Fetching the descriptor will only work on newer devices. ++ * For older devices, a fallback descriptor will be used. ++ */ ++ ret = ipts_control_get_descriptor(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to fetch HID descriptor: %d\n", ret); ++ return ret; ++ } ++ ++ /* ++ * Newer devices can be directly initialized in doorbell mode. 
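++ * Older devices are brought up in event mode first and are switched
++ * over later, through the mode-switch feature report handled in
++ * ipts_hid_switch_mode().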
++ */
++ ipts->mode = IPTS_MODE_DOORBELL;
++ }
++
++ ret = ipts_control_set_mode(ipts, ipts->mode);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to set mode: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_control_set_mem_window(ipts, &ipts->resources);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to set memory window: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_receiver_start(ipts);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to start receiver: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_control_request_data(ipts);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to request data: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_hid_init(ipts, info);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to initialize HID device: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int _ipts_control_stop(struct ipts_context *ipts)
++{
++ int ret = 0;
++
++ if (!ipts)
++ return -EFAULT;
++
++ dev_info(ipts->dev, "Stopping IPTS\n");
++
++ ret = ipts_receiver_stop(ipts);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to stop receiver: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_control_reset_sensor(ipts);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to reset sensor: %d\n", ret);
++ return ret;
++ }
++
++ ret = ipts_resources_free(&ipts->resources);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to free resources: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++int ipts_control_stop(struct ipts_context *ipts)
++{
++ int ret = 0;
++
++ ret = _ipts_control_stop(ipts);
++ if (ret)
++ return ret;
++
++ ret = ipts_hid_free(ipts);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to free HID device: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++int ipts_control_restart(struct ipts_context *ipts)
++{
++ int ret = 0;
++
++ ret = _ipts_control_stop(ipts);
++ if (ret)
++ return ret;
++
++ /*
++ * Give the sensor some time to come back from resetting
++ */
++ msleep(1000);
++
++ ret = ipts_control_start(ipts);
++ if (ret)
++ return ret;
++
++ return 0;
++}
+diff --git a/drivers/hid/ipts/control.h b/drivers/hid/ipts/control.h
+new file mode 100644
+index 000000000000..744bb92d682a
+--- /dev/null
++++ b/drivers/hid/ipts/control.h
+@@ -0,0 +1,127 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020-2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef IPTS_CONTROL_H
++#define IPTS_CONTROL_H
++
++#include
++
++#include "context.h"
++#include "spec-data.h"
++#include "spec-device.h"
++
++/*
++ * ipts_control_request_flush() - Stop the data flow.
++ * @ipts: The IPTS driver context.
++ *
++ * Runs the command to stop the data flow on the device.
++ * All outstanding data needs to be acknowledged using feedback before the command will return.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_request_flush(struct ipts_context *ipts);
++
++/*
++ * ipts_control_wait_flush() - Wait until data flow has been stopped.
++ * @ipts: The IPTS driver context.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_wait_flush(struct ipts_context *ipts);
++
++/*
++ * ipts_control_request_data() - Notify the device that the driver can receive new data.
++ * @ipts: The IPTS driver context.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_request_data(struct ipts_context *ipts);
++
++/*
++ * ipts_control_wait_data() - Wait until new data is available.
++ * @ipts: The IPTS driver context.
++ * @block: Whether to block execution until data is available.
++ *
++ * In doorbell mode, this function will never return while the data flow is active. Instead,
++ * the doorbell will be incremented when new data is available.
++ *
++ * Returns: 0 on success, <0 on error, -EAGAIN if no data is available.
++ */
++int ipts_control_wait_data(struct ipts_context *ipts, bool block);
++
++/*
++ * ipts_control_send_feedback() - Submits a feedback buffer to the device.
++ * @ipts: The IPTS driver context.
++ * @buffer: The ID of the buffer containing feedback data.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_send_feedback(struct ipts_context *ipts, u32 buffer);
++
++/*
++ * ipts_control_hid2me_feedback() - Sends HID2ME feedback, a special type of feedback.
++ * @ipts: The IPTS driver context.
++ * @cmd: The command that will be run on the device.
++ * @type: The type of the payload that is sent to the device.
++ * @data: The payload of the feedback command.
++ * @size: The size of the payload.
++ *
++ * HID2ME feedback is a special type of feedback, because it allows interfacing with
++ * the HID API of the device at any moment, without requiring a buffer that has to
++ * be acknowledged.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_hid2me_feedback(struct ipts_context *ipts, enum ipts_feedback_cmd_type cmd,
++ enum ipts_feedback_data_type type, void *data, size_t size);
++
++/*
++ * ipts_control_refill_buffer() - Acknowledges that data in a buffer has been processed.
++ * @ipts: The IPTS driver context.
++ * @buffer: The buffer that has been processed and can be refilled.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++static inline int ipts_control_refill_buffer(struct ipts_context *ipts, u32 buffer)
++{
++ /*
++ * IPTS expects structured data in the feedback buffer matching the buffer that will be
++ * refilled. We don't know what that data looks like, so we just keep the buffer empty.
++ * This results in an INVALID_PARAMS error, but the buffer gets refilled without an issue.
++ * Sending a minimal structure with the buffer ID fixes the error, but breaks refilling
++ * the buffers on some devices.
++ */
++
++ return ipts_control_send_feedback(ipts, buffer);
++}
++
++/*
++ * ipts_control_start() - Initializes the device and starts the data flow.
++ * @ipts: The IPTS driver context.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_start(struct ipts_context *ipts);
++
++/*
++ * ipts_control_stop() - Stops the data flow and resets the device.
++ * @ipts: The IPTS driver context.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_control_stop(struct ipts_context *ipts);
++
++/*
++ * ipts_control_restart() - Stops the device and starts it again.
++ * @ipts: The IPTS driver context.
++ *
++ * Returns: 0 on success, <0 on error.
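++ *
++ * Between stopping and starting, this function sleeps for roughly one
++ * second so that the sensor has time to come back from the reset.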
++ */ ++int ipts_control_restart(struct ipts_context *ipts); ++ ++#endif /* IPTS_CONTROL_H */ +diff --git a/drivers/hid/ipts/desc.h b/drivers/hid/ipts/desc.h +new file mode 100644 +index 000000000000..c058974a03a1 +--- /dev/null ++++ b/drivers/hid/ipts/desc.h +@@ -0,0 +1,81 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_DESC_H ++#define IPTS_DESC_H ++ ++#include ++ ++#define IPTS_HID_REPORT_SINGLETOUCH 64 ++#define IPTS_HID_REPORT_DATA 65 ++#define IPTS_HID_REPORT_SET_MODE 66 ++ ++#define IPTS_HID_REPORT_DATA_SIZE 7485 ++ ++/* ++ * HID descriptor for singletouch data. ++ * This descriptor should be present on all IPTS devices. ++ */ ++static const u8 ipts_singletouch_descriptor[] = { ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x04, /* Usage (Touchscreen), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x40, /* Report ID (64), */ ++ 0x09, 0x42, /* Usage (Tip Switch), */ ++ 0x15, 0x00, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x95, 0x07, /* Report Count (7), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x09, 0x30, /* Usage (X), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0xA4, /* Push, */ ++ 0x55, 0x0E, /* Unit Exponent (14), */ ++ 0x65, 0x11, /* Unit (Centimeter), */ ++ 0x46, 0x76, 0x0B, /* Physical Maximum (2934), */ ++ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x31, /* Usage (Y), */ ++ 0x46, 0x74, 0x06, /* Physical Maximum (1652), */ ++ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0xB4, /* Pop, */ ++ 0xC0, /* End Collection */ ++}; ++ ++/* ++ * Fallback HID descriptor for older devices that do not have ++ * the ability to query their HID descriptor. 
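++ * It exposes the raw data frames as report 65 (IPTS_HID_REPORT_DATA)
++ * and the mode switch as feature report 66 (IPTS_HID_REPORT_SET_MODE).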
++ */ ++static const u8 ipts_fallback_descriptor[] = { ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x0F, /* Usage (Capacitive Hm Digitizer), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x41, /* Report ID (65), */ ++ 0x09, 0x56, /* Usage (Scan Time), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x61, /* Usage (Gesture Char Quality), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x96, 0x3D, 0x1D, /* Report Count (7485), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0x85, 0x42, /* Report ID (66), */ ++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ ++ 0x09, 0xC8, /* Usage (C8h), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0xB1, 0x02, /* Feature (Variable), */ ++ 0xC0, /* End Collection, */ ++}; ++ ++#endif /* IPTS_DESC_H */ +diff --git a/drivers/hid/ipts/hid.c b/drivers/hid/ipts/hid.c +new file mode 100644 +index 000000000000..6782394e8dde +--- /dev/null ++++ b/drivers/hid/ipts/hid.c +@@ -0,0 +1,348 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "control.h" ++#include "desc.h" ++#include "hid.h" ++#include "spec-data.h" ++#include "spec-device.h" ++#include "spec-hid.h" ++ ++static int ipts_hid_start(struct hid_device *hid) ++{ ++ return 0; ++} ++ ++static void ipts_hid_stop(struct hid_device *hid) ++{ ++} ++ ++static int ipts_hid_switch_mode(struct ipts_context *ipts, enum ipts_mode mode) ++{ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (ipts->mode == mode) ++ return 0; ++ ++ /* ++ * This is only allowed on older devices. 
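++ * Newer devices are always initialized in doorbell mode, and the
++ * request is silently ignored for them.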
++ */ ++ if (ipts->info.intf_eds > 1) ++ return 0; ++ ++ ipts->mode = mode; ++ return ipts_control_restart(ipts); ++} ++ ++static int ipts_hid_parse(struct hid_device *hid) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ bool has_native_descriptor = false; ++ ++ u8 *buffer = NULL; ++ size_t size = 0; ++ ++ if (!hid) ++ return -ENODEV; ++ ++ ipts = hid->driver_data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ size = sizeof(ipts_singletouch_descriptor); ++ has_native_descriptor = ipts->descriptor.address && ipts->descriptor.size > 0; ++ ++ if (has_native_descriptor) ++ size += ipts->descriptor.size; ++ else ++ size += sizeof(ipts_fallback_descriptor); ++ ++ buffer = kzalloc(size, GFP_KERNEL); ++ if (!buffer) ++ return -ENOMEM; ++ ++ memcpy(buffer, ipts_singletouch_descriptor, sizeof(ipts_singletouch_descriptor)); ++ ++ if (has_native_descriptor) { ++ memcpy(&buffer[sizeof(ipts_singletouch_descriptor)], ipts->descriptor.address, ++ ipts->descriptor.size); ++ } else { ++ memcpy(&buffer[sizeof(ipts_singletouch_descriptor)], ipts_fallback_descriptor, ++ sizeof(ipts_fallback_descriptor)); ++ } ++ ++ ret = hid_parse_report(hid, buffer, size); ++ kfree(buffer); ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to parse HID descriptor: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ipts_hid_get_feature(struct ipts_context *ipts, unsigned char reportnum, __u8 *buf, ++ size_t size, enum ipts_feedback_data_type type) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ mutex_lock(&ipts->feature_lock); ++ ++ memset(buf, 0, size); ++ buf[0] = reportnum; ++ ++ memset(&ipts->feature_report, 0, sizeof(ipts->feature_report)); ++ reinit_completion(&ipts->feature_event); ++ ++ ret = ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE, type, buf, size); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to send hid2me feedback: %d\n", ret); ++ goto out; ++ } ++ ++ ret = wait_for_completion_timeout(&ipts->feature_event, msecs_to_jiffies(5000)); ++ if (ret == 0) { ++ dev_warn(ipts->dev, "GET_FEATURES timed out!\n"); ++ ret = -EIO; ++ goto out; ++ } ++ ++ if (!ipts->feature_report.address) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (ipts->feature_report.size > size) { ++ ret = -ETOOSMALL; ++ goto out; ++ } ++ ++ ret = ipts->feature_report.size; ++ memcpy(buf, ipts->feature_report.address, ipts->feature_report.size); ++ ++out: ++ mutex_unlock(&ipts->feature_lock); ++ return ret; ++} ++ ++static int ipts_hid_set_feature(struct ipts_context *ipts, unsigned char reportnum, __u8 *buf, ++ size_t size, enum ipts_feedback_data_type type) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ buf[0] = reportnum; ++ ++ ret = ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE, type, buf, size); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send hid2me feedback: %d\n", ret); ++ ++ return ret; ++} ++ ++static int ipts_hid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, ++ size_t size, unsigned char rtype, int reqtype) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ enum ipts_feedback_data_type type = IPTS_FEEDBACK_DATA_TYPE_VENDOR; ++ ++ if (!hid) ++ return -ENODEV; ++ ++ ipts = hid->driver_data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) ++ type = IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == 
HID_REQ_GET_REPORT)
++ type = IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES;
++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
++ type = IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES;
++ else
++ return -EIO;
++
++ // Implement mode switching report for older devices without native HID support
++ if (type == IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES && reportnum == IPTS_HID_REPORT_SET_MODE) {
++ ret = ipts_hid_switch_mode(ipts, buf[1]);
++ if (ret) {
++ dev_err(ipts->dev, "Failed to switch modes: %d\n", ret);
++ return ret;
++ }
++ }
++
++ if (reqtype == HID_REQ_GET_REPORT)
++ return ipts_hid_get_feature(ipts, reportnum, buf, size, type);
++ else
++ return ipts_hid_set_feature(ipts, reportnum, buf, size, type);
++}
++
++static int ipts_hid_output_report(struct hid_device *hid, __u8 *data, size_t size)
++{
++ struct ipts_context *ipts = NULL;
++
++ if (!hid)
++ return -ENODEV;
++
++ ipts = hid->driver_data;
++
++ return ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE,
++ IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT, data, size);
++}
++
++static struct hid_ll_driver ipts_hid_driver = {
++ .start = ipts_hid_start,
++ .stop = ipts_hid_stop,
++ .open = ipts_hid_start,
++ .close = ipts_hid_stop,
++ .parse = ipts_hid_parse,
++ .raw_request = ipts_hid_raw_request,
++ .output_report = ipts_hid_output_report,
++};
++
++int ipts_hid_input_data(struct ipts_context *ipts, u32 buffer)
++{
++ int ret = 0;
++ u8 *temp = NULL;
++ struct ipts_hid_header *frame = NULL;
++ struct ipts_data_header *header = NULL;
++
++ if (!ipts)
++ return -EFAULT;
++
++ if (!ipts->hid)
++ return -ENODEV;
++
++ header = (struct ipts_data_header *)ipts->resources.data[buffer].address;
++
++ if (!header)
++ return -EFAULT;
++
++ if (header->size == 0)
++ return 0;
++
++ if (header->type == IPTS_DATA_TYPE_HID)
++ return hid_input_report(ipts->hid, HID_INPUT_REPORT, header->data, header->size, 1);
++
++ if (header->type == IPTS_DATA_TYPE_GET_FEATURES) {
++ ipts->feature_report.address = header->data;
++ ipts->feature_report.size = header->size;
++
++ complete_all(&ipts->feature_event);
++ return 0;
++ }
++
++ if (header->type != IPTS_DATA_TYPE_FRAME)
++ return 0;
++
++ if (header->size + 3 + sizeof(struct ipts_hid_header) > IPTS_HID_REPORT_DATA_SIZE)
++ return -ERANGE;
++
++ temp = kzalloc(IPTS_HID_REPORT_DATA_SIZE, GFP_KERNEL);
++ if (!temp)
++ return -ENOMEM;
++
++ /*
++ * Synthesize a HID report matching the devices that natively send HID reports
++ */
++ temp[0] = IPTS_HID_REPORT_DATA;
++
++ frame = (struct ipts_hid_header *)&temp[3];
++ frame->type = IPTS_HID_FRAME_TYPE_RAW;
++ frame->size = header->size + sizeof(*frame);
++
++ memcpy(frame->data, header->data, header->size);
++
++ ret = hid_input_report(ipts->hid, HID_INPUT_REPORT, temp, IPTS_HID_REPORT_DATA_SIZE, 1);
++ kfree(temp);
++
++ return ret;
++}
++
++int ipts_hid_init(struct ipts_context *ipts, struct ipts_device_info info)
++{
++ int ret = 0;
++
++ if (!ipts)
++ return -EFAULT;
++
++ if (ipts->hid)
++ return 0;
++
++ ipts->hid = hid_allocate_device();
++ if (IS_ERR(ipts->hid)) {
++ int err = PTR_ERR(ipts->hid);
++
++ dev_err(ipts->dev, "Failed to allocate HID device: %d\n", err);
++ return err;
++ }
++
++ ipts->hid->driver_data = ipts;
++ ipts->hid->dev.parent = ipts->dev;
++ ipts->hid->ll_driver = &ipts_hid_driver;
++
++ ipts->hid->vendor = info.vendor;
++ ipts->hid->product = info.product;
++ ipts->hid->group = HID_GROUP_MULTITOUCH;
++
++ snprintf(ipts->hid->name, sizeof(ipts->hid->name), "IPTS %04X:%04X", info.vendor,
++ info.product);
++
++ ret =
hid_add_device(ipts->hid); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to add HID device: %d\n", ret); ++ ipts_hid_free(ipts); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ipts_hid_free(struct ipts_context *ipts) ++{ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->hid) ++ return 0; ++ ++ hid_destroy_device(ipts->hid); ++ ipts->hid = NULL; ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/hid.h b/drivers/hid/ipts/hid.h +new file mode 100644 +index 000000000000..62bf3cd48608 +--- /dev/null ++++ b/drivers/hid/ipts/hid.h +@@ -0,0 +1,22 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_HID_H ++#define IPTS_HID_H ++ ++#include ++ ++#include "context.h" ++#include "spec-device.h" ++ ++int ipts_hid_input_data(struct ipts_context *ipts, u32 buffer); ++ ++int ipts_hid_init(struct ipts_context *ipts, struct ipts_device_info info); ++int ipts_hid_free(struct ipts_context *ipts); ++ ++#endif /* IPTS_HID_H */ +diff --git a/drivers/hid/ipts/main.c b/drivers/hid/ipts/main.c +new file mode 100644 +index 000000000000..0f20c6c08c38 +--- /dev/null ++++ b/drivers/hid/ipts/main.c +@@ -0,0 +1,127 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "control.h" ++#include "mei.h" ++#include "receiver.h" ++#include "spec-device.h" ++ ++/* ++ * The MEI client ID for IPTS functionality. ++ */ ++#define IPTS_ID UUID_LE(0x3e8d0870, 0x271a, 0x4208, 0x8e, 0xb5, 0x9a, 0xcb, 0x94, 0x02, 0xae, 0x04) ++ ++static int ipts_set_dma_mask(struct mei_cl_device *cldev) ++{ ++ if (!cldev) ++ return -EFAULT; ++ ++ if (!dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(64))) ++ return 0; ++ ++ return dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(32)); ++} ++ ++static int ipts_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) ++ return -EFAULT; ++ ++ ret = ipts_set_dma_mask(cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to set DMA mask for IPTS: %d\n", ret); ++ return ret; ++ } ++ ++ ret = mei_cldev_enable(cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to enable MEI device: %d\n", ret); ++ return ret; ++ } ++ ++ ipts = devm_kzalloc(&cldev->dev, sizeof(*ipts), GFP_KERNEL); ++ if (!ipts) { ++ mei_cldev_disable(cldev); ++ return -ENOMEM; ++ } ++ ++ ret = ipts_mei_init(&ipts->mei, cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to init MEI bus logic: %d\n", ret); ++ return ret; ++ } ++ ++ ipts->dev = &cldev->dev; ++ ipts->mode = IPTS_MODE_EVENT; ++ ++ mutex_init(&ipts->feature_lock); ++ init_completion(&ipts->feature_event); ++ ++ mei_cldev_set_drvdata(cldev, ipts); ++ ++ ret = ipts_control_start(ipts); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to start IPTS: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ipts_remove(struct mei_cl_device *cldev) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) { ++ pr_err("MEI device is NULL!"); ++ return; ++ } ++ ++ ipts = mei_cldev_get_drvdata(cldev); ++ ++ ret = ipts_control_stop(ipts); ++ if (ret) ++ dev_err(&cldev->dev, "Failed to stop IPTS: 
%d\n", ret); ++ ++ mei_cldev_disable(cldev); ++} ++ ++static struct mei_cl_device_id ipts_device_id_table[] = { ++ { .uuid = IPTS_ID, .version = MEI_CL_VERSION_ANY }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(mei, ipts_device_id_table); ++ ++static struct mei_cl_driver ipts_driver = { ++ .id_table = ipts_device_id_table, ++ .name = "ipts", ++ .probe = ipts_probe, ++ .remove = ipts_remove, ++}; ++module_mei_cl_driver(ipts_driver); ++ ++MODULE_DESCRIPTION("IPTS touchscreen driver"); ++MODULE_AUTHOR("Dorian Stoll "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hid/ipts/mei.c b/drivers/hid/ipts/mei.c +new file mode 100644 +index 000000000000..26666fd99b0c +--- /dev/null ++++ b/drivers/hid/ipts/mei.c +@@ -0,0 +1,189 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "mei.h" ++ ++static void locked_list_add(struct list_head *new, struct list_head *head, ++ struct rw_semaphore *lock) ++{ ++ down_write(lock); ++ list_add(new, head); ++ up_write(lock); ++} ++ ++static void locked_list_del(struct list_head *entry, struct rw_semaphore *lock) ++{ ++ down_write(lock); ++ list_del(entry); ++ up_write(lock); ++} ++ ++static void ipts_mei_incoming(struct mei_cl_device *cldev) ++{ ++ ssize_t ret = 0; ++ struct ipts_mei_message *entry = NULL; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) { ++ pr_err("MEI device is NULL!"); ++ return; ++ } ++ ++ ipts = mei_cldev_get_drvdata(cldev); ++ if (!ipts) { ++ pr_err("IPTS driver context is NULL!"); ++ return; ++ } ++ ++ entry = devm_kzalloc(ipts->dev, sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return; ++ ++ INIT_LIST_HEAD(&entry->list); ++ ++ do { ++ ret = mei_cldev_recv(cldev, (u8 *)&entry->rsp, sizeof(entry->rsp)); ++ } while (ret == -EINTR); ++ ++ if (ret < 0) { ++ dev_err(ipts->dev, "Error while reading response: %ld\n", ret); ++ return; ++ } ++ ++ if (ret == 0) { ++ dev_err(ipts->dev, "Received empty response\n"); ++ return; ++ } ++ ++ locked_list_add(&entry->list, &ipts->mei.messages, &ipts->mei.message_lock); ++ wake_up_all(&ipts->mei.message_queue); ++} ++ ++static int ipts_mei_search(struct ipts_mei *mei, enum ipts_command_code code, ++ struct ipts_response *rsp) ++{ ++ struct ipts_mei_message *entry = NULL; ++ ++ if (!mei) ++ return -EFAULT; ++ ++ if (!rsp) ++ return -EFAULT; ++ ++ down_read(&mei->message_lock); ++ ++ /* ++ * Iterate over the list of received messages, and check if there is one ++ * matching the requested command code. ++ */ ++ list_for_each_entry(entry, &mei->messages, list) { ++ if (entry->rsp.cmd == code) ++ break; ++ } ++ ++ up_read(&mei->message_lock); ++ ++ /* ++ * If entry is not the list head, this means that the loop above has been stopped early, ++ * and that we found a matching element. We drop the message from the list and return it. ++ */ ++ if (!list_entry_is_head(entry, &mei->messages, list)) { ++ locked_list_del(&entry->list, &mei->message_lock); ++ ++ *rsp = entry->rsp; ++ devm_kfree(&mei->cldev->dev, entry); ++ ++ return 0; ++ } ++ ++ return -EAGAIN; ++} ++ ++int ipts_mei_recv(struct ipts_mei *mei, enum ipts_command_code code, struct ipts_response *rsp, ++ u64 timeout) ++{ ++ int ret = 0; ++ ++ if (!mei) ++ return -EFAULT; ++ ++ /* ++ * A timeout of 0 means check and return immideately. 
++ */ ++ if (timeout == 0) ++ return ipts_mei_search(mei, code, rsp); ++ ++ /* ++ * A timeout of less than 0 means to wait forever. ++ */ ++ if (timeout < 0) { ++ wait_event(mei->message_queue, ipts_mei_search(mei, code, rsp) == 0); ++ return 0; ++ } ++ ++ ret = wait_event_timeout(mei->message_queue, ipts_mei_search(mei, code, rsp) == 0, ++ msecs_to_jiffies(timeout)); ++ ++ if (ret > 0) ++ return 0; ++ ++ return -EAGAIN; ++} ++ ++int ipts_mei_send(struct ipts_mei *mei, void *data, size_t length) ++{ ++ int ret = 0; ++ ++ if (!mei) ++ return -EFAULT; ++ ++ if (!mei->cldev) ++ return -EFAULT; ++ ++ if (!data) ++ return -EFAULT; ++ ++ do { ++ ret = mei_cldev_send(mei->cldev, (u8 *)data, length); ++ } while (ret == -EINTR); ++ ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++int ipts_mei_init(struct ipts_mei *mei, struct mei_cl_device *cldev) ++{ ++ if (!mei) ++ return -EFAULT; ++ ++ if (!cldev) ++ return -EFAULT; ++ ++ mei->cldev = cldev; ++ ++ INIT_LIST_HEAD(&mei->messages); ++ init_waitqueue_head(&mei->message_queue); ++ init_rwsem(&mei->message_lock); ++ ++ mei_cldev_register_rx_cb(cldev, ipts_mei_incoming); ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/mei.h b/drivers/hid/ipts/mei.h +new file mode 100644 +index 000000000000..eadacae54c40 +--- /dev/null ++++ b/drivers/hid/ipts/mei.h +@@ -0,0 +1,67 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_MEI_H ++#define IPTS_MEI_H ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "spec-device.h" ++ ++struct ipts_mei_message { ++ struct list_head list; ++ struct ipts_response rsp; ++}; ++ ++struct ipts_mei { ++ struct mei_cl_device *cldev; ++ ++ struct list_head messages; ++ ++ wait_queue_head_t message_queue; ++ struct rw_semaphore message_lock; ++}; ++ ++/* ++ * ipts_mei_recv() - Receive data from a MEI device. ++ * @mei: The IPTS MEI device context. ++ * @code: The IPTS command code to look for. ++ * @rsp: The address that the received data will be copied to. ++ * @timeout: How many milliseconds the function will wait at most. ++ * ++ * A negative timeout means to wait forever. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++int ipts_mei_recv(struct ipts_mei *mei, enum ipts_command_code code, struct ipts_response *rsp, ++ u64 timeout); ++ ++/* ++ * ipts_mei_send() - Send data to a MEI device. ++ * @ipts: The IPTS MEI device context. ++ * @data: The data to send. ++ * @size: The size of the data. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_mei_send(struct ipts_mei *mei, void *data, size_t length); ++ ++/* ++ * ipts_mei_init() - Initialize the MEI device context. ++ * @mei: The MEI device context to initialize. ++ * @cldev: The MEI device the context will be bound to. ++ * ++ * Returns: 0 on success, <0 on error. 
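++ *
++ * This also registers a receive callback that queues incoming responses,
++ * which are then consumed through ipts_mei_recv().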
++ */ ++int ipts_mei_init(struct ipts_mei *mei, struct mei_cl_device *cldev); ++ ++#endif /* IPTS_MEI_H */ +diff --git a/drivers/hid/ipts/receiver.c b/drivers/hid/ipts/receiver.c +new file mode 100644 +index 000000000000..77234f9e0e17 +--- /dev/null ++++ b/drivers/hid/ipts/receiver.c +@@ -0,0 +1,249 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "context.h" ++#include "control.h" ++#include "hid.h" ++#include "resources.h" ++#include "spec-device.h" ++#include "thread.h" ++ ++static void ipts_receiver_next_doorbell(struct ipts_context *ipts) ++{ ++ u32 *doorbell = (u32 *)ipts->resources.doorbell.address; ++ *doorbell = *doorbell + 1; ++} ++ ++static u32 ipts_receiver_current_doorbell(struct ipts_context *ipts) ++{ ++ u32 *doorbell = (u32 *)ipts->resources.doorbell.address; ++ return *doorbell; ++} ++ ++static void ipts_receiver_backoff(time64_t last, u32 n) ++{ ++ /* ++ * If the last change was less than n seconds ago, ++ * sleep for a shorter period so that new data can be ++ * processed quickly. If there was no change for more than ++ * n seconds, sleep longer to avoid wasting CPU cycles. ++ */ ++ if (last + n > ktime_get_seconds()) ++ msleep(20); ++ else ++ msleep(200); ++} ++ ++static int ipts_receiver_event_loop(struct ipts_thread *thread) ++{ ++ int ret = 0; ++ u32 buffer = 0; ++ ++ struct ipts_context *ipts = NULL; ++ time64_t last = ktime_get_seconds(); ++ ++ if (!thread) ++ return -EFAULT; ++ ++ ipts = thread->data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "IPTS running in event mode\n"); ++ ++ while (!ipts_thread_should_stop(thread)) { ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_control_wait_data(ipts, false); ++ if (ret == -EAGAIN) ++ break; ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ continue; ++ } ++ ++ buffer = ipts_receiver_current_doorbell(ipts) % IPTS_BUFFERS; ++ ipts_receiver_next_doorbell(ipts); ++ ++ ret = ipts_hid_input_data(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to process buffer: %d\n", ret); ++ ++ ret = ipts_control_refill_buffer(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send feedback: %d\n", ret); ++ ++ ret = ipts_control_request_data(ipts); ++ if (ret) ++ dev_err(ipts->dev, "Failed to request data: %d\n", ret); ++ ++ last = ktime_get_seconds(); ++ } ++ ++ ipts_receiver_backoff(last, 5); ++ } ++ ++ ret = ipts_control_request_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to request flush: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_control_wait_data(ipts, true); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ ret = ipts_control_wait_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for flush: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ return 0; ++} ++ ++static int ipts_receiver_doorbell_loop(struct ipts_thread *thread) ++{ ++ int ret = 0; ++ u32 buffer = 0; ++ ++ u32 doorbell = 0; ++ u32 lastdb = 0; ++ ++ struct ipts_context *ipts = NULL; ++ time64_t last = ktime_get_seconds(); ++ ++ if (!thread) ++ return -EFAULT; ++ ++ ipts = thread->data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "IPTS running in doorbell mode\n"); 
++ ++ while (true) { ++ if (ipts_thread_should_stop(thread)) { ++ ret = ipts_control_request_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to request flush: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ doorbell = ipts_receiver_current_doorbell(ipts); ++ ++ /* ++ * After filling up one of the data buffers, IPTS will increment ++ * the doorbell. The value of the doorbell stands for the *next* ++ * buffer that IPTS is going to fill. ++ */ ++ while (lastdb != doorbell) { ++ buffer = lastdb % IPTS_BUFFERS; ++ ++ ret = ipts_hid_input_data(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to process buffer: %d\n", ret); ++ ++ ret = ipts_control_refill_buffer(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send feedback: %d\n", ret); ++ ++ last = ktime_get_seconds(); ++ lastdb++; ++ } ++ ++ if (ipts_thread_should_stop(thread)) ++ break; ++ ++ ipts_receiver_backoff(last, 5); ++ } ++ ++ ret = ipts_control_wait_data(ipts, true); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ ret = ipts_control_wait_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for flush: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ return 0; ++} ++ ++int ipts_receiver_start(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (ipts->mode == IPTS_MODE_EVENT) { ++ ret = ipts_thread_start(&ipts->receiver_loop, ipts_receiver_event_loop, ipts, ++ "ipts_event"); ++ } else if (ipts->mode == IPTS_MODE_DOORBELL) { ++ ret = ipts_thread_start(&ipts->receiver_loop, ipts_receiver_doorbell_loop, ipts, ++ "ipts_doorbell"); ++ } else { ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to start receiver loop: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ipts_receiver_stop(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_thread_stop(&ipts->receiver_loop); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to stop receiver loop: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/receiver.h b/drivers/hid/ipts/receiver.h +new file mode 100644 +index 000000000000..96070f34fbca +--- /dev/null ++++ b/drivers/hid/ipts/receiver.h +@@ -0,0 +1,17 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_RECEIVER_H ++#define IPTS_RECEIVER_H ++ ++#include "context.h" ++ ++int ipts_receiver_start(struct ipts_context *ipts); ++int ipts_receiver_stop(struct ipts_context *ipts); ++ ++#endif /* IPTS_RECEIVER_H */ +diff --git a/drivers/hid/ipts/resources.c b/drivers/hid/ipts/resources.c +new file mode 100644 +index 000000000000..80ba5885bb55 +--- /dev/null ++++ b/drivers/hid/ipts/resources.c +@@ -0,0 +1,108 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++ ++#include "resources.h" ++#include "spec-device.h" ++ ++static int ipts_resources_alloc_buffer(struct ipts_buffer *buffer, struct device *dev, size_t size) ++{ ++ if (!buffer) ++ return -EFAULT; ++ ++ if (buffer->address) ++ return 0; ++ ++ buffer->address = dma_alloc_coherent(dev, size, &buffer->dma_address, GFP_KERNEL); ++ ++ if 
(!buffer->address) ++ return -ENOMEM; ++ ++ buffer->size = size; ++ buffer->device = dev; ++ ++ return 0; ++} ++ ++static void ipts_resources_free_buffer(struct ipts_buffer *buffer) ++{ ++ if (!buffer->address) ++ return; ++ ++ dma_free_coherent(buffer->device, buffer->size, buffer->address, buffer->dma_address); ++ ++ buffer->address = NULL; ++ buffer->size = 0; ++ ++ buffer->dma_address = 0; ++ buffer->device = NULL; ++} ++ ++int ipts_resources_init(struct ipts_resources *res, struct device *dev, size_t ds, size_t fs) ++{ ++ int ret = 0; ++ ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_resources_alloc_buffer(&res->data[i], dev, ds); ++ if (ret) ++ goto err; ++ } ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_resources_alloc_buffer(&res->feedback[i], dev, fs); ++ if (ret) ++ goto err; ++ } ++ ++ ret = ipts_resources_alloc_buffer(&res->doorbell, dev, sizeof(u32)); ++ if (ret) ++ goto err; ++ ++ ret = ipts_resources_alloc_buffer(&res->workqueue, dev, sizeof(u32)); ++ if (ret) ++ goto err; ++ ++ ret = ipts_resources_alloc_buffer(&res->hid2me, dev, fs); ++ if (ret) ++ goto err; ++ ++ ret = ipts_resources_alloc_buffer(&res->descriptor, dev, ds + 8); ++ if (ret) ++ goto err; ++ ++ return 0; ++ ++err: ++ ++ ipts_resources_free(res); ++ return ret; ++} ++ ++int ipts_resources_free(struct ipts_resources *res) ++{ ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) ++ ipts_resources_free_buffer(&res->data[i]); ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) ++ ipts_resources_free_buffer(&res->feedback[i]); ++ ++ ipts_resources_free_buffer(&res->doorbell); ++ ipts_resources_free_buffer(&res->workqueue); ++ ipts_resources_free_buffer(&res->hid2me); ++ ipts_resources_free_buffer(&res->descriptor); ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/resources.h b/drivers/hid/ipts/resources.h +new file mode 100644 +index 000000000000..6cbb24a8a054 +--- /dev/null ++++ b/drivers/hid/ipts/resources.h +@@ -0,0 +1,39 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_RESOURCES_H ++#define IPTS_RESOURCES_H ++ ++#include ++#include ++ ++#include "spec-device.h" ++ ++struct ipts_buffer { ++ u8 *address; ++ size_t size; ++ ++ dma_addr_t dma_address; ++ struct device *device; ++}; ++ ++struct ipts_resources { ++ struct ipts_buffer data[IPTS_BUFFERS]; ++ struct ipts_buffer feedback[IPTS_BUFFERS]; ++ ++ struct ipts_buffer doorbell; ++ struct ipts_buffer workqueue; ++ struct ipts_buffer hid2me; ++ ++ struct ipts_buffer descriptor; ++}; ++ ++int ipts_resources_init(struct ipts_resources *res, struct device *dev, size_t ds, size_t fs); ++int ipts_resources_free(struct ipts_resources *res); ++ ++#endif /* IPTS_RESOURCES_H */ +diff --git a/drivers/hid/ipts/spec-data.h b/drivers/hid/ipts/spec-data.h +new file mode 100644 +index 000000000000..e8dd98895a7e +--- /dev/null ++++ b/drivers/hid/ipts/spec-data.h +@@ -0,0 +1,100 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_SPEC_DATA_H ++#define IPTS_SPEC_DATA_H ++ ++#include ++#include ++ ++/** ++ * enum ipts_feedback_cmd_type - Commands that can be executed on the sensor through feedback. 
++ */ ++enum ipts_feedback_cmd_type { ++ IPTS_FEEDBACK_CMD_TYPE_NONE = 0, ++ IPTS_FEEDBACK_CMD_TYPE_SOFT_RESET = 1, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_ARMED = 2, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_SENSING = 3, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_SLEEP = 4, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_DOZE = 5, ++ IPTS_FEEDBACK_CMD_TYPE_HARD_RESET = 6, ++}; ++ ++/** ++ * enum ipts_feedback_data_type - Defines what data a feedback buffer contains. ++ * @IPTS_FEEDBACK_DATA_TYPE_VENDOR: The buffer contains vendor specific feedback. ++ * @IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES: The buffer contains a HID set features report. ++ * @IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES: The buffer contains a HID get features report. ++ * @IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT: The buffer contains a HID output report. ++ * @IPTS_FEEDBACK_DATA_TYPE_STORE_DATA: The buffer contains calibration data for the sensor. ++ */ ++enum ipts_feedback_data_type { ++ IPTS_FEEDBACK_DATA_TYPE_VENDOR = 0, ++ IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES = 1, ++ IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES = 2, ++ IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT = 3, ++ IPTS_FEEDBACK_DATA_TYPE_STORE_DATA = 4, ++}; ++ ++/** ++ * struct ipts_feedback_header - Header that is prefixed to the data in a feedback buffer. ++ * @cmd_type: A command that should be executed on the sensor. ++ * @size: The size of the payload to be written. ++ * @buffer: The ID of the buffer that contains this feedback data. ++ * @protocol: The protocol version of the EDS. ++ * @data_type: The type of data that the buffer contains. ++ * @spi_offset: The offset at which to write the payload data to the sensor. ++ * @payload: Payload for the feedback command, or 0 if no payload is sent. ++ */ ++struct ipts_feedback_header { ++ enum ipts_feedback_cmd_type cmd_type; ++ u32 size; ++ u32 buffer; ++ u32 protocol; ++ enum ipts_feedback_data_type data_type; ++ u32 spi_offset; ++ u8 reserved[40]; ++ u8 payload[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_feedback_header) == 64); ++ ++/** ++ * enum ipts_data_type - Defines what type of data a buffer contains. ++ * @IPTS_DATA_TYPE_FRAME: Raw data frame. ++ * @IPTS_DATA_TYPE_ERROR: Error data. ++ * @IPTS_DATA_TYPE_VENDOR: Vendor specific data. ++ * @IPTS_DATA_TYPE_HID: A HID report. ++ * @IPTS_DATA_TYPE_GET_FEATURES: The response to a GET_FEATURES HID2ME command. ++ */ ++enum ipts_data_type { ++ IPTS_DATA_TYPE_FRAME = 0x00, ++ IPTS_DATA_TYPE_ERROR = 0x01, ++ IPTS_DATA_TYPE_VENDOR = 0x02, ++ IPTS_DATA_TYPE_HID = 0x03, ++ IPTS_DATA_TYPE_GET_FEATURES = 0x04, ++ IPTS_DATA_TYPE_DESCRIPTOR = 0x05, ++}; ++ ++/** ++ * struct ipts_data_header - Header that is prefixed to the data in a data buffer. ++ * @type: What data the buffer contains. ++ * @size: How much data the buffer contains. ++ * @buffer: Which buffer the data is in. 
++ */
++struct ipts_data_header {
++	enum ipts_data_type type;
++	u32 size;
++	u32 buffer;
++	u8 reserved[52];
++	u8 data[];
++} __packed;
++
++static_assert(sizeof(struct ipts_data_header) == 64);
++
++#endif /* IPTS_SPEC_DATA_H */
+diff --git a/drivers/hid/ipts/spec-device.h b/drivers/hid/ipts/spec-device.h
+new file mode 100644
+index 000000000000..93f673d981f7
+--- /dev/null
++++ b/drivers/hid/ipts/spec-device.h
+@@ -0,0 +1,285 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020-2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef IPTS_SPEC_DEVICE_H
++#define IPTS_SPEC_DEVICE_H
++
++#include <linux/build_bug.h>
++#include <linux/types.h>
++
++/*
++ * The number of buffers that IPTS can use for data transfer.
++ */
++#define IPTS_BUFFERS 16
++
++/*
++ * The buffer ID that is used for HID2ME feedback.
++ */
++#define IPTS_HID2ME_BUFFER IPTS_BUFFERS
++
++/**
++ * enum ipts_command_code - Commands that can be sent to the IPTS hardware.
++ * @IPTS_CMD_GET_DEVICE_INFO: Retrieves vendor information from the device.
++ * @IPTS_CMD_SET_MODE: Changes the mode that the device will operate in.
++ * @IPTS_CMD_SET_MEM_WINDOW: Configures memory buffers for passing data between device and driver.
++ * @IPTS_CMD_QUIESCE_IO: Stops the data flow from the device to the driver.
++ * @IPTS_CMD_READY_FOR_DATA: Informs the device that the driver is ready to receive data.
++ * @IPTS_CMD_FEEDBACK: Informs the device that a buffer was processed and can be refilled.
++ * @IPTS_CMD_CLEAR_MEM_WINDOW: Stops the data flow and clears the buffer addresses on the device.
++ * @IPTS_CMD_RESET_SENSOR: Resets the sensor to its default state.
++ * @IPTS_CMD_GET_DESCRIPTOR: Retrieves the HID descriptor of the device.
++ */
++enum ipts_command_code {
++	IPTS_CMD_GET_DEVICE_INFO = 0x01,
++	IPTS_CMD_SET_MODE = 0x02,
++	IPTS_CMD_SET_MEM_WINDOW = 0x03,
++	IPTS_CMD_QUIESCE_IO = 0x04,
++	IPTS_CMD_READY_FOR_DATA = 0x05,
++	IPTS_CMD_FEEDBACK = 0x06,
++	IPTS_CMD_CLEAR_MEM_WINDOW = 0x07,
++	IPTS_CMD_RESET_SENSOR = 0x0B,
++	IPTS_CMD_GET_DESCRIPTOR = 0x0F,
++};
++
++/**
++ * enum ipts_status - Possible status codes returned by the IPTS device.
++ * @IPTS_STATUS_SUCCESS: Operation completed successfully.
++ * @IPTS_STATUS_INVALID_PARAMS: Command contained an invalid payload.
++ * @IPTS_STATUS_ACCESS_DENIED: ME could not validate a buffer address.
++ * @IPTS_STATUS_CMD_SIZE_ERROR: Command payload has an invalid size.
++ * @IPTS_STATUS_NOT_READY: Buffer addresses have not been set.
++ * @IPTS_STATUS_REQUEST_OUTSTANDING: There is an outstanding command of the same type.
++ * @IPTS_STATUS_NO_SENSOR_FOUND: No sensor could be found.
++ * @IPTS_STATUS_OUT_OF_MEMORY: Not enough free memory for requested operation.
++ * @IPTS_STATUS_INTERNAL_ERROR: An unexpected error occurred.
++ * @IPTS_STATUS_SENSOR_DISABLED: The sensor has been disabled and must be reinitialized.
++ * @IPTS_STATUS_COMPAT_CHECK_FAIL: Compatibility revision check between sensor and ME failed.
++ *                                The host can ignore this error and attempt to continue.
++ * @IPTS_STATUS_SENSOR_EXPECTED_RESET: The sensor went through a reset initiated by the driver.
++ * @IPTS_STATUS_SENSOR_UNEXPECTED_RESET: The sensor went through an unexpected reset.
++ * @IPTS_STATUS_RESET_FAILED: Requested sensor reset failed to complete.
++ * @IPTS_STATUS_TIMEOUT: The operation timed out.
++ * @IPTS_STATUS_TEST_MODE_FAIL: Test mode pattern did not match expected values.
++ * @IPTS_STATUS_SENSOR_FAIL_FATAL: The sensor reported an error during the reset sequence.
++ *                                 Further progress is not possible.
++ * @IPTS_STATUS_SENSOR_FAIL_NONFATAL: The sensor reported an error during the reset sequence.
++ *                                    The driver can attempt to continue.
++ * @IPTS_STATUS_INVALID_DEVICE_CAPS: The device reported invalid capabilities.
++ * @IPTS_STATUS_QUIESCE_IO_IN_PROGRESS: Command cannot be completed until Quiesce IO is done.
++ */
++enum ipts_status {
++	IPTS_STATUS_SUCCESS = 0x00,
++	IPTS_STATUS_INVALID_PARAMS = 0x01,
++	IPTS_STATUS_ACCESS_DENIED = 0x02,
++	IPTS_STATUS_CMD_SIZE_ERROR = 0x03,
++	IPTS_STATUS_NOT_READY = 0x04,
++	IPTS_STATUS_REQUEST_OUTSTANDING = 0x05,
++	IPTS_STATUS_NO_SENSOR_FOUND = 0x06,
++	IPTS_STATUS_OUT_OF_MEMORY = 0x07,
++	IPTS_STATUS_INTERNAL_ERROR = 0x08,
++	IPTS_STATUS_SENSOR_DISABLED = 0x09,
++	IPTS_STATUS_COMPAT_CHECK_FAIL = 0x0A,
++	IPTS_STATUS_SENSOR_EXPECTED_RESET = 0x0B,
++	IPTS_STATUS_SENSOR_UNEXPECTED_RESET = 0x0C,
++	IPTS_STATUS_RESET_FAILED = 0x0D,
++	IPTS_STATUS_TIMEOUT = 0x0E,
++	IPTS_STATUS_TEST_MODE_FAIL = 0x0F,
++	IPTS_STATUS_SENSOR_FAIL_FATAL = 0x10,
++	IPTS_STATUS_SENSOR_FAIL_NONFATAL = 0x11,
++	IPTS_STATUS_INVALID_DEVICE_CAPS = 0x12,
++	IPTS_STATUS_QUIESCE_IO_IN_PROGRESS = 0x13,
++};
++
++/**
++ * struct ipts_command - Message that is sent to the device for calling a command.
++ * @cmd: The command that will be called.
++ * @payload: Payload containing parameters for the called command.
++ */
++struct ipts_command {
++	enum ipts_command_code cmd;
++	u8 payload[320];
++} __packed;
++
++static_assert(sizeof(struct ipts_command) == 324);
++
++/**
++ * enum ipts_mode - Configures what data the device produces and how it is sent.
++ * @IPTS_MODE_EVENT: The device will send an event once a buffer was filled.
++ *                   Older devices will return singletouch data in this mode.
++ * @IPTS_MODE_DOORBELL: The device will notify the driver by incrementing the doorbell value.
++ *                      Older devices will return multitouch data in this mode.
++ */
++enum ipts_mode {
++	IPTS_MODE_EVENT = 0x00,
++	IPTS_MODE_DOORBELL = 0x01,
++};
++
++/**
++ * struct ipts_set_mode - Payload for the SET_MODE command.
++ * @mode: Changes the mode that IPTS will operate in.
++ */
++struct ipts_set_mode {
++	enum ipts_mode mode;
++	u8 reserved[12];
++} __packed;
++
++static_assert(sizeof(struct ipts_set_mode) == 16);
++
++#define IPTS_WORKQUEUE_SIZE 8192
++#define IPTS_WORKQUEUE_ITEM_SIZE 16
++
++/**
++ * struct ipts_mem_window - Payload for the SET_MEM_WINDOW command.
++ * @data_addr_lower: Lower 32 bits of the data buffer addresses.
++ * @data_addr_upper: Upper 32 bits of the data buffer addresses.
++ * @workqueue_addr_lower: Lower 32 bits of the workqueue buffer address.
++ * @workqueue_addr_upper: Upper 32 bits of the workqueue buffer address.
++ * @doorbell_addr_lower: Lower 32 bits of the doorbell buffer address.
++ * @doorbell_addr_upper: Upper 32 bits of the doorbell buffer address.
++ * @feedback_addr_lower: Lower 32 bits of the feedback buffer addresses.
++ * @feedback_addr_upper: Upper 32 bits of the feedback buffer addresses.
++ * @hid2me_addr_lower: Lower 32 bits of the hid2me buffer address.
++ * @hid2me_addr_upper: Upper 32 bits of the hid2me buffer address.
++ * @hid2me_size: Size of the hid2me feedback buffer.
++ * @workqueue_item_size: Magic value. Must be 16.
++ * @workqueue_size: Magic value. Must be 8192.
++ *
++ * The workqueue-related items in this struct are required for using
++ * GuC submission with binary processing firmware. Since this driver does
++ * not use GuC submission and instead exports raw data to userspace, these
++ * items are not actually used, but they need to be allocated and passed
++ * to the device, otherwise initialization will fail.
++ */
++struct ipts_mem_window {
++	u32 data_addr_lower[IPTS_BUFFERS];
++	u32 data_addr_upper[IPTS_BUFFERS];
++	u32 workqueue_addr_lower;
++	u32 workqueue_addr_upper;
++	u32 doorbell_addr_lower;
++	u32 doorbell_addr_upper;
++	u32 feedback_addr_lower[IPTS_BUFFERS];
++	u32 feedback_addr_upper[IPTS_BUFFERS];
++	u32 hid2me_addr_lower;
++	u32 hid2me_addr_upper;
++	u32 hid2me_size;
++	u8 reserved1;
++	u8 workqueue_item_size;
++	u16 workqueue_size;
++	u8 reserved[32];
++} __packed;
++
++static_assert(sizeof(struct ipts_mem_window) == 320);
++
++/**
++ * struct ipts_quiesce_io - Payload for the QUIESCE_IO command.
++ */
++struct ipts_quiesce_io {
++	u8 reserved[12];
++} __packed;
++
++static_assert(sizeof(struct ipts_quiesce_io) == 12);
++
++/**
++ * struct ipts_feedback - Payload for the FEEDBACK command.
++ * @buffer: The buffer that the device should refill.
++ */
++struct ipts_feedback {
++	u32 buffer;
++	u8 reserved[12];
++} __packed;
++
++static_assert(sizeof(struct ipts_feedback) == 16);
++
++/**
++ * enum ipts_reset_type - Possible ways of resetting the device.
++ * @IPTS_RESET_TYPE_HARD: Perform hardware reset using GPIO pin.
++ * @IPTS_RESET_TYPE_SOFT: Perform software reset using SPI command.
++ */
++enum ipts_reset_type {
++	IPTS_RESET_TYPE_HARD = 0x00,
++	IPTS_RESET_TYPE_SOFT = 0x01,
++};
++
++/**
++ * struct ipts_reset_sensor - Payload for the RESET_SENSOR command.
++ * @type: How the device should get reset.
++ */
++struct ipts_reset_sensor {
++	enum ipts_reset_type type;
++	u8 reserved[4];
++} __packed;
++
++static_assert(sizeof(struct ipts_reset_sensor) == 8);
++
++/**
++ * struct ipts_get_descriptor - Payload for the GET_DESCRIPTOR command.
++ * @addr_lower: The lower 32 bits of the descriptor buffer address.
++ * @addr_upper: The upper 32 bits of the descriptor buffer address.
++ * @magic: A magic value. Must be 8.
++ */
++struct ipts_get_descriptor {
++	u32 addr_lower;
++	u32 addr_upper;
++	u32 magic;
++	u8 reserved[12];
++} __packed;
++
++static_assert(sizeof(struct ipts_get_descriptor) == 24);
++
++/*
++ * The type of a response is indicated by its command code,
++ * with the most significant bit flipped to 1.
++ */
++#define IPTS_RSP_BIT BIT(31)
++
++/**
++ * struct ipts_response - Data returned from the device in response to a command.
++ * @cmd: The command that this response answers (IPTS_RSP_BIT will be 1).
++ * @status: The return code of the command.
++ * @payload: The data that was produced by the command.
++ */
++struct ipts_response {
++	enum ipts_command_code cmd;
++	enum ipts_status status;
++	u8 payload[80];
++} __packed;
++
++static_assert(sizeof(struct ipts_response) == 88);
++
++/**
++ * struct ipts_device_info - Vendor information of the IPTS device.
++ * @vendor: Vendor ID of this device.
++ * @product: Product ID of this device.
++ * @hw_version: Hardware revision of this device.
++ * @fw_version: Firmware revision of this device.
++ * @data_size: Requested size for a data buffer.
++ * @feedback_size: Requested size for a feedback buffer.
++ * @mode: Mode that the device currently operates in.
++ * @max_contacts: Maximum number of concurrent touches the sensor can process.
++ */
++struct ipts_device_info {
++	u16 vendor;
++	u16 product;
++	u32 hw_version;
++	u32 fw_version;
++	u32 data_size;
++	u32 feedback_size;
++	enum ipts_mode mode;
++	u8 max_contacts;
++	u8 reserved1[3];
++	u8 sensor_min_eds;
++	u8 sensor_maj_eds;
++	u8 me_min_eds;
++	u8 me_maj_eds;
++	u8 intf_eds;
++	u8 reserved2[11];
++} __packed;
++
++static_assert(sizeof(struct ipts_device_info) == 44);
++
++#endif /* IPTS_SPEC_DEVICE_H */
+diff --git a/drivers/hid/ipts/spec-hid.h b/drivers/hid/ipts/spec-hid.h
+new file mode 100644
+index 000000000000..ea70f29ff00c
+--- /dev/null
++++ b/drivers/hid/ipts/spec-hid.h
+@@ -0,0 +1,35 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020-2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef IPTS_SPEC_HID_H
++#define IPTS_SPEC_HID_H
++
++#include <linux/build_bug.h>
++#include <linux/types.h>
++
++/*
++ * Made-up type for passing raw IPTS data in a HID report.
++ */
++#define IPTS_HID_FRAME_TYPE_RAW 0xEE
++
++/**
++ * struct ipts_hid_header - Header that is prefixed to raw IPTS data wrapped in a HID report.
++ * @size: Size of the data inside the report, including this header.
++ * @type: What type of data this report contains.
++ */
++struct ipts_hid_header {
++	u32 size;
++	u8 reserved1;
++	u8 type;
++	u8 reserved2;
++	u8 data[];
++} __packed;
++
++static_assert(sizeof(struct ipts_hid_header) == 7);
++
++#endif /* IPTS_SPEC_HID_H */
+diff --git a/drivers/hid/ipts/thread.c b/drivers/hid/ipts/thread.c
+new file mode 100644
+index 000000000000..8b46f775c107
+--- /dev/null
++++ b/drivers/hid/ipts/thread.c
+@@ -0,0 +1,85 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#include <linux/completion.h>
++#include <linux/err.h>
++#include <linux/kthread.h>
++#include <linux/types.h>
++
++#include "thread.h"
++
++bool ipts_thread_should_stop(struct ipts_thread *thread)
++{
++	if (!thread)
++		return false;
++
++	return READ_ONCE(thread->should_stop);
++}
++
++static int ipts_thread_runner(void *data)
++{
++	int ret = 0;
++	struct ipts_thread *thread = data;
++
++	if (!thread)
++		return -EFAULT;
++
++	if (!thread->threadfn)
++		return -EFAULT;
++
++	ret = thread->threadfn(thread);
++	complete_all(&thread->done);
++
++	return ret;
++}
++
++int ipts_thread_start(struct ipts_thread *thread, int (*threadfn)(struct ipts_thread *thread),
++		      void *data, const char *name)
++{
++	if (!thread)
++		return -EFAULT;
++
++	if (!threadfn)
++		return -EFAULT;
++
++	init_completion(&thread->done);
++
++	thread->data = data;
++	thread->should_stop = false;
++	thread->threadfn = threadfn;
++
++	thread->thread = kthread_run(ipts_thread_runner, thread, name);
++	return PTR_ERR_OR_ZERO(thread->thread);
++}
++
++int ipts_thread_stop(struct ipts_thread *thread)
++{
++	int ret = 0;
++
++	if (!thread)
++		return -EFAULT;
++
++	if (!thread->thread)
++		return 0;
++
++	WRITE_ONCE(thread->should_stop, true);
++
++	/*
++	 * Make sure that the write has gone through before waiting.
++	 */
++	wmb();
++
++	wait_for_completion(&thread->done);
++	ret = kthread_stop(thread->thread);
++
++	thread->thread = NULL;
++	thread->data = NULL;
++	thread->threadfn = NULL;
++
++	return ret;
++}
+diff --git a/drivers/hid/ipts/thread.h b/drivers/hid/ipts/thread.h
+new file mode 100644
+index 000000000000..a314843599fc
+--- /dev/null
++++ b/drivers/hid/ipts/thread.h
+@@ -0,0 +1,60 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef IPTS_THREAD_H
++#define IPTS_THREAD_H
++
++#include <linux/completion.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++
++/*
++ * This wrapper over kthread is necessary, because calling kthread_stop makes it impossible
++ * to issue MEI commands from that thread while it shuts itself down. By using a custom
++ * boolean variable and a completion object, we can call kthread_stop only when the thread
++ * already finished all of its work and has returned.
++ */
++struct ipts_thread {
++	struct task_struct *thread;
++
++	bool should_stop;
++	struct completion done;
++
++	void *data;
++	int (*threadfn)(struct ipts_thread *thread);
++};
++
++/*
++ * ipts_thread_should_stop() - Returns true if the thread is asked to terminate.
++ * @thread: The current thread.
++ *
++ * Returns: true if the thread should stop, false if not.
++ */
++bool ipts_thread_should_stop(struct ipts_thread *thread);
++
++/*
++ * ipts_thread_start() - Starts an IPTS thread.
++ * @thread: The thread to initialize and start.
++ * @threadfn: The function to execute.
++ * @data: An argument that will be passed to threadfn.
++ * @name: The name of the new thread.
++ *
++ * Returns: 0 on success, <0 on error.
++ */
++int ipts_thread_start(struct ipts_thread *thread, int (*threadfn)(struct ipts_thread *thread),
++		      void *data, const char *name);
++
++/*
++ * ipts_thread_stop() - Asks the thread to terminate and waits until it has finished.
++ * @thread: The thread that should stop.
++ *
++ * Returns: The return value of the thread function.
++ */
++int ipts_thread_stop(struct ipts_thread *thread);
++
++#endif /* IPTS_THREAD_H */
+diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
+new file mode 100644
+index 000000000000..aea83f2ac07b
+--- /dev/null
++++ b/drivers/hid/ithc/Kbuild
+@@ -0,0 +1,6 @@
++obj-$(CONFIG_HID_ITHC) := ithc.o
++
++ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
++
++ccflags-y := -std=gnu11 -Wno-declaration-after-statement
++
+diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
+new file mode 100644
+index 000000000000..ede713023609
+--- /dev/null
++++ b/drivers/hid/ithc/Kconfig
+@@ -0,0 +1,12 @@
++config HID_ITHC
++	tristate "Intel Touch Host Controller"
++	depends on PCI
++	depends on HID
++	help
++	  Say Y here if your system has a touchscreen using Intel's
++	  Touch Host Controller (ITHC / IPTS) technology.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called ithc.
++
++	  If unsure, say N.
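As an aside on the ipts_thread wrapper added above, the sketch below shows how it is meant to be driven. Only the ipts_thread_* API comes from this patch; the worker function, its context argument, and the "ipts_example" thread name are made up for illustration.

#include <linux/delay.h>

#include "thread.h"

/* Hypothetical worker: runs until ipts_thread_stop() flips should_stop. */
static int example_worker(struct ipts_thread *thread)
{
	while (!ipts_thread_should_stop(thread)) {
		/* ... poll the doorbell, process filled buffers ... */
		msleep(100);
	}

	/*
	 * Returning completes thread->done. ipts_thread_stop() waits for that
	 * completion before calling kthread_stop(), so the worker can still
	 * issue MEI commands while it winds down.
	 */
	return 0;
}

/* Hypothetical callers, e.g. from probe and remove paths: */
static int example_start(struct ipts_thread *t, void *ctx)
{
	return ipts_thread_start(t, example_worker, ctx, "ipts_example");
}

static int example_stop(struct ipts_thread *t)
{
	return ipts_thread_stop(t);
}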
+diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c +new file mode 100644 +index 000000000000..57bf125c45bd +--- /dev/null ++++ b/drivers/hid/ithc/ithc-debug.c +@@ -0,0 +1,96 @@ ++#include "ithc.h" ++ ++void ithc_log_regs(struct ithc *ithc) { ++ if (!ithc->prev_regs) return; ++ u32 __iomem *cur = (__iomem void*)ithc->regs; ++ u32 *prev = (void*)ithc->prev_regs; ++ for (int i = 1024; i < sizeof *ithc->regs / 4; i++) { ++ u32 x = readl(cur + i); ++ if (x != prev[i]) { ++ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x); ++ prev[i] = x; ++ } ++ } ++} ++ ++static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) { ++ struct ithc *ithc = file_inode(f)->i_private; ++ char cmd[256]; ++ if (!ithc || !ithc->pci) return -ENODEV; ++ if (!len) return -EINVAL; ++ if (len >= sizeof cmd) return -EINVAL; ++ if (copy_from_user(cmd, buf, len)) return -EFAULT; ++ cmd[len] = 0; ++ if (cmd[len-1] == '\n') cmd[len-1] = 0; ++ pci_info(ithc->pci, "debug command: %s\n", cmd); ++ u32 n = 0; ++ const char *s = cmd + 1; ++ u32 a[32]; ++ while (*s && *s != '\n') { ++ if (n >= ARRAY_SIZE(a)) return -EINVAL; ++ if (*s++ != ' ') return -EINVAL; ++ char *e; ++ a[n++] = simple_strtoul(s, &e, 0); ++ if (e == s) return -EINVAL; ++ s = e; ++ } ++ ithc_log_regs(ithc); ++ switch(cmd[0]) { ++ case 'x': // reset ++ ithc_reset(ithc); ++ break; ++ case 'w': // write register: offset mask value ++ if (n != 3 || (a[0] & 3)) return -EINVAL; ++ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]); ++ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]); ++ break; ++ case 'r': // read register: offset ++ if (n != 1 || (a[0] & 3)) return -EINVAL; ++ pci_info(ithc->pci, "debug read 0x%04x = 0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4)); ++ break; ++ case 's': // spi command: cmd offset len data... ++ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ++ // set touch cfg: s 6 12 4 XX ++ if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL; ++ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]); ++ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3)) ++ for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]); ++ break; ++ case 'd': // dma command: cmd len data... 
++ // get report descriptor: d 7 8 0 0 ++ // enable multitouch: d 3 2 0x0105 ++ if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL; ++ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]); ++ if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n"); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ithc_log_regs(ithc); ++ return len; ++} ++ ++static const struct file_operations ithc_debugfops_cmd = { ++ .owner = THIS_MODULE, ++ .write = ithc_debugfs_cmd_write, ++}; ++ ++static void ithc_debugfs_devres_release(struct device *dev, void *res) { ++ struct dentry **dbgm = res; ++ if (*dbgm) debugfs_remove_recursive(*dbgm); ++} ++ ++int ithc_debug_init(struct ithc *ithc) { ++ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL); ++ if (!dbgm) return -ENOMEM; ++ devres_add(&ithc->pci->dev, dbgm); ++ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL); ++ if (IS_ERR(dbg)) return PTR_ERR(dbg); ++ *dbgm = dbg; ++ ++ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd); ++ if (IS_ERR(cmd)) return PTR_ERR(cmd); ++ ++ return 0; ++} ++ +diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c +new file mode 100644 +index 000000000000..7e89b3496918 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-dma.c +@@ -0,0 +1,258 @@ ++#include "ithc.h" ++ ++static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) { ++ p->num_pages = num_pages; ++ p->dir = dir; ++ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE); ++ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL); ++ if (!p->addr) return -ENOMEM; ++ if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT; ++ return 0; ++} ++ ++struct ithc_sg_table { ++ void *addr; ++ struct sg_table sgt; ++ enum dma_data_direction dir; ++}; ++static void ithc_dma_sgtable_free(struct sg_table *sgt) { ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_sg(sgt, sg, i) { ++ struct page *p = sg_page(sg); ++ if (p) __free_page(p); ++ } ++ sg_free_table(sgt); ++} ++static void ithc_dma_data_devres_release(struct device *dev, void *res) { ++ struct ithc_sg_table *sgt = res; ++ if (sgt->addr) vunmap(sgt->addr); ++ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0); ++ ithc_dma_sgtable_free(&sgt->sgt); ++} ++ ++static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) { ++ // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional). ++ // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now). 
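++	// (For reference: each PRD entry written for these buffers stores a DMA
++	// address in 1 KiB units (addr >> 10) plus a byte count; PRD_FLAG_END
++	// marks the last PRD of a buffer. See struct ithc_phys_region_desc in
++	// ithc-dma.h.)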
++ struct page *pages[16]; ++ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL; ++ b->active_idx = -1; ++ struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL); ++ if (!sgt) return -ENOMEM; ++ sgt->dir = prds->dir; ++ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) { ++ struct scatterlist *sg; ++ int i; ++ bool ok = true; ++ for_each_sgtable_sg(&sgt->sgt, sg, i) { ++ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA ++ if (!p) { ok = false; break; } ++ sg_set_page(sg, p, PAGE_SIZE, 0); ++ } ++ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) { ++ devres_add(&ithc->pci->dev, sgt); ++ b->sgt = &sgt->sgt; ++ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL); ++ if (!b->addr) return -ENOMEM; ++ return 0; ++ } ++ ithc_dma_sgtable_free(&sgt->sgt); ++ } ++ devres_free(sgt); ++ return -ENOMEM; ++} ++ ++static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { ++ struct ithc_phys_region_desc *prd = prds->addr; ++ prd += idx * prds->num_pages; ++ if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; } ++ b->active_idx = idx; ++ if (prds->dir == DMA_TO_DEVICE) { ++ if (b->data_size > PAGE_SIZE) return -EINVAL; ++ prd->addr = sg_dma_address(b->sgt->sgl) >> 10; ++ prd->size = b->data_size | PRD_FLAG_END; ++ flush_kernel_vmap_range(b->addr, b->data_size); ++ } else if (prds->dir == DMA_FROM_DEVICE) { ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_dma_sg(b->sgt, sg, i) { ++ prd->addr = sg_dma_address(sg) >> 10; ++ prd->size = sg_dma_len(sg); ++ prd++; ++ } ++ prd[-1].size |= PRD_FLAG_END; ++ } ++ dma_wmb(); // for the prds ++ dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir); ++ return 0; ++} ++ ++static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { ++ struct ithc_phys_region_desc *prd = prds->addr; ++ prd += idx * prds->num_pages; ++ if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; } ++ b->active_idx = -1; ++ if (prds->dir == DMA_FROM_DEVICE) { ++ dma_rmb(); // for the prds ++ b->data_size = 0; ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_dma_sg(b->sgt, sg, i) { ++ unsigned size = prd->size; ++ b->data_size += size & PRD_SIZE_MASK; ++ if (size & PRD_FLAG_END) break; ++ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; } ++ prd++; ++ } ++ invalidate_kernel_vmap_range(b->addr, b->data_size); ++ } ++ dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir); ++ return 0; ++} ++ ++int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ mutex_init(&rx->mutex); ++ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes); ++ unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE; ++ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages); ++ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE); ++ for (unsigned i = 0; i < NUM_RX_BUF; i++) ++ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]); ++ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2); ++ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr); ++ 
writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs); ++ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds); ++ u8 head = readb(&ithc->regs->dma_rx[channel].head); ++ if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; } ++ for (unsigned i = 0; i < NUM_RX_BUF; i++) ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i); ++ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail); ++ return 0; ++} ++void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) { ++ bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA); ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED); ++} ++ ++int ithc_dma_tx_init(struct ithc *ithc) { ++ struct ithc_dma_tx *tx = &ithc->dma_tx; ++ mutex_init(&tx->mutex); ++ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes); ++ unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE; ++ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages); ++ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE); ++ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf); ++ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr); ++ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds); ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ return 0; ++} ++ ++static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) { ++ if (buf >= NUM_RX_BUF) { ++ pci_err(ithc->pci, "invalid dma ringbuffer index\n"); ++ return -EINVAL; ++ } ++ ithc_set_active(ithc); ++ u32 len = data->data_size; ++ struct ithc_dma_rx_header *hdr = data->addr; ++ u8 *hiddata = (void *)(hdr + 1); ++ if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) { ++ CHECK(ithc_reset, ithc); ++ } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) { ++ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) { ++ // When the CPU enters a low power state during DMA, we can get truncated messages. ++ // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes. ++ // See also ithc_set_active(). ++ } else { ++ pci_err(ithc->pci, "invalid dma rx data! 
channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size); ++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); ++ } ++ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) { ++ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8); ++ WRITE_ONCE(ithc->hid_parse_done, true); ++ wake_up(&ithc->wait_hid_parse); ++ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) { ++ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1); ++ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) { ++ bool done = false; ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ if (ithc->hid_get_feature_buf) { ++ if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size; ++ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size); ++ ithc->hid_get_feature_buf = NULL; ++ done = true; ++ } ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ if (done) wake_up(&ithc->wait_hid_get_feature); ++ else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1); ++ } else { ++ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code); ++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); ++ } ++ return 0; ++} ++ ++static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ unsigned n = rx->num_received; ++ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head); ++ while (1) { ++ u8 tail = n % NUM_RX_BUF; ++ u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG); ++ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail); ++ // ringbuffer is full if tail_wrap == head_wrap ++ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG ++ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0; ++ ++ // take the buffer that the device just filled ++ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF]; ++ CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail); ++ rx->num_received = ++n; ++ ++ // process data ++ CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail); ++ ++ // give the buffer back to the device ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail); ++ } ++} ++int ithc_dma_rx(struct ithc *ithc, u8 channel) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ mutex_lock(&rx->mutex); ++ int ret = ithc_dma_rx_unlocked(ithc, channel); ++ mutex_unlock(&rx->mutex); ++ return ret; ++} ++ ++static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { ++ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize); ++ struct ithc_dma_tx_header *hdr; ++ u8 padding = datasize & 3 ? 
4 - (datasize & 3) : 0; ++ unsigned fullsize = sizeof *hdr + datasize + padding; ++ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL; ++ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ ++ ithc->dma_tx.buf.data_size = fullsize; ++ hdr = ithc->dma_tx.buf.addr; ++ hdr->code = cmdcode; ++ hdr->data_size = datasize; ++ u8 *dest = (void *)(hdr + 1); ++ memcpy(dest, data, datasize); ++ dest += datasize; ++ for (u8 p = 0; p < padding; p++) *dest++ = 0; ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ ++ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND); ++ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); ++ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status); ++ return 0; ++} ++int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { ++ mutex_lock(&ithc->dma_tx.mutex); ++ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data); ++ mutex_unlock(&ithc->dma_tx.mutex); ++ return ret; ++} ++ +diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h +new file mode 100644 +index 000000000000..d9f2c19a13f3 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-dma.h +@@ -0,0 +1,67 @@ ++#define PRD_SIZE_MASK 0xffffff ++#define PRD_FLAG_END 0x1000000 ++#define PRD_FLAG_SUCCESS 0x2000000 ++#define PRD_FLAG_ERROR 0x4000000 ++ ++struct ithc_phys_region_desc { ++ u64 addr; // physical addr/1024 ++ u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds ++ u32 unused; ++}; ++ ++#define DMA_RX_CODE_INPUT_REPORT 3 ++#define DMA_RX_CODE_FEATURE_REPORT 4 ++#define DMA_RX_CODE_REPORT_DESCRIPTOR 5 ++#define DMA_RX_CODE_RESET 7 ++ ++struct ithc_dma_rx_header { ++ u32 code; ++ u32 data_size; ++ u32 _unknown[14]; ++}; ++ ++#define DMA_TX_CODE_SET_FEATURE 3 ++#define DMA_TX_CODE_GET_FEATURE 4 ++#define DMA_TX_CODE_OUTPUT_REPORT 5 ++#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7 ++ ++struct ithc_dma_tx_header { ++ u32 code; ++ u32 data_size; ++}; ++ ++struct ithc_dma_prd_buffer { ++ void *addr; ++ dma_addr_t dma_addr; ++ u32 size; ++ u32 num_pages; // per data buffer ++ enum dma_data_direction dir; ++}; ++ ++struct ithc_dma_data_buffer { ++ void *addr; ++ struct sg_table *sgt; ++ int active_idx; ++ u32 data_size; ++}; ++ ++struct ithc_dma_tx { ++ struct mutex mutex; ++ u32 max_size; ++ struct ithc_dma_prd_buffer prds; ++ struct ithc_dma_data_buffer buf; ++}; ++ ++struct ithc_dma_rx { ++ struct mutex mutex; ++ u32 num_received; ++ struct ithc_dma_prd_buffer prds; ++ struct ithc_dma_data_buffer bufs[NUM_RX_BUF]; ++}; ++ ++int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname); ++void ithc_dma_rx_enable(struct ithc *ithc, u8 channel); ++int ithc_dma_tx_init(struct ithc *ithc); ++int ithc_dma_rx(struct ithc *ithc, u8 channel); ++int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata); ++ +diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c +new file mode 100644 +index 000000000000..09512b9cb4d3 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-main.c +@@ -0,0 +1,534 @@ ++#include "ithc.h" ++ ++MODULE_DESCRIPTION("Intel Touch Host Controller driver"); ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++// Lakefield ++#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0 ++#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1 ++// Tiger Lake ++#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 
0x43d0 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1 ++// Alder Lake ++#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1 ++// Raptor Lake ++#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58 ++#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59 ++// Meteor Lake ++#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48 ++#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 0x7e4a ++ ++static const struct pci_device_id ithc_pci_tbl[] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(pci, ithc_pci_tbl); ++ ++// Module parameters ++ ++static bool ithc_use_polling = false; ++module_param_named(poll, ithc_use_polling, bool, 0); ++MODULE_PARM_DESC(poll, "Use polling instead of interrupts"); ++ ++static bool ithc_use_rx0 = false; ++module_param_named(rx0, ithc_use_rx0, bool, 0); ++MODULE_PARM_DESC(rx0, "Use DMA RX channel 0"); ++ ++static bool ithc_use_rx1 = true; ++module_param_named(rx1, ithc_use_rx1, bool, 0); ++MODULE_PARM_DESC(rx1, "Use DMA RX channel 1"); ++ ++static bool ithc_log_regs_enabled = false; ++module_param_named(logregs, ithc_log_regs_enabled, bool, 0); ++MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)"); ++ ++// Sysfs attributes ++ ++static bool ithc_is_config_valid(struct ithc *ithc) { ++ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC; ++} ++ ++static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ return sprintf(buf, "0x%04x", ithc->config.vendor_id); ++} ++static DEVICE_ATTR_RO(vendor); ++static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ return sprintf(buf, "0x%04x", ithc->config.product_id); ++} ++static DEVICE_ATTR_RO(product); ++static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) 
return -ENODEV; ++ return sprintf(buf, "%u", ithc->config.revision); ++} ++static DEVICE_ATTR_RO(revision); ++static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ u32 v = ithc->config.fw_version; ++ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff); ++} ++static DEVICE_ATTR_RO(fw_version); ++ ++static const struct attribute_group *ithc_attribute_groups[] = { ++ &(const struct attribute_group){ ++ .name = DEVNAME, ++ .attrs = (struct attribute *[]){ ++ &dev_attr_vendor.attr, ++ &dev_attr_product.attr, ++ &dev_attr_revision.attr, ++ &dev_attr_fw_version.attr, ++ NULL ++ }, ++ }, ++ NULL ++}; ++ ++// HID setup ++ ++static int ithc_hid_start(struct hid_device *hdev) { return 0; } ++static void ithc_hid_stop(struct hid_device *hdev) { } ++static int ithc_hid_open(struct hid_device *hdev) { return 0; } ++static void ithc_hid_close(struct hid_device *hdev) { } ++ ++static int ithc_hid_parse(struct hid_device *hdev) { ++ struct ithc *ithc = hdev->driver_data; ++ u64 val = 0; ++ WRITE_ONCE(ithc->hid_parse_done, false); ++ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val); ++ if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT; ++ return 0; ++} ++ ++static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { ++ struct ithc *ithc = hdev->driver_data; ++ if (!buf || !len) return -EINVAL; ++ u32 code; ++ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE; ++ else { ++ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum); ++ return -EINVAL; ++ } ++ buf[0] = reportnum; ++ if (reqtype == HID_REQ_GET_REPORT) { ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ ithc->hid_get_feature_buf = buf; ++ ithc->hid_get_feature_size = len; ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf); ++ if (!r) { ++ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000)); ++ if (!r) r = -ETIMEDOUT; ++ else if (r < 0) r = -EINTR; ++ else r = 0; ++ } ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ ithc->hid_get_feature_buf = NULL; ++ if (!r) r = ithc->hid_get_feature_size; ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ return r; ++ } ++ CHECK_RET(ithc_dma_tx, ithc, code, len, buf); ++ return 0; ++} ++ ++static struct hid_ll_driver ithc_ll_driver = { ++ .start = ithc_hid_start, ++ .stop = ithc_hid_stop, ++ .open = ithc_hid_open, ++ .close = ithc_hid_close, ++ .parse = ithc_hid_parse, ++ .raw_request = ithc_hid_raw_request, ++}; ++ ++static void ithc_hid_devres_release(struct device *dev, void *res) { ++ struct hid_device **hidm = res; ++ if (*hidm) hid_destroy_device(*hidm); ++} ++ ++static int ithc_hid_init(struct ithc *ithc) { ++ struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL); ++ if (!hidm) return -ENOMEM; ++ devres_add(&ithc->pci->dev, hidm); ++ struct hid_device *hid = hid_allocate_device(); ++ if (IS_ERR(hid)) return 
PTR_ERR(hid); ++ *hidm = hid; ++ ++ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name)); ++ strscpy(hid->phys, ithc->phys, sizeof(hid->phys)); ++ hid->ll_driver = &ithc_ll_driver; ++ hid->bus = BUS_PCI; ++ hid->vendor = ithc->config.vendor_id; ++ hid->product = ithc->config.product_id; ++ hid->version = 0x100; ++ hid->dev.parent = &ithc->pci->dev; ++ hid->driver_data = ithc; ++ ++ ithc->hid = hid; ++ return 0; ++} ++ ++// Interrupts/polling ++ ++static void ithc_activity_timer_callback(struct timer_list *t) { ++ struct ithc *ithc = container_of(t, struct ithc, activity_timer); ++ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); ++} ++ ++void ithc_set_active(struct ithc *ithc) { ++ // When CPU usage is very low, the CPU can enter various low power states (C2-C10). ++ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens. ++ // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor). ++ // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions. ++ cpu_latency_qos_update_request(&ithc->activity_qos, 0); ++ mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000)); ++} ++ ++static int ithc_set_device_enabled(struct ithc *ithc, bool enable) { ++ u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 ++ | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0); ++ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x); ++} ++ ++static void ithc_disable_interrupts(struct ithc *ithc) { ++ writel(0, &ithc->regs->error_control); ++ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0); ++ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); ++ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); ++ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0); ++} ++ ++static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) { ++ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status); ++} ++ ++static void ithc_clear_interrupts(struct ithc *ithc) { ++ writel(0xffffffff, &ithc->regs->error_flags); ++ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ ithc_clear_dma_rx_interrupts(ithc, 0); ++ ithc_clear_dma_rx_interrupts(ithc, 1); ++ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status); ++} ++ ++static void ithc_process(struct ithc *ithc) { ++ ithc_log_regs(ithc); ++ ++ // read and clear error bits ++ u32 err = readl(&ithc->regs->error_flags); ++ if (err) { ++ if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err); ++ writel(err, &ithc->regs->error_flags); ++ } ++ ++ // process DMA rx ++ if (ithc_use_rx0) { ++ ithc_clear_dma_rx_interrupts(ithc, 0); ++ ithc_dma_rx(ithc, 0); ++ } ++ if (ithc_use_rx1) { ++ ithc_clear_dma_rx_interrupts(ithc, 1); ++ ithc_dma_rx(ithc, 1); ++ } ++ ++ ithc_log_regs(ithc); ++} ++ ++static irqreturn_t ithc_interrupt_thread(int irq, void *arg) { ++ struct 
ithc *ithc = arg; ++ pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n", ++ readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags), ++ readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status), ++ readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status), ++ readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status), ++ readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status)); ++ ithc_process(ithc); ++ return IRQ_HANDLED; ++} ++ ++static int ithc_poll_thread(void *arg) { ++ struct ithc *ithc = arg; ++ unsigned sleep = 100; ++ while (!kthread_should_stop()) { ++ u32 n = ithc->dma_rx[1].num_received; ++ ithc_process(ithc); ++ if (n != ithc->dma_rx[1].num_received) sleep = 20; ++ else sleep = min(200u, sleep + (sleep >> 4) + 1); ++ msleep_interruptible(sleep); ++ } ++ return 0; ++} ++ ++// Device initialization and shutdown ++ ++static void ithc_disable(struct ithc *ithc) { ++ bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE); ++ CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED); ++ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); ++ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0); ++ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); ++ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0); ++ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0); ++ ithc_disable_interrupts(ithc); ++ ithc_clear_interrupts(ithc); ++} ++ ++static int ithc_init_device(struct ithc *ithc) { ++ ithc_log_regs(ithc); ++ bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0; ++ ithc_disable(ithc); ++ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY); ++ ithc_set_spi_config(ithc, 10, 0); ++ bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config ++ ++ if (was_enabled) if (msleep_interruptible(100)) return -EINTR; ++ bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0); ++ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0); ++ for (int retries = 0; ; retries++) { ++ ithc_log_regs(ithc); ++ bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET); ++ if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break; ++ if (retries > 5) { ++ pci_err(ithc->pci, "too many retries, failed to reset device\n"); ++ return -ETIMEDOUT; ++ } ++ pci_err(ithc->pci, "invalid state, retrying reset\n"); ++ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); ++ if (msleep_interruptible(1000)) return -EINTR; ++ } ++ ithc_log_regs(ithc); ++ ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4); ++ ++ // read config ++ for (int retries = 0; ; retries++) { ++ ithc_log_regs(ithc); ++ memset(&ithc->config, 0, sizeof ithc->config); ++ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config); ++ u32 *p = (void *)&ithc->config; ++ pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", ++ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); ++ if (ithc_is_config_valid(ithc)) break; ++ if (retries > 10) { ++ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id); ++ return -EIO; ++ } ++ pci_err(ithc->pci, "failed to read config, retrying\n"); ++ if 
(msleep_interruptible(100)) return -EINTR; ++ } ++ ithc_log_regs(ithc); ++ ++ CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config)); ++ CHECK_RET(ithc_set_device_enabled, ithc, true); ++ ithc_log_regs(ithc); ++ return 0; ++} ++ ++int ithc_reset(struct ithc *ithc) { ++ // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA ++ // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking? ++ pci_err(ithc->pci, "reset\n"); ++ CHECK(ithc_init_device, ithc); ++ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0); ++ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1); ++ ithc_log_regs(ithc); ++ pci_dbg(ithc->pci, "reset completed\n"); ++ return 0; ++} ++ ++static void ithc_stop(void *res) { ++ struct ithc *ithc = res; ++ pci_dbg(ithc->pci, "stopping\n"); ++ ithc_log_regs(ithc); ++ if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread); ++ if (ithc->irq >= 0) disable_irq(ithc->irq); ++ CHECK(ithc_set_device_enabled, ithc, false); ++ ithc_disable(ithc); ++ del_timer_sync(&ithc->activity_timer); ++ cpu_latency_qos_remove_request(&ithc->activity_qos); ++ // clear dma config ++ for(unsigned i = 0; i < 2; i++) { ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0); ++ lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr); ++ writeb(0, &ithc->regs->dma_rx[i].num_bufs); ++ writeb(0, &ithc->regs->dma_rx[i].num_prds); ++ } ++ lo_hi_writeq(0, &ithc->regs->dma_tx.addr); ++ writeb(0, &ithc->regs->dma_tx.num_prds); ++ ithc_log_regs(ithc); ++ pci_dbg(ithc->pci, "stopped\n"); ++} ++ ++static void ithc_clear_drvdata(void *res) { ++ struct pci_dev *pci = res; ++ pci_set_drvdata(pci, NULL); ++} ++ ++static int ithc_start(struct pci_dev *pci) { ++ pci_dbg(pci, "starting\n"); ++ if (pci_get_drvdata(pci)) { ++ pci_err(pci, "device already initialized\n"); ++ return -EINVAL; ++ } ++ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM; ++ ++ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL); ++ if (!ithc) return -ENOMEM; ++ ithc->irq = -1; ++ ithc->pci = pci; ++ snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci)); ++ init_waitqueue_head(&ithc->wait_hid_parse); ++ init_waitqueue_head(&ithc->wait_hid_get_feature); ++ mutex_init(&ithc->hid_get_feature_mutex); ++ pci_set_drvdata(pci, ithc); ++ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci); ++ if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL); ++ ++ CHECK_RET(pcim_enable_device, pci); ++ pci_set_master(pci); ++ CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs"); ++ CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64)); ++ CHECK_RET(pci_set_power_state, pci, PCI_D0); ++ ithc->regs = pcim_iomap_table(pci)[0]; ++ ++ if (!ithc_use_polling) { ++ CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); ++ ithc->irq = CHECK(pci_irq_vector, pci, 0); ++ if (ithc->irq < 0) return ithc->irq; ++ } ++ ++ CHECK_RET(ithc_init_device, ithc); ++ CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups); ++ if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME); ++ if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? 
DEVNAME "1" : DEVNAME); ++ CHECK_RET(ithc_dma_tx_init, ithc); ++ ++ CHECK_RET(ithc_hid_init, ithc); ++ ++ cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); ++ timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0); ++ ++ // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed ++ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc); ++ ++ if (ithc_use_polling) { ++ pci_info(pci, "using polling instead of irq\n"); ++ // use a thread instead of simple timer because we want to be able to sleep ++ ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll"); ++ if (IS_ERR(ithc->poll_thread)) { ++ int err = PTR_ERR(ithc->poll_thread); ++ ithc->poll_thread = NULL; ++ return err; ++ } ++ } else { ++ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc); ++ } ++ ++ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0); ++ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1); ++ ++ // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA ++ CHECK_RET(hid_add_device, ithc->hid); ++ ++ CHECK(ithc_debug_init, ithc); ++ ++ pci_dbg(pci, "started\n"); ++ return 0; ++} ++ ++static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) { ++ pci_dbg(pci, "device probe\n"); ++ return ithc_start(pci); ++} ++ ++static void ithc_remove(struct pci_dev *pci) { ++ pci_dbg(pci, "device remove\n"); ++ // all cleanup is handled by devres ++} ++ ++static int ithc_suspend(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm suspend\n"); ++ devres_release_group(dev, ithc_start); ++ return 0; ++} ++ ++static int ithc_resume(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm resume\n"); ++ return ithc_start(pci); ++} ++ ++static int ithc_freeze(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm freeze\n"); ++ devres_release_group(dev, ithc_start); ++ return 0; ++} ++ ++static int ithc_thaw(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm thaw\n"); ++ return ithc_start(pci); ++} ++ ++static int ithc_restore(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm restore\n"); ++ return ithc_start(pci); ++} ++ ++static struct pci_driver ithc_driver = { ++ .name = DEVNAME, ++ .id_table = ithc_pci_tbl, ++ .probe = ithc_probe, ++ .remove = ithc_remove, ++ .driver.pm = &(const struct dev_pm_ops) { ++ .suspend = ithc_suspend, ++ .resume = ithc_resume, ++ .freeze = ithc_freeze, ++ .thaw = ithc_thaw, ++ .restore = ithc_restore, ++ }, ++ //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway ++}; ++ ++static int __init ithc_init(void) { ++ return pci_register_driver(&ithc_driver); ++} ++ ++static void __exit ithc_exit(void) { ++ pci_unregister_driver(&ithc_driver); ++} ++ ++module_init(ithc_init); ++module_exit(ithc_exit); ++ +diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c +new file mode 100644 +index 000000000000..85d567b05761 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-regs.c +@@ -0,0 +1,64 @@ ++#include "ithc.h" ++ ++#define reg_num(r) (0x1fff & (u16)(__force u64)(r)) ++ ++void bitsl(__iomem u32 *reg, u32 mask, u32 val) { ++ if (val & ~mask) 
pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask); ++ writel((readl(reg) & ~mask) | (val & mask), reg); ++} ++ ++void bitsb(__iomem u8 *reg, u8 mask, u8 val) { ++ if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask); ++ writeb((readb(reg) & ~mask) | (val & mask), reg); ++} ++ ++int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) { ++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); ++ u32 x; ++ if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { ++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); ++ return -ETIMEDOUT; ++ } ++ pci_dbg(ithc->pci, "done waiting\n"); ++ return 0; ++} ++ ++int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) { ++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); ++ u8 x; ++ if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { ++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); ++ return -ETIMEDOUT; ++ } ++ pci_dbg(ithc->pci, "done waiting\n"); ++ return 0; ++} ++ ++int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) { ++ pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode); ++ if (mode == 3) mode = 2; ++ bitsl(&ithc->regs->spi_config, ++ SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff), ++ SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed)); ++ return 0; ++} ++ ++int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) { ++ pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset); ++ if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL; ++ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ writeb(command, &ithc->regs->spi_cmd.code); ++ writew(size, &ithc->regs->spi_cmd.size); ++ writel(offset, &ithc->regs->spi_cmd.offset); ++ u32 *p = data, n = (size + 3) / 4; ++ for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]); ++ bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND); ++ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); ++ if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO; ++ if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE; ++ for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ return 0; ++} ++ +diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h +new file mode 100644 +index 000000000000..1a96092ed7ee +--- /dev/null ++++ b/drivers/hid/ithc/ithc-regs.h +@@ -0,0 +1,186 @@ ++#define CONTROL_QUIESCE BIT(1) ++#define CONTROL_IS_QUIESCED BIT(2) ++#define CONTROL_NRESET BIT(3) ++#define CONTROL_READY BIT(29) ++ ++#define SPI_CONFIG_MODE(x) (((x) & 3) << 2) ++#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4) ++#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18) ++#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode? 
++ ++#define ERROR_CONTROL_UNKNOWN_0 BIT(0) ++#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs ++#define ERROR_CONTROL_UNKNOWN_2 BIT(2) ++#define ERROR_CONTROL_UNKNOWN_3 BIT(3) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13) ++#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq? ++#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs ++ ++#define ERROR_STATUS_DMA BIT(28) ++#define ERROR_STATUS_SPI BIT(30) ++ ++#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9) ++#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10) ++#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message ++#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13) ++#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16) ++#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17) ++#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18) ++#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19) ++#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20) ++#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21) ++ ++#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete ++#define SPI_CMD_CONTROL_IRQ BIT(1) ++ ++#define SPI_CMD_CODE_READ 4 ++#define SPI_CMD_CODE_WRITE 6 ++ ++#define SPI_CMD_STATUS_DONE BIT(0) ++#define SPI_CMD_STATUS_ERROR BIT(1) ++#define SPI_CMD_STATUS_BUSY BIT(3) ++ ++#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete ++#define DMA_TX_CONTROL_IRQ BIT(3) ++ ++#define DMA_TX_STATUS_DONE BIT(0) ++#define DMA_TX_STATUS_ERROR BIT(1) ++#define DMA_TX_STATUS_UNKNOWN_2 BIT(2) ++#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy? ++ ++#define DMA_RX_CONTROL_ENABLE BIT(0) ++#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only? ++#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only? ++#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only? ++#define DMA_RX_CONTROL_IRQ_DATA BIT(5) ++ ++#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only? 
++#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices ++ ++#define DMA_RX_WRAP_FLAG BIT(7) ++ ++#define DMA_RX_STATUS_ERROR BIT(3) ++#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms) ++#define DMA_RX_STATUS_HAVE_DATA BIT(5) ++#define DMA_RX_STATUS_ENABLED BIT(8) ++ ++#define COUNTER_RESET BIT(31) ++ ++struct ithc_registers { ++ /* 0000 */ u32 _unknown_0000[1024]; ++ /* 1000 */ u32 _unknown_1000; ++ /* 1004 */ u32 _unknown_1004; ++ /* 1008 */ u32 control_bits; ++ /* 100c */ u32 _unknown_100c; ++ /* 1010 */ u32 spi_config; ++ /* 1014 */ u32 _unknown_1014[3]; ++ /* 1020 */ u32 error_control; ++ /* 1024 */ u32 error_status; // write to clear ++ /* 1028 */ u32 error_flags; // write to clear ++ /* 102c */ u32 _unknown_102c[5]; ++ struct { ++ /* 1040 */ u8 control; ++ /* 1041 */ u8 code; ++ /* 1042 */ u16 size; ++ /* 1044 */ u32 status; // write to clear ++ /* 1048 */ u32 offset; ++ /* 104c */ u32 data[16]; ++ /* 108c */ u32 _unknown_108c; ++ } spi_cmd; ++ struct { ++ /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() ++ /* 1098 */ u8 control; ++ /* 1099 */ u8 _unknown_1099; ++ /* 109a */ u8 _unknown_109a; ++ /* 109b */ u8 num_prds; ++ /* 109c */ u32 status; // write to clear ++ } dma_tx; ++ /* 10a0 */ u32 _unknown_10a0[7]; ++ /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET ++ /* 10c0 */ u32 _unknown_10c0[8]; ++ /* 10e0 */ u32 _unknown_10e0_counters[3]; ++ /* 10ec */ u32 _unknown_10ec[5]; ++ struct { ++ /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() ++ /* 1108/1208 */ u8 num_bufs; ++ /* 1109/1209 */ u8 num_prds; ++ /* 110a/120a */ u16 _unknown_110a; ++ /* 110c/120c */ u8 control; ++ /* 110d/120d */ u8 head; ++ /* 110e/120e */ u8 tail; ++ /* 110f/120f */ u8 control2; ++ /* 1110/1210 */ u32 status; // write to clear ++ /* 1114/1214 */ u32 _unknown_1114; ++ /* 1118/1218 */ u64 _unknown_1118_guc_addr; ++ /* 1120/1220 */ u32 _unknown_1120_guc; ++ /* 1124/1224 */ u32 _unknown_1124_guc; ++ /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related ++ /* 112c/122c */ u32 _unknown_112c; ++ /* 1130/1230 */ u64 _unknown_1130_guc_addr; ++ /* 1138/1238 */ u32 _unknown_1138_guc; ++ /* 113c/123c */ u32 _unknown_113c; ++ /* 1140/1240 */ u32 _unknown_1140_guc; ++ /* 1144/1244 */ u32 _unknown_1144[23]; ++ /* 11a0/12a0 */ u32 _unknown_11a0_counters[6]; ++ /* 11b8/12b8 */ u32 _unknown_11b8[18]; ++ } dma_rx[2]; ++}; ++static_assert(sizeof(struct ithc_registers) == 0x1300); ++ ++#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6) ++#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6) ++ ++#define DEVCFG_TOUCH_MASK 0x3f ++#define DEVCFG_TOUCH_ENABLE BIT(0) ++#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1) ++#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2) ++#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3) ++#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4) ++#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5) ++#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6) ++ ++#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC" ++ ++#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode? 
++#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3)
++#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f)
++#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20)
++#define DEVCFG_SPI_HEARTBEAT_INTERVAL(x) (((x) >> 21) & 7)
++#define DEVCFG_SPI_UNKNOWN_25 BIT(25)
++#define DEVCFG_SPI_UNKNOWN_26 BIT(26)
++#define DEVCFG_SPI_UNKNOWN_27 BIT(27)
++#define DEVCFG_SPI_DELAY(x) (((x) >> 28) & 7)
++#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31)
++
++struct ithc_device_config {
++	u32 _unknown_00;   // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET)
++	u32 _unknown_04;   // 04 = 0x00000000
++	u32 dma_buf_sizes; // 08 = 0x000a00ff
++	u32 touch_cfg;     // 0c = 0x0000001c
++	u32 _unknown_10;   // 10 = 0x0000001c
++	u32 device_id;     // 14 = 0x43495424 = "$TIC"
++	u32 spi_config;    // 18 = 0xfda00a2e
++	u16 vendor_id;     // 1c = 0x045e = Microsoft Corp.
++	u16 product_id;    // 1e = 0x0c1a
++	u32 revision;      // 20 = 0x00000001
++	u32 fw_version;    // 24 = 0x05008a8b = 5.0.138.139
++	u32 _unknown_28;   // 28 = 0x00000000
++	u32 fw_mode;       // 2c = 0x00000000
++	u32 _unknown_30;   // 30 = 0x00000000
++	u32 _unknown_34;   // 34 = 0x0404035e (u8,u8,u8,u8 = version?)
++	u32 _unknown_38;   // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET)
++	u32 _unknown_3c;   // 3c = 0x00000002
++};
++
++void bitsl(__iomem u32 *reg, u32 mask, u32 val);
++void bitsb(__iomem u8 *reg, u8 mask, u8 val);
++#define bitsl_set(reg, x) bitsl(reg, x, x)
++#define bitsb_set(reg, x) bitsb(reg, x, x)
++int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val);
++int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val);
++int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode);
++int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data);
++
+diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h
+new file mode 100644
+index 000000000000..6a9b0d480bc1
+--- /dev/null
++++ b/drivers/hid/ithc/ithc.h
+@@ -0,0 +1,60 @@
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/hid.h>
++#include <linux/dma-mapping.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/iopoll.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <linux/miscdevice.h>
++#include <linux/debugfs.h>
++#include <linux/poll.h>
++#include <linux/timer.h>
++#include <linux/pm_qos.h>
++
++#define DEVNAME "ithc"
++#define DEVFULLNAME "Intel Touch Host Controller"
++
++#undef pr_fmt
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; })
++#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0)
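/*
 * Editor's note, not part of the patch: CHECK() evaluates to the callee's
 * return value and logs failures (it assumes a variable named `ithc` is in
 * scope); CHECK_RET() additionally propagates the error to the caller. So
 *
 *	CHECK_RET(ithc_hid_init, ithc);
 *
 * behaves roughly like:
 *
 *	int r = ithc_hid_init(ithc);
 *	if (r < 0) {
 *		pci_err(ithc->pci, "%s: %s failed with %i\n",
 *			__func__, "ithc_hid_init", r);
 *		return r;
 *	}
 */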
++
++#define NUM_RX_BUF 16
++
++struct ithc;
++
++#include "ithc-regs.h"
++#include "ithc-dma.h"
++
++struct ithc {
++	char phys[32];
++	struct pci_dev *pci;
++	int irq;
++	struct task_struct *poll_thread;
++	struct pm_qos_request activity_qos;
++	struct timer_list activity_timer;
++
++	struct hid_device *hid;
++	bool hid_parse_done;
++	wait_queue_head_t wait_hid_parse;
++	wait_queue_head_t wait_hid_get_feature;
++	struct mutex hid_get_feature_mutex;
++	void *hid_get_feature_buf;
++	size_t hid_get_feature_size;
++
++	struct ithc_registers __iomem *regs;
++	struct ithc_registers *prev_regs; // for debugging
++	struct ithc_device_config config;
++	struct ithc_dma_rx dma_rx[2];
++	struct ithc_dma_tx dma_tx;
++};
++
++int ithc_reset(struct ithc *ithc);
++void ithc_set_active(struct ithc *ithc);
++int ithc_debug_init(struct ithc *ithc);
++void ithc_log_regs(struct ithc *ithc);
++
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index d6037a328669..a290ebc77aea 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -628,6 +628,28 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ 	return (ret == 1) ? 0 : -EIO;
+ }
+ 
++static int acpi_gsb_i2c_write_raw_bytes(struct i2c_client *client,
++		u8 *data, u8 data_len)
++{
++	struct i2c_msg msgs[1];
++	int ret = AE_OK;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = client->flags;
++	msgs[0].len = data_len + 1;
++	msgs[0].buf = data;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++
++	if (ret < 0) {
++		dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
++		return ret;
++	}
++
++	/* 1 transfer must have completed successfully */
++	return (ret == 1) ? 0 : -EIO;
++}
++
+ static acpi_status
+ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		       u32 bits, u64 *value64,
+@@ -729,6 +751,19 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		}
+ 		break;
+ 
++	case ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES:
++		if (action == ACPI_READ) {
++			dev_warn(&adapter->dev,
++				 "protocol 0x%02x not supported for client 0x%02x\n",
++				 accessor_type, client->addr);
++			ret = AE_BAD_PARAMETER;
++			goto err;
++		} else {
++			status = acpi_gsb_i2c_write_raw_bytes(client,
++					gsb->data, info->access_length);
++		}
++		break;
++
+ 	default:
+ 		dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n",
+ 			 accessor_type, client->addr);
+diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
+index 09489380afda..0f02411a60f1 100644
+--- a/drivers/input/misc/soc_button_array.c
++++ b/drivers/input/misc/soc_button_array.c
+@@ -507,8 +507,8 @@ static const struct soc_device_data soc_device_MSHW0028 = {
+  * Both, the Surface Pro 4 (surfacepro3_button.c) and the above mentioned
+  * devices use MSHW0040 for power and volume buttons, however the way they
+  * have to be addressed differs. Make sure that we only load this drivers
+- * for the correct devices by checking the OEM Platform Revision provided by
+- * the _DSM method.
++ * for the correct devices by checking if the OEM Platform Revision DSM call
++ * exists.
+ */ + #define MSHW0040_DSM_REVISION 0x01 + #define MSHW0040_DSM_GET_OMPR 0x02 // get OEM Platform Revision +@@ -519,31 +519,14 @@ static const guid_t MSHW0040_DSM_UUID = + static int soc_device_check_MSHW0040(struct device *dev) + { + acpi_handle handle = ACPI_HANDLE(dev); +- union acpi_object *result; +- u64 oem_platform_rev = 0; // valid revisions are nonzero +- +- // get OEM platform revision +- result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID, +- MSHW0040_DSM_REVISION, +- MSHW0040_DSM_GET_OMPR, NULL, +- ACPI_TYPE_INTEGER); +- +- if (result) { +- oem_platform_rev = result->integer.value; +- ACPI_FREE(result); +- } +- +- /* +- * If the revision is zero here, the _DSM evaluation has failed. This +- * indicates that we have a Pro 4 or Book 1 and this driver should not +- * be used. +- */ +- if (oem_platform_rev == 0) +- return -ENODEV; ++ bool exists; + +- dev_dbg(dev, "OEM Platform Revision %llu\n", oem_platform_rev); ++ // check if OEM platform revision DSM call exists ++ exists = acpi_check_dsm(handle, &MSHW0040_DSM_UUID, ++ MSHW0040_DSM_REVISION, ++ BIT(MSHW0040_DSM_GET_OMPR)); + +- return 0; ++ return exists ? 0 : -ENODEV; + } + + /* +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 7c2f4bd33582..3ebd2260cdab 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -37,6 +37,8 @@ + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) + #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) + #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) ++#define IS_IPTS(pdev) ((pdev)->vendor == PCI_VENDOR_ID_INTEL && \ ++ ((pdev)->device == 0x9d3e)) + #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) + + #define IOAPIC_RANGE_START (0xfee00000) +@@ -287,12 +289,14 @@ int intel_iommu_enabled = 0; + EXPORT_SYMBOL_GPL(intel_iommu_enabled); + + static int dmar_map_gfx = 1; ++static int dmar_map_ipts = 1; + static int intel_iommu_superpage = 1; + static int iommu_identity_mapping; + static int iommu_skip_te_disable; + + #define IDENTMAP_GFX 2 + #define IDENTMAP_AZALIA 4 ++#define IDENTMAP_IPTS 16 + + const struct iommu_ops intel_iommu_ops; + +@@ -2588,6 +2592,9 @@ static int device_def_domain_type(struct device *dev) + + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) + return IOMMU_DOMAIN_IDENTITY; ++ ++ if ((iommu_identity_mapping & IDENTMAP_IPTS) && IS_IPTS(pdev)) ++ return IOMMU_DOMAIN_IDENTITY; + } + + return 0; +@@ -2977,6 +2984,9 @@ static int __init init_dmars(void) + if (!dmar_map_gfx) + iommu_identity_mapping |= IDENTMAP_GFX; + ++ if (!dmar_map_ipts) ++ iommu_identity_mapping |= IDENTMAP_IPTS; ++ + check_tylersburg_isoch(); + + ret = si_domain_init(hw_pass_through); +@@ -4819,6 +4829,17 @@ static void quirk_iommu_igfx(struct pci_dev *dev) + dmar_map_gfx = 0; + } + ++static void quirk_iommu_ipts(struct pci_dev *dev) ++{ ++ if (!IS_IPTS(dev)) ++ return; ++ ++ if (risky_device(dev)) ++ return; ++ ++ pci_info(dev, "Passthrough IOMMU for IPTS\n"); ++ dmar_map_ipts = 0; ++} + /* G4x/GM45 integrated gfx dmar support is totally busted. 
*/ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx); +@@ -4854,6 +4875,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); + ++/* disable IPTS dmar support */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9D3E, quirk_iommu_ipts); ++ + static void quirk_iommu_rwbf(struct pci_dev *dev) + { + if (risky_device(dev)) +diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c +index df9e261af0b5..bc2a0aefedf2 100644 +--- a/drivers/iommu/intel/irq_remapping.c ++++ b/drivers/iommu/intel/irq_remapping.c +@@ -390,6 +390,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev) + data.busmatch_count = 0; + pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); + ++ /* ++ * The Intel Touch Host Controller is at 00:10.6, but for some reason ++ * the MSI interrupts have request id 01:05.0. ++ * Disable id verification to work around this. ++ * FIXME Find proper fix or turn this into a quirk. ++ */ ++ if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) { ++ switch(dev->device) { ++ case 0x98d0: case 0x98d1: // LKF ++ case 0xa0d0: case 0xa0d1: // TGL LP ++ case 0x43d0: case 0x43d1: // TGL H ++ set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0); ++ return 0; ++ } ++ } ++ + /* + * DMA alias provides us with a PCI device and alias. The only case + * where the it will return an alias on a different bus than the +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index bdc65d50b945..08723c01d727 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -92,6 +92,7 @@ + #define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ + + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ ++#define MEI_DEV_ID_ICP_LP_3 0x34E4 /* Ice Lake Point LP 3 (iTouch) */ + #define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */ + + #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 5bf0d50d55a0..c13864512229 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -97,6 +97,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP_3, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c +index 5eb131ab916f..67f074a126d1 100644 +--- a/drivers/net/wireless/ath/ath10k/core.c ++++ b/drivers/net/wireless/ath/ath10k/core.c +@@ -38,6 +38,9 @@ static bool fw_diag_log; + /* frame mode values are mapped as per enum ath10k_hw_txrx_mode */ + unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + ++static char *override_board = ""; ++static char *override_board2 = ""; ++ + unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | + BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); + +@@ -50,6 +53,9 @@ module_param(fw_diag_log, bool, 0644); + module_param_named(frame_mode, ath10k_frame_mode, uint, 0644); + module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); + ++module_param(override_board, charp, 
0644); ++module_param(override_board2, charp, 0644); ++ + MODULE_PARM_DESC(debug_mask, "Debugging mask"); + MODULE_PARM_DESC(uart_print, "Uart target debugging"); + MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); +@@ -59,6 +65,9 @@ MODULE_PARM_DESC(frame_mode, + MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); + MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging"); + ++MODULE_PARM_DESC(override_board, "Override for board.bin file"); ++MODULE_PARM_DESC(override_board2, "Override for board-2.bin file"); ++ + static const struct ath10k_hw_params ath10k_hw_params_list[] = { + { + .id = QCA988X_HW_2_0_VERSION, +@@ -911,6 +920,42 @@ static int ath10k_init_configure_target(struct ath10k *ar) + return 0; + } + ++static const char *ath10k_override_board_fw_file(struct ath10k *ar, ++ const char *file) ++{ ++ if (strcmp(file, "board.bin") == 0) { ++ if (strcmp(override_board, "") == 0) ++ return file; ++ ++ if (strcmp(override_board, "none") == 0) { ++ dev_info(ar->dev, "firmware override: pretending 'board.bin' does not exist\n"); ++ return NULL; ++ } ++ ++ dev_info(ar->dev, "firmware override: replacing 'board.bin' with '%s'\n", ++ override_board); ++ ++ return override_board; ++ } ++ ++ if (strcmp(file, "board-2.bin") == 0) { ++ if (strcmp(override_board2, "") == 0) ++ return file; ++ ++ if (strcmp(override_board2, "none") == 0) { ++ dev_info(ar->dev, "firmware override: pretending 'board-2.bin' does not exist\n"); ++ return NULL; ++ } ++ ++ dev_info(ar->dev, "firmware override: replacing 'board-2.bin' with '%s'\n", ++ override_board2); ++ ++ return override_board2; ++ } ++ ++ return file; ++} ++ + static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, + const char *dir, + const char *file) +@@ -925,6 +970,19 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, + if (dir == NULL) + dir = "."; + ++ /* HACK: Override board.bin and board-2.bin files if specified. ++ * ++ * Some Surface devices perform better with a different board ++ * configuration. To this end, one would need to replace the board.bin ++ * file with the modified config and remove the board-2.bin file. ++ * Unfortunately, that's not a solution that we can easily package. So ++ * we add module options to perform these overrides here. 
++ */ ++ ++ file = ath10k_override_board_fw_file(ar, file); ++ if (!file) ++ return ERR_PTR(-ENOENT); ++ + snprintf(filename, sizeof(filename), "%s/%s", dir, file); + ret = firmware_request_nowarn(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c +index 9a698a16a8f3..5e1a341f63df 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c +@@ -368,6 +368,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct pcie_service_card *card; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + + pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", +@@ -409,6 +410,12 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + return -1; + } + ++ /* disable bridge_d3 for Surface gen4+ devices to fix fw crashing ++ * after suspend ++ */ ++ if (card->quirks & QUIRK_NO_BRIDGE_D3) ++ parent_pdev->bridge_d3 = false; ++ + return 0; + } + +@@ -1762,9 +1769,21 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) + static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) + { + struct pcie_service_card *card = adapter->card; ++ struct pci_dev *pdev = card->dev; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; + ++ /* Trigger a function level reset of the PCI bridge device, this makes ++ * the firmware of PCIe 88W8897 cards stop reporting a fixed LTR value ++ * that prevents the system from entering package C10 and S0ix powersaving ++ * states. ++ * We need to do it here because it must happen after firmware ++ * initialization and this function is called after that is done. 
++	 */
++	if (card->quirks & QUIRK_DO_FLR_ON_BRIDGE)
++		pci_reset_function(parent_pdev);
++
+ 	/* Write the RX ring read pointer in to reg->rx_rdptr */
+ 	if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
+ 			      tx_wrap)) {
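Editor's aside, not part of the patch: pci_reset_function() takes the device lock, saves the target's config space, applies the strongest reset the device supports (a function-level reset here), and restores the saved state afterwards, so the bridge comes back fully configured without being re-probed. A minimal sketch of the call pattern used above, with a hypothetical NULL check that the patch itself omits:

	static void example_flr_upstream_bridge(struct pci_dev *pdev)
	{
		struct pci_dev *bridge = pci_upstream_bridge(pdev);

		/* hypothetical guard; the patch assumes the bridge exists */
		if (!bridge)
			return;

		/* saves config space, issues the reset, restores config space */
		pci_reset_function(bridge);
	}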
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+index dd6d21f1dbfd..99b024ecbade 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c
+@@ -13,7 +13,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5",
+@@ -22,7 +24,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 5 (LTE)",
+@@ -31,7 +35,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Pro 6",
+@@ -39,7 +45,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 1",
+@@ -47,7 +55,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Book 2",
+@@ -55,7 +65,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 1",
+@@ -63,7 +75,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{
+ 		.ident = "Surface Laptop 2",
+@@ -71,7 +85,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
+ 		},
+-		.driver_data = (void *)QUIRK_FW_RST_D3COLD,
++		.driver_data = (void *)(QUIRK_FW_RST_D3COLD |
++					QUIRK_DO_FLR_ON_BRIDGE |
++					QUIRK_NO_BRIDGE_D3),
+ 	},
+ 	{}
+ };
+@@ -89,6 +105,11 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ 		dev_info(&pdev->dev, "no quirks enabled\n");
+ 	if (card->quirks & QUIRK_FW_RST_D3COLD)
+ 		dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++	if (card->quirks & QUIRK_DO_FLR_ON_BRIDGE)
++		dev_info(&pdev->dev, "quirk do_flr_on_bridge enabled\n");
++	if (card->quirks & QUIRK_NO_BRIDGE_D3)
++		dev_info(&pdev->dev,
++			 "quirk no_bridge_d3 enabled\n");
+ }
+ 
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index d6ff964aec5b..c14eb56eb911 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -4,6 +4,8 @@
+ #include "pcie.h"
+ 
+ #define QUIRK_FW_RST_D3COLD	BIT(0)
++#define QUIRK_DO_FLR_ON_BRIDGE	BIT(1)
++#define QUIRK_NO_BRIDGE_D3	BIT(2)
+ 
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 57ddcc59af30..497cbadd2c6c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -507,6 +507,9 @@ static void pci_device_shutdown(struct device *dev)
+ 	struct pci_dev *pci_dev = to_pci_dev(dev);
+ 	struct pci_driver *drv = pci_dev->driver;
+ 
++	if (pci_dev->no_shutdown)
++		return;
++
+ 	pm_runtime_resume(dev);
+ 
+ 	if (drv && drv->shutdown)
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 25edf55de985..6ab563cc58f6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6124,3 +6124,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
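/*
 * Editor's note, not part of the patch: DECLARE_PCI_FIXUP_FINAL() below
 * registers quirk_no_shutdown() to run once the matching device has been
 * enumerated, so pci_dev->no_shutdown is already set by the time the
 * pci_device_shutdown() hunk above can be reached. Extending the quirk to
 * another device is a one-liner; for a hypothetical extra device ID:
 *
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x4630, quirk_no_shutdown);
 */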
++
++static const struct dmi_system_id no_shutdown_dmi_table[] = {
++	/*
++	 * Systems on which some devices should not be touched during shutdown.
++	 */
++	{
++		.ident = "Microsoft Surface Pro 9",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Pro 9"),
++		},
++	},
++	{
++		.ident = "Microsoft Surface Laptop 5",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 5"),
++		},
++	},
++	{}
++};
++
++static void quirk_no_shutdown(struct pci_dev *dev)
++{
++	if (!dmi_check_system(no_shutdown_dmi_table))
++		return;
++
++	dev->no_shutdown = 1;
++	pci_info(dev, "disabling shutdown ops for [%04x:%04x]\n",
++		 dev->vendor, dev->device);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x461e, quirk_no_shutdown);  // Thunderbolt 4 USB Controller
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x461f, quirk_no_shutdown);  // Thunderbolt 4 PCI Express Root Port
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x462f, quirk_no_shutdown);  // Thunderbolt 4 PCI Express Root Port
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x466d, quirk_no_shutdown);  // Thunderbolt 4 NHI
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x46a8, quirk_no_shutdown);  // GPU
+diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
+index b629e82af97c..68656e8f309e 100644
+--- a/drivers/platform/surface/Kconfig
++++ b/drivers/platform/surface/Kconfig
+@@ -149,6 +149,13 @@ config SURFACE_AGGREGATOR_TABLET_SWITCH
+ 	  Select M or Y here, if you want to provide tablet-mode switch input
+ 	  events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+ 
++config SURFACE_BOOK1_DGPU_SWITCH
++	tristate "Surface Book 1 dGPU Switch Driver"
++	depends on SYSFS
++	help
++	  This driver provides a sysfs switch to set the power-state of the
++	  discrete GPU found on the Microsoft Surface Book 1.
++
+ config SURFACE_DTX
+ 	tristate "Surface DTX (Detachment System) Driver"
+ 	depends on SURFACE_AGGREGATOR
+diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
+index 53344330939b..7efcd0cdb532 100644
+--- a/drivers/platform/surface/Makefile
++++ b/drivers/platform/surface/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+ obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o
++obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o
+ obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
+ obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
+ obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
+diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c
+index ca4602bcc7de..490b9731068a 100644
+--- a/drivers/platform/surface/surface3-wmi.c
++++ b/drivers/platform/surface/surface3-wmi.c
+@@ -37,6 +37,13 @@ static const struct dmi_system_id surface3_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
+index c219b840d491..69c4352e8406 100644
+--- a/drivers/platform/surface/surface_gpe.c
++++ b/drivers/platform/surface/surface_gpe.c
+@@ -41,6 +41,11 @@ static const struct property_entry lid_device_props_l4F[] = {
+ 	{},
+ };
+ 
++static const struct property_entry lid_device_props_l52[] = {
++	PROPERTY_ENTRY_U32("gpe", 0x52),
++	{},
++};
++
+ static const struct property_entry lid_device_props_l57[] = {
+ 	PROPERTY_ENTRY_U32("gpe", 0x57),
+ 	{},
+@@ -107,6 +112,18 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
+ 		},
+ 		.driver_data = (void *)lid_device_props_l4B,
+ 	},
++	{
++		/*
++		 * We match for SKU here due to product name clash with the ARM
++		 * version.
++		 */
++		.ident = "Surface Pro 9",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_9_2038"),
++		},
++		.driver_data = (void *)lid_device_props_l52,
++	},
+ 	{
+ 		.ident = "Surface Book 1",
+ 		.matches = {
+diff --git a/drivers/platform/surface/surfacebook1_dgpu_switch.c b/drivers/platform/surface/surfacebook1_dgpu_switch.c
+new file mode 100644
+index 000000000000..8b816ed8f35c
+--- /dev/null
++++ b/drivers/platform/surface/surfacebook1_dgpu_switch.c
+@@ -0,0 +1,162 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/acpi.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
++#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
++
++
++static const guid_t dgpu_sw_guid = GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4,
++	0x95, 0xed, 0xab, 0x16, 0x65, 0x49, 0x80, 0x35);
++
++#define DGPUSW_ACPI_PATH_DSM	"\\_SB_.PCI0.LPCB.EC0_.VGBI"
++#define DGPUSW_ACPI_PATH_HGON	"\\_SB_.PCI0.RP05.HGON"
++#define DGPUSW_ACPI_PATH_HGOF	"\\_SB_.PCI0.RP05.HGOF"
++
++
++static int sb1_dgpu_sw_dsmcall(void)
++{
++	union acpi_object *ret;
++	acpi_handle handle;
++	acpi_status status;
++
++	status = acpi_get_handle(NULL, DGPUSW_ACPI_PATH_DSM, &handle);
++	if (status)
++		return -EINVAL;
++
++	ret = acpi_evaluate_dsm_typed(handle, &dgpu_sw_guid, 1, 1, NULL, ACPI_TYPE_BUFFER);
++	if (!ret)
++		return -EINVAL;
++
++	ACPI_FREE(ret);
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgon(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGON, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGON: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-on dGPU via HGON\n");
++	return 0;
++}
++
++static int sb1_dgpu_sw_hgof(void)
++{
++	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
++	acpi_status status;
++
++	status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGOF, NULL, &buf);
++	if (status) {
++		pr_err("failed to run HGOF: %d\n", status);
++		return -EINVAL;
++	}
++
++	if (buf.pointer)
++		ACPI_FREE(buf.pointer);
++
++	pr_info("turned-off dGPU via HGOF\n");
++	return 0;
++}
++
++
++static ssize_t dgpu_dsmcall_store(struct device *dev, struct device_attribute *attr,
++				  const char *buf, size_t len)
++{
++	int status, value;
++
++	status = kstrtoint(buf, 0, &value);
++	if (status < 0)
++		return status;
++
++	if (value != 1)
++		return -EINVAL;
++
++	status = sb1_dgpu_sw_dsmcall();
++
++	return status < 0 ? status : len;
++}
++
++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
++				const char *buf, size_t len)
++{
++	bool power;
++	int status;
++
++	status = kstrtobool(buf, &power);
++	if (status < 0)
++		return status;
++
++	if (power)
++		status = sb1_dgpu_sw_hgon();
++	else
++		status = sb1_dgpu_sw_hgof();
++
++	return status < 0 ? status : len;
++}
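/*
 * Editor's note, not part of the patch: both attributes are write-only.
 * Assuming the usual ACPI platform-device naming, the switch would be
 * driven from userspace roughly like this (path hypothetical):
 *
 *	echo 1 > /sys/devices/platform/MSHW0041:00/dgpu_power	# power on
 *	echo off > /sys/devices/platform/MSHW0041:00/dgpu_power	# power off
 *
 * kstrtobool() accepts the spellings "1"/"0", "y"/"n" and "on"/"off".
 */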
++
++static DEVICE_ATTR_WO(dgpu_dsmcall);
++static DEVICE_ATTR_WO(dgpu_power);
++
++static struct attribute *sb1_dgpu_sw_attrs[] = {
++	&dev_attr_dgpu_dsmcall.attr,
++	&dev_attr_dgpu_power.attr,
++	NULL,
++};
++
++static const struct attribute_group sb1_dgpu_sw_attr_group = {
++	.attrs = sb1_dgpu_sw_attrs,
++};
++
++
++static int sb1_dgpu_sw_probe(struct platform_device *pdev)
++{
++	return sysfs_create_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++}
++
++static int sb1_dgpu_sw_remove(struct platform_device *pdev)
++{
++	sysfs_remove_group(&pdev->dev.kobj, &sb1_dgpu_sw_attr_group);
++	return 0;
++}
++
++/*
++ * The dGPU power seems to be actually handled by MSHW0040. However, that is
++ * also the power-/volume-button device with a mainline driver. So let's use
++ * MSHW0041 instead for now, which seems to be the LTCH (latch/DTX) device.
++ */
++static const struct acpi_device_id sb1_dgpu_sw_match[] = {
++	{ "MSHW0041", },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, sb1_dgpu_sw_match);
++
++static struct platform_driver sb1_dgpu_sw = {
++	.probe = sb1_dgpu_sw_probe,
++	.remove = sb1_dgpu_sw_remove,
++	.driver = {
++		.name = "surfacebook1_dgpu_switch",
++		.acpi_match_table = sb1_dgpu_sw_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(sb1_dgpu_sw);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Discrete GPU Power-Switch for Surface Book 1");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c
+index 2755601f979c..4240c98ca226 100644
+--- a/drivers/platform/surface/surfacepro3_button.c
++++ b/drivers/platform/surface/surfacepro3_button.c
+@@ -149,7 +149,8 @@ static int surface_button_resume(struct device *dev)
+ /*
+  * Surface Pro 4 and Surface Book 2 / Surface Pro 2017 use the same device
+  * ID (MSHW0040) for the power/volume buttons. Make sure this is the right
+- * device by checking for the _DSM method and OEM Platform Revision.
++ * device by checking for the _DSM method and OEM Platform Revision DSM
++ * function.
+  *
+  * Returns true if the driver should bind to this device, i.e. the device is
+  * either MSWH0028 (Pro 3) or MSHW0040 on a Pro 4 or Book 1.
+@@ -157,30 +158,11 @@ static int surface_button_resume(struct device *dev)
+ static bool surface_button_check_MSHW0040(struct acpi_device *dev)
+ {
+ 	acpi_handle handle = dev->handle;
+-	union acpi_object *result;
+-	u64 oem_platform_rev = 0; // valid revisions are nonzero
+-
+-	// get OEM platform revision
+-	result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
+-					 MSHW0040_DSM_REVISION,
+-					 MSHW0040_DSM_GET_OMPR,
+-					 NULL, ACPI_TYPE_INTEGER);
+-
+-	/*
+-	 * If evaluating the _DSM fails, the method is not present. This means
+-	 * that we have either MSHW0028 or MSHW0040 on Pro 4 or Book 1, so we
+-	 * should use this driver. We use revision 0 indicating it is
+-	 * unavailable.
+- */ +- +- if (result) { +- oem_platform_rev = result->integer.value; +- ACPI_FREE(result); +- } +- +- dev_dbg(&dev->dev, "OEM Platform Revision %llu\n", oem_platform_rev); + +- return oem_platform_rev == 0; ++ // make sure that OEM platform revision DSM call does not exist ++ return !acpi_check_dsm(handle, &MSHW0040_DSM_UUID, ++ MSHW0040_DSM_REVISION, ++ BIT(MSHW0040_DSM_GET_OMPR)); + } + + +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 934b3d997702..2c6604c6e8e1 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -220,6 +220,9 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Microsoft Surface Dock Ethernet (RTL8153 GigE) */ + { USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM }, + ++ /* Microsoft Surface Go 3 Type-Cover */ ++ { USB_DEVICE(0x045e, 0x09b5), .driver_info = USB_QUIRK_DELAY_INIT }, ++ + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/include/linux/pci.h b/include/linux/pci.h +index a5dda515fcd1..69f6fc707ae5 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -464,6 +464,7 @@ struct pci_dev { + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ + unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ + unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ ++ unsigned int no_shutdown:1; /* Do not touch device on shutdown */ + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; /* pci_enable_device has been called */ + +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c +index 7c7cbb6362ea..81a8ff40e86e 100644 +--- a/sound/soc/codecs/rt5645.c ++++ b/sound/soc/codecs/rt5645.c +@@ -3717,6 +3717,15 @@ static const struct dmi_system_id dmi_platform_data[] = { + }, + .driver_data = (void *)&intel_braswell_platform_data, + }, ++ { ++ .ident = "Microsoft Surface 3", ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), ++ DMI_MATCH(DMI_SYS_VENDOR, "OEMB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"), ++ }, ++ .driver_data = (void *)&intel_braswell_platform_data, ++ }, + { + /* + * Match for the GPDwin which unfortunately uses somewhat +diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +index 6beb00858c33..d82d77387a0a 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +@@ -27,6 +27,14 @@ static const struct dmi_system_id cht_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), + }, + }, ++ { ++ .callback = cht_surface_quirk_cb, ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), ++ DMI_MATCH(DMI_SYS_VENDOR, "OEMB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"), ++ }, ++ }, + { } + }; + +-- +2.40.0 + diff --git a/6.3/0011-zram.patch b/6.3/0012-zram.patch similarity index 99% rename from 6.3/0011-zram.patch rename to 6.3/0012-zram.patch index 98b45049..eba898a7 100644 --- a/6.3/0011-zram.patch +++ b/6.3/0012-zram.patch @@ -1,7 +1,7 @@ -From 6ba6433df40db59f2d25083d7ddbf7fad065825e Mon Sep 17 00:00:00 2001 +From dfd409a7ff5e935c78a89d4da47f6fe4f5c41cda Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:38:04 +0200 -Subject: [PATCH 11/12] zram +Date: Sat, 22 Apr 2023 11:46:32 +0200 +Subject: [PATCH 12/13] zram Signed-off-by: Peter Jung --- diff --git a/6.3/0012-zstd-import-1.5.5.patch b/6.3/0013-zstd-import-1.5.5.patch similarity index 99% rename from 
6.3/0012-zstd-import-1.5.5.patch rename to 6.3/0013-zstd-import-1.5.5.patch index 97d34265..cc1de5e7 100644 --- a/6.3/0012-zstd-import-1.5.5.patch +++ b/6.3/0013-zstd-import-1.5.5.patch @@ -1,7 +1,7 @@ -From 56a4ed844bc68f1920a53b06740409e03f47f847 Mon Sep 17 00:00:00 2001 +From 3e19b1053d5a2324f7255db192c3e520c4d33495 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:38:21 +0200 -Subject: [PATCH 12/12] zstd: import 1.5.5 +Date: Sat, 22 Apr 2023 11:46:46 +0200 +Subject: [PATCH 13/13] zstd: import 1.5.5 Signed-off-by: Peter Jung --- diff --git a/6.3/all/0001-cachyos-base-all.patch b/6.3/all/0001-cachyos-base-all.patch index 398d75f5..3c3c5cdf 100644 --- a/6.3/all/0001-cachyos-base-all.patch +++ b/6.3/all/0001-cachyos-base-all.patch @@ -1,7 +1,7 @@ -From 47998571042386996a5fb55be493e9927f594f76 Mon Sep 17 00:00:00 2001 +From 9b7414fdd16442c2efd67e1e34588caf4c780f1e Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 6 Mar 2023 18:43:03 +0100 -Subject: [PATCH 01/12] bbr2 +Subject: [PATCH 01/13] bbr2 Signed-off-by: Peter Jung --- @@ -3283,10 +3283,10 @@ index cb79127f45c3..70e4de876a7f 100644 -- 2.40.0 -From efd83b97f8daac5950de449458be133718d6a8c7 Mon Sep 17 00:00:00 2001 +From 3598b17e783ddfa1ab4dbe8c1867f1be9e3a1566 Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Mon, 17 Apr 2023 18:21:50 +0200 -Subject: [PATCH 02/12] bfq +Subject: [PATCH 02/13] bfq Signed-off-by: Peter Jung --- @@ -3329,71 +3329,73 @@ index d9ed3108c17a..66146bbcd4af 100644 -- 2.40.0 -From d9ae45f25bc334f2258284f5a509bd2c71cca8a1 Mon Sep 17 00:00:00 2001 +From 1b79bdbcce300d92a09023d487cc7445c9665a17 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:22:10 +0200 -Subject: [PATCH 03/12] cachy +Date: Sat, 22 Apr 2023 11:43:07 +0200 +Subject: [PATCH 03/13] cachy Signed-off-by: Peter Jung --- - .gitignore | 1 + - .../admin-guide/kernel-parameters.txt | 9 + - Documentation/dontdiff | 1 + - Makefile | 8 +- - arch/arc/configs/axs101_defconfig | 1 + - arch/arc/configs/axs103_defconfig | 1 + - arch/arc/configs/axs103_smp_defconfig | 1 + - arch/arc/configs/haps_hs_defconfig | 1 + - arch/arc/configs/haps_hs_smp_defconfig | 1 + - arch/arc/configs/hsdk_defconfig | 1 + - arch/arc/configs/nsim_700_defconfig | 1 + - arch/arc/configs/nsimosci_defconfig | 1 + - arch/arc/configs/nsimosci_hs_defconfig | 1 + - arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + - arch/arc/configs/tb10x_defconfig | 1 + - arch/arc/configs/vdk_hs38_defconfig | 1 + - arch/arc/configs/vdk_hs38_smp_defconfig | 1 + - arch/x86/Kconfig.cpu | 416 ++++++++++- - arch/x86/Makefile | 45 +- - arch/x86/Makefile.postlink | 41 ++ - arch/x86/boot/compressed/.gitignore | 1 - - arch/x86/boot/compressed/Makefile | 10 +- - arch/x86/include/asm/vermagic.h | 72 ++ - drivers/Makefile | 15 +- - drivers/i2c/busses/Kconfig | 9 + - drivers/i2c/busses/Makefile | 1 + - drivers/i2c/busses/i2c-nct6775.c | 647 ++++++++++++++++++ - drivers/i2c/busses/i2c-piix4.c | 4 +- - drivers/md/dm-crypt.c | 5 + - drivers/pci/quirks.c | 101 +++ - drivers/platform/x86/Kconfig | 14 + - drivers/platform/x86/Makefile | 3 + - drivers/platform/x86/steamdeck.c | 523 ++++++++++++++ - include/linux/pagemap.h | 2 +- - include/linux/user_namespace.h | 4 + - include/net/netns/ipv4.h | 1 + - include/trace/events/tcp.h | 7 + - init/Kconfig | 39 ++ - kernel/Kconfig.hz | 24 + - kernel/fork.c | 14 + - kernel/module/Kconfig | 25 + - kernel/sched/fair.c | 20 +- - kernel/sysctl.c | 12 + - kernel/user_namespace.c | 7 + - mm/Kconfig | 2 +- - mm/compaction.c | 4 + - mm/page-writeback.c | 8 + - 
mm/swap.c | 5 + - mm/vmpressure.c | 4 + - mm/vmscan.c | 8 + - net/ipv4/sysctl_net_ipv4.c | 7 + - net/ipv4/tcp_input.c | 36 + - net/ipv4/tcp_ipv4.c | 2 + - scripts/Makefile.lib | 13 +- - scripts/Makefile.modinst | 7 +- - 55 files changed, 2144 insertions(+), 46 deletions(-) + .gitignore | 1 + + .../admin-guide/kernel-parameters.txt | 9 + + Documentation/dontdiff | 1 + + Makefile | 8 +- + arch/arc/configs/axs101_defconfig | 1 + + arch/arc/configs/axs103_defconfig | 1 + + arch/arc/configs/axs103_smp_defconfig | 1 + + arch/arc/configs/haps_hs_defconfig | 1 + + arch/arc/configs/haps_hs_smp_defconfig | 1 + + arch/arc/configs/hsdk_defconfig | 1 + + arch/arc/configs/nsim_700_defconfig | 1 + + arch/arc/configs/nsimosci_defconfig | 1 + + arch/arc/configs/nsimosci_hs_defconfig | 1 + + arch/arc/configs/nsimosci_hs_smp_defconfig | 1 + + arch/arc/configs/tb10x_defconfig | 1 + + arch/arc/configs/vdk_hs38_defconfig | 1 + + arch/arc/configs/vdk_hs38_smp_defconfig | 1 + + arch/x86/Kconfig.cpu | 416 ++- + arch/x86/Makefile | 45 +- + arch/x86/Makefile.postlink | 41 + + arch/x86/boot/compressed/.gitignore | 1 - + arch/x86/boot/compressed/Makefile | 10 +- + arch/x86/include/asm/vermagic.h | 72 + + drivers/Makefile | 15 +- + drivers/i2c/busses/Kconfig | 9 + + drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/i2c-nct6775.c | 647 ++++ + drivers/i2c/busses/i2c-piix4.c | 4 +- + drivers/md/dm-crypt.c | 5 + + drivers/pci/quirks.c | 101 + + drivers/platform/x86/Kconfig | 24 + + drivers/platform/x86/Makefile | 4 + + drivers/platform/x86/legion-laptop.c | 2783 +++++++++++++++++ + drivers/platform/x86/steamdeck.c | 523 ++++ + include/linux/pagemap.h | 2 +- + include/linux/user_namespace.h | 4 + + include/net/netns/ipv4.h | 1 + + include/trace/events/tcp.h | 7 + + init/Kconfig | 39 + + kernel/Kconfig.hz | 24 + + kernel/fork.c | 14 + + kernel/module/Kconfig | 25 + + kernel/sched/fair.c | 20 +- + kernel/sysctl.c | 12 + + kernel/user_namespace.c | 7 + + mm/Kconfig | 2 +- + mm/compaction.c | 4 + + mm/page-writeback.c | 8 + + mm/swap.c | 5 + + mm/vmpressure.c | 4 + + mm/vmscan.c | 8 + + net/ipv4/sysctl_net_ipv4.c | 7 + + net/ipv4/tcp_input.c | 36 + + net/ipv4/tcp_ipv4.c | 2 + + scripts/Makefile.lib | 13 +- + scripts/Makefile.modinst | 7 +- + 56 files changed, 4938 insertions(+), 46 deletions(-) create mode 100644 arch/x86/Makefile.postlink create mode 100644 drivers/i2c/busses/i2c-nct6775.c + create mode 100644 drivers/platform/x86/legion-laptop.c create mode 100644 drivers/platform/x86/steamdeck.c diff --git a/.gitignore b/.gitignore @@ -5253,10 +5255,27 @@ index 44cab813bf95..25edf55de985 100644 }; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig -index 4a01b315e0a9..e9ddf76b8b57 100644 +index 4a01b315e0a9..e4a6c31a80df 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig -@@ -1099,6 +1099,20 @@ config WINMATE_FM07_KEYS +@@ -641,6 +641,16 @@ config THINKPAD_LMI + To compile this driver as a module, choose M here: the module will + be called think-lmi. + ++config LEGION_LAPTOP ++ tristate "Lenovo Legion Laptop Extras" ++ depends on ACPI ++ depends on ACPI_WMI || ACPI_WMI = n ++ depends on HWMON || HWMON = n ++ select ACPI_PLATFORM_PROFILE ++ help ++ This is a driver for Lenovo Legion laptops and contains drivers for ++ hotkey, fan control, and power mode. ++ + source "drivers/platform/x86/intel/Kconfig" + + config MSI_LAPTOP +@@ -1099,6 +1109,20 @@ config WINMATE_FM07_KEYS buttons below the display. 
This module adds an input device that delivers key events when these buttons are pressed. @@ -5278,11825 +5297,20684 @@ index 4a01b315e0a9..e9ddf76b8b57 100644 config P2SB diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile -index 1d3d1b02541b..75b30a3face9 100644 +index 1d3d1b02541b..fde9a683103e 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile -@@ -134,3 +134,6 @@ obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o +@@ -66,6 +66,7 @@ obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o + obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o + obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o + obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o ++obj-$(CONFIG_LEGION_LAPTOP) += legion-laptop.o + + # Intel + obj-y += intel/ +@@ -134,3 +135,6 @@ obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o # Winmate obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o + +# Steam Deck +obj-$(CONFIG_STEAMDECK) += steamdeck.o -diff --git a/drivers/platform/x86/steamdeck.c b/drivers/platform/x86/steamdeck.c +diff --git a/drivers/platform/x86/legion-laptop.c b/drivers/platform/x86/legion-laptop.c new file mode 100644 -index 000000000000..77a6677ec19e +index 000000000000..d1268d239cc5 --- /dev/null -+++ b/drivers/platform/x86/steamdeck.c -@@ -0,0 +1,523 @@ -+// SPDX-License-Identifier: GPL-2.0+ -+ ++++ b/drivers/platform/x86/legion-laptop.c +@@ -0,0 +1,2783 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* -+ * Steam Deck ACPI platform driver ++ * legion-laptop.c - Extra Lenovo Legion laptop support, in ++ * particular for fan curve control and power mode. + * -+ * Copyright (C) 2021-2022 Valve Corporation ++ * Copyright (C) 2022 johnfan ++ * ++ * ++ * This driver might work on other Lenovo Legion models. If you ++ * want to try it you can pass force=1 as argument ++ * to the module which will force it to load even when the DMI ++ * data doesn't match the model AND FIRMWARE. ++ * ++ * Support for other hardware of this model is already partially ++ * provided by the module ideapd-laptop. ++ * ++ * The development page for this driver is located at ++ * https://github.com/johnfanv2/LenovoLegionLinux + * ++ * This driver exports the files: ++ * - /sys/kernel/debug/legion/fancurve (ro) ++ * The fan curve in the form stored in the firmware in an ++ * human readable table. ++ * ++ * - /sys/module/legion_laptop/drivers/platform\:legion/PNP0C09\:00/powermode (rw) ++ * 0: balanced mode (white) ++ * 1: performance mode (red) ++ * 2: quiet mode (blue) ++ * ?: custom mode (pink) ++ * ++ * NOTE: Writing to this will load the default fan curve from ++ * the firmware for this mode, so the fan curve might ++ * have to be reconfigured if needed. ++ * ++ * It implements the usual hwmon interface to monitor fan speed and temmperature ++ * and allows to set the fan curve inside the firware. ++ * ++ * - /sys/class/hwmon/X/fan1_input or /sys/class/hwmon/X/fan2_input (ro) ++ * Current fan speed of fan1/fan2. ++ * - /sys/class/hwmon/X/temp1_input (ro) ++ * - /sys/class/hwmon/X/temp2_input (ro) ++ * - /sys/class/hwmon/X/temp3_input (ro) ++ * Temperature (Celsius) of CPU, GPU, and IC used for fan control. ++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_pwm (rw) ++ * PWM (0-255) of the fan at the Y-level in the fan curve ++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_temp (rw) ++ * upper temperature of tempZ (CPU, GPU, or IC) at the Y-level in the fan curve ++ * - /sys/class/hwmon/X/pwmY_auto_pointZ_temp_hyst (rw) ++ * hysteris (CPU, GPU, or IC) at the Y-level in the fan curve. 
The lower ++ * temperatue of the level is the upper temperature minus the hysteris ++ * ++ * ++ * Credits for reverse engineering the firmware to: ++ * - David Woodhouse: heavily inspired by lenovo_laptop.c ++ * - Luke Cama: Windows version "LegionFanControl" ++ * - SmokelessCPU: reverse engineering of custom registers in EC ++ * and commincation method with EC via ports ++ * - 0x1F9F1: additional reverse engineering for complete fan curve + */ ++ +#include ++#include ++#include ++#include ++#include +#include ++#include ++#include ++#include ++#include +#include -+#include -+#include ++#include ++#include ++#include + -+#define ACPI_STEAMDECK_NOTIFY_STATUS 0x80 ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("johnfan"); ++MODULE_DESCRIPTION("Lenovo Legion laptop extras"); + -+/* 0 - port connected, 1 -port disconnected */ -+#define ACPI_STEAMDECK_PORT_CONNECT BIT(0) -+/* 0 - Upstream Facing Port, 1 - Downdstream Facing Port */ -+#define ACPI_STEAMDECK_CUR_DATA_ROLE BIT(3) -+/* -+ * Debouncing delay to allow negotiation process to settle. 2s value -+ * was arrived at via trial and error. -+ */ -+#define STEAMDECK_ROLE_SWITCH_DELAY (msecs_to_jiffies(2000)) ++static bool force; ++module_param(force, bool, 0440); ++MODULE_PARM_DESC( ++ force, ++ "Force loading this module even if model or BIOS does not match."); + -+struct steamdeck { -+ struct acpi_device *adev; -+ struct device *hwmon; -+ void *regmap; -+ long fan_target; -+ struct delayed_work role_work; -+ struct extcon_dev *edev; -+ struct device *dev; -+}; ++static bool ec_readonly; ++module_param(ec_readonly, bool, 0440); ++MODULE_PARM_DESC( ++ ec_readonly, ++ "Only read from embedded controller but do not write or change settings."); + -+static ssize_t -+steamdeck_simple_store(struct device *dev, const char *buf, size_t count, -+ const char *method, -+ unsigned long upper_limit) -+{ -+ struct steamdeck *fan = dev_get_drvdata(dev); -+ unsigned long value; ++#define LEGIONFEATURES \ ++ "fancurve powermode platformprofile platformprofilenotify minifancurve" + -+ if (kstrtoul(buf, 10, &value) || value >= upper_limit) -+ return -EINVAL; ++//Size of fancurve stored in embedded controller ++#define MAXFANCURVESIZE 10 + -+ if (ACPI_FAILURE(acpi_execute_simple_method(fan->adev->handle, -+ (char *)method, value))) -+ return -EIO; ++#define LEGION_DRVR_SHORTNAME "legion" ++#define LEGION_HWMON_NAME LEGION_DRVR_SHORTNAME "_hwmon" + -+ return count; -+} ++/* =============================== */ ++/* Embedded Controller Description */ ++/* =============================== */ + -+#define STEAMDECK_ATTR_WO(_name, _method, _upper_limit) \ -+ static ssize_t _name##_store(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+ { \ -+ return steamdeck_simple_store(dev, buf, count, \ -+ _method, \ -+ _upper_limit); \ -+ } \ -+ static DEVICE_ATTR_WO(_name) ++/* The configuration and registers to access the embedded controller ++ * depending on different the version of the software on the ++ * embedded controller or and the BIOS/UEFI firmware. ++ * ++ * To control fan curve in the embedded controller (EC) one has to ++ * write to its "RAM". There are different possibilities: ++ * - EC RAM is memory mapped (write to it with ioremap) ++ * - access EC RAM via ported mapped IO (outb/inb) ++ * - access EC RAM via ACPI methods. It is only possible to write ++ * to part of it (first 0xFF bytes?) ++ * ++ * In later models the firmware directly exposes ACPI methods to ++ * set the fan curve direclty, without writing to EC RAM. 
This ++ * is done inside the ACPI method. ++ */ + -+STEAMDECK_ATTR_WO(target_cpu_temp, "STCT", U8_MAX / 2); -+STEAMDECK_ATTR_WO(gain, "SGAN", U16_MAX); -+STEAMDECK_ATTR_WO(ramp_rate, "SFRR", U8_MAX); -+STEAMDECK_ATTR_WO(hysteresis, "SHTS", U16_MAX); -+STEAMDECK_ATTR_WO(maximum_battery_charge_rate, "CHGR", U16_MAX); -+STEAMDECK_ATTR_WO(recalculate, "SCHG", U16_MAX); ++/** ++ * Offsets for interseting values inside the EC RAM (0 = start of ++ * EC RAM. These might change depending on the software inside of ++ * the EC, which can be updated by a BIOS update from Lenovo. ++ */ ++// TODO: same order as in initialization ++struct ec_register_offsets { ++ // Super I/O Configuration Registers ++ // 7.15 General Control (GCTRL) ++ // General Control (GCTRL) ++ // (see EC Interface Registers and 6.2 Plug and Play Configuration (PNPCFG)) in datasheet ++ // note: these are in two places saved ++ // in EC Interface Registers and in super io configuraion registers ++ // Chip ID ++ u16 ECHIPID1; ++ u16 ECHIPID2; ++ // Chip Version ++ u16 ECHIPVER; ++ u16 ECDEBUG; ++ ++ // Lenovo Custom OEM extension ++ // Firmware of ITE can be extended by ++ // custom program using its own "variables" ++ // These are the offsets to these "variables" ++ u16 EXT_FAN_CUR_POINT; ++ u16 EXT_FAN_POINTS_SIZE; ++ u16 EXT_FAN1_BASE; ++ u16 EXT_FAN2_BASE; ++ u16 EXT_FAN_ACC_BASE; ++ u16 EXT_FAN_DEC_BASE; ++ u16 EXT_CPU_TEMP; ++ u16 EXT_CPU_TEMP_HYST; ++ u16 EXT_GPU_TEMP; ++ u16 EXT_GPU_TEMP_HYST; ++ u16 EXT_VRM_TEMP; ++ u16 EXT_VRM_TEMP_HYST; ++ u16 EXT_FAN1_RPM_LSB; ++ u16 EXT_FAN1_RPM_MSB; ++ u16 EXT_FAN2_RPM_LSB; ++ u16 EXT_FAN2_RPM_MSB; ++ u16 EXT_FAN1_TARGET_RPM; ++ u16 EXT_FAN2_TARGET_RPM; ++ u16 EXT_POWERMODE; ++ u16 EXT_MINIFANCURVE_ON_COOL; ++ // values ++ // 0x04: enable mini fan curve if very long on cool level ++ // - this might be due to potential temp failure ++ // - or just because really so cool ++ // 0xA0: disable it ++ u16 EXT_LOCKFANCONTROLLER; ++ u16 EXT_MAXIMUMFANSPEED; ++ u16 EXT_WHITE_KEYBOARD_BACKLIGHT; ++ u16 EXT_IC_TEMP_INPUT; ++ u16 EXT_CPU_TEMP_INPUT; ++ u16 EXT_GPU_TEMP_INPUT; ++}; + -+STEAMDECK_ATTR_WO(led_brightness, "CHBV", U8_MAX); -+STEAMDECK_ATTR_WO(content_adaptive_brightness, "CABC", U8_MAX); -+STEAMDECK_ATTR_WO(gamma_set, "GAMA", U8_MAX); -+STEAMDECK_ATTR_WO(display_brightness, "WDBV", U8_MAX); -+STEAMDECK_ATTR_WO(ctrl_display, "WCDV", U8_MAX); -+STEAMDECK_ATTR_WO(cabc_minimum_brightness, "WCMB", U8_MAX); -+STEAMDECK_ATTR_WO(memory_data_access_control, "MDAC", U8_MAX); ++struct model_config { ++ const struct ec_register_offsets *registers; ++ bool check_embedded_controller_id; ++ u16 embedded_controller_id; + -+#define STEAMDECK_ATTR_WO_NOARG(_name, _method) \ -+ static ssize_t _name##_store(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+ { \ -+ struct steamdeck *fan = dev_get_drvdata(dev); \ -+ \ -+ if (ACPI_FAILURE(acpi_evaluate_object(fan->adev->handle, \ -+ _method, NULL, NULL))) \ -+ return -EIO; \ -+ \ -+ return count; \ -+ } \ -+ static DEVICE_ATTR_WO(_name) ++ // first addr in EC we access/scan ++ phys_addr_t memoryio_physical_ec_start; ++ size_t memoryio_size; + -+STEAMDECK_ATTR_WO_NOARG(power_cycle_display, "DPCY"); -+STEAMDECK_ATTR_WO_NOARG(display_normal_mode_on, "NORO"); -+STEAMDECK_ATTR_WO_NOARG(display_inversion_off, "INOF"); -+STEAMDECK_ATTR_WO_NOARG(display_inversion_on, "INON"); -+STEAMDECK_ATTR_WO_NOARG(idle_mode_on, "WRNE"); ++ // TODO: maybe use bitfield ++ bool has_minifancurve; ++}; + -+#define STEAMDECK_ATTR_RO(_name, 
_method) \ -+ static ssize_t _name##_show(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+ { \ -+ struct steamdeck *jup = dev_get_drvdata(dev); \ -+ unsigned long long val; \ -+ \ -+ if (ACPI_FAILURE(acpi_evaluate_integer( \ -+ jup->adev->handle, \ -+ _method, NULL, &val))) \ -+ return -EIO; \ -+ \ -+ return sprintf(buf, "%llu\n", val); \ -+ } \ -+ static DEVICE_ATTR_RO(_name) ++/* =================================== */ ++/* Coinfiguration for different models */ ++/* =================================== */ ++ ++// Idea by SmokelesssCPU (modified) ++// - all default names and register addresses are supported by datasheet ++// - register addresses for custom firmware by SmokelesssCPU ++static const struct ec_register_offsets ec_register_offsets_v0 = { ++ .ECHIPID1 = 0x2000, ++ .ECHIPID2 = 0x2001, ++ .ECHIPVER = 0x2002, ++ .ECDEBUG = 0x2003, ++ .EXT_FAN_CUR_POINT = 0xC534, ++ .EXT_FAN_POINTS_SIZE = 0xC535, ++ .EXT_FAN1_BASE = 0xC540, ++ .EXT_FAN2_BASE = 0xC550, ++ .EXT_FAN_ACC_BASE = 0xC560, ++ .EXT_FAN_DEC_BASE = 0xC570, ++ .EXT_CPU_TEMP = 0xC580, ++ .EXT_CPU_TEMP_HYST = 0xC590, ++ .EXT_GPU_TEMP = 0xC5A0, ++ .EXT_GPU_TEMP_HYST = 0xC5B0, ++ .EXT_VRM_TEMP = 0xC5C0, ++ .EXT_VRM_TEMP_HYST = 0xC5D0, ++ .EXT_FAN1_RPM_LSB = 0xC5E0, ++ .EXT_FAN1_RPM_MSB = 0xC5E1, ++ .EXT_FAN2_RPM_LSB = 0xC5E2, ++ .EXT_FAN2_RPM_MSB = 0xC5E3, ++ .EXT_MINIFANCURVE_ON_COOL = 0xC536, ++ .EXT_LOCKFANCONTROLLER = 0xc4AB, ++ .EXT_CPU_TEMP_INPUT = 0xc538, ++ .EXT_GPU_TEMP_INPUT = 0xc539, ++ .EXT_IC_TEMP_INPUT = 0xC5E8, ++ .EXT_POWERMODE = 0xc420, ++ .EXT_FAN1_TARGET_RPM = 0xc600, ++ .EXT_FAN2_TARGET_RPM = 0xc601, ++ .EXT_MAXIMUMFANSPEED = 0xBD, ++ .EXT_WHITE_KEYBOARD_BACKLIGHT = (0x3B + 0xC400) ++}; + -+STEAMDECK_ATTR_RO(firmware_version, "PDFW"); -+STEAMDECK_ATTR_RO(board_id, "BOID"); -+STEAMDECK_ATTR_RO(pdcs, "PDCS"); ++static const struct model_config model_v0 = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = true, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, ++ .memoryio_size = 0x300, ++ .has_minifancurve = true ++}; + -+static umode_t -+steamdeck_is_visible(struct kobject *kobj, struct attribute *attr, int index) -+{ -+ return attr->mode; -+} ++static const struct model_config model_kfcn = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = true, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; + -+static struct attribute *steamdeck_attributes[] = { -+ &dev_attr_target_cpu_temp.attr, -+ &dev_attr_gain.attr, -+ &dev_attr_ramp_rate.attr, -+ &dev_attr_hysteresis.attr, -+ &dev_attr_maximum_battery_charge_rate.attr, -+ &dev_attr_recalculate.attr, -+ &dev_attr_power_cycle_display.attr, ++static const struct model_config model_hacn = { ++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = false, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; + -+ &dev_attr_led_brightness.attr, -+ &dev_attr_content_adaptive_brightness.attr, -+ &dev_attr_gamma_set.attr, -+ &dev_attr_display_brightness.attr, -+ &dev_attr_ctrl_display.attr, -+ &dev_attr_cabc_minimum_brightness.attr, -+ &dev_attr_memory_data_access_control.attr, + -+ &dev_attr_display_normal_mode_on.attr, -+ &dev_attr_display_inversion_off.attr, -+ &dev_attr_display_inversion_on.attr, -+ &dev_attr_idle_mode_on.attr, ++static const struct model_config model_k9cn = { 
++ .registers = &ec_register_offsets_v0, ++ .check_embedded_controller_id = false, ++ .embedded_controller_id = 0x8227, ++ .memoryio_physical_ec_start = 0xC400, // or replace 0xC400 by 0x0400 ? ++ .memoryio_size = 0x300, ++ .has_minifancurve = false ++}; + -+ &dev_attr_firmware_version.attr, -+ &dev_attr_board_id.attr, -+ &dev_attr_pdcs.attr, + -+ NULL -+}; + -+static const struct attribute_group steamdeck_group = { -+ .attrs = steamdeck_attributes, -+ .is_visible = steamdeck_is_visible, -+}; ++static const struct dmi_system_id denylist[] = { {} }; + -+static const struct attribute_group *steamdeck_groups[] = { -+ &steamdeck_group, -+ NULL ++static const struct dmi_system_id optimistic_allowlist[] = { ++ { ++ // modelyear: 2021 ++ // generation: 6 ++ // name: Legion 5, Legion 5 pro, Legion 7 ++ // Family: Legion 5 15ACH6H, ... ++ .ident = "GKCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "GKCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "EUCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "EUCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "EFCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "EFCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2020 ++ .ident = "FSCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "FSCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "HHCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "HHCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "H1CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "H1CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "J2CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "J2CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "JUCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "JUCN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "KFCN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "KFCN"), ++ }, ++ .driver_data = (void *)&model_kfcn ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "HACN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "HACN"), ++ }, ++ .driver_data = (void *)&model_hacn ++ }, ++ { ++ // modelyear: 2021 ++ .ident = "G9CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "G9CN"), ++ }, ++ .driver_data = (void *)&model_v0 ++ }, ++ { ++ // modelyear: 2022 ++ .ident = "K9CN", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_BIOS_VERSION, "K9CN"), ++ }, ++ .driver_data = (void *)&model_k9cn ++ }, ++ {} +}; + -+static int steamdeck_read_fan_speed(struct steamdeck *jup, long *speed) ++/* ================================= */ ++/* ACPI access */ ++/* ================================= */ ++ ++// function from ideapad-laptop.c ++static int eval_int(acpi_handle handle, const char *name, unsigned long *res) +{ -+ unsigned long long val; ++ unsigned long long result; ++ acpi_status status; + -+ if 
(ACPI_FAILURE(acpi_evaluate_integer(jup->adev->handle, -+ "FANR", NULL, &val))) ++ status = acpi_evaluate_integer(handle, (char *)name, NULL, &result); ++ if (ACPI_FAILURE(status)) + return -EIO; + -+ *speed = val; ++ *res = result; ++ + return 0; +} + -+static int -+steamdeck_hwmon_read(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, long *out) ++// function from ideapad-laptop.c ++static int exec_simple_method(acpi_handle handle, const char *name, ++ unsigned long arg) +{ -+ struct steamdeck *sd = dev_get_drvdata(dev); -+ unsigned long long val; -+ -+ switch (type) { -+ case hwmon_temp: -+ if (attr != hwmon_temp_input) -+ return -EOPNOTSUPP; ++ acpi_status status = ++ acpi_execute_simple_method(handle, (char *)name, arg); + -+ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, -+ "BATT", NULL, &val))) -+ return -EIO; -+ /* -+ * Assuming BATT returns deg C we need to mutiply it -+ * by 1000 to convert to mC -+ */ -+ *out = val * 1000; -+ break; -+ case hwmon_fan: -+ switch (attr) { -+ case hwmon_fan_input: -+ return steamdeck_read_fan_speed(sd, out); -+ case hwmon_fan_target: -+ *out = sd->fan_target; -+ break; -+ case hwmon_fan_fault: -+ if (ACPI_FAILURE(acpi_evaluate_integer( -+ sd->adev->handle, -+ "FANC", NULL, &val))) -+ return -EIO; -+ /* -+ * FANC (Fan check): -+ * 0: Abnormal -+ * 1: Normal -+ */ -+ *out = !val; -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } ++ return ACPI_FAILURE(status) ? -EIO : 0; ++} + -+ return 0; ++// function from ideapad-laptop.c ++static int exec_sbmc(acpi_handle handle, unsigned long arg) ++{ ++ // \_SB.PCI0.LPC0.EC0.VPC0.SBMC ++ return exec_simple_method(handle, "SBMC", arg); +} + -+static int -+steamdeck_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, const char **str) ++static int eval_qcho(acpi_handle handle, unsigned long *res) +{ -+ switch (type) { -+ case hwmon_temp: -+ *str = "Battery Temp"; -+ break; -+ case hwmon_fan: -+ *str = "System Fan"; -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } ++ // \_SB.PCI0.LPC0.EC0.QCHO ++ return eval_int(handle, "QCHO", res); ++} ++ ++/* ================================= */ ++/* EC RAM Access with port-mapped IO */ ++/* ================================= */ ++ ++/* ++ * See datasheet of e.g. IT8502E/F/G, e.g. ++ * 6.2 Plug and Play Configuration (PNPCFG) ++ * ++ * Depending on configured BARDSEL register ++ * the ports ++ * ECRAM_PORTIO_ADDR_PORT and ++ * ECRAM_PORTIO_DATA_PORT ++ * are configured. ++ * ++ * By performing IO on these ports one can ++ * read/write to registers in the EC. ++ * ++ * "To access a register of PNPCFG, write target index to ++ * address port and access this PNPCFG register via ++ * data port" [datasheet, 6.2 Plug and Play Configuration] ++ */ ++ ++// IO ports used to write to communicate with embedded controller ++// Start of used ports ++#define ECRAM_PORTIO_START_PORT 0x4E ++// Number of used ports ++#define ECRAM_PORTIO_PORTS_SIZE 2 ++// Port used to specify address in EC RAM to read/write ++// 0x4E/0x4F is the usual port for IO super controler ++// 0x2E/0x2F also common (ITE can also be configure to use these) ++#define ECRAM_PORTIO_ADDR_PORT 0x4E ++// Port to send/receive the value to write/read ++#define ECRAM_PORTIO_DATA_PORT 0x4F ++// Name used to request ports ++#define ECRAM_PORTIO_NAME "legion" ++ ++struct ecram_portio { ++ /* protects read/write to EC RAM performed ++ * as a certain sequence of outb, inb ++ * commands on the IO ports. 
There can ++ * be at most one. ++ */ ++ struct mutex io_port_mutex; ++}; + ++ssize_t ecram_portio_init(struct ecram_portio *ec_portio) ++{ ++ if (!request_region(ECRAM_PORTIO_START_PORT, ECRAM_PORTIO_PORTS_SIZE, ++ ECRAM_PORTIO_NAME)) { ++ pr_info("Cannot init ecram_portio the %x ports starting at %x\n", ++ ECRAM_PORTIO_PORTS_SIZE, ECRAM_PORTIO_START_PORT); ++ return -ENODEV; ++ } ++ //pr_info("Reserved %x ports starting at %x\n", ECRAM_PORTIO_PORTS_SIZE, ECRAM_PORTIO_START_PORT); ++ mutex_init(&ec_portio->io_port_mutex); + return 0; +} + -+static int -+steamdeck_hwmon_write(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, long val) ++void ecram_portio_exit(struct ecram_portio *ec_portio) +{ -+ struct steamdeck *sd = dev_get_drvdata(dev); ++ release_region(ECRAM_PORTIO_START_PORT, ECRAM_PORTIO_PORTS_SIZE); ++} + -+ if (type != hwmon_fan || -+ attr != hwmon_fan_target) -+ return -EOPNOTSUPP; ++/* Read a byte from the EC RAM. ++ * ++ * Return status because of commong signature for alle ++ * methods to access EC RAM. ++ */ ++ssize_t ecram_portio_read(struct ecram_portio *ec_portio, u16 offset, u8 *value) ++{ ++ mutex_lock(&ec_portio->io_port_mutex); + -+ if (val > U16_MAX) -+ return -EINVAL; ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x11, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ // TODO: no explicit cast between types seems to be sometimes ++ // done and sometimes not ++ outb((u8)((offset >> 8) & 0xFF), ECRAM_PORTIO_DATA_PORT); + -+ sd->fan_target = val; ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x10, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ outb((u8)(offset & 0xFF), ECRAM_PORTIO_DATA_PORT); + -+ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, -+ "FANS", val))) -+ return -EIO; ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x12, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ *value = inb(ECRAM_PORTIO_DATA_PORT); + ++ mutex_unlock(&ec_portio->io_port_mutex); + return 0; +} + -+static umode_t -+steamdeck_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, -+ u32 attr, int channel) ++/* Write a byte to the EC RAM. ++ * ++ * Return status because of commong signature for alle ++ * methods to access EC RAM. 
++ */ ++ssize_t ecram_portio_write(struct ecram_portio *ec_portio, u16 offset, u8 value) +{ -+ if (type == hwmon_fan && -+ attr == hwmon_fan_target) -+ return 0644; ++ mutex_lock(&ec_portio->io_port_mutex); + -+ return 0444; -+} ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x11, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ // TODO: no explicit cast between types seems to be sometimes ++ // done and sometimes not ++ outb((u8)((offset >> 8) & 0xFF), ECRAM_PORTIO_DATA_PORT); + -+static const struct hwmon_channel_info *steamdeck_info[] = { -+ HWMON_CHANNEL_INFO(temp, -+ HWMON_T_INPUT | HWMON_T_LABEL), -+ HWMON_CHANNEL_INFO(fan, -+ HWMON_F_INPUT | HWMON_F_LABEL | -+ HWMON_F_TARGET | HWMON_F_FAULT), -+ NULL -+}; ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x10, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ outb((u8)(offset & 0xFF), ECRAM_PORTIO_DATA_PORT); + -+static const struct hwmon_ops steamdeck_hwmon_ops = { -+ .is_visible = steamdeck_hwmon_is_visible, -+ .read = steamdeck_hwmon_read, -+ .read_string = steamdeck_hwmon_read_string, -+ .write = steamdeck_hwmon_write, -+}; ++ outb(0x2E, ECRAM_PORTIO_ADDR_PORT); ++ outb(0x12, ECRAM_PORTIO_DATA_PORT); ++ outb(0x2F, ECRAM_PORTIO_ADDR_PORT); ++ outb(value, ECRAM_PORTIO_DATA_PORT); + -+static const struct hwmon_chip_info steamdeck_chip_info = { -+ .ops = &steamdeck_hwmon_ops, -+ .info = steamdeck_info, -+}; ++ mutex_unlock(&ec_portio->io_port_mutex); ++ return 0; ++} + -+#define STEAMDECK_STA_OK \ -+ (ACPI_STA_DEVICE_ENABLED | \ -+ ACPI_STA_DEVICE_PRESENT | \ -+ ACPI_STA_DEVICE_FUNCTIONING) ++/* =================================== */ ++/* EC RAM Access */ ++/* =================================== */ + -+static int -+steamdeck_ddic_reg_read(void *context, unsigned int reg, unsigned int *val) -+{ -+ union acpi_object obj = { .type = ACPI_TYPE_INTEGER }; -+ struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, }; -+ struct steamdeck *sd = context; -+ unsigned long long _val; ++struct ecram { ++ struct ecram_portio portio; ++}; + -+ obj.integer.value = reg; ++ssize_t ecram_init(struct ecram *ecram, phys_addr_t memoryio_ec_physical_start, ++ size_t region_size) ++{ ++ ssize_t err; + -+ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, -+ "RDDI", &arg_list, &_val))) -+ return -EIO; ++ err = ecram_portio_init(&ecram->portio); ++ if (err) { ++ pr_info("Failed ecram_portio_init\n"); ++ goto err_ecram_portio_init; ++ } + -+ *val = _val; + return 0; ++ ++err_ecram_portio_init: ++ return err; +} + -+static int steamdeck_read_pdcs(struct steamdeck *sd, unsigned long long *pdcs) ++void ecram_exit(struct ecram *ecram) +{ -+ acpi_status status; ++ pr_info("Unloading legion ecram\n"); ++ ecram_portio_exit(&ecram->portio); ++ pr_info("Unloading legion ecram done\n"); ++} + -+ status = acpi_evaluate_integer(sd->adev->handle, "PDCS", NULL, pdcs); -+ if (ACPI_FAILURE(status)) { -+ dev_err(sd->dev, "PDCS evaluation failed: %s\n", -+ acpi_format_exception(status)); -+ return -EIO; -+ } ++/** ++ * ecram_offset address on the EC ++ */ ++static u8 ecram_read(struct ecram *ecram, u16 ecram_offset) ++{ ++ u8 value; ++ int err; + -+ return 0; ++ err = ecram_portio_read(&ecram->portio, ecram_offset, &value); ++ if (err) ++ pr_info("Error reading EC RAM at 0x%x\n", ecram_offset); ++ return value; +} + -+static void steamdeck_usb_role_work(struct work_struct *work) ++static void ecram_write(struct ecram *ecram, u16 ecram_offset, u8 value) +{ -+ struct steamdeck *sd = -+ container_of(work, struct steamdeck, 
role_work.work); -+ unsigned long long pdcs; -+ bool usb_host; ++ int err; + -+ if (steamdeck_read_pdcs(sd, &pdcs)) ++ if (ec_readonly) { ++ pr_info("Skipping writing EC RAM at 0x%x because readonly.\n", ++ ecram_offset); + return; ++ } ++ err = ecram_portio_write(&ecram->portio, ecram_offset, value); ++ if (err) ++ pr_info("Error writing EC RAM at 0x%x\n", ecram_offset); ++} + -+ /* -+ * We only care about these two -+ */ -+ pdcs &= ACPI_STEAMDECK_PORT_CONNECT | ACPI_STEAMDECK_CUR_DATA_ROLE; ++/* =============================== */ ++/* Reads from EC */ ++/* =============================== */ + -+ /* -+ * For "connect" events our role is determined by a bit in -+ * PDCS, for "disconnect" we switch to being a gadget -+ * unconditionally. The thinking for the latter is we don't -+ * want to start acting as a USB host until we get -+ * confirmation from the firmware that we are a USB host -+ */ -+ usb_host = (pdcs & ACPI_STEAMDECK_PORT_CONNECT) ? -+ pdcs & ACPI_STEAMDECK_CUR_DATA_ROLE : false; ++u16 read_ec_id(struct ecram *ecram, const struct model_config *model) ++{ ++ u8 id1 = ecram_read(ecram, model->registers->ECHIPID1); ++ u8 id2 = ecram_read(ecram, model->registers->ECHIPID2); + -+ WARN_ON(extcon_set_state_sync(sd->edev, EXTCON_USB_HOST, -+ usb_host)); -+ dev_dbg(sd->dev, "USB role is %s\n", usb_host ? "host" : "device"); ++ return (id1 << 8) + id2; +} + -+static void steamdeck_notify(acpi_handle handle, u32 event, void *context) ++u16 read_ec_version(struct ecram *ecram, const struct model_config *model) +{ -+ struct device *dev = context; -+ struct steamdeck *sd = dev_get_drvdata(dev); -+ unsigned long long pdcs; -+ unsigned long delay; ++ u8 vers = ecram_read(ecram, model->registers->ECHIPVER); ++ u8 debug = ecram_read(ecram, model->registers->ECDEBUG); + -+ switch (event) { -+ case ACPI_STEAMDECK_NOTIFY_STATUS: -+ if (steamdeck_read_pdcs(sd, &pdcs)) -+ return; -+ /* -+ * We process "disconnect" events immediately and -+ * "connect" events with a delay to give the HW time -+ * to settle. For example attaching USB hub (at least -+ * for HW used for testing) will generate intermediary -+ * event with "host" bit not set, followed by the one -+ * that does have it set. -+ */ -+ delay = (pdcs & ACPI_STEAMDECK_PORT_CONNECT) ? 
-+ STEAMDECK_ROLE_SWITCH_DELAY : 0; ++ return (vers << 8) + debug; ++} + -+ queue_delayed_work(system_long_wq, &sd->role_work, delay); -+ break; -+ default: -+ dev_err(dev, "Unsupported event [0x%x]\n", event); ++/* ============================= */ ++/* Data model for sensor values */ ++/* ============================ */ ++ ++struct sensor_values { ++ u16 fan1_rpm; // current speed in rpm of fan 1 ++ u16 fan2_rpm; // current speed in rpm of fan2 ++ u16 fan1_target_rpm; // target speed in rpm of fan 1 ++ u16 fan2_target_rpm; // target speed in rpm of fan 2 ++ u8 cpu_temp_celsius; // cpu temperature in celcius ++ u8 gpu_temp_celsius; // gpu temperature in celcius ++ u8 ic_temp_celsius; // ic temperature in celcius ++}; ++ ++enum SENSOR_ATTR { ++ SENSOR_CPU_TEMP_ID = 1, ++ SENSOR_GPU_TEMP_ID = 2, ++ SENSOR_IC_TEMP_ID = 3, ++ SENSOR_FAN1_RPM_ID = 4, ++ SENSOR_FAN2_RPM_ID = 5, ++ SENSOR_FAN1_TARGET_RPM_ID = 6, ++ SENSOR_FAN2_TARGET_RPM_ID = 7 ++}; ++ ++static int read_sensor_values(struct ecram *ecram, ++ const struct model_config *model, ++ struct sensor_values *values) ++{ ++ values->fan1_target_rpm = ++ 100 * ecram_read(ecram, model->registers->EXT_FAN1_TARGET_RPM); ++ values->fan2_target_rpm = ++ 100 * ecram_read(ecram, model->registers->EXT_FAN2_TARGET_RPM); ++ ++ values->fan1_rpm = ++ ecram_read(ecram, model->registers->EXT_FAN1_RPM_LSB) + ++ (((int)ecram_read(ecram, model->registers->EXT_FAN1_RPM_MSB)) ++ << 8); ++ values->fan2_rpm = ++ ecram_read(ecram, model->registers->EXT_FAN2_RPM_LSB) + ++ (((int)ecram_read(ecram, model->registers->EXT_FAN2_RPM_MSB)) ++ << 8); ++ ++ values->cpu_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_CPU_TEMP_INPUT); ++ values->gpu_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_GPU_TEMP_INPUT); ++ values->ic_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_IC_TEMP_INPUT); ++ ++ values->cpu_temp_celsius = ecram_read(ecram, 0xC5E6); ++ values->gpu_temp_celsius = ecram_read(ecram, 0xC5E7); ++ values->ic_temp_celsius = ecram_read(ecram, 0xC5E8); ++ ++ return 0; ++} ++ ++/* =============================== */ ++/* Behaviour changing functions */ ++/* =============================== */ ++ ++int read_powermode(struct ecram *ecram, const struct model_config *model) ++{ ++ return ecram_read(ecram, model->registers->EXT_POWERMODE); ++} ++ ++ssize_t write_powermode(struct ecram *ecram, const struct model_config *model, ++ u8 value) ++{ ++ if (!(value >= 0 && value <= 2)) { ++ pr_info("Unexpected power mode value ignored: %d\n", value); ++ return -ENOMEM; + } ++ ecram_write(ecram, model->registers->EXT_POWERMODE, value); ++ return 0; +} + -+static void steamdeck_remove_notify_handler(void *data) ++/** ++ * Shortly toggle powermode to a different mode ++ * and switch back, e.g. to reset fan curve. ++ */ ++void toggle_powermode(struct ecram *ecram, const struct model_config *model) +{ -+ struct steamdeck *sd = data; ++ int old_powermode = read_powermode(ecram, model); ++ int next_powermode = old_powermode == 0 ? 
1 : 0; + -+ acpi_remove_notify_handler(sd->adev->handle, ACPI_DEVICE_NOTIFY, -+ steamdeck_notify); -+ cancel_delayed_work_sync(&sd->role_work); ++ write_powermode(ecram, model, next_powermode); ++ mdelay(1500); ++ write_powermode(ecram, model, old_powermode); +} + -+static const unsigned int steamdeck_extcon_cable[] = { -+ EXTCON_USB, -+ EXTCON_USB_HOST, -+ EXTCON_CHG_USB_SDP, -+ EXTCON_CHG_USB_CDP, -+ EXTCON_CHG_USB_DCP, -+ EXTCON_CHG_USB_ACA, -+ EXTCON_NONE, -+}; ++#define lockfancontroller_ON 8 ++#define lockfancontroller_OFF 0 + -+static int steamdeck_probe(struct platform_device *pdev) ++ssize_t write_lockfancontroller(struct ecram *ecram, ++ const struct model_config *model, bool state) +{ -+ struct device *dev = &pdev->dev; -+ struct steamdeck *sd; -+ acpi_status status; -+ unsigned long long sta; -+ int ret; ++ u8 val = state ? lockfancontroller_ON : lockfancontroller_OFF; + -+ static const struct regmap_config regmap_config = { -+ .reg_bits = 8, -+ .val_bits = 8, -+ .max_register = 255, -+ .cache_type = REGCACHE_NONE, -+ .reg_read = steamdeck_ddic_reg_read, -+ }; ++ ecram_write(ecram, model->registers->EXT_LOCKFANCONTROLLER, val); ++ return 0; ++} + -+ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); -+ if (!sd) -+ return -ENOMEM; -+ sd->adev = ACPI_COMPANION(&pdev->dev); -+ sd->dev = dev; -+ platform_set_drvdata(pdev, sd); -+ INIT_DELAYED_WORK(&sd->role_work, steamdeck_usb_role_work); ++int read_lockfancontroller(struct ecram *ecram, ++ const struct model_config *model, bool *state) ++{ ++ int value = ecram_read(ecram, model->registers->EXT_LOCKFANCONTROLLER); + -+ status = acpi_evaluate_integer(sd->adev->handle, "_STA", -+ NULL, &sta); -+ if (ACPI_FAILURE(status)) { -+ dev_err(dev, "Status check failed (0x%x)\n", status); -+ return -EINVAL; ++ switch (value) { ++ case lockfancontroller_ON: ++ *state = true; ++ break; ++ case lockfancontroller_OFF: ++ *state = false; ++ break; ++ default: ++ pr_info("Unexpected value in lockfanspeed register:%d\n", ++ value); ++ return -1; + } ++ return 0; ++} + -+ if ((sta & STEAMDECK_STA_OK) != STEAMDECK_STA_OK) { -+ dev_err(dev, "Device is not ready\n"); -+ return -EINVAL; -+ } ++#define MAXIMUMFANSPEED_ON 0x40 ++#define MAXIMUMFANSPEED_OFF 0x00 + -+ /* -+ * Our ACPI interface doesn't expose a method to read current -+ * fan target, so we use current fan speed as an -+ * approximation. -+ */ -+ if (steamdeck_read_fan_speed(sd, &sd->fan_target)) -+ dev_warn(dev, "Failed to read fan speed"); ++int read_maximumfanspeed(struct ecram *ecram, const struct model_config *model, ++ bool *state) ++{ ++ int value = ecram_read(ecram, model->registers->EXT_MAXIMUMFANSPEED); + -+ sd->hwmon = devm_hwmon_device_register_with_info(dev, -+ "steamdeck", -+ sd, -+ &steamdeck_chip_info, -+ steamdeck_groups); -+ if (IS_ERR(sd->hwmon)) { -+ dev_err(dev, "Failed to register HWMON device"); -+ return PTR_ERR(sd->hwmon); ++ switch (value) { ++ case MAXIMUMFANSPEED_ON: ++ *state = true; ++ break; ++ case MAXIMUMFANSPEED_OFF: ++ *state = false; ++ break; ++ default: ++ pr_info("Unexpected value in maximumfanspeed register:%d\n", ++ value); ++ return -1; + } ++ return 0; ++} + -+ sd->regmap = devm_regmap_init(dev, NULL, sd, ®map_config); -+ if (IS_ERR(sd->regmap)) -+ dev_err(dev, "Failed to register REGMAP"); -+ -+ sd->edev = devm_extcon_dev_allocate(dev, steamdeck_extcon_cable); -+ if (IS_ERR(sd->edev)) -+ return -ENOMEM; ++ssize_t write_maximumfanspeed(struct ecram *ecram, ++ const struct model_config *model, bool state) ++{ ++ u8 val = state ? 
MAXIMUMFANSPEED_ON : MAXIMUMFANSPEED_OFF; + -+ ret = devm_extcon_dev_register(dev, sd->edev); -+ if (ret < 0) { -+ dev_err(dev, "Failed to register extcon device: %d\n", ret); -+ return ret; -+ } ++ ecram_write(ecram, model->registers->EXT_MAXIMUMFANSPEED, val); ++ return 0; ++} + -+ /* -+ * Set initial role value -+ */ -+ queue_delayed_work(system_long_wq, &sd->role_work, 0); -+ flush_delayed_work(&sd->role_work); ++#define MINIFANCUVE_ON_COOL_ON 0x04 ++#define MINIFANCUVE_ON_COOL_OFF 0xA0 + -+ status = acpi_install_notify_handler(sd->adev->handle, -+ ACPI_DEVICE_NOTIFY, -+ steamdeck_notify, -+ dev); -+ if (ACPI_FAILURE(status)) { -+ dev_err(dev, "Error installing ACPI notify handler\n"); -+ return -EIO; ++int read_minifancurve(struct ecram *ecram, const struct model_config *model, ++ bool *state) ++{ ++ int value = ++ ecram_read(ecram, model->registers->EXT_MINIFANCURVE_ON_COOL); ++ ++ switch (value) { ++ case MINIFANCUVE_ON_COOL_ON: ++ *state = true; ++ break; ++ case MINIFANCUVE_ON_COOL_OFF: ++ *state = false; ++ break; ++ default: ++ pr_info("Unexpected value in MINIFANCURVE register:%d\n", ++ value); ++ return -1; + } ++ return 0; ++} + -+ ret = devm_add_action_or_reset(dev, steamdeck_remove_notify_handler, -+ sd); -+ return ret; ++ssize_t write_minifancurve(struct ecram *ecram, ++ const struct model_config *model, bool state) ++{ ++ u8 val = state ? MINIFANCUVE_ON_COOL_ON : MINIFANCUVE_ON_COOL_OFF; ++ ++ ecram_write(ecram, model->registers->EXT_MINIFANCURVE_ON_COOL, val); ++ return 0; +} + -+static const struct acpi_device_id steamdeck_device_ids[] = { -+ { "VLV0100", 0 }, -+ { "", 0 }, ++#define KEYBOARD_BACKLIGHT_OFF 18 ++#define KEYBOARD_BACKLIGHT_ON1 21 ++#define KEYBOARD_BACKLIGHT_ON2 23 ++ ++int read_keyboard_backlight(struct ecram *ecram, ++ const struct model_config *model, int *state) ++{ ++ int value = ecram_read(ecram, ++ model->registers->EXT_WHITE_KEYBOARD_BACKLIGHT); ++ ++ //switch (value) { ++ //case MINIFANCUVE_ON_COOL_ON: ++ // *state = true; ++ // break; ++ //case MINIFANCUVE_ON_COOL_OFF: ++ // *state = false; ++ // break; ++ //default: ++ // pr_info("Unexpected value in MINIFANCURVE register:%d\n", ++ // value); ++ // return -1; ++ //} ++ *state = value; ++ return 0; ++} ++ ++int write_keyboard_backlight(struct ecram *ecram, ++ const struct model_config *model, int state) ++{ ++ u8 val = state > 0 ? KEYBOARD_BACKLIGHT_ON1 : KEYBOARD_BACKLIGHT_OFF; ++ ++ ecram_write(ecram, model->registers->EXT_WHITE_KEYBOARD_BACKLIGHT, val); ++ return 0; ++} ++ ++#define FCT_RAPID_CHARGE_ON 0x07 ++#define FCT_RAPID_CHARGE_OFF 0x08 ++#define RAPID_CHARGE_ON 0x0 ++#define RAPID_CHARGE_OFF 0x1 ++ ++int read_rapidcharge(acpi_handle acpihandle, int *state) ++{ ++ unsigned long result; ++ int err; ++ ++ err = eval_qcho(acpihandle, &result); ++ if (err) ++ return err; ++ ++ *state = result; ++ return 0; ++} ++ ++int write_rapidcharge(acpi_handle acpihandle, bool state) ++{ ++ unsigned long fct_nr = state > 0 ? 
FCT_RAPID_CHARGE_ON : ++ FCT_RAPID_CHARGE_OFF; ++ return exec_sbmc(acpihandle, fct_nr); ++} ++ ++/* ============================= */ ++/* Data model for fan curve */ ++/* ============================ */ ++ ++struct fancurve_point { ++ // rpm1 devided by 100 ++ u8 rpm1_raw; ++ // rpm2 devided by 100 ++ u8 rpm2_raw; ++ // >=2 , <=5 (lower is faster); must be increasing by level ++ u8 accel; ++ // >=2 , <=5 (lower is faster); must be increasing by level ++ u8 decel; ++ ++ // min must be lower or equal than max ++ // last level max must be 127 ++ // <=127 cpu max temp for this level; must be increasing by level ++ u8 cpu_max_temp_celsius; ++ // <=127 cpu min temp for this level; must be increasing by level ++ u8 cpu_min_temp_celsius; ++ // <=127 gpu min temp for this level; must be increasing by level ++ u8 gpu_max_temp_celsius; ++ // <=127 gpu max temp for this level; must be increasing by level ++ u8 gpu_min_temp_celsius; ++ // <=127 ic max temp for this level; must be increasing by level ++ u8 ic_max_temp_celsius; ++ // <=127 ic max temp for this level; must be increasing by level ++ u8 ic_min_temp_celsius; +}; -+MODULE_DEVICE_TABLE(acpi, steamdeck_device_ids); + -+static struct platform_driver steamdeck_driver = { -+ .probe = steamdeck_probe, -+ .driver = { -+ .name = "steamdeck", -+ .acpi_match_table = steamdeck_device_ids, -+ }, ++enum FANCURVE_ATTR { ++ FANCURVE_ATTR_PWM1 = 1, ++ FANCURVE_ATTR_PWM2 = 2, ++ FANCURVE_ATTR_CPU_TEMP = 3, ++ FANCURVE_ATTR_CPU_HYST = 4, ++ FANCURVE_ATTR_GPU_TEMP = 5, ++ FANCURVE_ATTR_GPU_HYST = 6, ++ FANCURVE_ATTR_IC_TEMP = 7, ++ FANCURVE_ATTR_IC_HYST = 8, ++ FANCURVE_ATTR_ACCEL = 9, ++ FANCURVE_ATTR_DECEL = 10, ++ FANCURVE_SIZE = 11, ++ FANCURVE_MINIFANCURVE_ON_COOL = 12 +}; -+module_platform_driver(steamdeck_driver); + -+MODULE_AUTHOR("Andrey Smirnov "); -+MODULE_DESCRIPTION("Steam Deck ACPI platform driver"); -+MODULE_LICENSE("GPL"); -diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h -index 0acb8e1fb7af..b0b49c8653b0 100644 ---- a/include/linux/pagemap.h -+++ b/include/linux/pagemap.h -@@ -1182,7 +1182,7 @@ struct readahead_control { - ._index = i, \ - } - --#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) -+#define VM_READAHEAD_PAGES (SZ_8M / PAGE_SIZE) - - void page_cache_ra_unbounded(struct readahead_control *, - unsigned long nr_to_read, unsigned long lookahead_count); -diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h -index 45f09bec02c4..87b20e2ee274 100644 ---- a/include/linux/user_namespace.h -+++ b/include/linux/user_namespace.h -@@ -148,6 +148,8 @@ static inline void set_userns_rlimit_max(struct user_namespace *ns, - - #ifdef CONFIG_USER_NS - -+extern int unprivileged_userns_clone; ++// used for clearing table entries ++static const struct fancurve_point fancurve_point_zero = { 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0 }; + - static inline struct user_namespace *get_user_ns(struct user_namespace *ns) - { - if (ns) -@@ -181,6 +183,8 @@ extern bool current_in_userns(const struct user_namespace *target_ns); - struct ns_common *ns_get_owner(struct ns_common *ns); - #else - -+#define unprivileged_userns_clone 0 ++struct fancurve { ++ struct fancurve_point points[MAXFANCURVESIZE]; ++ // number of points used; must be <= MAXFANCURVESIZE ++ size_t size; ++ // the point that at which fans are run currently ++ size_t current_point_i; ++}; + - static inline struct user_namespace *get_user_ns(struct user_namespace *ns) - { - return &init_user_ns; -diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h -index 
db762e35aca9..0336791656eb 100644 ---- a/include/net/netns/ipv4.h -+++ b/include/net/netns/ipv4.h -@@ -194,6 +194,7 @@ struct netns_ipv4 { - int sysctl_udp_rmem_min; - - u8 sysctl_fib_notify_on_flag_change; -+ unsigned int sysctl_tcp_collapse_max_bytes; - - #ifdef CONFIG_NET_L3_MASTER_DEV - u8 sysctl_udp_l3mdev_accept; -diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h -index 901b440238d5..7026df84a0f6 100644 ---- a/include/trace/events/tcp.h -+++ b/include/trace/events/tcp.h -@@ -187,6 +187,13 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust, - TP_ARGS(sk) - ); - -+DEFINE_EVENT(tcp_event_sk, tcp_collapse_max_bytes_exceeded, ++// calculate derived values + -+ TP_PROTO(struct sock *sk), ++int fancurve_get_cpu_deltahyst(struct fancurve_point *point) ++{ ++ return ((int)point->cpu_max_temp_celsius) - ++ ((int)point->cpu_min_temp_celsius); ++} + -+ TP_ARGS(sk) -+); ++int fancurve_get_gpu_deltahyst(struct fancurve_point *point) ++{ ++ return ((int)point->gpu_max_temp_celsius) - ++ ((int)point->gpu_min_temp_celsius); ++} + - TRACE_EVENT(tcp_retransmit_synack, - - TP_PROTO(const struct sock *sk, const struct request_sock *req), -diff --git a/init/Kconfig b/init/Kconfig -index 1fb5f313d18f..9b298860cfed 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -123,6 +123,10 @@ config THREAD_INFO_IN_TASK - - menu "General setup" - -+config CACHY -+ bool "Some kernel tweaks by CachyOS" -+ default y ++int fancurve_get_ic_deltahyst(struct fancurve_point *point) ++{ ++ return ((int)point->ic_max_temp_celsius) - ++ ((int)point->ic_min_temp_celsius); ++} + - config BROKEN - bool - -@@ -348,6 +352,19 @@ config KERNEL_UNCOMPRESSED - - endchoice - -+menu "ZSTD compression options" -+ depends on KERNEL_ZSTD ++// validation functions + -+config ZSTD_COMPRESSION_LEVEL -+ int "Compression level (1-22)" -+ range 1 22 -+ default "22" -+ help -+ Choose a compression level for zstd kernel compression. -+ Default is 22, which is the maximum. ++bool fancurve_is_valid_min_temp(int min_temp) ++{ ++ return min_temp >= 0 && min_temp <= 127; ++} + -+endmenu ++bool fancurve_is_valid_max_temp(int max_temp) ++{ ++ return max_temp >= 0 && max_temp <= 127; ++} + - config DEFAULT_INIT - string "Default init path" - default "" -@@ -1253,6 +1270,22 @@ config USER_NS - - If unsure, say N. - -+config USER_NS_UNPRIVILEGED -+ bool "Allow unprivileged users to create namespaces" -+ default y -+ depends on USER_NS -+ help -+ When disabled, unprivileged users will not be able to create -+ new namespaces. Allowing users to create their own namespaces -+ has been part of several recent local privilege escalation -+ exploits, so if you need user namespaces but are -+ paranoid^Wsecurity-conscious you want to disable this. ++// setters with validation ++// - make hwmon implementation easier ++// - keep fancurve valid, otherwise EC will not properly control fan + -+ This setting can be overridden at runtime via the -+ kernel.unprivileged_userns_clone sysctl. ++bool fancurve_set_rpm1(struct fancurve *fancurve, int point_id, int rpm) ++{ ++ bool valid = point_id == 0 ? rpm == 0 : (rpm >= 0 && rpm <= 4500); + -+ If unsure, say Y. ++ if (valid) ++ fancurve->points[point_id].rpm1_raw = rpm / 100; ++ return valid; ++} + - config PID_NS - bool "PID Namespaces" - default y -@@ -1433,6 +1466,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE - with the "-O2" compiler flag for best performance and most - helpful compile-time warnings. 
- -+config CC_OPTIMIZE_FOR_PERFORMANCE_O3 -+ bool "Optimize more for performance (-O3)" -+ help -+ Choosing this option will pass "-O3" to your compiler to optimize -+ the kernel yet more for performance. ++bool fancurve_set_rpm2(struct fancurve *fancurve, int point_id, int rpm) ++{ ++ bool valid = point_id == 0 ? rpm == 0 : (rpm >= 0 && rpm <= 4500); + - config CC_OPTIMIZE_FOR_SIZE - bool "Optimize for size (-Os)" - help -diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz -index 38ef6d06888e..0f78364efd4f 100644 ---- a/kernel/Kconfig.hz -+++ b/kernel/Kconfig.hz -@@ -40,6 +40,27 @@ choice - on SMP and NUMA systems and exactly dividing by both PAL and - NTSC frame rates for video and multimedia work. - -+ config HZ_500 -+ bool "500 HZ" -+ help -+ 500 Hz is a balanced timer frequency. Provides fast interactivity -+ on desktops with good smoothness without increasing CPU power -+ consumption and sacrificing the battery life on laptops. ++ if (valid) ++ fancurve->points[point_id].rpm2_raw = rpm / 100; ++ return valid; ++} + -+ config HZ_600 -+ bool "600 HZ" -+ help -+ 600 Hz is a balanced timer frequency. Provides fast interactivity -+ on desktops with good smoothness without increasing CPU power -+ consumption and sacrificing the battery life on laptops. ++// TODO: remove { ... } from single line if body + -+ config HZ_750 -+ bool "750 HZ" -+ help -+ 750 Hz is a balanced timer frequency. Provides fast interactivity -+ on desktops with good smoothness without increasing CPU power -+ consumption and sacrificing the battery life on laptops. ++bool fancurve_set_accel(struct fancurve *fancurve, int point_id, int accel) ++{ ++ bool valid = accel >= 2 && accel <= 5; + - config HZ_1000 - bool "1000 HZ" - help -@@ -53,6 +74,9 @@ config HZ - default 100 if HZ_100 - default 250 if HZ_250 - default 300 if HZ_300 -+ default 500 if HZ_500 -+ default 600 if HZ_600 -+ default 750 if HZ_750 - default 1000 if HZ_1000 - - config SCHED_HRTICK -diff --git a/kernel/fork.c b/kernel/fork.c -index 0c92f224c68c..49c173e367d2 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -98,6 +98,10 @@ - #include - #include - -+#ifdef CONFIG_USER_NS -+#include -+#endif ++ if (valid) ++ fancurve->points[point_id].accel = accel; ++ return valid; ++} + - #include - #include - #include -@@ -2031,6 +2035,10 @@ static __latent_entropy struct task_struct *copy_process( - if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) - return ERR_PTR(-EINVAL); - -+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) -+ if (!capable(CAP_SYS_ADMIN)) -+ return ERR_PTR(-EPERM); ++bool fancurve_set_decel(struct fancurve *fancurve, int point_id, int decel) ++{ ++ bool valid = decel >= 2 && decel <= 5; + - /* - * Thread groups must share signals as well, and detached threads - * can only be started up within the thread group. 
-@@ -3181,6 +3189,12 @@ int ksys_unshare(unsigned long unshare_flags) - if (unshare_flags & CLONE_NEWNS) - unshare_flags |= CLONE_FS; - -+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { -+ err = -EPERM; -+ if (!capable(CAP_SYS_ADMIN)) -+ goto bad_unshare_out; ++ if (valid) ++ fancurve->points[point_id].decel = decel; ++ return valid; ++} ++ ++bool fancurve_set_cpu_temp_max(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].cpu_max_temp_celsius = value; ++ ++ return valid; ++} ++ ++bool fancurve_set_gpu_temp_max(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].gpu_max_temp_celsius = value; ++ return valid; ++} ++ ++bool fancurve_set_ic_temp_max(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].ic_max_temp_celsius = value; ++ return valid; ++} ++ ++bool fancurve_set_cpu_temp_min(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].cpu_min_temp_celsius = value; ++ return valid; ++} ++ ++bool fancurve_set_gpu_temp_min(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].gpu_min_temp_celsius = value; ++ return valid; ++} ++ ++bool fancurve_set_ic_temp_min(struct fancurve *fancurve, int point_id, ++ int value) ++{ ++ bool valid = fancurve_is_valid_max_temp(value); ++ ++ if (valid) ++ fancurve->points[point_id].ic_min_temp_celsius = value; ++ return valid; ++} ++ ++bool fancurve_set_size(struct fancurve *fancurve, int size, bool init_values) ++{ ++ bool valid = size >= 1 && size <= MAXFANCURVESIZE; ++ ++ if (!valid) ++ return false; ++ if (init_values && size < fancurve->size) { ++ // fancurve size is decreased, but last etnry alwasy needs 127 temperatures ++ // Note: size >=1 ++ fancurve->points[size - 1].cpu_max_temp_celsius = 127; ++ fancurve->points[size - 1].ic_max_temp_celsius = 127; ++ fancurve->points[size - 1].gpu_max_temp_celsius = 127; + } ++ if (init_values && size > fancurve->size) { ++ // fancurve increased, so new entries need valid values ++ int i; ++ int last = fancurve->size > 0 ? fancurve->size - 1 : 0; + - err = check_unshare_flags(unshare_flags); - if (err) - goto bad_unshare_out; -diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig -index 424b3bc58f3f..ecf2798c5ccf 100644 ---- a/kernel/module/Kconfig -+++ b/kernel/module/Kconfig -@@ -219,6 +219,31 @@ config MODULE_COMPRESS_ZSTD - - endchoice - -+menu "ZSTD module compression options" -+ depends on MODULE_COMPRESS_ZSTD ++ for (i = fancurve->size; i < size; ++i) ++ fancurve->points[i] = fancurve->points[last]; ++ } ++ return true; ++} + -+config MODULE_COMPRESS_ZSTD_LEVEL -+ int "Compression level (1-19)" -+ range 1 19 -+ default 9 -+ help -+ Compression level used by zstd for compressing modules. ++/* Read the fan curve from the EC. ++ * ++ * In newer models (>=2022) there is an ACPI/WMI to read fan curve as ++ * a whole. So read/write fan table as a whole to use ++ * same interface for both cases. ++ * ++ * It reads all points from EC memory, even if stored fancurve is smaller, so ++ * it can contain 0 entries. 
++ */ ++static int read_fancurve(struct ecram *ecram, const struct model_config *model, ++ struct fancurve *fancurve) ++{ ++ size_t i = 0; ++ ++ for (i = 0; i < MAXFANCURVESIZE; ++i) { ++ struct fancurve_point *point = &fancurve->points[i]; ++ ++ point->rpm1_raw = ++ ecram_read(ecram, model->registers->EXT_FAN1_BASE + i); ++ point->rpm2_raw = ++ ecram_read(ecram, model->registers->EXT_FAN2_BASE + i); ++ ++ point->accel = ecram_read( ++ ecram, model->registers->EXT_FAN_ACC_BASE + i); ++ point->decel = ecram_read( ++ ecram, model->registers->EXT_FAN_DEC_BASE + i); ++ point->cpu_max_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_CPU_TEMP + i); ++ point->cpu_min_temp_celsius = ecram_read( ++ ecram, model->registers->EXT_CPU_TEMP_HYST + i); ++ point->gpu_max_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_GPU_TEMP + i); ++ point->gpu_min_temp_celsius = ecram_read( ++ ecram, model->registers->EXT_GPU_TEMP_HYST + i); ++ point->ic_max_temp_celsius = ++ ecram_read(ecram, model->registers->EXT_VRM_TEMP + i); ++ point->ic_min_temp_celsius = ecram_read( ++ ecram, model->registers->EXT_VRM_TEMP_HYST + i); ++ } + -+config MODULE_COMPRESS_ZSTD_ULTRA -+ bool "Enable ZSTD ultra compression" -+ help -+ Compress modules with ZSTD using the highest possible compression. ++ // Do not trust that hardware; It might suddendly report ++ // a larger size, so clamp it. ++ fancurve->size = ++ ecram_read(ecram, model->registers->EXT_FAN_POINTS_SIZE); ++ fancurve->size = ++ min(fancurve->size, (typeof(fancurve->size))(MAXFANCURVESIZE)); ++ fancurve->current_point_i = ++ ecram_read(ecram, model->registers->EXT_FAN_CUR_POINT); ++ fancurve->current_point_i = ++ min(fancurve->current_point_i, fancurve->size); ++ return 0; ++} + -+config MODULE_COMPRESS_ZSTD_LEVEL_ULTRA -+ int "Compression level (20-22)" -+ depends on MODULE_COMPRESS_ZSTD_ULTRA -+ range 20 22 -+ default 20 -+ help -+ Ultra compression level used by zstd for compressing modules. ++static int write_fancurve(struct ecram *ecram, const struct model_config *model, ++ const struct fancurve *fancurve, bool write_size) ++{ ++ size_t i; ++ // Reset fan update counters (try to avoid any race conditions) ++ ecram_write(ecram, 0xC5FE, 0); ++ ecram_write(ecram, 0xC5FF, 0); ++ for (i = 0; i < MAXFANCURVESIZE; ++i) { ++ // Entries for points larger than fancurve size should be cleared ++ // to 0 ++ const struct fancurve_point *point = ++ i < fancurve->size ? 
&fancurve->points[i] : ++ &fancurve_point_zero; ++ ++ ecram_write(ecram, model->registers->EXT_FAN1_BASE + i, ++ point->rpm1_raw); ++ ecram_write(ecram, model->registers->EXT_FAN2_BASE + i, ++ point->rpm2_raw); ++ ++ ecram_write(ecram, model->registers->EXT_FAN_ACC_BASE + i, ++ point->accel); ++ ecram_write(ecram, model->registers->EXT_FAN_DEC_BASE + i, ++ point->decel); ++ ++ ecram_write(ecram, model->registers->EXT_CPU_TEMP + i, ++ point->cpu_max_temp_celsius); ++ ecram_write(ecram, model->registers->EXT_CPU_TEMP_HYST + i, ++ point->cpu_min_temp_celsius); ++ ecram_write(ecram, model->registers->EXT_GPU_TEMP + i, ++ point->gpu_max_temp_celsius); ++ ecram_write(ecram, model->registers->EXT_GPU_TEMP_HYST + i, ++ point->gpu_min_temp_celsius); ++ ecram_write(ecram, model->registers->EXT_VRM_TEMP + i, ++ point->ic_max_temp_celsius); ++ ecram_write(ecram, model->registers->EXT_VRM_TEMP_HYST + i, ++ point->ic_min_temp_celsius); ++ } + -+endmenu ++ if (write_size) { ++ ecram_write(ecram, model->registers->EXT_FAN_POINTS_SIZE, ++ fancurve->size); ++ } + - config MODULE_DECOMPRESS - bool "Support in-kernel module decompression" - depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ || MODULE_COMPRESS_ZSTD -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 5f6587d94c1d..96c66b50ee48 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -69,9 +69,13 @@ - * - * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) - */ -+#ifdef CONFIG_CACHY -+unsigned int sysctl_sched_latency = 3000000ULL; -+static unsigned int normalized_sysctl_sched_latency = 3000000ULL; -+#else - unsigned int sysctl_sched_latency = 6000000ULL; - static unsigned int normalized_sysctl_sched_latency = 6000000ULL; -- -+#endif - /* - * The initial- and re-scaling of tunables is configurable - * -@@ -90,8 +94,13 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; - * - * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) - */ -+#ifdef CONFIG_CACHY -+unsigned int sysctl_sched_min_granularity = 400000ULL; -+static unsigned int normalized_sysctl_sched_min_granularity = 400000ULL; -+#else - unsigned int sysctl_sched_min_granularity = 750000ULL; - static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; -+#endif - - /* - * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks. -@@ -121,8 +130,13 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; - * - * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) - */ -+#ifdef CONFIG_CACHY -+unsigned int sysctl_sched_wakeup_granularity = 500000UL; -+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL; -+#else - unsigned int sysctl_sched_wakeup_granularity = 1000000UL; - static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; -+#endif - - const_debug unsigned int sysctl_sched_migration_cost = 500000UL; - -@@ -175,8 +189,12 @@ int __weak arch_asym_cpu_priority(int cpu) - * - * (default: 5 msec, units: microseconds) - */ -+#ifdef CONFIG_CACHY -+static unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL; -+#else - static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; - #endif -+#endif - - #ifdef CONFIG_NUMA_BALANCING - /* Restrict the NUMA promotion throughput (MB/s) for each target node. 
*/ -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 1c240d2c99bc..98e1a7472fd2 100644 ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -96,6 +96,9 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); - #ifdef CONFIG_PERF_EVENTS - static const int six_hundred_forty_kb = 640 * 1024; - #endif -+#ifdef CONFIG_USER_NS -+#include -+#endif - - - static const int ngroups_max = NGROUPS_MAX; -@@ -1645,6 +1648,15 @@ static struct ctl_table kern_table[] = { - .mode = 0644, - .proc_handler = proc_dointvec, - }, -+#ifdef CONFIG_USER_NS -+ { -+ .procname = "unprivileged_userns_clone", -+ .data = &unprivileged_userns_clone, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec, -+ }, -+#endif - #ifdef CONFIG_PROC_SYSCTL - { - .procname = "tainted", -diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c -index 1d8e47bed3f1..fec01d016a35 100644 ---- a/kernel/user_namespace.c -+++ b/kernel/user_namespace.c -@@ -22,6 +22,13 @@ - #include - #include - -+/* sysctl */ -+#ifdef CONFIG_USER_NS_UNPRIVILEGED -+int unprivileged_userns_clone = 1; -+#else -+int unprivileged_userns_clone; -+#endif ++ // Reset current fan level to 0, so algorithm in EC ++ // selects fan curve point again and resetting hysterisis ++ // effects ++ ecram_write(ecram, model->registers->EXT_FAN_CUR_POINT, 0); + - static struct kmem_cache *user_ns_cachep __read_mostly; - static DEFINE_MUTEX(userns_state_mutex); - -diff --git a/mm/Kconfig b/mm/Kconfig -index 4751031f3f05..cf2e47030fe8 100644 ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -621,7 +621,7 @@ config COMPACTION - config COMPACT_UNEVICTABLE_DEFAULT - int - depends on COMPACTION -- default 0 if PREEMPT_RT -+ default 0 if PREEMPT_RT || CACHY - default 1 - - # -diff --git a/mm/compaction.c b/mm/compaction.c -index 5a9501e0ae01..4d8c63b9cdca 100644 ---- a/mm/compaction.c -+++ b/mm/compaction.c -@@ -2735,7 +2735,11 @@ static void compact_nodes(void) - * aggressively the kernel should compact memory in the - * background. It takes values in the range [0, 100]. 
- */ -+#ifdef CONFIG_CACHY -+unsigned int __read_mostly sysctl_compaction_proactiveness; -+#else - unsigned int __read_mostly sysctl_compaction_proactiveness = 20; -+#endif - - int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, loff_t *ppos) -diff --git a/mm/page-writeback.c b/mm/page-writeback.c -index 516b1aa247e8..78fb31d27ed7 100644 ---- a/mm/page-writeback.c -+++ b/mm/page-writeback.c -@@ -71,7 +71,11 @@ static long ratelimit_pages = 32; - /* - * Start background writeback (via writeback threads) at this percentage - */ -+#ifdef CONFIG_CACHY -+static int dirty_background_ratio = 5; -+#else - static int dirty_background_ratio = 10; -+#endif - - /* - * dirty_background_bytes starts at 0 (disabled) so that it is a function of -@@ -99,7 +103,11 @@ static unsigned long vm_dirty_bytes; - /* - * The interval between `kupdate'-style writebacks - */ -+#ifdef CONFIG_CACHY -+unsigned int dirty_writeback_interval = 10 * 100; /* centiseconds */ -+#else - unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ -+#endif - - EXPORT_SYMBOL_GPL(dirty_writeback_interval); - -diff --git a/mm/swap.c b/mm/swap.c -index 57cb01b042f6..3a7bec75480f 100644 ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -1090,6 +1090,10 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch) - */ - void __init swap_setup(void) - { -+#ifdef CONFIG_CACHY -+ /* Only swap-in pages requested, avoid readahead */ -+ page_cluster = 0; -+#else - unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); - - /* Use a smaller cluster for small-memory machines */ -@@ -1101,4 +1105,5 @@ void __init swap_setup(void) - * Right now other parts of the system means that we - * _really_ don't want to cluster much more - */ -+#endif - } -diff --git a/mm/vmpressure.c b/mm/vmpressure.c -index b52644771cc4..11a4b0e3b583 100644 ---- a/mm/vmpressure.c -+++ b/mm/vmpressure.c -@@ -43,7 +43,11 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; - * essence, they are percents: the higher the value, the more number - * unsuccessful reclaims there were. - */ -+#ifdef CONFIG_CACHY -+static const unsigned int vmpressure_level_med = 65; -+#else - static const unsigned int vmpressure_level_med = 60; -+#endif - static const unsigned int vmpressure_level_critical = 95; - - /* -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 9c1c5e8b24b8..71a7f4517e5a 100644 ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -186,7 +186,11 @@ struct scan_control { - /* - * From 0 .. 200. Higher means more swappy. 
- */ -+#ifdef CONFIG_CACHY -+int vm_swappiness = 20; -+#else - int vm_swappiness = 60; -+#endif - - static void set_task_reclaim_state(struct task_struct *task, - struct reclaim_state *rs) -@@ -4536,7 +4540,11 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc - } - - /* to protect the working set of the last N jiffies */ -+#ifdef CONFIG_CACHY -+static unsigned long lru_gen_min_ttl __read_mostly = HZ; -+#else - static unsigned long lru_gen_min_ttl __read_mostly; -+#endif - - static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) - { -diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c -index 40fe70fc2015..3028e27897d9 100644 ---- a/net/ipv4/sysctl_net_ipv4.c -+++ b/net/ipv4/sysctl_net_ipv4.c -@@ -1470,6 +1470,13 @@ static struct ctl_table ipv4_net_table[] = { - .extra1 = SYSCTL_ZERO, - .extra2 = &tcp_plb_max_cong_thresh, - }, -+ { -+ .procname = "tcp_collapse_max_bytes", -+ .data = &init_net.ipv4.sysctl_tcp_collapse_max_bytes, -+ .maxlen = sizeof(unsigned int), -+ .mode = 0644, -+ .proc_handler = proc_douintvec_minmax, -+ }, - { } - }; - -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c -index 754e0212c951..b6d7faeb737a 100644 ---- a/net/ipv4/tcp_input.c -+++ b/net/ipv4/tcp_input.c -@@ -5414,6 +5414,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb) - static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) - { - struct tcp_sock *tp = tcp_sk(sk); -+ struct net *net = sock_net(sk); - - NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); - -@@ -5425,6 +5426,39 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) - return 0; - -+ /* For context and additional information about this patch, see the -+ * blog post at -+ * -+ * sysctl: net.ipv4.tcp_collapse_max_bytes -+ * -+ * If tcp_collapse_max_bytes is non-zero, attempt to collapse the -+ * queue to free up memory if the current amount of memory allocated -+ * is less than tcp_collapse_max_bytes. Otherwise, the packet is -+ * dropped without attempting to collapse the queue. -+ * -+ * If tcp_collapse_max_bytes is zero, this feature is disabled -+ * and the default Linux behavior is used. The default Linux -+ * behavior is to always perform the attempt to collapse the -+ * queue to free up memory. -+ * -+ * When the receive queue is small, we want to collapse the -+ * queue. There are two reasons for this: (a) the latency of -+ * performing the collapse will be small on a small queue, and -+ * (b) we want to avoid sending a congestion signal (via a -+ * packet drop) to the sender when the receive queue is small. -+ * -+ * The result is that we avoid latency spikes caused by the -+ * time it takes to perform the collapse logic when the receive -+ * queue is large and full, while preserving existing behavior -+ * and performance for all other cases. 
-+ */ -+ if (net->ipv4.sysctl_tcp_collapse_max_bytes && -+ (atomic_read(&sk->sk_rmem_alloc) > net->ipv4.sysctl_tcp_collapse_max_bytes)) { -+ /* We are dropping the packet */ -+ trace_tcp_collapse_max_bytes_exceeded(sk); -+ goto do_not_collapse; ++ // Reset internal fan levels ++ ecram_write(ecram, 0xC634, 0); // CPU ++ ecram_write(ecram, 0xC635, 0); // GPU ++ ecram_write(ecram, 0xC636, 0); // SENSOR ++ ++ return 0; ++} ++ ++static ssize_t fancurve_print_seqfile(const struct fancurve *fancurve, ++ struct seq_file *s) ++{ ++ int i; ++ ++ seq_printf( ++ s, ++ "rpm1|rpm2|acceleration|deceleration|cpu_min_temp|cpu_max_temp|gpu_min_temp|gpu_max_temp|ic_min_temp|ic_max_temp\n"); ++ for (i = 0; i < fancurve->size; ++i) { ++ const struct fancurve_point *point = &fancurve->points[i]; ++ ++ seq_printf( ++ s, "%d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\n", ++ point->rpm1_raw * 100, point->rpm2_raw * 100, ++ point->accel, point->decel, point->cpu_min_temp_celsius, ++ point->cpu_max_temp_celsius, ++ point->gpu_min_temp_celsius, ++ point->gpu_max_temp_celsius, point->ic_min_temp_celsius, ++ point->ic_max_temp_celsius); + } ++ return 0; ++} + - tcp_collapse_ofo_queue(sk); - if (!skb_queue_empty(&sk->sk_receive_queue)) - tcp_collapse(sk, &sk->sk_receive_queue, NULL, -@@ -5443,6 +5477,8 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) - return 0; - -+do_not_collapse: ++/* ============================= */ ++/* Global and shared data between */ ++/* all calls to this module */ ++/* ============================ */ ++// Implemented like ideapad-laptop.c but currenlty still ++// wihtout dynamic memory allocation (instaed global _priv) + - /* If we are really being abused, tell the caller to silently - * drop receive data on the floor. It will get retransmitted - * and hopefully then we'll have sufficient space. -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index b9d55277cb85..5e577877158b 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -3275,6 +3275,8 @@ static int __net_init tcp_sk_init(struct net *net) - else - net->ipv4.tcp_congestion_control = &tcp_reno; - -+ net->ipv4.sysctl_tcp_collapse_max_bytes = 0; ++struct legion_private { ++ struct platform_device *platform_device; ++ // TODO: remove or keep? init? ++ // struct acpi_device *adev; + - return 0; - } - -diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib -index 100a386fcd71..a3ec7265fb57 100644 ---- a/scripts/Makefile.lib -+++ b/scripts/Makefile.lib -@@ -542,14 +542,21 @@ quiet_cmd_xzmisc = XZMISC $@ - # decompression is used, like initramfs decompression, zstd22 should likely not - # be used because it would require zstd to allocate a 128 MB buffer. 
- -+ifdef CONFIG_ZSTD_COMPRESSION_LEVEL -+zstd_comp_val := $(CONFIG_ZSTD_COMPRESSION_LEVEL) -+ifeq ($(shell test $(zstd_comp_val) -gt 19; echo $$?),0) -+zstd_comp_val += --ultra -+endif -+endif ++ // Method to access ECRAM ++ struct ecram ecram; ++ // Configuration with registers an ECRAM access method ++ const struct model_config *conf; + - quiet_cmd_zstd = ZSTD $@ -- cmd_zstd = cat $(real-prereqs) | $(ZSTD) -19 > $@ -+ cmd_zstd = cat $(real-prereqs) | $(ZSTD) -T0 -19 > $@ - - quiet_cmd_zstd22 = ZSTD22 $@ -- cmd_zstd22 = cat $(real-prereqs) | $(ZSTD) -22 --ultra > $@ -+ cmd_zstd22 = cat $(real-prereqs) | $(ZSTD) -T0 -22 --ultra > $@ - - quiet_cmd_zstd22_with_size = ZSTD22 $@ -- cmd_zstd22_with_size = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@ -+ cmd_zstd22_with_size = { cat $(real-prereqs) | $(ZSTD) -T0 -$(zstd_comp_val); $(size_append); } > $@ - - # ASM offsets - # --------------------------------------------------------------------------- -diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst -index ab0c5bd1a60f..f4989f706d7f 100644 ---- a/scripts/Makefile.modinst -+++ b/scripts/Makefile.modinst -@@ -100,8 +100,13 @@ quiet_cmd_gzip = GZIP $@ - cmd_gzip = $(KGZIP) -n -f $< - quiet_cmd_xz = XZ $@ - cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< -+ifdef CONFIG_MODULE_COMPRESS_ZSTD_ULTRA - quiet_cmd_zstd = ZSTD $@ -- cmd_zstd = $(ZSTD) -T0 --rm -f -q $< -+ cmd_zstd = $(ZSTD) -$(CONFIG_MODULE_COMPRESS_ZSTD_LEVEL_ULTRA) --ultra --zstd=wlog=21 -T0 --rm -f -q $< -+else -+quiet_cmd_zstd = ZSTD $@ -+ cmd_zstd = $(ZSTD) -$(CONFIG_MODULE_COMPRESS_ZSTD_LEVEL) --zstd=wlog=21 -T0 --rm -f -q $< -+endif - - $(dst)/%.ko.gz: $(dst)/%.ko FORCE - $(call cmd,gzip) --- -2.40.0 - -From c10af555acefbb465dbe1834391511fb21569a68 Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:24:53 +0200 -Subject: [PATCH 04/12] fixes - -Signed-off-by: Peter Jung ---- - Documentation/ABI/stable/sysfs-block | 10 + - .../testing/sysfs-class-led-trigger-blkdev | 78 ++ - Documentation/admin-guide/mm/ksm.rst | 7 + - Documentation/leds/index.rst | 1 + - Documentation/leds/ledtrig-blkdev.rst | 158 +++ - Documentation/x86/topology.rst | 26 + - arch/x86/include/asm/cacheinfo.h | 1 + - arch/x86/kernel/cpu/amd.c | 1 + - arch/x86/kernel/cpu/cacheinfo.c | 36 + - arch/x86/kernel/cpu/hygon.c | 1 + - arch/x86/net/bpf_jit_comp.c | 5 +- - drivers/bluetooth/btusb.c | 2 +- - drivers/leds/trigger/Kconfig | 9 + - drivers/leds/trigger/Makefile | 1 + - drivers/leds/trigger/ledtrig-blkdev.c | 1221 +++++++++++++++++ - fs/eventpoll.c | 188 ++- - fs/proc/base.c | 1 + - include/linux/atomic/atomic-arch-fallback.h | 208 ++- - include/linux/atomic/atomic-instrumented.h | 68 +- - include/linux/atomic/atomic-long.h | 38 +- - include/linux/mm_types.h | 7 +- - include/linux/pageblock-flags.h | 2 +- - include/linux/rcuref.h | 155 +++ - include/linux/types.h | 6 + - include/net/dst.h | 30 +- - include/net/ip6_fib.h | 3 - - include/net/ip6_route.h | 2 +- - include/net/route.h | 3 - - include/net/sock.h | 2 +- - kernel/fork.c | 1 + - kernel/kheaders.c | 10 +- - kernel/padata.c | 4 +- - lib/Makefile | 2 +- - lib/rcuref.c | 281 ++++ - mm/ksm.c | 185 ++- - mm/mempolicy.c | 104 +- - mm/mprotect.c | 2 +- - net/bridge/br_nf_core.c | 2 +- - net/core/dst.c | 26 +- - net/core/rtnetlink.c | 2 +- - net/ipv4/route.c | 20 +- - net/ipv4/xfrm4_policy.c | 4 +- - net/ipv6/route.c | 32 +- - net/ipv6/xfrm6_policy.c | 4 +- - net/netfilter/ipvs/ip_vs_xmit.c | 4 +- - scripts/Makefile.vmlinux_o | 2 +- - scripts/atomic/atomics.tbl | 2 +- - 
scripts/atomic/fallbacks/add_negative | 11 +- - sound/pci/hda/cs35l41_hda.c | 2 +- - .../selftests/mm/ksm_functional_tests.c | 96 +- - 50 files changed, 2796 insertions(+), 270 deletions(-) - create mode 100644 Documentation/ABI/testing/sysfs-class-led-trigger-blkdev - create mode 100644 Documentation/leds/ledtrig-blkdev.rst - create mode 100644 drivers/leds/trigger/ledtrig-blkdev.c - create mode 100644 include/linux/rcuref.h - create mode 100644 lib/rcuref.c - -diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block -index 282de3680367..ac1dd2fbd855 100644 ---- a/Documentation/ABI/stable/sysfs-block -+++ b/Documentation/ABI/stable/sysfs-block -@@ -101,6 +101,16 @@ Description: - devices that support receiving integrity metadata. - - -+What: /sys/block//linked_leds -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Directory that contains symbolic links to all LEDs that -+ are associated with (linked to) this block device by the -+ blkdev LED trigger. Only present when at least one LED -+ is linked. (See Documentation/leds/ledtrig-blkdev.rst.) ++ // TODO: maybe refactor an keep only local to each function ++ // last known fan curve ++ struct fancurve fancurve; ++ // configured fan curve from user space ++ struct fancurve fancurve_configured; + ++ // update lock, when partial values of fancurve are changed ++ struct mutex fancurve_mutex; + - What: /sys/block///alignment_offset - Date: April 2009 - Contact: Martin K. Petersen -diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev b/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev -new file mode 100644 -index 000000000000..28ce8c814fb7 ---- /dev/null -+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev -@@ -0,0 +1,78 @@ -+What: /sys/class/leds//blink_time -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Time (in milliseconds) that the LED will be on during a single -+ "blink". ++ //interfaces ++ struct dentry *debugfs_dir; ++ struct device *hwmon_dev; ++ struct platform_profile_handler platform_profile_handler; + -+What: /sys/class/leds//check_interval -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Interval (in milliseconds) between checks of the block devices -+ linked to this LED. The LED will be blinked if the correct type -+ of activity (see blink_on_{read,write,discard,flush} attributes) -+ has occurred on any of the linked devices since the previous -+ check. ++ // TODO: remove? ++ bool loaded; ++}; + -+What: /sys/class/leds//blink_on_read -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Boolean that determines whether the LED will blink in response -+ to read activity on any of its linked block devices. ++// shared between different drivers: WMI, platform and proteced by mutex ++static struct legion_private *legion_shared; ++static struct legion_private _priv; ++static DEFINE_MUTEX(legion_shared_mutex); + -+What: /sys/class/leds//blink_on_write -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Boolean that determines whether the LED will blink in response -+ to write activity on any of its linked block devices. ++static int legion_shared_init(struct legion_private *priv) ++{ ++ int ret; + -+What: /sys/class/leds//blink_on_discard -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Boolean that determines whether the LED will blink in response -+ to discard activity on any of its linked block devices. 
++ mutex_lock(&legion_shared_mutex); + -+What: /sys/class/leds//blink_on_flush -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Boolean that determines whether the LED will blink in response -+ to cache flush activity on any of its linked block devices. ++ if (!legion_shared) { ++ legion_shared = priv; ++ mutex_init(&legion_shared->fancurve_mutex); ++ ret = 0; ++ } else { ++ pr_warn("Found multiple platform devices\n"); ++ ret = -EINVAL; ++ } + -+What: /sys/class/leds//link_dev_by_path -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Associate a block device with this LED by writing the path to -+ the device special file (e.g. /dev/sda) to this attribute. -+ Symbolic links are followed. ++ priv->loaded = true; ++ mutex_unlock(&legion_shared_mutex); + -+What: /sys/class/leds//unlink_dev_by_path -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Remove the association between this LED and a block device by -+ writing the path to the device special file (e.g. /dev/sda) to -+ this attribute. Symbolic links are followed. ++ return ret; ++} + -+What: /sys/class/leds//unlink_dev_by_name -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Remove the association between this LED and a block device by -+ writing the kernel name of the device (e.g. sda) to this -+ attribute. ++static void legion_shared_exit(struct legion_private *priv) ++{ ++ pr_info("Unloading legion shared\n"); ++ mutex_lock(&legion_shared_mutex); + -+What: /sys/class/leds//linked_devices -+Date: January 2023 -+Contact: Ian Pilcher -+Description: -+ Directory containing links to all block devices that are -+ associated with this LED. (Note that the names of the -+ symbolic links in this directory are *kernel* names, which -+ may not match the device special file paths written to -+ link_device and unlink_device.) -diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst -index eed51a910c94..270560fef3b2 100644 ---- a/Documentation/admin-guide/mm/ksm.rst -+++ b/Documentation/admin-guide/mm/ksm.rst -@@ -171,6 +171,13 @@ stable_node_chains - the number of KSM pages that hit the ``max_page_sharing`` limit - stable_node_dups - number of duplicated KSM pages -+zero_pages_sharing -+ how many empty pages are sharing kernel zero page(s) instead of -+ with each other as it would happen normally. Only effective when -+ enabling ``use_zero_pages`` knob. ++ if (legion_shared == priv) ++ legion_shared = NULL; + -+When enabling ``use_zero_pages``, the sum of ``pages_sharing`` + -+``zero_pages_sharing`` represents how much really saved by KSM. - - A high ratio of ``pages_sharing`` to ``pages_shared`` indicates good - sharing, but a high ratio of ``pages_unshared`` to ``pages_sharing`` -diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst -index b9ca081fac71..5e37d8e7bd28 100644 ---- a/Documentation/leds/index.rst -+++ b/Documentation/leds/index.rst -@@ -10,6 +10,7 @@ LEDs - leds-class - leds-class-flash - leds-class-multicolor -+ ledtrig-blkdev - ledtrig-oneshot - ledtrig-transient - ledtrig-usbport -diff --git a/Documentation/leds/ledtrig-blkdev.rst b/Documentation/leds/ledtrig-blkdev.rst -new file mode 100644 -index 000000000000..9ff5b99de451 ---- /dev/null -+++ b/Documentation/leds/ledtrig-blkdev.rst -@@ -0,0 +1,158 @@ -+.. 
SPDX-License-Identifier: GPL-2.0 ++ mutex_unlock(&legion_shared_mutex); ++ pr_info("Unloading legion shared done\n"); ++} + -+================================= -+Block Device (blkdev) LED Trigger -+================================= ++/* ============================= */ ++/* debugfs interface */ ++/* ============================ */ + -+Available when ``CONFIG_LEDS_TRIGGER_BLKDEV=y`` or -+``CONFIG_LEDS_TRIGGER_BLKDEV=m``. ++static int debugfs_ecmemory_show(struct seq_file *s, void *unused) ++{ ++ struct legion_private *priv = s->private; ++ size_t offset; + -+See also: ++ for (offset = 0; offset < priv->conf->memoryio_size; ++offset) { ++ char value = ecram_read(&priv->ecram, ++ priv->conf->memoryio_physical_ec_start + ++ offset); + -+* ``Documentation/ABI/testing/sysfs-class-led-trigger-blkdev`` -+* ``Documentation/ABI/stable/sysfs-block`` (``/sys/block//linked_leds``) ++ seq_write(s, &value, 1); ++ } ++ return 0; ++} + -+Overview -+======== ++DEFINE_SHOW_ATTRIBUTE(debugfs_ecmemory); + -+.. note:: -+ The examples below use ```` to refer to the name of a -+ system-specific LED. If no suitable LED is available on a test -+ system (in a virtual machine, for example), it is possible to -+ use a userspace LED. (See ``Documentation/leds/uleds.rst``.) ++static int debugfs_fancurve_show(struct seq_file *s, void *unused) ++{ ++ struct legion_private *priv = s->private; ++ bool is_minifancurve; ++ bool is_lockfancontroller; ++ bool is_maximumfanspeed; ++ int err; + -+Verify that the ``blkdev`` LED trigger is available:: ++ seq_printf(s, "EC Chip ID: %x\n", read_ec_id(&priv->ecram, priv->conf)); ++ seq_printf(s, "EC Chip Version: %x\n", ++ read_ec_version(&priv->ecram, priv->conf)); ++ seq_printf(s, "legion_laptop features: %s\n", LEGIONFEATURES); ++ seq_printf(s, "legion_laptop ec_readonly: %d\n", ec_readonly); ++ read_fancurve(&priv->ecram, priv->conf, &priv->fancurve); ++ ++ seq_printf(s, "minifancurve feature enabled: %d\n", ++ priv->conf->has_minifancurve); ++ err = read_minifancurve(&priv->ecram, priv->conf, &is_minifancurve); ++ seq_printf(s, "minifancurve on cool: %s\n", ++ err ? "error" : (is_minifancurve ? "true" : "false")); ++ err = read_lockfancontroller(&priv->ecram, priv->conf, ++ &is_lockfancontroller); ++ seq_printf(s, "lock fan controller: %s\n", ++ err ? "error" : (is_lockfancontroller ? "true" : "false")); ++ err = read_maximumfanspeed(&priv->ecram, priv->conf, ++ &is_maximumfanspeed); ++ seq_printf(s, "enable maximumfanspeed: %s\n", ++ err ? "error" : (is_maximumfanspeed ? "true" : "false")); ++ seq_printf(s, "enable maximumfanspeed status: %d\n", err); ++ ++ seq_printf(s, "fan curve current point id: %ld\n", ++ priv->fancurve.current_point_i); ++ seq_printf(s, "fan curve points size: %ld\n", priv->fancurve.size); ++ ++ seq_puts(s, "Current fan curve in hardware (embedded controller):\n"); ++ fancurve_print_seqfile(&priv->fancurve, s); ++ seq_puts(s, "=====================\n"); ++ return 0; ++} + -+ # grep blkdev /sys/class/leds//trigger -+ ... rfkill-none blkdev ++DEFINE_SHOW_ATTRIBUTE(debugfs_fancurve); + -+(If the previous command produces no output, you may need to load the trigger -+module - ``modprobe ledtrig_blkdev``. If the module is not available, check -+the value of ``CONFIG_LEDS_TRIGGER_BLKDEV`` in your kernel configuration.) 
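++
++// Example: reading the two seq_file dumps above from user space once
++// they are exposed by legion_debugfs_init() below (a sketch; it assumes
++// LEGION_DRVR_SHORTNAME expands to "legion" -- the directory name is
++// whatever that macro, defined earlier in this patch, evaluates to):
++//
++//   cat /sys/kernel/debug/legion/fancurve
++//   xxd /sys/kernel/debug/legion/ecmemory | less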
++static void legion_debugfs_init(struct legion_private *priv)
++{
++	struct dentry *dir;
+
++	// TODO: remove this note
++	// Note: as in other kernel modules, do not catch errors here,
++	// because if the kernel is built without debugfs this
++	// will return an error, but the module still has to
++	// work, just without debugfs
++	// TODO: what permissions; some modules use 400,
++	// others use 444
++	dir = debugfs_create_dir(LEGION_DRVR_SHORTNAME, NULL);
++	debugfs_create_file("fancurve", 0444, dir, priv,
++			    &debugfs_fancurve_fops);
++	debugfs_create_file("ecmemory", 0444, dir, priv,
++			    &debugfs_ecmemory_fops);
+
++	priv->debugfs_dir = dir;
++}
+
++static void legion_debugfs_exit(struct legion_private *priv)
++{
++	pr_info("Unloading legion debugfs\n");
++	// The following does nothing if the pointer is NULL
++	debugfs_remove_recursive(priv->debugfs_dir);
++	priv->debugfs_dir = NULL;
++	pr_info("Unloading legion debugfs done\n");
++}
+
++/* ============================= */
++/* sysfs interface               */
++/* ============================ */
+
++static ssize_t powermode_show(struct device *dev, struct device_attribute *attr,
++			      char *buf)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int power_mode = read_powermode(&priv->ecram, priv->conf);
+
++	return sysfs_emit(buf, "%d\n", power_mode);
++}
+
++static ssize_t powermode_store(struct device *dev,
++			       struct device_attribute *attr, const char *buf,
++			       size_t count)
++{
++	struct legion_private *priv = dev_get_drvdata(dev);
++	int powermode;
++	int err;
+
++	err = kstrtouint(buf, 0, &powermode);
++	if (err)
++		return err;
+
++	err = write_powermode(&priv->ecram, priv->conf, powermode);
++	if (err)
++		return -EINVAL;
+
++	// TODO: better?
++	// We have to wait a bit until the change has reached the hardware;
++	// otherwise the readback done after notifying would still return
++	// the old value to the notified reader.
++	msleep(500);
++	platform_profile_notify();
+
++	return count;
++}
+
++static DEVICE_ATTR_RW(powermode);
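++
++// Example for the powermode attribute above (a sketch: the platform
++// device path is an assumption and depends on the name the driver
++// registers; the 0/1/2 values are the LEGION_POWERMODE enum defined
++// further down in this file):
++//
++//   cat /sys/bus/platform/devices/legion_laptop/powermode
++//   echo 2 > /sys/bus/platform/devices/legion_laptop/powermode  # quiet
++//
++// Note the 500 ms sleep in powermode_store(): without it, a listener
++// woken by platform_profile_notify() could still read the old mode.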
++static ssize_t lockfancontroller_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ bool is_lockfancontroller; ++ int err; + -+Activity on the device will now cause the LED to blink. The duration of each -+blink (in milliseconds) can be adjusted by setting -+``/sys/class/leds//blink_time``. (But see **check_interval and -+blink_time** below.) ++ mutex_lock(&priv->fancurve_mutex); ++ err = read_lockfancontroller(&priv->ecram, priv->conf, ++ &is_lockfancontroller); ++ mutex_unlock(&priv->fancurve_mutex); ++ if (err) ++ return -EINVAL; + -+Associate a second device with the LED:: ++ return sysfs_emit(buf, "%d\n", is_lockfancontroller); ++} + -+ # echo /dev/sdb > /sys/class/leds//link_dev_by_path ++static ssize_t lockfancontroller_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ bool is_lockfancontroller; ++ int err; + -+ # ls /sys/class/leds//linked_devices -+ sda sdb ++ err = kstrtobool(buf, &is_lockfancontroller); ++ if (err) ++ return err; + -+When a block device is linked to one or more LEDs, the LEDs are linked from -+the device's ``linked_leds`` directory:: ++ mutex_lock(&priv->fancurve_mutex); ++ err = write_lockfancontroller(&priv->ecram, priv->conf, ++ is_lockfancontroller); ++ mutex_unlock(&priv->fancurve_mutex); ++ if (err) ++ return -EINVAL; + -+ # ls /sys/class/block/sd{a,b}/linked_leds -+ /sys/class/block/sda/linked_leds: -+ ++ return count; ++} + -+ /sys/class/block/sdb/linked_leds: -+ ++static DEVICE_ATTR_RW(lockfancontroller); + -+(The ``linked_leds`` directory only exists when the block device is linked to -+at least one LED.) ++static ssize_t keyboard_backlight_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ int state; ++ struct legion_private *priv = dev_get_drvdata(dev); + -+``check_interval`` and ``blink_time`` -+===================================== ++ read_keyboard_backlight(&priv->ecram, priv->conf, &state); ++ return sysfs_emit(buf, "%d\n", state); ++} + -+* By default, linked block devices are checked for activity every 100 -+ milliseconds. This frequency can be changed for an LED via the -+ ``/sys/class/leds//check_interval`` attribute. (The minimum value is 25 -+ milliseconds.) ++static ssize_t keyboard_backlight_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int state; ++ int err; + -+* All block devices associated with an LED are checked for activity every -+ ``check_interval`` milliseconds, and a blink is triggered if the correct type -+ of activity (as determined by the LED's ``blink_on_*`` attributes) is -+ detected. The duration of an LED's blink is determined by its ``blink_time`` -+ attribute. Thus (when the correct type of activity is detected), the LED will -+ be on for ``blink_time`` milliseconds and off for -+ ``check_interval - blink_time`` milliseconds. ++ err = kstrtouint(buf, 0, &state); ++ if (err) ++ return err; + -+* The LED subsystem ignores new blink requests for an LED that is already in -+ in the process of blinking, so setting a ``blink_time`` greater than or equal -+ to ``check_interval`` will cause some blinks to be missed. 
++ err = write_keyboard_backlight(&priv->ecram, priv->conf, state); ++ if (err) ++ return -EINVAL; + -+* Because of processing times, scheduling latencies, etc., avoiding missed -+ blinks actually requires a difference of at least a few milliseconds between -+ the ``blink_time`` and ``check_interval``. The required difference is likely -+ to vary from system to system. As a reference, a Thecus N5550 NAS requires a -+ difference of 7 milliseconds (e.g. ``check_interval == 100``, -+ ``blink_time == 93``). ++ return count; ++} + -+* The default values (``check_interval == 100``, ``blink_time == 75``) cause the -+ LED associated with a continuously active device to blink rapidly. For a more -+ "always on" effect, increase the ``blink_time`` (but not too much; see the -+ previous bullet). ++static DEVICE_ATTR_RW(keyboard_backlight); + -+Other Notes -+=========== ++static struct attribute *legion_sysfs_attributes[] = { ++ &dev_attr_powermode.attr, &dev_attr_lockfancontroller.attr, ++ &dev_attr_keyboard_backlight.attr, NULL ++}; + -+* Many (possibly all) types of block devices work with this trigger, including: ++static const struct attribute_group legion_attribute_group = { ++ .attrs = legion_sysfs_attributes ++}; + -+ * SCSI (including SATA and USB) hard disk drives and SSDs -+ * SCSI (including SATA and USB) optical drives -+ * NVMe SSDs -+ * SD cards -+ * loopback block devices (``/dev/loop*``) -+ * device mapper devices, such as LVM logical volumes -+ * MD RAID devices -+ * zRAM compressed RAM-disks -+ * partitions on block devices that support them ++static int legion_sysfs_init(struct legion_private *priv) ++{ ++ return device_add_group(&priv->platform_device->dev, ++ &legion_attribute_group); ++} + -+* The names of the symbolic links in ``/sys/class/leds//linked_devices`` -+ are **kernel** names, which may not match the paths used for -+ ``link_dev_by_path`` and ``unlink_dev_by_path``. This is most likely when a -+ symbolic link is used to refer to the device (as is common with logical -+ volumes), but it can be true for any device, because nothing prevents the -+ creation of device special files with arbitrary names (e.g. -+ ``sudo mknod /foo b 8 0``). ++static void legion_sysfs_exit(struct legion_private *priv) ++{ ++ pr_info("Unloading legion sysfs\n"); ++ device_remove_group(&priv->platform_device->dev, ++ &legion_attribute_group); ++ pr_info("Unloading legion sysfs done\n"); ++} + -+ Kernel names can be used to unlink block devices from LEDs by writing them to -+ the LED's ``unlink_dev_by_name`` attribute. ++/* ============================= */ ++/* WMI + ACPI */ ++/* ============================ */ ++// heavily based on ideapad_laptop.c + -+* The ``blkdev`` LED trigger supports many-to-many device/LED associations. -+ A device can be associated with multiple LEDs, and an LED can be associated -+ with multiple devices. -diff --git a/Documentation/x86/topology.rst b/Documentation/x86/topology.rst -index 7f58010ea86a..9de14f3f7783 100644 ---- a/Documentation/x86/topology.rst -+++ b/Documentation/x86/topology.rst -@@ -33,6 +33,7 @@ historical nature and should be cleaned up. - The topology of a system is described in the units of: - - - packages -+ - cluster - - cores - - threads - -@@ -90,6 +91,22 @@ Package-related topology information in the kernel: - Cache. In general, it is a number identifying an LLC uniquely on the - system. - -+Clusters -+======== -+A cluster consists of threads of one or more cores sharing the same L2 cache. 
++// TODO: proper names if meaning of all events is clear
++enum LEGION_WMI_EVENT {
++	LEGION_WMI_EVENT_GAMEZONE = 1,
++	LEGION_EVENT_A,
++	LEGION_EVENT_B,
++	LEGION_EVENT_C,
++	LEGION_EVENT_D,
++	LEGION_EVENT_E,
++	LEGION_EVENT_F,
++	LEGION_EVENT_G
++};
+
++struct legion_wmi_private {
++	enum LEGION_WMI_EVENT event;
++};
+
++//static void legion_wmi_notify2(u32 value, void *context)
++// {
++//	pr_info("WMI notify\n" );
++// }
+
++static void legion_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
++{
++	struct legion_wmi_private *wpriv;
++	struct legion_private *priv;
+
++	mutex_lock(&legion_shared_mutex);
++	priv = legion_shared;
++	if (!priv || !priv->loaded) {
++		pr_info("Received WMI event while not initialized!\n");
++		goto unlock;
++	}
+
++	wpriv = dev_get_drvdata(&wdev->dev);
++	switch (wpriv->event) {
++	case LEGION_EVENT_A:
++		pr_info("Fan event: legion type: %d; acpi type: %d (%d=integer)\n",
++			wpriv->event, data->type, ACPI_TYPE_INTEGER);
++		// TODO: here it is too early (first unlock mutex, then wait a bit)
++		//platform_profile_notify();
++		break;
++	default:
++		pr_info("Event: legion type: %d; acpi type: %d (%d=integer)\n",
++			wpriv->event, data->type, ACPI_TYPE_INTEGER);
++		break;
++	}
+
++unlock:
++	mutex_unlock(&legion_shared_mutex);
++	// TODO: fix this:
++	// problem: we get an event just before the powermode change (from the key?),
++	// so if we notify too early, it will read the old power mode/platform profile
++	msleep(500);
++	platform_profile_notify();
++}
+
++static int legion_wmi_probe(struct wmi_device *wdev, const void *context)
++{
++	struct legion_wmi_private *wpriv;
+
It is also printed in /proc/cpuinfo -diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h -index ce9685fc78d8..2034cd556c07 100644 ---- a/arch/x86/include/asm/cacheinfo.h -+++ b/arch/x86/include/asm/cacheinfo.h -@@ -7,6 +7,7 @@ extern unsigned int memory_caching_control; - #define CACHE_MTRR 0x01 - #define CACHE_PAT 0x02 - -+void cacheinfo_topoext_init_l2c_id(struct cpuinfo_x86 *c, int cpu); - void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu); - void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu); - -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 95cdd08c4cbb..d6594727f924 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -358,6 +358,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) - if (!err) - c->x86_coreid_bits = get_count_order(c->x86_max_cores); - -+ cacheinfo_topoext_init_l2c_id(c, cpu); - cacheinfo_amd_init_llc_id(c, cpu); - - } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { -diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c -index 4063e8991211..947a1f27278c 100644 ---- a/arch/x86/kernel/cpu/cacheinfo.c -+++ b/arch/x86/kernel/cpu/cacheinfo.c -@@ -659,6 +659,42 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) - return i; - } - -+void cacheinfo_topoext_init_l2c_id(struct cpuinfo_x86 *c, int cpu) ++ dev_set_drvdata(&wdev->dev, wpriv); ++ dev_info(&wdev->dev, "Register after probing for WMI.\n"); ++ return 0; ++} ++ ++static const struct legion_wmi_private legion_wmi_context_gamezone = { ++ .event = LEGION_WMI_EVENT_GAMEZONE ++}; ++static const struct legion_wmi_private legion_wmi_context_a = { ++ .event = LEGION_EVENT_A ++}; ++static const struct legion_wmi_private legion_wmi_context_b = { ++ .event = LEGION_EVENT_B ++}; ++static const struct legion_wmi_private legion_wmi_context_c = { ++ .event = LEGION_EVENT_C ++}; ++static const struct legion_wmi_private legion_wmi_context_d = { ++ .event = LEGION_EVENT_D ++}; ++static const struct legion_wmi_private legion_wmi_context_e = { ++ .event = LEGION_EVENT_E ++}; ++static const struct legion_wmi_private legion_wmi_context_f = { ++ .event = LEGION_EVENT_F ++}; ++ ++// check if really a method ++#define LEGION_WMI_GAMEZONE_GUID "887B54E3-DDDC-4B2C-8B88-68A26A8835D0" ++ ++#define LEGION_WMI_GUID_FAN_EVENT "D320289E-8FEA-41E0-86F9-611D83151B5F" ++#define LEGION_WMI_GUID_FAN2_EVENT "bc72a435-e8c1-4275-b3e2-d8b8074aba59" ++#define LEGION_WMI_GUID_GAMEZONE_KEY_EVENT \ ++ "10afc6d9-ea8b-4590-a2e7-1cd3c84bb4b1" ++#define LEGION_WMI_GUID_GAMEZONE_GPU_EVENT \ ++ "bfd42481-aee3-4502-a107-afb68425c5f8" ++#define LEGION_WMI_GUID_GAMEZONE_OC_EVENT "d062906b-12d4-4510-999d-4831ee80e985" ++#define LEGION_WMI_GUID_GAMEZONE_TEMP_EVENT \ ++ "bfd42481-aee3-4501-a107-afb68425c5f8" ++//#define LEGION_WMI_GUID_GAMEZONE_DATA_EVENT "887b54e3-dddc-4b2c-8b88-68a26a8835d0" ++ ++static const struct wmi_device_id legion_wmi_ids[] = { ++ { LEGION_WMI_GAMEZONE_GUID, &legion_wmi_context_gamezone }, ++ { LEGION_WMI_GUID_FAN_EVENT, &legion_wmi_context_a }, ++ { LEGION_WMI_GUID_FAN2_EVENT, &legion_wmi_context_b }, ++ { LEGION_WMI_GUID_GAMEZONE_KEY_EVENT, &legion_wmi_context_c }, ++ { LEGION_WMI_GUID_GAMEZONE_GPU_EVENT, &legion_wmi_context_d }, ++ { LEGION_WMI_GUID_GAMEZONE_OC_EVENT, &legion_wmi_context_e }, ++ { LEGION_WMI_GUID_GAMEZONE_TEMP_EVENT, &legion_wmi_context_f }, ++ { "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", ++ &legion_wmi_context_gamezone }, /* Legion 5 */ ++ {}, ++}; ++MODULE_DEVICE_TABLE(wmi, legion_wmi_ids); ++ 
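++
++// Whether a given machine exposes any of these GUIDs can be checked
++// from user space before loading the driver; the WMI bus lists
++// firmware-provided devices as GUID(-instance) entries, and
++// legion_wmi_probe() binds where one matches legion_wmi_ids[]:
++//
++//   ls /sys/bus/wmi/devices/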
++static struct wmi_driver legion_wmi_driver = { ++ .driver = { ++ .name = "legion_wmi", ++ }, ++ .id_table = legion_wmi_ids, ++ .probe = legion_wmi_probe, ++ .notify = legion_wmi_notify, ++}; ++ ++//acpi_status status = wmi_install_notify_handler(LEGION_WMI_GAMEZONE_GUID, ++// legion_wmi_notify2, NULL); ++//if (ACPI_FAILURE(status)) { ++// return -ENODEV; ++//} ++//return 0; ++ ++static int legion_wmi_init(void) +{ -+ u32 eax, ebx, ecx, edx, num_sharing_cache; -+ int i = 0, bits; ++ return wmi_driver_register(&legion_wmi_driver); ++} + -+ /* Check if L2 cache identifiers exists. */ -+ if (!cpuid_ecx(0x80000006)) -+ return; ++static void legion_wmi_exit(void) ++{ ++ // TODO: remove this ++ pr_info("Unloading legion WMI\n"); + -+ while (true) { -+ u32 level; ++ //wmi_remove_notify_handler(LEGION_WMI_GAMEZONE_GUID); ++ wmi_driver_unregister(&legion_wmi_driver); ++ pr_info("Unloading legion WMI done\n"); ++} + -+ cpuid_count(0x8000001d, i, &eax, &ebx, &ecx, &edx); -+ if (!eax) -+ return; ++/* ============================= */ ++/* Platform profile */ ++/* ============================ */ + -+ /* -+ * Check if the current leaf is for L2 cache using -+ * eax[7:5] used to describe the cache level. -+ */ -+ level = (eax >> 5) & 0x7; -+ if (level == 2) -+ break; ++enum LEGION_POWERMODE { ++ LEGION_POWERMODE_BALANCED = 0, ++ LEGION_POWERMODE_PERFORMANCE = 1, ++ LEGION_POWERMODE_QUIET = 2, ++}; + -+ ++i; ++static int legion_platform_profile_get(struct platform_profile_handler *pprof, ++ enum platform_profile_option *profile) ++{ ++ int powermode; ++ struct legion_private *priv; ++ ++ priv = container_of(pprof, struct legion_private, ++ platform_profile_handler); ++ powermode = read_powermode(&priv->ecram, priv->conf); ++ ++ switch (powermode) { ++ case LEGION_POWERMODE_BALANCED: ++ *profile = PLATFORM_PROFILE_BALANCED; ++ break; ++ case LEGION_POWERMODE_PERFORMANCE: ++ *profile = PLATFORM_PROFILE_PERFORMANCE; ++ break; ++ case LEGION_POWERMODE_QUIET: ++ *profile = PLATFORM_PROFILE_QUIET; ++ break; ++ default: ++ return -EINVAL; + } ++ return 0; ++} + -+ /* -+ * L2 ID is calculated from the number of threads -+ * sharing the L2 cache. -+ */ -+ num_sharing_cache = ((eax >> 14) & 0xfff) + 1; -+ bits = get_count_order(num_sharing_cache); -+ per_cpu(cpu_l2c_id, cpu) = c->apicid >> bits; ++static int legion_platform_profile_set(struct platform_profile_handler *pprof, ++ enum platform_profile_option profile) ++{ ++ int powermode; ++ struct legion_private *priv; ++ ++ priv = container_of(pprof, struct legion_private, ++ platform_profile_handler); ++ ++ switch (profile) { ++ case PLATFORM_PROFILE_BALANCED: ++ powermode = LEGION_POWERMODE_BALANCED; ++ break; ++ case PLATFORM_PROFILE_PERFORMANCE: ++ powermode = LEGION_POWERMODE_PERFORMANCE; ++ break; ++ case PLATFORM_PROFILE_QUIET: ++ powermode = LEGION_POWERMODE_QUIET; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return write_powermode(&priv->ecram, priv->conf, powermode); +} + - void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) - { - /* -diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c -index 5a2962c492d3..cb0025b4a2fd 100644 ---- a/arch/x86/kernel/cpu/hygon.c -+++ b/arch/x86/kernel/cpu/hygon.c -@@ -89,6 +89,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) - /* Socket ID is ApicId[6] for these processors. 
*/ - c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; - -+ cacheinfo_topoext_init_l2c_id(c, cpu); - cacheinfo_hygon_init_llc_id(c, cpu); - } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { - u64 value; -diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c -index 1056bbf55b17..212bfd1517ec 100644 ---- a/arch/x86/net/bpf_jit_comp.c -+++ b/arch/x86/net/bpf_jit_comp.c -@@ -343,9 +343,10 @@ static int emit_call(u8 **pprog, void *func, void *ip) - - static int emit_rsb_call(u8 **pprog, void *func, void *ip) - { -+ void *adjusted_ip; - OPTIMIZER_HIDE_VAR(func); -- x86_call_depth_emit_accounting(pprog, func); -- return emit_patch(pprog, func, ip, 0xE8); -+ adjusted_ip = (u8 *)ip + x86_call_depth_emit_accounting(pprog, func); -+ return emit_patch(pprog, func, adjusted_ip, 0xE8); - } - - static int emit_jump(u8 **pprog, void *func, void *ip) -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 5c536151ef83..5a80379253a7 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -912,7 +912,7 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev) - } - - gpiod_set_value_cansleep(reset_gpio, 0); -- msleep(200); -+ usleep_range(USEC_PER_SEC / 2, USEC_PER_SEC); - gpiod_set_value_cansleep(reset_gpio, 1); - - return; -diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig -index dc6816d36d06..bda249068182 100644 ---- a/drivers/leds/trigger/Kconfig -+++ b/drivers/leds/trigger/Kconfig -@@ -154,4 +154,13 @@ config LEDS_TRIGGER_TTY - - When build as a module this driver will be called ledtrig-tty. - -+config LEDS_TRIGGER_BLKDEV -+ tristate "LED Trigger for block devices" -+ depends on BLOCK -+ help -+ The blkdev LED trigger allows LEDs to be controlled by block device -+ activity (reads and writes). ++static int legion_platform_profile_init(struct legion_private *priv) ++{ ++ int err; + -+ See Documentation/leds/ledtrig-blkdev.rst. 
++ priv->platform_profile_handler.profile_get = ++ legion_platform_profile_get; ++ priv->platform_profile_handler.profile_set = ++ legion_platform_profile_set; + - endif # LEDS_TRIGGERS -diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile -index 25c4db97cdd4..d53bab5d93f1 100644 ---- a/drivers/leds/trigger/Makefile -+++ b/drivers/leds/trigger/Makefile -@@ -16,3 +16,4 @@ obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o - obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o - obj-$(CONFIG_LEDS_TRIGGER_AUDIO) += ledtrig-audio.o - obj-$(CONFIG_LEDS_TRIGGER_TTY) += ledtrig-tty.o -+obj-$(CONFIG_LEDS_TRIGGER_BLKDEV) += ledtrig-blkdev.o -diff --git a/drivers/leds/trigger/ledtrig-blkdev.c b/drivers/leds/trigger/ledtrig-blkdev.c -new file mode 100644 -index 000000000000..067eedb003b5 ---- /dev/null -+++ b/drivers/leds/trigger/ledtrig-blkdev.c -@@ -0,0 +1,1221 @@ -+// SPDX-License-Identifier: GPL-2.0-only ++ set_bit(PLATFORM_PROFILE_QUIET, priv->platform_profile_handler.choices); ++ set_bit(PLATFORM_PROFILE_BALANCED, ++ priv->platform_profile_handler.choices); ++ set_bit(PLATFORM_PROFILE_PERFORMANCE, ++ priv->platform_profile_handler.choices); + -+/* -+ * Block device LED trigger -+ * -+ * Copyright 2021-2022 Ian Pilcher -+ */ ++ err = platform_profile_register(&priv->platform_profile_handler); ++ if (err) ++ return err; + -+#include -+#include -+#include -+#include -+#include ++ return 0; ++} + -+/** -+ * DOC: Overview -+ * -+ * The ``blkdev`` LED trigger works by periodically checking the activity -+ * counters of block devices that have been linked to one or more LEDs and -+ * blinking those LED(s) if the correct type of activity has occurred. The -+ * periodic check is scheduled with the Linux kernel's deferred work facility. -+ * -+ * Trigger-specific data about block devices and LEDs is stored in two data -+ * structures --- &struct blkdev_trig_bdev (a "BTB") and &struct blkdev_trig_led -+ * (a "BTL"). Each structure contains a &struct xarray that holds links to any -+ * linked devices of the other type. I.e. &blkdev_trig_bdev.linked_btls -+ * contains links to all BTLs whose LEDs have been linked to the BTB's block -+ * device, and &blkdev_trig_led.linked_btbs contains links to all BTBs whose -+ * block devices have been linked to the BTL's LED. Thus, a block device can -+ * be linked to more than one LED, and an LED can be linked to more than one -+ * block device. -+ */ ++static void legion_platform_profile_exit(struct legion_private *priv) ++{ ++ pr_info("Unloading legion platform profile\n"); ++ platform_profile_remove(); ++ pr_info("Unloading legion platform profile done\n"); ++} + -+/* Default, minimum & maximum blink duration (milliseconds) */ -+#define BLKDEV_TRIG_BLINK_DEF 75 -+#define BLKDEV_TRIG_BLINK_MIN 10 -+#define BLKDEV_TRIG_BLINK_MAX 86400000 /* 24 hours */ ++/* ============================= */ ++/* hwom interface */ ++/* ============================ */ + -+/* Default, minimum & maximum activity check interval (milliseconds) */ -+#define BLKDEV_TRIG_CHECK_DEF 100 -+#define BLKDEV_TRIG_CHECK_MIN 25 -+#define BLKDEV_TRIG_CHECK_MAX 86400000 /* 24 hours */ ++// hw-mon interface + -+/* -+ * If blkdev_trig_check() can't lock the mutex, how long to wait before trying -+ * again (milliseconds) -+ */ -+#define BLKDEV_TRIG_CHECK_RETRY 5 ++// todo: register_group or register_info? 
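++
++// Note: besides the driver's own powermode attribute, the handler
++// registered by legion_platform_profile_init() above makes the modes
++// reachable through the generic platform-profile sysfs ABI:
++//
++//   cat /sys/firmware/acpi/platform_profile_choices
++//   # quiet balanced performance
++//   echo performance > /sys/firmware/acpi/platform_profile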
+ -+/* Mode argument for calls to blkdev_get_by_path() and blkdev_put() */ -+#define BLKDEV_TRIG_FMODE 0 ++// TODO: use one common function (like here) or one function per attribute? ++static ssize_t sensor_label_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ int sensor_id = (to_sensor_dev_attr(attr))->index; ++ const char *label; + -+/** -+ * struct blkdev_trig_bdev - Trigger-specific data about a block device. -+ * @last_checked: Time (in jiffies) at which the trigger last checked this -+ * block device for activity. -+ * @last_activity: Time (in jiffies) at which the trigger last detected -+ * activity of each type. -+ * @ios: Activity counter values for each type, corresponding to -+ * the timestamps in &last_activity. -+ * @index: &xarray index, so the BTB can be included in one or more -+ * &blkdev_trig_led.linked_btbs. -+ * @bdev: The block device. -+ * @linked_btls: The BTLs that represent the LEDs linked to the BTB's -+ * block device. -+ * -+ * Every block device linked to at least one LED gets a "BTB." A BTB is created -+ * when a block device that is not currently linked to any LEDs is linked to an -+ * LED. -+ * -+ * A BTB is freed when one of the following occurs: -+ * -+ * * The number of LEDs linked to the block device becomes zero, because it has -+ * been unlinked from its last LED using the trigger's &sysfs interface. -+ * -+ * * The number of LEDs linked to the block device becomes zero, because the -+ * last LED to which it was linked has been disassociated from the trigger -+ * (which happens automatically if the LED device is removed from the system). -+ * -+ * * The BTB's block device is removed from the system. To accomodate this -+ * scenario, BTB's are created as device resources, so that the release -+ * function will be called by the driver core when the device is removed. -+ */ -+struct blkdev_trig_bdev { -+ unsigned long last_checked; -+ unsigned long last_activity[NR_STAT_GROUPS]; -+ unsigned long ios[NR_STAT_GROUPS]; -+ unsigned long index; -+ struct block_device *bdev; -+ struct xarray linked_btls; -+}; ++ switch (sensor_id) { ++ case SENSOR_CPU_TEMP_ID: ++ label = "CPU Temperature\n"; ++ break; ++ case SENSOR_GPU_TEMP_ID: ++ label = "GPU Temperature\n"; ++ break; ++ case SENSOR_IC_TEMP_ID: ++ label = "IC Temperature\n"; ++ break; ++ case SENSOR_FAN1_RPM_ID: ++ label = "Fan 1\n"; ++ break; ++ case SENSOR_FAN2_RPM_ID: ++ label = "Fan 2\n"; ++ break; ++ case SENSOR_FAN1_TARGET_RPM_ID: ++ label = "Fan 1 Target\n"; ++ break; ++ case SENSOR_FAN2_TARGET_RPM_ID: ++ label = "Fan 2 Target\n"; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } + -+/** -+ * struct blkdev_trig_led - Trigger-specific data about an LED. -+ * @last_checked: Time (in jiffies) at which the trigger last checked the -+ * the block devices linked to this LED for activity. -+ * @index: &xarray index, so the BTL can be included in one or more -+ * &blkdev_trig_bdev.linked_btls. -+ * @mode: Bitmask for types of block device activity that will -+ * cause this LED to blink --- reads, writes, discards, -+ * etc. -+ * @led: The LED device. -+ * @blink_msec: Duration of a blink (milliseconds). -+ * @check_jiffies: Frequency with which block devices linked to this LED -+ * should be checked for activity (jiffies). -+ * @linked_btbs: The BTBs that represent the block devices linked to the -+ * BTL's LED. -+ * @all_btls_node: The BTL's node in the module's list of all BTLs. -+ * -+ * Every LED associated with the block device trigger gets a "BTL." 
A BTL is -+ * created when the trigger is "activated" on an LED (usually by writing -+ * ``blkdev`` to the LED's &sysfs &trigger attribute). A BTL is freed wnen its -+ * LED is disassociated from the trigger, either through the trigger's &sysfs -+ * interface or because the LED device is removed from the system. -+ */ -+struct blkdev_trig_led { -+ unsigned long last_checked; -+ unsigned long index; -+ unsigned long mode; /* must be ulong for atomic bit ops */ -+ struct led_classdev *led; -+ unsigned int blink_msec; -+ unsigned int check_jiffies; -+ struct xarray linked_btbs; -+ struct hlist_node all_btls_node; -+}; ++ return sprintf(buf, label); ++} + -+/* Protects everything except atomic LED attributes */ -+static DEFINE_MUTEX(blkdev_trig_mutex); ++// TODO: use one common function (like here) or one function per attribute? ++static ssize_t sensor_show(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int sensor_id = (to_sensor_dev_attr(devattr))->index; ++ struct sensor_values values; ++ int outval; + -+/* BTB device resource release function */ -+static void blkdev_trig_btb_release(struct device *dev, void *res); ++ read_sensor_values(&priv->ecram, priv->conf, &values); + -+/* Index for next BTB or BTL */ -+static unsigned long blkdev_trig_next_index; ++ switch (sensor_id) { ++ case SENSOR_CPU_TEMP_ID: ++ outval = 1000 * values.cpu_temp_celsius; ++ break; ++ case SENSOR_GPU_TEMP_ID: ++ outval = 1000 * values.gpu_temp_celsius; ++ break; ++ case SENSOR_IC_TEMP_ID: ++ outval = 1000 * values.ic_temp_celsius; ++ break; ++ case SENSOR_FAN1_RPM_ID: ++ outval = values.fan1_rpm; ++ break; ++ case SENSOR_FAN2_RPM_ID: ++ outval = values.fan2_rpm; ++ break; ++ case SENSOR_FAN1_TARGET_RPM_ID: ++ outval = values.fan1_target_rpm; ++ break; ++ case SENSOR_FAN2_TARGET_RPM_ID: ++ outval = values.fan2_target_rpm; ++ break; ++ default: ++ pr_info("Error reading sensor value with id %d\n", sensor_id); ++ return -EOPNOTSUPP; ++ } + -+/* All LEDs associated with the trigger */ -+static HLIST_HEAD(blkdev_trig_all_btls); ++ return sprintf(buf, "%d\n", outval); ++} ++ ++static SENSOR_DEVICE_ATTR_RO(temp1_input, sensor, SENSOR_CPU_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(temp1_label, sensor_label, SENSOR_CPU_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(temp2_input, sensor, SENSOR_GPU_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(temp2_label, sensor_label, SENSOR_GPU_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(temp3_input, sensor, SENSOR_IC_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(temp3_label, sensor_label, SENSOR_IC_TEMP_ID); ++static SENSOR_DEVICE_ATTR_RO(fan1_input, sensor, SENSOR_FAN1_RPM_ID); ++static SENSOR_DEVICE_ATTR_RO(fan1_label, sensor_label, SENSOR_FAN1_RPM_ID); ++static SENSOR_DEVICE_ATTR_RO(fan2_input, sensor, SENSOR_FAN2_RPM_ID); ++static SENSOR_DEVICE_ATTR_RO(fan2_label, sensor_label, SENSOR_FAN2_RPM_ID); ++static SENSOR_DEVICE_ATTR_RO(fan1_target, sensor, SENSOR_FAN1_TARGET_RPM_ID); ++static SENSOR_DEVICE_ATTR_RO(fan2_target, sensor, SENSOR_FAN2_TARGET_RPM_ID); ++ ++static struct attribute *sensor_hwmon_attributes[] = { ++ &sensor_dev_attr_temp1_input.dev_attr.attr, ++ &sensor_dev_attr_temp1_label.dev_attr.attr, ++ &sensor_dev_attr_temp2_input.dev_attr.attr, ++ &sensor_dev_attr_temp2_label.dev_attr.attr, ++ &sensor_dev_attr_temp3_input.dev_attr.attr, ++ &sensor_dev_attr_temp3_label.dev_attr.attr, ++ &sensor_dev_attr_fan1_input.dev_attr.attr, ++ &sensor_dev_attr_fan1_label.dev_attr.attr, ++ 
&sensor_dev_attr_fan2_input.dev_attr.attr, ++ &sensor_dev_attr_fan2_label.dev_attr.attr, ++ &sensor_dev_attr_fan1_target.dev_attr.attr, ++ &sensor_dev_attr_fan2_target.dev_attr.attr, ++ NULL ++}; + -+/* Delayed work to periodically check for activity & blink LEDs */ -+static void blkdev_trig_check(struct work_struct *work); -+static DECLARE_DELAYED_WORK(blkdev_trig_work, blkdev_trig_check); ++static ssize_t autopoint_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct fancurve fancurve; ++ int err; ++ int value; ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int fancurve_attr_id = to_sensor_dev_attr_2(devattr)->nr; ++ int point_id = to_sensor_dev_attr_2(devattr)->index; + -+/* When is the delayed work scheduled to run next (jiffies) */ -+static unsigned long blkdev_trig_next_check; ++ mutex_lock(&priv->fancurve_mutex); ++ err = read_fancurve(&priv->ecram, priv->conf, &fancurve); ++ mutex_unlock(&priv->fancurve_mutex); + -+/* Total number of BTB-to-BTL links */ -+static unsigned int blkdev_trig_link_count; ++ if (err) { ++ pr_info("Reading fancurve failed\n"); ++ return -EOPNOTSUPP; ++ } ++ if (!(point_id >= 0 && point_id < MAXFANCURVESIZE)) { ++ pr_info("Reading fancurve failed due to wrong point id: %d\n", ++ point_id); ++ return -EOPNOTSUPP; ++ } + -+/* Empty sysfs attribute list for next 2 declarations */ -+static struct attribute *blkdev_trig_attrs_empty[] = { NULL }; ++ switch (fancurve_attr_id) { ++ case FANCURVE_ATTR_PWM1: ++ value = fancurve.points[point_id].rpm1_raw * 100; ++ break; ++ case FANCURVE_ATTR_PWM2: ++ value = fancurve.points[point_id].rpm2_raw * 100; ++ break; ++ case FANCURVE_ATTR_CPU_TEMP: ++ value = fancurve.points[point_id].cpu_max_temp_celsius; ++ break; ++ case FANCURVE_ATTR_CPU_HYST: ++ value = fancurve.points[point_id].cpu_min_temp_celsius; ++ break; ++ case FANCURVE_ATTR_GPU_TEMP: ++ value = fancurve.points[point_id].gpu_max_temp_celsius; ++ break; ++ case FANCURVE_ATTR_GPU_HYST: ++ value = fancurve.points[point_id].gpu_min_temp_celsius; ++ break; ++ case FANCURVE_ATTR_IC_TEMP: ++ value = fancurve.points[point_id].ic_max_temp_celsius; ++ break; ++ case FANCURVE_ATTR_IC_HYST: ++ value = fancurve.points[point_id].ic_min_temp_celsius; ++ break; ++ case FANCURVE_ATTR_ACCEL: ++ value = fancurve.points[point_id].accel; ++ break; ++ case FANCURVE_ATTR_DECEL: ++ value = fancurve.points[point_id].decel; ++ break; ++ case FANCURVE_SIZE: ++ value = fancurve.size; ++ break; ++ default: ++ pr_info("Reading fancurve failed due to wrong attribute id: %d\n", ++ fancurve_attr_id); ++ return -EOPNOTSUPP; ++ } + -+/* linked_leds sysfs directory for block devs linked to 1 or more LEDs */ -+static const struct attribute_group blkdev_trig_linked_leds = { -+ .name = "linked_leds", -+ .attrs = blkdev_trig_attrs_empty, -+}; ++ return sprintf(buf, "%d\n", value); ++} + -+/* linked_devices sysfs directory for each LED associated with the trigger */ -+static const struct attribute_group blkdev_trig_linked_devs = { -+ .name = "linked_devices", -+ .attrs = blkdev_trig_attrs_empty, -+}; ++static ssize_t autopoint_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct fancurve fancurve; ++ int err; ++ int value; ++ bool valid; ++ struct legion_private *priv = dev_get_drvdata(dev); ++ int fancurve_attr_id = to_sensor_dev_attr_2(devattr)->nr; ++ int point_id = to_sensor_dev_attr_2(devattr)->index; ++ ++ if (!(point_id >= 0 && point_id < MAXFANCURVESIZE)) { ++ pr_info("Reading fancurve failed 
due to wrong point id: %d\n",
++			point_id);
++		err = -EOPNOTSUPP;
++		goto error;
++	}
+
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parse for hwmon store is not successful: error:%d; point_id: %d; fancurve_attr_id: %d\n",
++			err, point_id, fancurve_attr_id);
++		goto error;
++	}
+
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_fancurve(&priv->ecram, priv->conf, &fancurve);
+
++	if (err) {
++		pr_info("Reading fancurve failed\n");
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
+
++	switch (fancurve_attr_id) {
++	case FANCURVE_ATTR_PWM1:
++		valid = fancurve_set_rpm1(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_PWM2:
++		valid = fancurve_set_rpm2(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_CPU_TEMP:
++		valid = fancurve_set_cpu_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_CPU_HYST:
++		valid = fancurve_set_cpu_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_GPU_TEMP:
++		valid = fancurve_set_gpu_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_GPU_HYST:
++		valid = fancurve_set_gpu_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_IC_TEMP:
++		valid = fancurve_set_ic_temp_max(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_IC_HYST:
++		valid = fancurve_set_ic_temp_min(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_ACCEL:
++		valid = fancurve_set_accel(&fancurve, point_id, value);
++		break;
++	case FANCURVE_ATTR_DECEL:
++		valid = fancurve_set_decel(&fancurve, point_id, value);
++		break;
++	case FANCURVE_SIZE:
++		valid = fancurve_set_size(&fancurve, value, true);
++		break;
++	default:
++		pr_info("Writing fancurve failed due to wrong attribute id: %d\n",
++			fancurve_attr_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
+
++	if (!valid) {
++		pr_info("Ignoring invalid fancurve value %d for attribute %d at point %d\n",
++			value, fancurve_attr_id, point_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
+
++	err = write_fancurve(&priv->ecram, priv->conf, &fancurve, false);
++	if (err) {
++		pr_info("Writing fancurve when accessed via hwmon failed at point_id: %d\n",
++			point_id);
++		err = -EOPNOTSUPP;
++		goto error_mutex;
++	}
+
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
+
++error_mutex:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
+
++// rpm1
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, autopoint,
++			       FANCURVE_ATTR_PWM1, 1);
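++
++// The pwmN_auto_pointM_* attributes declared above and below follow
++// the standard hwmon sysfs ABI, so they can be exercised with plain
++// shell tools once the device is registered (hwmonX is a placeholder;
++// the index is assigned at runtime and must be discovered first):
++//
++//   cat /sys/class/hwmon/hwmon*/name        # find this driver's entry
++//   cat /sys/class/hwmon/hwmonX/temp1_input # CPU temp, millidegrees
++//   cat /sys/class/hwmon/hwmonX/fan1_input  # fan 1, RPM
++//   cat /sys/class/hwmon/hwmonX/pwm1_auto_point3_pwm  # curve point 3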
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_pwm, autopoint, ++ FANCURVE_ATTR_PWM1, 9); ++// rpm2 ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_pwm, autopoint, ++ FANCURVE_ATTR_PWM2, 9); ++// CPU temp ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_CPU_TEMP, 9); ++// CPU temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 7); ++static 
SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_temp_hyst, autopoint, ++ FANCURVE_ATTR_CPU_HYST, 9); ++// GPU temp ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_GPU_TEMP, 9); ++// GPU temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point9_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point10_temp_hyst, autopoint, ++ FANCURVE_ATTR_GPU_HYST, 9); ++// IC temp ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 2); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 3); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 4); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 5); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 6); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 7); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point9_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 8); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point10_temp, autopoint, ++ FANCURVE_ATTR_IC_TEMP, 9); ++// IC temp hyst ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 0); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 1); ++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp_hyst, autopoint, ++ FANCURVE_ATTR_IC_HYST, 2); 
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 4);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point9_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point10_temp_hyst, autopoint,
++				FANCURVE_ATTR_IC_HYST, 9);
++// accel
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 1);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 2);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 4);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_accel, autopoint,
++				FANCURVE_ATTR_ACCEL, 9);
++// decel
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 0);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 1);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 2);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 3);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 4);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 5);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 6);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 7);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point9_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 8);
++static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point10_decel, autopoint,
++				FANCURVE_ATTR_DECEL, 9);
++// size
++static SENSOR_DEVICE_ATTR_2_RW(auto_points_size, autopoint, FANCURVE_SIZE, 0);
++
++static ssize_t minifancurve_show(struct device *dev,
++				 struct device_attribute *devattr, char *buf)
++{
++	bool value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_minifancurve(&priv->ecram, priv->conf, &value);
++	if (err) {
++		err = -1;
++		pr_info("Reading minifancurve not successful\n");
++		goto error_unlock;
+	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return sprintf(buf, "%d\n", value);
+
-+	return false;
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++	return -1;
+}
+
-+/**
-+ * blkdev_trig_update_btb() - Update a BTB's activity counters and timestamps.
-+ * @btb:	The BTB
-+ * @now:	Timestamp (in jiffies)
-+ *
-+ * Context:	Process context.  Caller must hold &blkdev_trig_mutex.
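
Every SENSOR_DEVICE_ATTR_2_RW(<name>, autopoint, NR, IDX) line above registers the same autopoint_show()/autopoint_store() pair (defined earlier in this patch) and only varies the two integers, which hwmon hands back through struct sensor_device_attribute_2. A minimal sketch of that dispatch pattern — the body here is illustrative, not the patch's implementation:

	static ssize_t autopoint_show(struct device *dev,
				      struct device_attribute *devattr, char *buf)
	{
		/* recover the two extra macro arguments */
		struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(devattr);
		int fancurve_attr_id = sattr->nr;	/* e.g. FANCURVE_ATTR_PWM1 */
		int point_id = sattr->index;		/* 0..9 == auto_point1..10 */

		/* a real implementation reads the fan-curve point from the EC here */
		return sprintf(buf, "%d/%d\n", fancurve_attr_id, point_id);
	}
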
-+ */
-+static void blkdev_trig_update_btb(struct blkdev_trig_bdev *btb,
-+				   unsigned long now)
++static ssize_t minifancurve_store(struct device *dev,
++				  struct device_attribute *devattr,
++				  const char *buf, size_t count)
+{
-+	unsigned long new_ios;
-+	enum stat_group i;
++	int value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parse for hwmon store is not successful: error:%d\n",
++			err);
++		err = -1;
++		goto error;
++	}
+
-+	for (i = STAT_READ; i <= STAT_FLUSH; ++i) {
++	mutex_lock(&priv->fancurve_mutex);
++	err = write_minifancurve(&priv->ecram, priv->conf, value);
++	if (err) {
++		err = -1;
++		pr_info("Writing minifancurve not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
+
-+		new_ios = part_stat_read(btb->bdev, ios[i]);
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
+
-+		if (new_ios != btb->ios[i]) {
-+			btb->ios[i] = new_ios;
-+			btb->last_activity[i] = now;
-+		}
++static SENSOR_DEVICE_ATTR_RW(minifancurve, minifancurve, 0);
++
++static ssize_t pwm1_mode_show(struct device *dev,
++			      struct device_attribute *devattr, char *buf)
++{
++	bool value;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = read_maximumfanspeed(&priv->ecram, priv->conf, &value);
++	if (err) {
++		err = -1;
++		pr_info("Reading pwm1_mode/maximumfanspeed not successful\n");
++		goto error_unlock;
+	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return sprintf(buf, "%d\n", value ? 0 : 2);
+
-+	btb->last_checked = now;
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++	return -1;
+}
+
-+/**
-+ * blkdev_trig_check() - Check linked devices for activity and blink LEDs.
-+ * @work:	Delayed work (&blkdev_trig_work)
-+ *
-+ * Context:	Process context.  Takes and releases &blkdev_trig_mutex.
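
One caveat on the show/store handlers above: a bare return of -1 from a sysfs callback surfaces in userspace as -EPERM, which misrepresents an EC read/write failure. A sketch of the more conventional errno propagation — an alternative, not what this patch does:

	err = write_minifancurve(&priv->ecram, priv->conf, value);
	if (err) {
		pr_info("Writing minifancurve not successful\n");
		mutex_unlock(&priv->fancurve_mutex);
		return -EIO;	/* or simply propagate err */
	}
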
-+ */
-+static void blkdev_trig_check(struct work_struct *work)
++static ssize_t pwm1_mode_store(struct device *dev,
++			       struct device_attribute *devattr,
++			       const char *buf, size_t count)
+{
-+	struct blkdev_trig_led *btl;
-+	struct blkdev_trig_bdev *btb;
-+	unsigned long index, delay, now, led_check, led_delay;
-+	bool blinked;
-+
-+	if (!mutex_trylock(&blkdev_trig_mutex)) {
-+		delay = msecs_to_jiffies(BLKDEV_TRIG_CHECK_RETRY);
-+		goto exit_reschedule;
++	int value;
++	int is_maximumfanspeed;
++	int err;
++	struct legion_private *priv = dev_get_drvdata(dev);
++
++	err = kstrtoint(buf, 0, &value);
++	if (err) {
++		pr_info("Parse for hwmon store is not successful: error:%d\n",
++			err);
++		err = -1;
++		goto error;
+	}
++	is_maximumfanspeed = value == 0;
++
++	mutex_lock(&priv->fancurve_mutex);
++	err = write_maximumfanspeed(&priv->ecram, priv->conf,
++				    is_maximumfanspeed);
++	if (err) {
++		err = -1;
++		pr_info("Writing pwm1_mode/maximumfanspeed not successful\n");
++		goto error_unlock;
++	}
++	mutex_unlock(&priv->fancurve_mutex);
++	return count;
+
-+	now = jiffies;
-+	delay = ULONG_MAX;
-+
-+	hlist_for_each_entry (btl, &blkdev_trig_all_btls, all_btls_node) {
++error_unlock:
++	mutex_unlock(&priv->fancurve_mutex);
++error:
++	return err;
++}
+
-+		led_check = btl->last_checked + btl->check_jiffies;
++static SENSOR_DEVICE_ATTR_RW(pwm1_mode, pwm1_mode, 0);
++
++static struct attribute *fancurve_hwmon_attributes[] = {
++	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point9_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm2_auto_point10_pwm.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
++	&sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
++	
&sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point6_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point7_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point8_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point9_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point10_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point1_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point2_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point3_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point4_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point5_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point6_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point7_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point8_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point9_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm2_auto_point10_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point6_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point7_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point8_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point9_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point10_temp.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point1_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point2_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point3_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point4_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point5_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point6_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point7_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point8_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point9_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm3_auto_point10_temp_hyst.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point1_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point2_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point3_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point4_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point5_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point6_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point7_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point8_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point9_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point10_accel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point1_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point2_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point3_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point4_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point5_decel.dev_attr.attr, ++ 
&sensor_dev_attr_pwm1_auto_point6_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point7_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point8_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point9_decel.dev_attr.attr, ++ &sensor_dev_attr_pwm1_auto_point10_decel.dev_attr.attr, ++ // ++ &sensor_dev_attr_auto_points_size.dev_attr.attr, ++ &sensor_dev_attr_minifancurve.dev_attr.attr, ++ &sensor_dev_attr_pwm1_mode.dev_attr.attr, NULL ++}; + -+ if (time_before_eq(led_check, now)) { ++static umode_t legion_is_visible(struct kobject *kobj, struct attribute *attr, ++ int idx) ++{ ++ bool supported = true; ++ struct device *dev = kobj_to_dev(kobj); ++ struct legion_private *priv = dev_get_drvdata(dev); + -+ blinked = false; ++ if (attr == &sensor_dev_attr_minifancurve.dev_attr.attr) ++ supported = priv->conf->has_minifancurve; + -+ xa_for_each (&btl->linked_btbs, index, btb) { ++ return supported ? attr->mode : 0; ++} + -+ if (btb->last_checked != now) -+ blkdev_trig_update_btb(btb, now); -+ if (!blinked) -+ blinked = blkdev_trig_blink(btl, btb); -+ } ++static const struct attribute_group legion_hwmon_sensor_group = { ++ .attrs = sensor_hwmon_attributes, ++ .is_visible = NULL ++}; + -+ btl->last_checked = now; -+ led_delay = btl->check_jiffies; ++static const struct attribute_group legion_hwmon_fancurve_group = { ++ .attrs = fancurve_hwmon_attributes, ++ .is_visible = legion_is_visible, ++}; + -+ } else { -+ led_delay = led_check - now; -+ } ++static const struct attribute_group *legion_hwmon_groups[] = { ++ &legion_hwmon_sensor_group, &legion_hwmon_fancurve_group, NULL ++}; + -+ if (led_delay < delay) -+ delay = led_delay; ++ssize_t legion_hwmon_init(struct legion_private *priv) ++{ ++ //TODO: use hwmon_device_register_with_groups or ++ // hwmon_device_register_with_info (latter means all hwmon functions have to be ++ // changed) ++ // some laptop driver do it in one way, some in the other ++ // TODO: Use devm_hwmon_device_register_with_groups ? ++ // some laptop drivers use this, some ++ struct device *hwmon_dev = hwmon_device_register_with_groups( ++ &priv->platform_device->dev, "legion_hwmon", priv, ++ legion_hwmon_groups); ++ if (IS_ERR_OR_NULL(hwmon_dev)) { ++ pr_err("hwmon_device_register failed!\n"); ++ return PTR_ERR(hwmon_dev); + } -+ -+ mutex_unlock(&blkdev_trig_mutex); -+ -+exit_reschedule: -+ WARN_ON_ONCE(delay == ULONG_MAX); -+ WARN_ON_ONCE(!schedule_delayed_work(&blkdev_trig_work, delay)); ++ dev_set_drvdata(hwmon_dev, priv); ++ priv->hwmon_dev = hwmon_dev; ++ return 0; +} + -+/** -+ * blkdev_trig_sched_led() - Set the schedule of the delayed work when a new -+ * LED is added to the schedule. -+ * @btl: The BTL that represents the LED -+ * -+ * Called when the number of block devices to which an LED is linked becomes -+ * non-zero. -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. -+ */ -+static void blkdev_trig_sched_led(const struct blkdev_trig_led *btl) ++void legion_hwmon_exit(struct legion_private *priv) +{ -+ unsigned long delay = READ_ONCE(btl->check_jiffies); -+ unsigned long check_by = jiffies + delay; -+ -+ /* -+ * If no other LED-to-block device links exist, simply schedule the -+ * delayed work according to this LED's check_interval attribute -+ * (check_jiffies). 
-+	 */
-+	if (blkdev_trig_link_count == 0) {
-+		WARN_ON(!schedule_delayed_work(&blkdev_trig_work, delay));
-+		blkdev_trig_next_check = check_by;
-+		return;
+	}
-+
-+	/*
-+	 * If the next check is already scheduled to occur soon enough to
-+	 * accommodate this LED's check_interval, the schedule doesn't need
-+	 * to be changed.
-+	 */
-+	if (time_after_eq(check_by, blkdev_trig_next_check))
-+		return;
-+
-+	/*
-+	 * Modify the schedule, so that the delayed work runs soon enough for
-+	 * this LED.
-+	 */
-+	WARN_ON(!mod_delayed_work(system_wq, &blkdev_trig_work, delay));
-+	blkdev_trig_next_check = check_by;
+}
+
+
-+/*
-+ *
-+ *	Linking and unlinking LEDs and block devices
-+ *
-+ */
-+
-+/**
-+ * blkdev_trig_link() - Link a block device to an LED.
-+ * @btl:	The BTL that represents the LED
-+ * @btb:	The BTB that represents the block device
-+ *
-+ * Context:	Process context.  Caller must hold &blkdev_trig_mutex.
-+ * Return:	&0 on success, negative &errno on error.
-+ */
-+static int blkdev_trig_link(struct blkdev_trig_led *btl,
-+			    struct blkdev_trig_bdev *btb)
+{
-+	bool led_first_link;
+	int err;
Notify maintainer if you want to add your device or force load with param force.\n");
++		err = -ENOMEM;
++		goto err_model_mismtach;
++	}
+
-+	led_first_link = xa_empty(&btl->linked_btbs);
++	if (force)
++		dev_info(&pdev->dev, "legion_laptop is forced to load.\n");
+
-+	err = xa_insert(&btb->linked_btls, btl->index, btl, GFP_KERNEL);
-+	if (err)
-+		return err;
++	if (!do_load_by_list && do_load) {
++		dev_info(
++			&pdev->dev,
++			"legion_laptop is forced to load and would otherwise be not loaded\n");
++	}
+
-+	err = xa_insert(&btl->linked_btbs, btb->index, btb, GFP_KERNEL);
-+	if (err)
-+		goto error_erase_btl;
++	// if forced and no module found, use config for first model
++	if (dmi_sys == NULL)
++		dmi_sys = &optimistic_allowlist[0];
++	dev_info(&pdev->dev, "Using configuration for system: %s\n",
++		 dmi_sys->ident);
+
-+	/* Create /sys/class/block/<bdev>/linked_leds/<led> symlink */
-+	err = sysfs_add_link_to_group(bdev_kobj(btb->bdev),
-+				      blkdev_trig_linked_leds.name,
-+				      &btl->led->dev->kobj, btl->led->name);
-+	if (err)
-+		goto error_erase_btb;
++	priv->conf = dmi_sys->driver_data;
+
-+	/* Create /sys/class/leds/<led>/linked_devices/<bdev> symlink */
-+	err = sysfs_add_link_to_group(&btl->led->dev->kobj,
-+				      blkdev_trig_linked_devs.name,
-+				      bdev_kobj(btb->bdev),
-+				      dev_name(&btb->bdev->bd_device));
++	err = ecram_init(&priv->ecram, priv->conf->memoryio_physical_ec_start,
++			 priv->conf->memoryio_size);
++	if (err) {
++		dev_info(&pdev->dev,
++			 "Could not init access to embedded controller\n");
++		goto err_ecram_init;
++	}
++
++	ec_read_id = read_ec_id(&priv->ecram, priv->conf);
++	dev_info(&pdev->dev, "Read embedded controller ID 0x%x\n", ec_read_id);
++	if (priv->conf->check_embedded_controller_id &&
++	    !(ec_read_id == priv->conf->embedded_controller_id)) {
++		err = -ENOMEM;
++		dev_info(&pdev->dev, "Expected EC chip id 0x%x but read 0x%x\n",
++			 priv->conf->embedded_controller_id, ec_read_id);
++		goto err_ecram_id;
++	}
++	if (!priv->conf->check_embedded_controller_id) {
++		dev_info(&pdev->dev,
++			 "Skipped checking embedded controller id\n");
++	}
++
++	dev_info(&pdev->dev, "Creating debugfs interface\n");
++	legion_debugfs_init(priv);
++
++	pr_info("Creating sysfs interface\n");
++	err = legion_sysfs_init(priv);
++	if (err) {
++		dev_info(&pdev->dev, "Creating sysfs interface failed\n");
++		goto err_sysfs_init;
++	}
++
++	pr_info("Creating hwmon interface\n");
++	err = legion_hwmon_init(priv);
+	if (err)
-+		goto error_remove_symlink;
++		goto err_hwmon_init;
+
-+	/*
-+	 * If this is the first block device linked to this LED, the delayed
-+	 * work schedule may need to be changed.
-+ */ -+ if (led_first_link) -+ blkdev_trig_sched_led(btl); ++ pr_info("Creating platform profile support\n"); ++ err = legion_platform_profile_init(priv); ++ if (err) { ++ dev_info(&pdev->dev, "Creating platform profile failed\n"); ++ goto err_platform_profile; ++ } + -+ ++blkdev_trig_link_count; ++ pr_info("Init WMI driver support\n"); ++ err = legion_wmi_init(); ++ if (err) { ++ dev_info(&pdev->dev, "Init WMI driver failed\n"); ++ goto err_wmi; ++ } + ++ dev_info(&pdev->dev, "legion_laptop loaded for this device\n"); + return 0; + -+error_remove_symlink: -+ sysfs_remove_link_from_group(bdev_kobj(btb->bdev), -+ blkdev_trig_linked_leds.name, -+ btl->led->name); -+error_erase_btb: -+ xa_erase(&btl->linked_btbs, btb->index); -+error_erase_btl: -+ xa_erase(&btb->linked_btls, btl->index); ++ // TODO: remove eventually ++ legion_wmi_exit(); ++err_wmi: ++ legion_platform_profile_exit(priv); ++err_platform_profile: ++ legion_hwmon_exit(priv); ++err_hwmon_init: ++ legion_sysfs_exit(priv); ++err_sysfs_init: ++ legion_debugfs_exit(priv); ++err_ecram_id: ++ ecram_exit(&priv->ecram); ++err_ecram_init: ++ legion_shared_exit(priv); ++err_legion_shared_init: ++err_model_mismtach: ++ dev_info(&pdev->dev, "legion_laptop not loaded for this device\n"); + return err; +} + -+/** -+ * blkdev_trig_put_btb() - Remove and free a BTB, if it is no longer needed. -+ * @btb: The BTB -+ * -+ * Does nothing if the BTB (block device) is still linked to at least one LED. -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. -+ */ -+static void blkdev_trig_put_btb(struct blkdev_trig_bdev *btb) ++int legion_remove(struct platform_device *pdev) +{ -+ struct block_device *bdev = btb->bdev; -+ int err; ++ struct legion_private *priv = dev_get_drvdata(&pdev->dev); + -+ if (xa_empty(&btb->linked_btls)) { ++ mutex_lock(&legion_shared_mutex); ++ priv->loaded = false; ++ mutex_unlock(&legion_shared_mutex); + -+ sysfs_remove_group(bdev_kobj(bdev), &blkdev_trig_linked_leds); -+ err = devres_destroy(&bdev->bd_device, blkdev_trig_btb_release, -+ NULL, NULL); -+ WARN_ON(err); -+ } ++ // first unregister wmi, so toggling powermode does not ++ // generate events anymore that even might be delayed ++ legion_wmi_exit(); ++ legion_platform_profile_exit(priv); ++ ++ // toggle power mode to load default setting from embedded controller ++ // again ++ toggle_powermode(&priv->ecram, priv->conf); ++ ++ legion_hwmon_exit(priv); ++ legion_sysfs_exit(priv); ++ legion_debugfs_exit(priv); ++ ecram_exit(&priv->ecram); ++ legion_shared_exit(priv); ++ ++ pr_info("Legion platform unloaded\n"); ++ return 0; +} + -+/** -+ * _blkdev_trig_unlink_always() - Perform the unconditionally required steps of -+ * unlinking a block device from an LED. -+ * @btl: The BTL that represents the LED -+ * @btb: The BTB that represents the block device -+ * -+ * When a block device is unlinked from an LED, certain steps must be performed -+ * only if the block device is **not** being released. This function performs -+ * those steps that are **always** required, whether or not the block device is -+ * being released. -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. 
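
legion_add() and its error labels above follow the standard kernel unwind ladder: the labels appear in exact reverse order of the init calls, so a failure at step N tears down only steps N-1..1, and legion_remove() replays the same teardown for a clean unload. The idiom reduced to a skeleton, with hypothetical step names:

	err = init_a();
	if (err)
		goto err_a;
	err = init_b();
	if (err)
		goto err_b;	/* must undo a, but b never ran */
	return 0;

err_b:
	exit_a();
err_a:
	return err;
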
-+ */ -+static void _blkdev_trig_unlink_always(struct blkdev_trig_led *btl, -+ struct blkdev_trig_bdev *btb) ++int legion_resume(struct platform_device *pdev) +{ -+ --blkdev_trig_link_count; ++ //struct legion_private *priv = dev_get_drvdata(&pdev->dev); ++ dev_info(&pdev->dev, "Resumed in legion-laptop\n"); + -+ if (blkdev_trig_link_count == 0) -+ WARN_ON(!cancel_delayed_work_sync(&blkdev_trig_work)); ++ return 0; ++} + -+ xa_erase(&btb->linked_btls, btl->index); -+ xa_erase(&btl->linked_btbs, btb->index); ++#ifdef CONFIG_PM_SLEEP ++static int legion_pm_resume(struct device *dev) ++{ ++ //struct legion_private *priv = dev_get_drvdata(dev); ++ dev_info(dev, "Resumed PM in legion-laptop\n"); + -+ /* Remove /sys/class/leds//linked_devices/ symlink */ -+ sysfs_remove_link_from_group(&btl->led->dev->kobj, -+ blkdev_trig_linked_devs.name, -+ dev_name(&btb->bdev->bd_device)); ++ return 0; +} ++#endif ++static SIMPLE_DEV_PM_OPS(legion_pm, NULL, legion_pm_resume); + -+/** -+ * blkdev_trig_unlink_norelease() - Unlink an LED from a block device that is -+ * **not** being released. -+ * @btl: The BTL that represents the LED. -+ * @btb: The BTB that represents the block device. -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. -+ */ -+static void blkdev_trig_unlink_norelease(struct blkdev_trig_led *btl, -+ struct blkdev_trig_bdev *btb) ++// same as ideapad ++static const struct acpi_device_id legion_device_ids[] = { ++ { "PNP0C09", 0 }, // todo: change to "VPC2004" ++ { "", 0 }, ++}; ++MODULE_DEVICE_TABLE(acpi, legion_device_ids); ++ ++static struct platform_driver legion_driver = { ++ .probe = legion_add, ++ .remove = legion_remove, ++ .resume = legion_resume, ++ .driver = { ++ .name = "legion", ++ .pm = &legion_pm, ++ .acpi_match_table = ACPI_PTR(legion_device_ids), ++ }, ++}; ++ ++int __init legion_init(void) +{ -+ _blkdev_trig_unlink_always(btl, btb); ++ int err; + -+ /* Remove /sys/class/block//linked_leds/ symlink */ -+ sysfs_remove_link_from_group(bdev_kobj(btb->bdev), -+ blkdev_trig_linked_leds.name, -+ btl->led->name); ++ pr_info("legion_laptop starts loading\n"); ++ err = platform_driver_register(&legion_driver); ++ if (err) { ++ pr_info("legion_laptop: platform_driver_register failed\n"); ++ return err; ++ } + -+ blkdev_trig_put_btb(btb); ++ return 0; +} + -+/** -+ * blkdev_trig_unlink_release() - Unlink an LED from a block device that is -+ * being released. -+ * @btl: The BTL that represents the LED -+ * @btb: The BTB that represents the block device -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. -+ */ -+static void blkdev_trig_unlink_release(struct blkdev_trig_led *btl, -+ struct blkdev_trig_bdev *btb) -+{ -+ _blkdev_trig_unlink_always(btl, btb); ++module_init(legion_init); + -+ /* -+ * If the BTB is being released, the driver core has already removed the -+ * device's attribute groups, and the BTB will be freed automatically, -+ * so there's nothing else to do. -+ */ ++void __exit legion_exit(void) ++{ ++ platform_driver_unregister(&legion_driver); ++ pr_info("legion_laptop exit\n"); +} + ++module_exit(legion_exit); +diff --git a/drivers/platform/x86/steamdeck.c b/drivers/platform/x86/steamdeck.c +new file mode 100644 +index 000000000000..77a6677ec19e +--- /dev/null ++++ b/drivers/platform/x86/steamdeck.c +@@ -0,0 +1,523 @@ ++// SPDX-License-Identifier: GPL-2.0+ + +/* ++ * Steam Deck ACPI platform driver + * -+ * BTB creation -+ * -+ */ -+ -+/** -+ * blkdev_trig_btb_release() - BTB device resource release function. 
-+ * @dev: The block device -+ * @res: The BTB -+ * -+ * Called by the driver core when a block device with a BTB is removed. ++ * Copyright (C) 2021-2022 Valve Corporation + * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. + */ -+static void blkdev_trig_btb_release(struct device *dev, void *res) -+{ -+ struct blkdev_trig_bdev *btb = res; -+ struct blkdev_trig_led *btl; -+ unsigned long index; -+ -+ mutex_lock(&blkdev_trig_mutex); -+ -+ xa_for_each (&btb->linked_btls, index, btl) -+ blkdev_trig_unlink_release(btl, btb); ++#include ++#include ++#include ++#include ++#include + -+ mutex_unlock(&blkdev_trig_mutex); -+} ++#define ACPI_STEAMDECK_NOTIFY_STATUS 0x80 + -+/** -+ * blkdev_trig_get_bdev() - Get a block device by path. -+ * @path: The value written to an LED's &link_dev_by_path or -+ * &unlink_dev_by_path attribute, which should be the path to a -+ * special file that represents a block device -+ * @len: The number of characters in &path (not including its -+ * terminating null) -+ * -+ * The caller must call blkdev_put() when finished with the device. -+ * -+ * Context: Process context. -+ * Return: The block device, or an error pointer. ++/* 0 - port connected, 1 -port disconnected */ ++#define ACPI_STEAMDECK_PORT_CONNECT BIT(0) ++/* 0 - Upstream Facing Port, 1 - Downdstream Facing Port */ ++#define ACPI_STEAMDECK_CUR_DATA_ROLE BIT(3) ++/* ++ * Debouncing delay to allow negotiation process to settle. 2s value ++ * was arrived at via trial and error. + */ -+static struct block_device *blkdev_trig_get_bdev(const char *path, size_t len) -+{ -+ struct block_device *bdev; -+ char *buf; -+ -+ buf = kmemdup(path, len + 1, GFP_KERNEL); /* +1 to include null */ -+ if (buf == NULL) -+ return ERR_PTR(-ENOMEM); ++#define STEAMDECK_ROLE_SWITCH_DELAY (msecs_to_jiffies(2000)) + -+ bdev = blkdev_get_by_path(strim(buf), BLKDEV_TRIG_FMODE, THIS_MODULE); -+ kfree(buf); -+ return bdev; -+} ++struct steamdeck { ++ struct acpi_device *adev; ++ struct device *hwmon; ++ void *regmap; ++ long fan_target; ++ struct delayed_work role_work; ++ struct extcon_dev *edev; ++ struct device *dev; ++}; + -+/** -+ * blkdev_trig_get_btb() - Find or create the BTB for a block device. -+ * @path: The value written to an LED's &link_dev_by_path attribute, -+ * which should be the path to a special file that represents a -+ * block device -+ * @len: The number of characters in &path -+ * -+ * If a new BTB is created, because the block device was not previously linked -+ * to any LEDs, the block device's &linked_leds &sysfs directory is created. -+ * -+ * Context: Process context. Caller must hold &blkdev_trig_mutex. -+ * Return: Pointer to the BTB, error pointer on error. 
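
blkdev_trig_btb_release() above is a devres release callback: each BTB is allocated against the block device's struct device, so it is freed automatically when that device goes away and can be looked up later without extra bookkeeping. The idiom, condensed from the surrounding code of this patch:

	/* create: allocate with the release callback, then hand to devres */
	btb = devres_alloc(blkdev_trig_btb_release, sizeof(*btb), GFP_KERNEL);
	if (!btb)
		return ERR_PTR(-ENOMEM);
	/* ... initialise btb ... */
	devres_add(&bdev->bd_device, btb);

	/* lookup: find the previously added resource by its release function */
	btb = devres_find(&bdev->bd_device, blkdev_trig_btb_release, NULL, NULL);
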
-+ */ -+static struct blkdev_trig_bdev *blkdev_trig_get_btb(const char *path, -+ size_t len) ++static ssize_t ++steamdeck_simple_store(struct device *dev, const char *buf, size_t count, ++ const char *method, ++ unsigned long upper_limit) +{ -+ struct block_device *bdev; -+ struct blkdev_trig_bdev *btb; -+ int err; -+ -+ bdev = blkdev_trig_get_bdev(path, len); -+ if (IS_ERR(bdev)) -+ return ERR_CAST(bdev); ++ struct steamdeck *fan = dev_get_drvdata(dev); ++ unsigned long value; + -+ btb = devres_find(&bdev->bd_device, blkdev_trig_btb_release, -+ NULL, NULL); -+ if (btb != NULL) { -+ err = 0; -+ goto exit_put_bdev; -+ } ++ if (kstrtoul(buf, 10, &value) || value >= upper_limit) ++ return -EINVAL; + -+ if (blkdev_trig_next_index == ULONG_MAX) { -+ err = -EOVERFLOW; -+ goto exit_put_bdev; -+ } ++ if (ACPI_FAILURE(acpi_execute_simple_method(fan->adev->handle, ++ (char *)method, value))) ++ return -EIO; + -+ btb = devres_alloc(blkdev_trig_btb_release, sizeof(*btb), GFP_KERNEL); -+ if (btb == NULL) { -+ err = -ENOMEM; -+ goto exit_put_bdev; -+ } ++ return count; ++} + -+ err = sysfs_create_group(bdev_kobj(bdev), &blkdev_trig_linked_leds); -+ if (err) -+ goto exit_free_btb; ++#define STEAMDECK_ATTR_WO(_name, _method, _upper_limit) \ ++ static ssize_t _name##_store(struct device *dev, \ ++ struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++ { \ ++ return steamdeck_simple_store(dev, buf, count, \ ++ _method, \ ++ _upper_limit); \ ++ } \ ++ static DEVICE_ATTR_WO(_name) + -+ btb->index = blkdev_trig_next_index++; -+ btb->bdev = bdev; -+ xa_init(&btb->linked_btls); ++STEAMDECK_ATTR_WO(target_cpu_temp, "STCT", U8_MAX / 2); ++STEAMDECK_ATTR_WO(gain, "SGAN", U16_MAX); ++STEAMDECK_ATTR_WO(ramp_rate, "SFRR", U8_MAX); ++STEAMDECK_ATTR_WO(hysteresis, "SHTS", U16_MAX); ++STEAMDECK_ATTR_WO(maximum_battery_charge_rate, "CHGR", U16_MAX); ++STEAMDECK_ATTR_WO(recalculate, "SCHG", U16_MAX); + -+ /* Populate BTB activity counters */ -+ blkdev_trig_update_btb(btb, jiffies); ++STEAMDECK_ATTR_WO(led_brightness, "CHBV", U8_MAX); ++STEAMDECK_ATTR_WO(content_adaptive_brightness, "CABC", U8_MAX); ++STEAMDECK_ATTR_WO(gamma_set, "GAMA", U8_MAX); ++STEAMDECK_ATTR_WO(display_brightness, "WDBV", U8_MAX); ++STEAMDECK_ATTR_WO(ctrl_display, "WCDV", U8_MAX); ++STEAMDECK_ATTR_WO(cabc_minimum_brightness, "WCMB", U8_MAX); ++STEAMDECK_ATTR_WO(memory_data_access_control, "MDAC", U8_MAX); + -+ devres_add(&bdev->bd_device, btb); ++#define STEAMDECK_ATTR_WO_NOARG(_name, _method) \ ++ static ssize_t _name##_store(struct device *dev, \ ++ struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++ { \ ++ struct steamdeck *fan = dev_get_drvdata(dev); \ ++ \ ++ if (ACPI_FAILURE(acpi_evaluate_object(fan->adev->handle, \ ++ _method, NULL, NULL))) \ ++ return -EIO; \ ++ \ ++ return count; \ ++ } \ ++ static DEVICE_ATTR_WO(_name) + -+exit_free_btb: -+ if (err) -+ devres_free(btb); -+exit_put_bdev: -+ blkdev_put(bdev, BLKDEV_TRIG_FMODE); -+ return err ? 
ERR_PTR(err) : btb; -+} ++STEAMDECK_ATTR_WO_NOARG(power_cycle_display, "DPCY"); ++STEAMDECK_ATTR_WO_NOARG(display_normal_mode_on, "NORO"); ++STEAMDECK_ATTR_WO_NOARG(display_inversion_off, "INOF"); ++STEAMDECK_ATTR_WO_NOARG(display_inversion_on, "INON"); ++STEAMDECK_ATTR_WO_NOARG(idle_mode_on, "WRNE"); + ++#define STEAMDECK_ATTR_RO(_name, _method) \ ++ static ssize_t _name##_show(struct device *dev, \ ++ struct device_attribute *attr, \ ++ char *buf) \ ++ { \ ++ struct steamdeck *jup = dev_get_drvdata(dev); \ ++ unsigned long long val; \ ++ \ ++ if (ACPI_FAILURE(acpi_evaluate_integer( \ ++ jup->adev->handle, \ ++ _method, NULL, &val))) \ ++ return -EIO; \ ++ \ ++ return sprintf(buf, "%llu\n", val); \ ++ } \ ++ static DEVICE_ATTR_RO(_name) + -+/* -+ * -+ * Activating and deactivating the trigger on an LED -+ * -+ */ ++STEAMDECK_ATTR_RO(firmware_version, "PDFW"); ++STEAMDECK_ATTR_RO(board_id, "BOID"); ++STEAMDECK_ATTR_RO(pdcs, "PDCS"); + -+/** -+ * blkdev_trig_activate() - Called by the LEDs subsystem when an LED is -+ * associated with the trigger. -+ * @led: The LED -+ * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. -+ * Return: &0 on success, negative &errno on error. -+ */ -+static int blkdev_trig_activate(struct led_classdev *led) ++static umode_t ++steamdeck_is_visible(struct kobject *kobj, struct attribute *attr, int index) +{ -+ struct blkdev_trig_led *btl; -+ int err; ++ return attr->mode; ++} + -+ btl = kzalloc(sizeof(*btl), GFP_KERNEL); -+ if (btl == NULL) -+ return -ENOMEM; ++static struct attribute *steamdeck_attributes[] = { ++ &dev_attr_target_cpu_temp.attr, ++ &dev_attr_gain.attr, ++ &dev_attr_ramp_rate.attr, ++ &dev_attr_hysteresis.attr, ++ &dev_attr_maximum_battery_charge_rate.attr, ++ &dev_attr_recalculate.attr, ++ &dev_attr_power_cycle_display.attr, + -+ err = mutex_lock_interruptible(&blkdev_trig_mutex); -+ if (err) -+ goto exit_free; ++ &dev_attr_led_brightness.attr, ++ &dev_attr_content_adaptive_brightness.attr, ++ &dev_attr_gamma_set.attr, ++ &dev_attr_display_brightness.attr, ++ &dev_attr_ctrl_display.attr, ++ &dev_attr_cabc_minimum_brightness.attr, ++ &dev_attr_memory_data_access_control.attr, + -+ if (blkdev_trig_next_index == ULONG_MAX) { -+ err = -EOVERFLOW; -+ goto exit_unlock; -+ } ++ &dev_attr_display_normal_mode_on.attr, ++ &dev_attr_display_inversion_off.attr, ++ &dev_attr_display_inversion_on.attr, ++ &dev_attr_idle_mode_on.attr, + -+ btl->index = blkdev_trig_next_index++; -+ btl->last_checked = jiffies; -+ btl->mode = -1; /* set all bits */ -+ btl->led = led; -+ btl->blink_msec = BLKDEV_TRIG_BLINK_DEF; -+ btl->check_jiffies = msecs_to_jiffies(BLKDEV_TRIG_CHECK_DEF); -+ xa_init(&btl->linked_btbs); ++ &dev_attr_firmware_version.attr, ++ &dev_attr_board_id.attr, ++ &dev_attr_pdcs.attr, + -+ hlist_add_head(&btl->all_btls_node, &blkdev_trig_all_btls); -+ led_set_trigger_data(led, btl); ++ NULL ++}; + -+exit_unlock: -+ mutex_unlock(&blkdev_trig_mutex); -+exit_free: -+ if (err) -+ kfree(btl); -+ return err; -+} ++static const struct attribute_group steamdeck_group = { ++ .attrs = steamdeck_attributes, ++ .is_visible = steamdeck_is_visible, ++}; + -+/** -+ * blkdev_trig_deactivate() - Called by the the LEDs subsystem when an LED is -+ * disassociated from the trigger. -+ * @led: The LED -+ * -+ * The LEDs subsystem also calls this function when an LED associated with the -+ * trigger is removed or when the trigger is unregistered (if the module is -+ * unloaded). -+ * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. 
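
For reference, each STEAMDECK_ATTR_RO(name, method) above stamps out a show function plus DEVICE_ATTR_RO(name); STEAMDECK_ATTR_RO(firmware_version, "PDFW"), for example, expands to roughly:

	static ssize_t firmware_version_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
	{
		struct steamdeck *jup = dev_get_drvdata(dev);
		unsigned long long val;

		if (ACPI_FAILURE(acpi_evaluate_integer(jup->adev->handle,
						       "PDFW", NULL, &val)))
			return -EIO;

		return sprintf(buf, "%llu\n", val);
	}
	static DEVICE_ATTR_RO(firmware_version);
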
-+ */ -+static void blkdev_trig_deactivate(struct led_classdev *led) ++static const struct attribute_group *steamdeck_groups[] = { ++ &steamdeck_group, ++ NULL ++}; ++ ++static int steamdeck_read_fan_speed(struct steamdeck *jup, long *speed) +{ -+ struct blkdev_trig_led *btl = led_get_trigger_data(led); -+ struct blkdev_trig_bdev *btb; -+ unsigned long index; ++ unsigned long long val; + -+ mutex_lock(&blkdev_trig_mutex); ++ if (ACPI_FAILURE(acpi_evaluate_integer(jup->adev->handle, ++ "FANR", NULL, &val))) ++ return -EIO; + -+ xa_for_each (&btl->linked_btbs, index, btb) -+ blkdev_trig_unlink_norelease(btl, btb); ++ *speed = val; ++ return 0; ++} + -+ hlist_del(&btl->all_btls_node); -+ kfree(btl); ++static int ++steamdeck_hwmon_read(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long *out) ++{ ++ struct steamdeck *sd = dev_get_drvdata(dev); ++ unsigned long long val; + -+ mutex_unlock(&blkdev_trig_mutex); ++ switch (type) { ++ case hwmon_temp: ++ if (attr != hwmon_temp_input) ++ return -EOPNOTSUPP; ++ ++ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, ++ "BATT", NULL, &val))) ++ return -EIO; ++ /* ++ * Assuming BATT returns deg C we need to mutiply it ++ * by 1000 to convert to mC ++ */ ++ *out = val * 1000; ++ break; ++ case hwmon_fan: ++ switch (attr) { ++ case hwmon_fan_input: ++ return steamdeck_read_fan_speed(sd, out); ++ case hwmon_fan_target: ++ *out = sd->fan_target; ++ break; ++ case hwmon_fan_fault: ++ if (ACPI_FAILURE(acpi_evaluate_integer( ++ sd->adev->handle, ++ "FANC", NULL, &val))) ++ return -EIO; ++ /* ++ * FANC (Fan check): ++ * 0: Abnormal ++ * 1: Normal ++ */ ++ *out = !val; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; +} + ++static int ++steamdeck_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, const char **str) ++{ ++ switch (type) { ++ case hwmon_temp: ++ *str = "Battery Temp"; ++ break; ++ case hwmon_fan: ++ *str = "System Fan"; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } + -+/* -+ * -+ * Link-related attribute store functions -+ * -+ */ ++ return 0; ++} + -+/** -+ * link_dev_by_path_store() - &link_dev_by_path device attribute store function. -+ * @dev: The LED device -+ * @attr: The &link_dev_by_path attribute (&dev_attr_link_dev_by_path) -+ * @buf: The value written to the attribute, which should be the path to -+ * a special file that represents a block device to be linked to -+ * the LED (e.g. ``/dev/sda``) -+ * @count: The number of characters in &buf -+ * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. -+ * Return: &count on success, negative &errno on error. 
-+ */ -+static ssize_t link_dev_by_path_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++static int ++steamdeck_hwmon_write(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long val) +{ -+ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); -+ struct blkdev_trig_bdev *btb; -+ int err; ++ struct steamdeck *sd = dev_get_drvdata(dev); + -+ err = mutex_lock_interruptible(&blkdev_trig_mutex); -+ if (err) -+ return err; ++ if (type != hwmon_fan || ++ attr != hwmon_fan_target) ++ return -EOPNOTSUPP; + -+ btb = blkdev_trig_get_btb(buf, count); -+ if (IS_ERR(btb)) { -+ err = PTR_ERR(btb); -+ goto exit_unlock; -+ } ++ if (val > U16_MAX) ++ return -EINVAL; + -+ if (xa_load(&btb->linked_btls, btl->index) != NULL) { -+ err = -EEXIST; -+ goto exit_put_btb; -+ } ++ sd->fan_target = val; + -+ err = blkdev_trig_link(btl, btb); ++ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, ++ "FANS", val))) ++ return -EIO; + -+exit_put_btb: -+ if (err) -+ blkdev_trig_put_btb(btb); -+exit_unlock: -+ mutex_unlock(&blkdev_trig_mutex); -+ return err ? : count; ++ return 0; +} + -+/** -+ * unlink_dev_by_path_store() - &unlink_dev_by_path device attribute store -+ * function. -+ * @dev: The LED device -+ * @attr: The &unlink_dev_by_path attribute (&dev_attr_unlink_dev_by_path) -+ * @buf: The value written to the attribute, which should be the path to -+ * a special file that represents a block device to be unlinked -+ * from the LED (e.g. ``/dev/sda``) -+ * @count: The number of characters in &buf -+ * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. -+ * Return: &count on success, negative &errno on error. -+ */ -+static ssize_t unlink_dev_by_path_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++static umode_t ++steamdeck_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, ++ u32 attr, int channel) +{ -+ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); -+ struct block_device *bdev; -+ struct blkdev_trig_bdev *btb; -+ int err; -+ -+ bdev = blkdev_trig_get_bdev(buf, count); -+ if (IS_ERR(bdev)) -+ return PTR_ERR(bdev); ++ if (type == hwmon_fan && ++ attr == hwmon_fan_target) ++ return 0644; + -+ err = mutex_lock_interruptible(&blkdev_trig_mutex); -+ if (err) -+ goto exit_put_bdev; ++ return 0444; ++} + -+ btb = devres_find(&bdev->bd_device, blkdev_trig_btb_release, -+ NULL, NULL); -+ if (btb == NULL) { -+ err = -EUNATCH; /* bdev isn't linked to any LED */ -+ goto exit_unlock; -+ } ++static const struct hwmon_channel_info *steamdeck_info[] = { ++ HWMON_CHANNEL_INFO(temp, ++ HWMON_T_INPUT | HWMON_T_LABEL), ++ HWMON_CHANNEL_INFO(fan, ++ HWMON_F_INPUT | HWMON_F_LABEL | ++ HWMON_F_TARGET | HWMON_F_FAULT), ++ NULL ++}; + -+ if (xa_load(&btb->linked_btls, btl->index) == NULL) { -+ err = -EUNATCH; /* bdev isn't linked to this LED */ -+ goto exit_unlock; -+ } ++static const struct hwmon_ops steamdeck_hwmon_ops = { ++ .is_visible = steamdeck_hwmon_is_visible, ++ .read = steamdeck_hwmon_read, ++ .read_string = steamdeck_hwmon_read_string, ++ .write = steamdeck_hwmon_write, ++}; + -+ blkdev_trig_unlink_norelease(btl, btb); ++static const struct hwmon_chip_info steamdeck_chip_info = { ++ .ops = &steamdeck_hwmon_ops, ++ .info = steamdeck_info, ++}; + -+exit_unlock: -+ mutex_unlock(&blkdev_trig_mutex); -+exit_put_bdev: -+ blkdev_put(bdev, BLKDEV_TRIG_FMODE); -+ return err ? 
: count; -+} ++#define STEAMDECK_STA_OK \ ++ (ACPI_STA_DEVICE_ENABLED | \ ++ ACPI_STA_DEVICE_PRESENT | \ ++ ACPI_STA_DEVICE_FUNCTIONING) + -+/** -+ * unlink_dev_by_name_store() - &unlink_dev_by_name device attribute store -+ * function. -+ * @dev: The LED device -+ * @attr: The &unlink_dev_by_name attribute (&dev_attr_unlink_dev_by_name) -+ * @buf: The value written to the attribute, which should be the kernel -+ * name of a block device to be unlinked from the LED (e.g. -+ * ``sda``) -+ * @count: The number of characters in &buf -+ * -+ * Context: Process context. Takes and releases &blkdev_trig_mutex. -+ * Return: &count on success, negative &errno on error. -+ */ -+static ssize_t unlink_dev_by_name_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++static int ++steamdeck_ddic_reg_read(void *context, unsigned int reg, unsigned int *val) +{ -+ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); -+ struct blkdev_trig_bdev *btb; -+ unsigned long index; -+ int err; ++ union acpi_object obj = { .type = ACPI_TYPE_INTEGER }; ++ struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, }; ++ struct steamdeck *sd = context; ++ unsigned long long _val; + -+ err = mutex_lock_interruptible(&blkdev_trig_mutex); -+ if (err) -+ return err; ++ obj.integer.value = reg; + -+ err = -EUNATCH; ++ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, ++ "RDDI", &arg_list, &_val))) ++ return -EIO; + -+ xa_for_each (&btl->linked_btbs, index, btb) { ++ *val = _val; ++ return 0; ++} + -+ if (sysfs_streq(dev_name(&btb->bdev->bd_device), buf)) { -+ blkdev_trig_unlink_norelease(btl, btb); -+ err = 0; -+ break; -+ } ++static int steamdeck_read_pdcs(struct steamdeck *sd, unsigned long long *pdcs) ++{ ++ acpi_status status; ++ ++ status = acpi_evaluate_integer(sd->adev->handle, "PDCS", NULL, pdcs); ++ if (ACPI_FAILURE(status)) { ++ dev_err(sd->dev, "PDCS evaluation failed: %s\n", ++ acpi_format_exception(status)); ++ return -EIO; + } + -+ mutex_unlock(&blkdev_trig_mutex); -+ return err ? : count; ++ return 0; +} + ++static void steamdeck_usb_role_work(struct work_struct *work) ++{ ++ struct steamdeck *sd = ++ container_of(work, struct steamdeck, role_work.work); ++ unsigned long long pdcs; ++ bool usb_host; + -+/* -+ * -+ * Atomic attribute show & store functions -+ * -+ */ ++ if (steamdeck_read_pdcs(sd, &pdcs)) ++ return; + -+/** -+ * blink_time_show() - &blink_time device attribute show function. -+ * @dev: The LED device -+ * @attr: The &blink_time attribute (&dev_attr_blink_time) -+ * @buf: Output buffer -+ * -+ * Writes the value of &blkdev_trig_led.blink_msec to &buf. -+ * -+ * Context: Process context. -+ * Return: The number of characters written to &buf. -+ */ -+static ssize_t blink_time_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ const struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); ++ /* ++ * We only care about these two ++ */ ++ pdcs &= ACPI_STEAMDECK_PORT_CONNECT | ACPI_STEAMDECK_CUR_DATA_ROLE; + -+ return sysfs_emit(buf, "%u\n", READ_ONCE(btl->blink_msec)); ++ /* ++ * For "connect" events our role is determined by a bit in ++ * PDCS, for "disconnect" we switch to being a gadget ++ * unconditionally. The thinking for the latter is we don't ++ * want to start acting as a USB host until we get ++ * confirmation from the firmware that we are a USB host ++ */ ++ usb_host = (pdcs & ACPI_STEAMDECK_PORT_CONNECT) ? 
++ pdcs & ACPI_STEAMDECK_CUR_DATA_ROLE : false; ++ ++ WARN_ON(extcon_set_state_sync(sd->edev, EXTCON_USB_HOST, ++ usb_host)); ++ dev_dbg(sd->dev, "USB role is %s\n", usb_host ? "host" : "device"); +} + -+/** -+ * blink_time_store() - &blink_time device attribute store function. -+ * @dev: The LED device -+ * @attr: The &blink_time attribute (&dev_attr_blink_time) -+ * @buf: The new value (as written to the &sysfs attribute) -+ * @count: The number of characters in &buf -+ * -+ * Sets &blkdev_trig_led.blink_msec to the value in &buf. -+ * -+ * Context: Process context. -+ * Return: &count on success, negative &errno on error. -+ */ -+static ssize_t blink_time_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++static void steamdeck_notify(acpi_handle handle, u32 event, void *context) +{ -+ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); -+ unsigned int value; -+ int err; -+ -+ err = kstrtouint(buf, 0, &value); -+ if (err) -+ return err; ++ struct device *dev = context; ++ struct steamdeck *sd = dev_get_drvdata(dev); ++ unsigned long long pdcs; ++ unsigned long delay; + -+ if (value < BLKDEV_TRIG_BLINK_MIN || value > BLKDEV_TRIG_BLINK_MAX) -+ return -ERANGE; ++ switch (event) { ++ case ACPI_STEAMDECK_NOTIFY_STATUS: ++ if (steamdeck_read_pdcs(sd, &pdcs)) ++ return; ++ /* ++ * We process "disconnect" events immediately and ++ * "connect" events with a delay to give the HW time ++ * to settle. For example attaching USB hub (at least ++ * for HW used for testing) will generate intermediary ++ * event with "host" bit not set, followed by the one ++ * that does have it set. ++ */ ++ delay = (pdcs & ACPI_STEAMDECK_PORT_CONNECT) ? ++ STEAMDECK_ROLE_SWITCH_DELAY : 0; + -+ WRITE_ONCE(btl->blink_msec, value); -+ return count; ++ queue_delayed_work(system_long_wq, &sd->role_work, delay); ++ break; ++ default: ++ dev_err(dev, "Unsupported event [0x%x]\n", event); ++ } +} + -+/** -+ * check_interval_show() - &check_interval device attribute show function. -+ * @dev: The LED device -+ * @attr: The &check_interval attribute (&dev_attr_check_interval) -+ * @buf: Output buffer -+ * -+ * Writes the value of &blkdev_trig_led.check_jiffies (converted to -+ * milliseconds) to &buf. -+ * -+ * Context: Process context. -+ * Return: The number of characters written to &buf. -+ */ -+static ssize_t check_interval_show(struct device *dev, -+ struct device_attribute *attr, char *buf) ++static void steamdeck_remove_notify_handler(void *data) +{ -+ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev); ++ struct steamdeck *sd = data; + -+ return sysfs_emit(buf, "%u\n", -+ jiffies_to_msecs(READ_ONCE(btl->check_jiffies))); ++ acpi_remove_notify_handler(sd->adev->handle, ACPI_DEVICE_NOTIFY, ++ steamdeck_notify); ++ cancel_delayed_work_sync(&sd->role_work); +} + -+/** -+ * check_interval_store() - &check_interval device attribute store function -+ * @dev: The LED device -+ * @attr: The &check_interval attribute (&dev_attr_check_interval) -+ * @buf: The new value (as written to the &sysfs attribute) -+ * @count: The number of characters in &buf -+ * -+ * Sets &blkdev_trig_led.check_jiffies to the value in &buf (after converting -+ * from milliseconds). -+ * -+ * Context: Process context. -+ * Return: &count on success, negative &errno on error. 
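
Taking the code's reading of the PDCS bits, the role decision above collapses to a small truth table — a disconnect always falls back to gadget, and only a connected port with the data-role bit set becomes host:

	/*
	 * PORT_CONNECT  CUR_DATA_ROLE   usb_host
	 *      0             x           false   (disconnected: act as gadget)
	 *      1             0           false   (connected, device role)
	 *      1             1           true    (connected, host role)
	 */
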
-+ */ -+static ssize_t check_interval_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct blkdev_trig_led *led = led_trigger_get_drvdata(dev); -+ unsigned int value; -+ int err; -+ -+ err = kstrtouint(buf, 0, &value); -+ if (err) -+ return err; ++static const unsigned int steamdeck_extcon_cable[] = { ++ EXTCON_USB, ++ EXTCON_USB_HOST, ++ EXTCON_CHG_USB_SDP, ++ EXTCON_CHG_USB_CDP, ++ EXTCON_CHG_USB_DCP, ++ EXTCON_CHG_USB_ACA, ++ EXTCON_NONE, ++}; + -+ if (value < BLKDEV_TRIG_CHECK_MIN || value > BLKDEV_TRIG_CHECK_MAX) -+ return -ERANGE; ++static int steamdeck_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct steamdeck *sd; ++ acpi_status status; ++ unsigned long long sta; ++ int ret; + -+ WRITE_ONCE(led->check_jiffies, msecs_to_jiffies(value)); ++ static const struct regmap_config regmap_config = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = 255, ++ .cache_type = REGCACHE_NONE, ++ .reg_read = steamdeck_ddic_reg_read, ++ }; + -+ return count; -+} ++ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); ++ if (!sd) ++ return -ENOMEM; ++ sd->adev = ACPI_COMPANION(&pdev->dev); ++ sd->dev = dev; ++ platform_set_drvdata(pdev, sd); ++ INIT_DELAYED_WORK(&sd->role_work, steamdeck_usb_role_work); + -+/** -+ * blkdev_trig_mode_show() - Helper for boolean attribute show functions. -+ * @led: The LED -+ * @buf: Output buffer -+ * @bit: Which bit to show -+ * -+ * Context: Process context. -+ * Return: The number of characters written to &buf. -+ */ -+static int blkdev_trig_mode_show(const struct blkdev_trig_led *led, char *buf, -+ enum stat_group bit) -+{ -+ return sysfs_emit(buf, -+ READ_ONCE(led->mode) & (1 << bit) ? "Y\n" : "N\n"); -+} ++ status = acpi_evaluate_integer(sd->adev->handle, "_STA", ++ NULL, &sta); ++ if (ACPI_FAILURE(status)) { ++ dev_err(dev, "Status check failed (0x%x)\n", status); ++ return -EINVAL; ++ } + -+/** -+ * blkdev_trig_mode_store() - Helper for boolean attribute store functions. -+ * @led: The LED -+ * @buf: The new value (as written to the &sysfs attribute) -+ * @count: The number of characters in &buf -+ * @bit: Which bit to set -+ * -+ * Context: Process context. -+ * Return: &count on success, negative &errno on error. -+ */ -+static int blkdev_trig_mode_store(struct blkdev_trig_led *led, -+ const char *buf, size_t count, -+ enum stat_group bit) -+{ -+ bool set; -+ int err; ++ if ((sta & STEAMDECK_STA_OK) != STEAMDECK_STA_OK) { ++ dev_err(dev, "Device is not ready\n"); ++ return -EINVAL; ++ } + -+ err = kstrtobool(buf, &set); -+ if (err) -+ return err; ++ /* ++ * Our ACPI interface doesn't expose a method to read current ++ * fan target, so we use current fan speed as an ++ * approximation. ++ */ ++ if (steamdeck_read_fan_speed(sd, &sd->fan_target)) ++ dev_warn(dev, "Failed to read fan speed"); + -+ if (set) -+ set_bit(bit, &led->mode); -+ else -+ clear_bit(bit, &led->mode); ++ sd->hwmon = devm_hwmon_device_register_with_info(dev, ++ "steamdeck", ++ sd, ++ &steamdeck_chip_info, ++ steamdeck_groups); ++ if (IS_ERR(sd->hwmon)) { ++ dev_err(dev, "Failed to register HWMON device"); ++ return PTR_ERR(sd->hwmon); ++ } + -+ return count; -+} ++ sd->regmap = devm_regmap_init(dev, NULL, sd, ®map_config); ++ if (IS_ERR(sd->regmap)) ++ dev_err(dev, "Failed to register REGMAP"); + -+/** -+ * blink_on_read_show() - &blink_on_read device attribute show function. 
-+ * @dev: The LED device -+ * @attr: The &blink_on_read attribute (&dev_attr_blink_on_read) -+ * @buf: Output buffer -+ * -+ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_READ bit in -+ * &blkdev_trig_led.mode is set or cleared. -+ * -+ * Context: Process context. -+ * Return: The number of characters written to &buf. -+ */ -+static ssize_t blink_on_read_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev), -+ buf, STAT_READ); -+} ++ sd->edev = devm_extcon_dev_allocate(dev, steamdeck_extcon_cable); ++ if (IS_ERR(sd->edev)) ++ return -ENOMEM; + -+/** -+ * blink_on_read_store() - &blink_on_read device attribute store function. -+ * @dev: The LED device -+ * @attr: The &blink_on_read attribute (&dev_attr_blink_on_read) -+ * @buf: The new value (as written to the &sysfs attribute) -+ * @count: The number of characters in &buf -+ * -+ * Sets the &STAT_READ bit in &blkdev_trig_led.mode to the value in &buf -+ * (interpretted as a boolean). -+ * -+ * Context: Process context. -+ * Return: &count on success, negative &errno on error. -+ */ -+static ssize_t blink_on_read_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev), -+ buf, count, STAT_READ); -+} ++ ret = devm_extcon_dev_register(dev, sd->edev); ++ if (ret < 0) { ++ dev_err(dev, "Failed to register extcon device: %d\n", ret); ++ return ret; ++ } + -+/** -+ * blink_on_write_show() - &blink_on_write device attribute show function. -+ * @dev: The LED device -+ * @attr: The &blink_on_write attribute (&dev_attr_blink_on_write) -+ * @buf: Output buffer -+ * -+ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_WRITE bit in -+ * in &blkdev_trig_led.mode is set or cleared. -+ * -+ * Context: Process context. -+ * Return: The number of characters written to &buf. -+ */ -+static ssize_t blink_on_write_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev), -+ buf, STAT_WRITE); -+} ++ /* ++ * Set initial role value ++ */ ++ queue_delayed_work(system_long_wq, &sd->role_work, 0); ++ flush_delayed_work(&sd->role_work); + -+/** -+ * blink_on_write_store() - &blink_on_write device attribute store function. -+ * @dev: The LED device -+ * @attr: The &blink_on_write attribute (&dev_attr_blink_on_write) -+ * @buf: The new value (as written to the &sysfs attribute) -+ * @count: The number of characters in &buf -+ * -+ * Sets the &STAT_WRITE bit in &blkdev_trig_led.mode to the value in &buf -+ * (interpretted as a boolean). -+ * -+ * Context: Process context. -+ * Return: &count on success, negative &errno on error. -+ */ -+static ssize_t blink_on_write_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev), -+ buf, count, STAT_WRITE); -+} ++ status = acpi_install_notify_handler(sd->adev->handle, ++ ACPI_DEVICE_NOTIFY, ++ steamdeck_notify, ++ dev); ++ if (ACPI_FAILURE(status)) { ++ dev_err(dev, "Error installing ACPI notify handler\n"); ++ return -EIO; ++ } + -+/** -+ * blink_on_flush_show() - &blink_on_flush device attribute show function. 
-+ * @dev: The LED device
-+ * @attr: The &blink_on_flush attribute (&dev_attr_blink_on_flush)
-+ * @buf: Output buffer
-+ *
-+ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_FLUSH bit in
-+ * &blkdev_trig_led.mode is set or cleared.
-+ *
-+ * Context: Process context.
-+ * Return: The number of characters written to &buf.
-+ */
-+static ssize_t blink_on_flush_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
-+ buf, STAT_FLUSH);
++ ret = devm_add_action_or_reset(dev, steamdeck_remove_notify_handler,
++ sd);
++ return ret;
+}
+
-+/**
-+ * blink_on_flush_store() - &blink_on_flush device attribute store function.
-+ * @dev: The LED device
-+ * @attr: The &blink_on_flush attribute (&dev_attr_blink_on_flush)
-+ * @buf: The new value (as written to the &sysfs attribute)
-+ * @count: The number of characters in &buf
-+ *
-+ * Sets the &STAT_FLUSH bit in &blkdev_trig_led.mode to the value in &buf
-+ * (interpreted as a boolean).
-+ *
-+ * Context: Process context.
-+ * Return: &count on success, negative &errno on error.
-+ */
-+static ssize_t blink_on_flush_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
-+ buf, count, STAT_FLUSH);
-+}
++static const struct acpi_device_id steamdeck_device_ids[] = {
++ { "VLV0100", 0 },
++ { "", 0 },
++};
++MODULE_DEVICE_TABLE(acpi, steamdeck_device_ids);
+
-+/**
-+ * blink_on_discard_show() - &blink_on_discard device attribute show function.
-+ * @dev: The LED device
-+ * @attr: The &blink_on_discard attribute (&dev_attr_blink_on_discard)
-+ * @buf: Output buffer
-+ *
-+ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_DISCARD bit in
-+ * &blkdev_trig_led.mode is set or cleared.
-+ *
-+ * Context: Process context.
-+ * Return: The number of characters written to &buf.
-+ */
-+static ssize_t blink_on_discard_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
-+ buf, STAT_DISCARD);
-+}
-+
-+/**
-+ * blink_on_discard_store() - &blink_on_discard device attribute store function.
-+ * @dev: The LED device
-+ * @attr: The &blink_on_discard attribute (&dev_attr_blink_on_discard)
-+ * @buf: The new value (as written to the &sysfs attribute)
-+ * @count: The number of characters in &buf
-+ *
-+ * Sets the &STAT_DISCARD bit in &blkdev_trig_led.mode to the value in &buf
-+ * (interpreted as a boolean).
-+ *
-+ * Context: Process context.
-+ * Return: &count on success, negative &errno on error.
-+ */
-+static ssize_t blink_on_discard_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
-+ buf, count, STAT_DISCARD);
-+}
-+
-+/* Device attributes */
-+static DEVICE_ATTR_WO(link_dev_by_path);
-+static DEVICE_ATTR_WO(unlink_dev_by_path);
-+static DEVICE_ATTR_WO(unlink_dev_by_name);
-+static DEVICE_ATTR_RW(blink_time);
-+static DEVICE_ATTR_RW(check_interval);
-+static DEVICE_ATTR_RW(blink_on_read);
-+static DEVICE_ATTR_RW(blink_on_write);
-+static DEVICE_ATTR_RW(blink_on_flush);
-+static DEVICE_ATTR_RW(blink_on_discard);
-+
-+/* Device attributes in LED directory (/sys/class/leds/<led>/...)
*/
-+static struct attribute *blkdev_trig_attrs[] = {
-+ &dev_attr_link_dev_by_path.attr,
-+ &dev_attr_unlink_dev_by_path.attr,
-+ &dev_attr_unlink_dev_by_name.attr,
-+ &dev_attr_blink_time.attr,
-+ &dev_attr_check_interval.attr,
-+ &dev_attr_blink_on_read.attr,
-+ &dev_attr_blink_on_write.attr,
-+ &dev_attr_blink_on_flush.attr,
-+ &dev_attr_blink_on_discard.attr,
-+ NULL
++static struct platform_driver steamdeck_driver = {
++ .probe = steamdeck_probe,
++ .driver = {
++ .name = "steamdeck",
++ .acpi_match_table = steamdeck_device_ids,
++ },
+};
++module_platform_driver(steamdeck_driver);
+
-+/* Unnamed attribute group == no subdirectory */
-+static const struct attribute_group blkdev_trig_attr_group = {
-+ .attrs = blkdev_trig_attrs,
-+};
++MODULE_AUTHOR("Andrey Smirnov ");
++MODULE_DESCRIPTION("Steam Deck ACPI platform driver");
++MODULE_LICENSE("GPL");
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 0acb8e1fb7af..b0b49c8653b0 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -1182,7 +1182,7 @@ struct readahead_control {
+ ._index = i, \
+ }
+ 
+-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
++#define VM_READAHEAD_PAGES (SZ_8M / PAGE_SIZE)
+ 
+ void page_cache_ra_unbounded(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_count);
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index 45f09bec02c4..87b20e2ee274 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -148,6 +148,8 @@ static inline void set_userns_rlimit_max(struct user_namespace *ns,
+ 
+ #ifdef CONFIG_USER_NS
+ 
++extern int unprivileged_userns_clone;
+
+ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+ {
+ if (ns)
+@@ -181,6 +183,8 @@ extern bool current_in_userns(const struct user_namespace *target_ns);
+ struct ns_common *ns_get_owner(struct ns_common *ns);
+ #else
+ 
++#define unprivileged_userns_clone 0
+
+ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+ {
+ return &init_user_ns;
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index db762e35aca9..0336791656eb 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -194,6 +194,7 @@ struct netns_ipv4 {
+ int sysctl_udp_rmem_min;
+ 
+ u8 sysctl_fib_notify_on_flag_change;
++ unsigned int sysctl_tcp_collapse_max_bytes;
+ 
+ #ifdef CONFIG_NET_L3_MASTER_DEV
+ u8 sysctl_udp_l3mdev_accept;
+diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
+index 901b440238d5..7026df84a0f6 100644
+--- a/include/trace/events/tcp.h
++++ b/include/trace/events/tcp.h
+@@ -187,6 +187,13 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust,
+ TP_ARGS(sk)
+ );
+ 
++DEFINE_EVENT(tcp_event_sk, tcp_collapse_max_bytes_exceeded,
+
-+/* Attribute groups for the trigger */
-+static const struct attribute_group *blkdev_trig_attr_groups[] = {
-+ &blkdev_trig_attr_group, /* /sys/class/leds/<led>/... */
-+ &blkdev_trig_linked_devs, /* /sys/class/leds/<led>/linked_devices/ */
-+ NULL
-+};
++ TP_PROTO(struct sock *sk),
+
-+/* Trigger registration data */
-+static struct led_trigger blkdev_trig_trigger = {
-+ .name = "blkdev",
-+ .activate = blkdev_trig_activate,
-+ .deactivate = blkdev_trig_deactivate,
-+ .groups = blkdev_trig_attr_groups,
-+};
++ TP_ARGS(sk)
++);
+
-+/**
-+ * blkdev_trig_init() - Block device LED trigger initialization.
-+ *
-+ * Registers the ``blkdev`` LED trigger.
-+ *
-+ * Return: &0 on success, negative &errno on failure.
-+ */ -+static int __init blkdev_trig_init(void) -+{ -+ return led_trigger_register(&blkdev_trig_trigger); -+} -+module_init(blkdev_trig_init); ++ TP_PROTO(struct sock *sk), + -+/** -+ * blkdev_trig_exit() - Block device LED trigger module exit. -+ * -+ * Unregisters the ``blkdev`` LED trigger. -+ */ -+static void __exit blkdev_trig_exit(void) -+{ -+ led_trigger_unregister(&blkdev_trig_trigger); -+} -+module_exit(blkdev_trig_exit); ++ TP_ARGS(sk) ++); + -+MODULE_DESCRIPTION("Block device LED trigger"); -+MODULE_AUTHOR("Ian Pilcher "); -+MODULE_LICENSE("GPL v2"); -diff --git a/fs/eventpoll.c b/fs/eventpoll.c -index 64659b110973..4cad490028ab 100644 ---- a/fs/eventpoll.c -+++ b/fs/eventpoll.c -@@ -57,13 +57,7 @@ - * we need a lock that will allow us to sleep. This lock is a - * mutex (ep->mtx). It is acquired during the event transfer loop, - * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file(). -- * Then we also need a global mutex to serialize eventpoll_release_file() -- * and ep_free(). -- * This mutex is acquired by ep_free() during the epoll file -- * cleanup path and it is also acquired by eventpoll_release_file() -- * if a file has been pushed inside an epoll set and it is then -- * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). -- * It is also acquired when inserting an epoll fd onto another epoll -+ * The epmutex is acquired when inserting an epoll fd onto another epoll - * fd. We do this so that we walk the epoll tree and ensure that this - * insertion does not create a cycle of epoll file descriptors, which - * could lead to deadlock. We need a global mutex to prevent two -@@ -153,6 +147,13 @@ struct epitem { - /* The file descriptor information this item refers to */ - struct epoll_filefd ffd; + TRACE_EVENT(tcp_retransmit_synack, -+ /* -+ * Protected by file->f_lock, true for to-be-released epitem already -+ * removed from the "struct file" items list; together with -+ * eventpoll->refcount orchestrates "struct eventpoll" disposal -+ */ -+ bool dying; -+ - /* List containing poll wait queues */ - struct eppoll_entry *pwqlist; + TP_PROTO(const struct sock *sk, const struct request_sock *req), +diff --git a/init/Kconfig b/init/Kconfig +index 1fb5f313d18f..9b298860cfed 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -123,6 +123,10 @@ config THREAD_INFO_IN_TASK -@@ -217,6 +218,12 @@ struct eventpoll { - u64 gen; - struct hlist_head refs; + menu "General setup" -+ /* -+ * usage count, used together with epitem->dying to -+ * orchestrate the disposal of this struct -+ */ -+ refcount_t refcount; ++config CACHY ++ bool "Some kernel tweaks by CachyOS" ++ default y + - #ifdef CONFIG_NET_RX_BUSY_POLL - /* used to track busy poll napi_id */ - unsigned int napi_id; -@@ -240,9 +247,7 @@ struct ep_pqueue { - /* Maximum number of epoll watched descriptors, per user */ - static long max_user_watches __read_mostly; - --/* -- * This mutex is used to serialize ep_free() and eventpoll_release_file(). -- */ -+/* Used for cycles detection */ - static DEFINE_MUTEX(epmutex); + config BROKEN + bool - static u64 loop_check_gen = 0; -@@ -557,8 +562,7 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) +@@ -348,6 +352,19 @@ config KERNEL_UNCOMPRESSED - /* - * This function unregisters poll callbacks from the associated file -- * descriptor. Must be called with "mtx" held (or "epmutex" if called from -- * ep_free). -+ * descriptor. Must be called with "mtx" held. 
- */ - static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) - { -@@ -681,11 +685,40 @@ static void epi_rcu_free(struct rcu_head *head) - kmem_cache_free(epi_cache, epi); - } + endchoice -+static void ep_get(struct eventpoll *ep) -+{ -+ refcount_inc(&ep->refcount); -+} ++menu "ZSTD compression options" ++ depends on KERNEL_ZSTD + -+/* -+ * Returns true if the event poll can be disposed -+ */ -+static bool ep_refcount_dec_and_test(struct eventpoll *ep) -+{ -+ if (!refcount_dec_and_test(&ep->refcount)) -+ return false; -+ -+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root)); -+ return true; -+} ++config ZSTD_COMPRESSION_LEVEL ++ int "Compression level (1-22)" ++ range 1 22 ++ default "22" ++ help ++ Choose a compression level for zstd kernel compression. ++ Default is 22, which is the maximum. + -+static void ep_free(struct eventpoll *ep) -+{ -+ mutex_destroy(&ep->mtx); -+ free_uid(ep->user); -+ wakeup_source_unregister(ep->ws); -+ kfree(ep); -+} ++endmenu + - /* - * Removes a "struct epitem" from the eventpoll RB tree and deallocates - * all the associated resources. Must be called with "mtx" held. -+ * If the dying flag is set, do the removal only if force is true. -+ * This prevents ep_clear_and_put() from dropping all the ep references -+ * while running concurrently with eventpoll_release_file(). -+ * Returns true if the eventpoll can be disposed. - */ --static int ep_remove(struct eventpoll *ep, struct epitem *epi) -+static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) - { - struct file *file = epi->ffd.file; - struct epitems_head *to_free; -@@ -700,6 +733,11 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) + config DEFAULT_INIT + string "Default init path" + default "" +@@ -1253,6 +1270,22 @@ config USER_NS - /* Remove the current item from the list of epoll hooks */ - spin_lock(&file->f_lock); -+ if (epi->dying && !force) { -+ spin_unlock(&file->f_lock); -+ return false; -+ } -+ - to_free = NULL; - head = file->f_ep; - if (head->first == &epi->fllink && !epi->fllink.next) { -@@ -733,28 +771,28 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) - call_rcu(&epi->rcu, epi_rcu_free); + If unsure, say N. - percpu_counter_dec(&ep->user->epoll_watches); -+ return ep_refcount_dec_and_test(ep); -+} ++config USER_NS_UNPRIVILEGED ++ bool "Allow unprivileged users to create namespaces" ++ default y ++ depends on USER_NS ++ help ++ When disabled, unprivileged users will not be able to create ++ new namespaces. Allowing users to create their own namespaces ++ has been part of several recent local privilege escalation ++ exploits, so if you need user namespaces but are ++ paranoid^Wsecurity-conscious you want to disable this. ++ ++ This setting can be overridden at runtime via the ++ kernel.unprivileged_userns_clone sysctl. ++ ++ If unsure, say Y. ++ + config PID_NS + bool "PID Namespaces" + default y +@@ -1433,6 +1466,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE + with the "-O2" compiler flag for best performance and most + helpful compile-time warnings. -- return 0; -+/* -+ * ep_remove variant for callers owing an additional reference to the ep -+ */ -+static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi) -+{ -+ WARN_ON_ONCE(__ep_remove(ep, epi, false)); - } ++config CC_OPTIMIZE_FOR_PERFORMANCE_O3 ++ bool "Optimize more for performance (-O3)" ++ help ++ Choosing this option will pass "-O3" to your compiler to optimize ++ the kernel yet more for performance. 
++ + config CC_OPTIMIZE_FOR_SIZE + bool "Optimize for size (-Os)" + help +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 38ef6d06888e..0f78364efd4f 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -40,6 +40,27 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. --static void ep_free(struct eventpoll *ep) -+static void ep_clear_and_put(struct eventpoll *ep) - { - struct rb_node *rbp; - struct epitem *epi; -+ bool dispose; ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with good smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ ++ config HZ_600 ++ bool "600 HZ" ++ help ++ 600 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with good smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with good smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -53,6 +74,9 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 ++ default 600 if HZ_600 ++ default 750 if HZ_750 + default 1000 if HZ_1000 - /* We need to release all tasks waiting for these file */ - if (waitqueue_active(&ep->poll_wait)) - ep_poll_safewake(ep, NULL, 0); + config SCHED_HRTICK +diff --git a/kernel/fork.c b/kernel/fork.c +index 0c92f224c68c..49c173e367d2 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -98,6 +98,10 @@ + #include + #include -- /* -- * We need to lock this because we could be hit by -- * eventpoll_release_file() while we're freeing the "struct eventpoll". -- * We do not need to hold "ep->mtx" here because the epoll file -- * is on the way to be removed and no one has references to it -- * anymore. The only hit might come from eventpoll_release_file() but -- * holding "epmutex" is sufficient here. -- */ -- mutex_lock(&epmutex); -+ mutex_lock(&ep->mtx); ++#ifdef CONFIG_USER_NS ++#include ++#endif ++ + #include + #include + #include +@@ -2031,6 +2035,10 @@ static __latent_entropy struct task_struct *copy_process( + if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) + return ERR_PTR(-EINVAL); ++ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) ++ if (!capable(CAP_SYS_ADMIN)) ++ return ERR_PTR(-EPERM); ++ /* - * Walks through the whole tree by unregistering poll callbacks. -@@ -768,25 +806,21 @@ static void ep_free(struct eventpoll *ep) + * Thread groups must share signals as well, and detached threads + * can only be started up within the thread group. +@@ -3181,6 +3189,12 @@ int ksys_unshare(unsigned long unshare_flags) + if (unshare_flags & CLONE_NEWNS) + unshare_flags |= CLONE_FS; - /* - * Walks through the whole tree by freeing each "struct epitem". At this -- * point we are sure no poll callbacks will be lingering around, and also by -- * holding "epmutex" we can be sure that no file cleanup code will hit -- * us during this operation. So we can avoid the lock on "ep->lock". -- * We do not need to lock ep->mtx, either, we only do it to prevent -- * a lockdep warning. -+ * point we are sure no poll callbacks will be lingering around. 
-+ * Since we still own a reference to the eventpoll struct, the loop can't -+ * dispose it. - */ -- mutex_lock(&ep->mtx); - while ((rbp = rb_first_cached(&ep->rbr)) != NULL) { - epi = rb_entry(rbp, struct epitem, rbn); -- ep_remove(ep, epi); -+ ep_remove_safe(ep, epi); - cond_resched(); - } ++ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { ++ err = -EPERM; ++ if (!capable(CAP_SYS_ADMIN)) ++ goto bad_unshare_out; ++ } + -+ dispose = ep_refcount_dec_and_test(ep); - mutex_unlock(&ep->mtx); - -- mutex_unlock(&epmutex); -- mutex_destroy(&ep->mtx); -- free_uid(ep->user); -- wakeup_source_unregister(ep->ws); -- kfree(ep); -+ if (dispose) -+ ep_free(ep); - } - - static int ep_eventpoll_release(struct inode *inode, struct file *file) -@@ -794,7 +828,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file) - struct eventpoll *ep = file->private_data; - - if (ep) -- ep_free(ep); -+ ep_clear_and_put(ep); + err = check_unshare_flags(unshare_flags); + if (err) + goto bad_unshare_out; +diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig +index 424b3bc58f3f..ecf2798c5ccf 100644 +--- a/kernel/module/Kconfig ++++ b/kernel/module/Kconfig +@@ -219,6 +219,31 @@ config MODULE_COMPRESS_ZSTD - return 0; - } -@@ -906,33 +940,34 @@ void eventpoll_release_file(struct file *file) - { - struct eventpoll *ep; - struct epitem *epi; -- struct hlist_node *next; -+ bool dispose; + endchoice - /* -- * We don't want to get "file->f_lock" because it is not -- * necessary. It is not necessary because we're in the "struct file" -- * cleanup path, and this means that no one is using this file anymore. -- * So, for example, epoll_ctl() cannot hit here since if we reach this -- * point, the file counter already went to zero and fget() would fail. -- * The only hit might come from ep_free() but by holding the mutex -- * will correctly serialize the operation. We do need to acquire -- * "ep->mtx" after "epmutex" because ep_remove() requires it when called -- * from anywhere but ep_free(). -- * -- * Besides, ep_remove() acquires the lock, so we can't hold it here. -+ * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from -+ * touching the epitems list before eventpoll_release_file() can access -+ * the ep->mtx. - */ -- mutex_lock(&epmutex); -- if (unlikely(!file->f_ep)) { -- mutex_unlock(&epmutex); -- return; -- } -- hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) { -+again: -+ spin_lock(&file->f_lock); -+ if (file->f_ep && file->f_ep->first) { -+ epi = hlist_entry(file->f_ep->first, struct epitem, fllink); -+ epi->dying = true; -+ spin_unlock(&file->f_lock); ++menu "ZSTD module compression options" ++ depends on MODULE_COMPRESS_ZSTD + -+ /* -+ * ep access is safe as we still own a reference to the ep -+ * struct -+ */ - ep = epi->ep; -- mutex_lock_nested(&ep->mtx, 0); -- ep_remove(ep, epi); -+ mutex_lock(&ep->mtx); -+ dispose = __ep_remove(ep, epi, true); - mutex_unlock(&ep->mtx); ++config MODULE_COMPRESS_ZSTD_LEVEL ++ int "Compression level (1-19)" ++ range 1 19 ++ default 9 ++ help ++ Compression level used by zstd for compressing modules. + -+ if (dispose) -+ ep_free(ep); -+ goto again; - } -- mutex_unlock(&epmutex); -+ spin_unlock(&file->f_lock); - } ++config MODULE_COMPRESS_ZSTD_ULTRA ++ bool "Enable ZSTD ultra compression" ++ help ++ Compress modules with ZSTD using the highest possible compression. 
++ ++config MODULE_COMPRESS_ZSTD_LEVEL_ULTRA ++ int "Compression level (20-22)" ++ depends on MODULE_COMPRESS_ZSTD_ULTRA ++ range 20 22 ++ default 20 ++ help ++ Ultra compression level used by zstd for compressing modules. ++ ++endmenu ++ + config MODULE_DECOMPRESS + bool "Support in-kernel module decompression" + depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ || MODULE_COMPRESS_ZSTD +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 5f6587d94c1d..96c66b50ee48 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -69,9 +69,13 @@ + * + * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_CACHY ++unsigned int sysctl_sched_latency = 3000000ULL; ++static unsigned int normalized_sysctl_sched_latency = 3000000ULL; ++#else + unsigned int sysctl_sched_latency = 6000000ULL; + static unsigned int normalized_sysctl_sched_latency = 6000000ULL; +- ++#endif + /* + * The initial- and re-scaling of tunables is configurable + * +@@ -90,8 +94,13 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; + * + * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_CACHY ++unsigned int sysctl_sched_min_granularity = 400000ULL; ++static unsigned int normalized_sysctl_sched_min_granularity = 400000ULL; ++#else + unsigned int sysctl_sched_min_granularity = 750000ULL; + static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; ++#endif - static int ep_alloc(struct eventpoll **pep) -@@ -955,6 +990,7 @@ static int ep_alloc(struct eventpoll **pep) - ep->rbr = RB_ROOT_CACHED; - ep->ovflist = EP_UNACTIVE_PTR; - ep->user = user; -+ refcount_set(&ep->refcount, 1); + /* + * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks. +@@ -121,8 +130,13 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; + * + * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_CACHY ++unsigned int sysctl_sched_wakeup_granularity = 500000UL; ++static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL; ++#else + unsigned int sysctl_sched_wakeup_granularity = 1000000UL; + static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; ++#endif - *pep = ep; + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; -@@ -1223,10 +1259,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v - */ - list_del_init(&wait->entry); - /* -- * ->whead != NULL protects us from the race with ep_free() -- * or ep_remove(), ep_remove_wait_queue() takes whead->lock -- * held by the caller. Once we nullify it, nothing protects -- * ep/epi or even wait. -+ * ->whead != NULL protects us from the race with -+ * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue() -+ * takes whead->lock held by the caller. Once we nullify it, -+ * nothing protects ep/epi or even wait. - */ - smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); - } -@@ -1496,16 +1532,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, - if (tep) - mutex_unlock(&tep->mtx); +@@ -175,8 +189,12 @@ int __weak arch_asym_cpu_priority(int cpu) + * + * (default: 5 msec, units: microseconds) + */ ++#ifdef CONFIG_CACHY ++static unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL; ++#else + static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; + #endif ++#endif -+ /* -+ * ep_remove_safe() calls in the later error paths can't lead to -+ * ep_free() as the ep file itself still holds an ep reference. 
-+ */ -+ ep_get(ep); -+ - /* now check if we've created too many backpaths */ - if (unlikely(full_check && reverse_path_check())) { -- ep_remove(ep, epi); -+ ep_remove_safe(ep, epi); - return -EINVAL; - } + #ifdef CONFIG_NUMA_BALANCING + /* Restrict the NUMA promotion throughput (MB/s) for each target node. */ +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 1c240d2c99bc..98e1a7472fd2 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -96,6 +96,9 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); + #ifdef CONFIG_PERF_EVENTS + static const int six_hundred_forty_kb = 640 * 1024; + #endif ++#ifdef CONFIG_USER_NS ++#include ++#endif - if (epi->event.events & EPOLLWAKEUP) { - error = ep_create_wakeup_source(epi); - if (error) { -- ep_remove(ep, epi); -+ ep_remove_safe(ep, epi); - return error; - } - } -@@ -1529,7 +1571,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, - * high memory pressure. - */ - if (unlikely(!epq.epi)) { -- ep_remove(ep, epi); -+ ep_remove_safe(ep, epi); - return -ENOMEM; - } -@@ -1760,7 +1802,7 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry, - { - int ret = default_wake_function(wq_entry, mode, sync, key); + static const int ngroups_max = NGROUPS_MAX; +@@ -1645,6 +1648,15 @@ static struct ctl_table kern_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec, + }, ++#ifdef CONFIG_USER_NS ++ { ++ .procname = "unprivileged_userns_clone", ++ .data = &unprivileged_userns_clone, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, ++#endif + #ifdef CONFIG_PROC_SYSCTL + { + .procname = "tainted", +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index 1d8e47bed3f1..fec01d016a35 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -22,6 +22,13 @@ + #include + #include -- list_del_init(&wq_entry->entry); -+ list_del_init_careful(&wq_entry->entry); - return ret; - } ++/* sysctl */ ++#ifdef CONFIG_USER_NS_UNPRIVILEGED ++int unprivileged_userns_clone = 1; ++#else ++int unprivileged_userns_clone; ++#endif ++ + static struct kmem_cache *user_ns_cachep __read_mostly; + static DEFINE_MUTEX(userns_state_mutex); -@@ -2025,7 +2067,7 @@ static int do_epoll_create(int flags) - out_free_fd: - put_unused_fd(fd); - out_free_ep: -- ep_free(ep); -+ ep_clear_and_put(ep); - return error; - } +diff --git a/mm/Kconfig b/mm/Kconfig +index 4751031f3f05..cf2e47030fe8 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -621,7 +621,7 @@ config COMPACTION + config COMPACT_UNEVICTABLE_DEFAULT + int + depends on COMPACTION +- default 0 if PREEMPT_RT ++ default 0 if PREEMPT_RT || CACHY + default 1 -@@ -2167,10 +2209,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, - error = -EEXIST; - break; - case EPOLL_CTL_DEL: -- if (epi) -- error = ep_remove(ep, epi); -- else -+ if (epi) { -+ /* -+ * The eventpoll itself is still alive: the refcount -+ * can't go to zero here. 
-+ */ -+ ep_remove_safe(ep, epi); -+ error = 0; -+ } else { - error = -ENOENT; -+ } - break; - case EPOLL_CTL_MOD: - if (epi) { -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 5e0e0ccd47aa..07463ad4a70a 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, - mm = get_task_mm(task); - if (mm) { - seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items); -+ seq_printf(m, "zero_pages_sharing %lu\n", mm->ksm_zero_pages_sharing); - mmput(mm); - } - -diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h -index 77bc5522e61c..4226379a232d 100644 ---- a/include/linux/atomic/atomic-arch-fallback.h -+++ b/include/linux/atomic/atomic-arch-fallback.h -@@ -1208,15 +1208,21 @@ arch_atomic_inc_and_test(atomic_t *v) - #define arch_atomic_inc_and_test arch_atomic_inc_and_test - #endif - -+#ifndef arch_atomic_add_negative_relaxed -+#ifdef arch_atomic_add_negative -+#define arch_atomic_add_negative_acquire arch_atomic_add_negative -+#define arch_atomic_add_negative_release arch_atomic_add_negative -+#define arch_atomic_add_negative_relaxed arch_atomic_add_negative -+#endif /* arch_atomic_add_negative */ -+ - #ifndef arch_atomic_add_negative - /** -- * arch_atomic_add_negative - add and test if negative -+ * arch_atomic_add_negative - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t - * -- * Atomically adds @i to @v and returns true -- * if the result is negative, or false when -- * result is greater than or equal to zero. -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. + # +diff --git a/mm/compaction.c b/mm/compaction.c +index 5a9501e0ae01..4d8c63b9cdca 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -2735,7 +2735,11 @@ static void compact_nodes(void) + * aggressively the kernel should compact memory in the + * background. It takes values in the range [0, 100]. */ - static __always_inline bool - arch_atomic_add_negative(int i, atomic_t *v) -@@ -1226,6 +1232,95 @@ arch_atomic_add_negative(int i, atomic_t *v) - #define arch_atomic_add_negative arch_atomic_add_negative - #endif - -+#ifndef arch_atomic_add_negative_acquire -+/** -+ * arch_atomic_add_negative_acquire - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. -+ */ -+static __always_inline bool -+arch_atomic_add_negative_acquire(int i, atomic_t *v) -+{ -+ return arch_atomic_add_return_acquire(i, v) < 0; -+} -+#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire -+#endif -+ -+#ifndef arch_atomic_add_negative_release -+/** -+ * arch_atomic_add_negative_release - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. 
-+ */ -+static __always_inline bool -+arch_atomic_add_negative_release(int i, atomic_t *v) -+{ -+ return arch_atomic_add_return_release(i, v) < 0; -+} -+#define arch_atomic_add_negative_release arch_atomic_add_negative_release ++#ifdef CONFIG_CACHY ++unsigned int __read_mostly sysctl_compaction_proactiveness; ++#else + unsigned int __read_mostly sysctl_compaction_proactiveness = 20; +#endif -+ -+#ifndef arch_atomic_add_negative_relaxed -+/** -+ * arch_atomic_add_negative_relaxed - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. -+ */ -+static __always_inline bool -+arch_atomic_add_negative_relaxed(int i, atomic_t *v) -+{ -+ return arch_atomic_add_return_relaxed(i, v) < 0; -+} -+#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed + + int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, + void *buffer, size_t *length, loff_t *ppos) +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 516b1aa247e8..78fb31d27ed7 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -71,7 +71,11 @@ static long ratelimit_pages = 32; + /* + * Start background writeback (via writeback threads) at this percentage + */ ++#ifdef CONFIG_CACHY ++static int dirty_background_ratio = 5; ++#else + static int dirty_background_ratio = 10; +#endif -+ -+#else /* arch_atomic_add_negative_relaxed */ -+ -+#ifndef arch_atomic_add_negative_acquire -+static __always_inline bool -+arch_atomic_add_negative_acquire(int i, atomic_t *v) -+{ -+ bool ret = arch_atomic_add_negative_relaxed(i, v); -+ __atomic_acquire_fence(); -+ return ret; -+} -+#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire + + /* + * dirty_background_bytes starts at 0 (disabled) so that it is a function of +@@ -99,7 +103,11 @@ static unsigned long vm_dirty_bytes; + /* + * The interval between `kupdate'-style writebacks + */ ++#ifdef CONFIG_CACHY ++unsigned int dirty_writeback_interval = 10 * 100; /* centiseconds */ ++#else + unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ +#endif -+ -+#ifndef arch_atomic_add_negative_release -+static __always_inline bool -+arch_atomic_add_negative_release(int i, atomic_t *v) -+{ -+ __atomic_release_fence(); -+ return arch_atomic_add_negative_relaxed(i, v); -+} -+#define arch_atomic_add_negative_release arch_atomic_add_negative_release + + EXPORT_SYMBOL_GPL(dirty_writeback_interval); + +diff --git a/mm/swap.c b/mm/swap.c +index 57cb01b042f6..3a7bec75480f 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -1090,6 +1090,10 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch) + */ + void __init swap_setup(void) + { ++#ifdef CONFIG_CACHY ++ /* Only swap-in pages requested, avoid readahead */ ++ page_cluster = 0; ++#else + unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); + + /* Use a smaller cluster for small-memory machines */ +@@ -1101,4 +1105,5 @@ void __init swap_setup(void) + * Right now other parts of the system means that we + * _really_ don't want to cluster much more + */ +#endif -+ -+#ifndef arch_atomic_add_negative -+static __always_inline bool -+arch_atomic_add_negative(int i, atomic_t *v) -+{ -+ bool ret; -+ __atomic_pre_full_fence(); -+ ret = arch_atomic_add_negative_relaxed(i, v); -+ __atomic_post_full_fence(); -+ return ret; -+} -+#define arch_atomic_add_negative arch_atomic_add_negative + } +diff --git 
a/mm/vmpressure.c b/mm/vmpressure.c +index b52644771cc4..11a4b0e3b583 100644 +--- a/mm/vmpressure.c ++++ b/mm/vmpressure.c +@@ -43,7 +43,11 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; + * essence, they are percents: the higher the value, the more number + * unsuccessful reclaims there were. + */ ++#ifdef CONFIG_CACHY ++static const unsigned int vmpressure_level_med = 65; ++#else + static const unsigned int vmpressure_level_med = 60; +#endif -+ -+#endif /* arch_atomic_add_negative_relaxed */ -+ - #ifndef arch_atomic_fetch_add_unless - /** - * arch_atomic_fetch_add_unless - add unless the number is already a given value -@@ -2329,15 +2424,21 @@ arch_atomic64_inc_and_test(atomic64_t *v) - #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test - #endif + static const unsigned int vmpressure_level_critical = 95; -+#ifndef arch_atomic64_add_negative_relaxed -+#ifdef arch_atomic64_add_negative -+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative -+#define arch_atomic64_add_negative_release arch_atomic64_add_negative -+#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative -+#endif /* arch_atomic64_add_negative */ -+ - #ifndef arch_atomic64_add_negative - /** -- * arch_atomic64_add_negative - add and test if negative -+ * arch_atomic64_add_negative - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t - * -- * Atomically adds @i to @v and returns true -- * if the result is negative, or false when -- * result is greater than or equal to zero. -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. + /* +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 9c1c5e8b24b8..71a7f4517e5a 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -186,7 +186,11 @@ struct scan_control { + /* + * From 0 .. 200. Higher means more swappy. */ - static __always_inline bool - arch_atomic64_add_negative(s64 i, atomic64_t *v) -@@ -2347,6 +2448,95 @@ arch_atomic64_add_negative(s64 i, atomic64_t *v) - #define arch_atomic64_add_negative arch_atomic64_add_negative - #endif ++#ifdef CONFIG_CACHY ++int vm_swappiness = 20; ++#else + int vm_swappiness = 60; ++#endif -+#ifndef arch_atomic64_add_negative_acquire -+/** -+ * arch_atomic64_add_negative_acquire - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic64_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. -+ */ -+static __always_inline bool -+arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) -+{ -+ return arch_atomic64_add_return_acquire(i, v) < 0; -+} -+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire -+#endif -+ -+#ifndef arch_atomic64_add_negative_release -+/** -+ * arch_atomic64_add_negative_release - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic64_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. 
-+ */ -+static __always_inline bool -+arch_atomic64_add_negative_release(s64 i, atomic64_t *v) -+{ -+ return arch_atomic64_add_return_release(i, v) < 0; -+} -+#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release -+#endif -+ -+#ifndef arch_atomic64_add_negative_relaxed -+/** -+ * arch_atomic64_add_negative_relaxed - Add and test if negative -+ * @i: integer value to add -+ * @v: pointer of type atomic64_t -+ * -+ * Atomically adds @i to @v and returns true if the result is negative, -+ * or false when the result is greater than or equal to zero. -+ */ -+static __always_inline bool -+arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v) -+{ -+ return arch_atomic64_add_return_relaxed(i, v) < 0; -+} -+#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed -+#endif -+ -+#else /* arch_atomic64_add_negative_relaxed */ -+ -+#ifndef arch_atomic64_add_negative_acquire -+static __always_inline bool -+arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) -+{ -+ bool ret = arch_atomic64_add_negative_relaxed(i, v); -+ __atomic_acquire_fence(); -+ return ret; -+} -+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire -+#endif -+ -+#ifndef arch_atomic64_add_negative_release -+static __always_inline bool -+arch_atomic64_add_negative_release(s64 i, atomic64_t *v) -+{ -+ __atomic_release_fence(); -+ return arch_atomic64_add_negative_relaxed(i, v); -+} -+#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release -+#endif -+ -+#ifndef arch_atomic64_add_negative -+static __always_inline bool -+arch_atomic64_add_negative(s64 i, atomic64_t *v) -+{ -+ bool ret; -+ __atomic_pre_full_fence(); -+ ret = arch_atomic64_add_negative_relaxed(i, v); -+ __atomic_post_full_fence(); -+ return ret; -+} -+#define arch_atomic64_add_negative arch_atomic64_add_negative + static void set_task_reclaim_state(struct task_struct *task, + struct reclaim_state *rs) +@@ -4536,7 +4540,11 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc + } + + /* to protect the working set of the last N jiffies */ ++#ifdef CONFIG_CACHY ++static unsigned long lru_gen_min_ttl __read_mostly = HZ; ++#else + static unsigned long lru_gen_min_ttl __read_mostly; +#endif -+ -+#endif /* arch_atomic64_add_negative_relaxed */ -+ - #ifndef arch_atomic64_fetch_add_unless - /** - * arch_atomic64_fetch_add_unless - add unless the number is already a given value -@@ -2456,4 +2646,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v) - #endif - #endif /* _LINUX_ATOMIC_FALLBACK_H */ --// b5e87bdd5ede61470c29f7a7e4de781af3770f09 -+// 00071fffa021cec66f6290d706d69c91df87bade -diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h -index 7a139ec030b0..0496816738ca 100644 ---- a/include/linux/atomic/atomic-instrumented.h -+++ b/include/linux/atomic/atomic-instrumented.h -@@ -592,6 +592,28 @@ atomic_add_negative(int i, atomic_t *v) - return arch_atomic_add_negative(i, v); - } + static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) + { +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 40fe70fc2015..3028e27897d9 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -1470,6 +1470,13 @@ static struct ctl_table ipv4_net_table[] = { + .extra1 = SYSCTL_ZERO, + .extra2 = &tcp_plb_max_cong_thresh, + }, ++ { ++ .procname = "tcp_collapse_max_bytes", ++ .data = &init_net.ipv4.sysctl_tcp_collapse_max_bytes, ++ .maxlen = sizeof(unsigned int), 
++ .mode = 0644, ++ .proc_handler = proc_douintvec_minmax, ++ }, + { } + }; -+static __always_inline bool -+atomic_add_negative_acquire(int i, atomic_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_add_negative_acquire(i, v); -+} -+ -+static __always_inline bool -+atomic_add_negative_release(int i, atomic_t *v) -+{ -+ kcsan_release(); -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_add_negative_release(i, v); -+} -+ -+static __always_inline bool -+atomic_add_negative_relaxed(int i, atomic_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_add_negative_relaxed(i, v); -+} -+ - static __always_inline int - atomic_fetch_add_unless(atomic_t *v, int a, int u) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 754e0212c951..b6d7faeb737a 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -5414,6 +5414,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb) + static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) { -@@ -1211,6 +1233,28 @@ atomic64_add_negative(s64 i, atomic64_t *v) - return arch_atomic64_add_negative(i, v); - } + struct tcp_sock *tp = tcp_sk(sk); ++ struct net *net = sock_net(sk); -+static __always_inline bool -+atomic64_add_negative_acquire(s64 i, atomic64_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic64_add_negative_acquire(i, v); -+} + NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); + +@@ -5425,6 +5426,39 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) + return 0; + ++ /* For context and additional information about this patch, see the ++ * blog post at ++ * ++ * sysctl: net.ipv4.tcp_collapse_max_bytes ++ * ++ * If tcp_collapse_max_bytes is non-zero, attempt to collapse the ++ * queue to free up memory if the current amount of memory allocated ++ * is less than tcp_collapse_max_bytes. Otherwise, the packet is ++ * dropped without attempting to collapse the queue. ++ * ++ * If tcp_collapse_max_bytes is zero, this feature is disabled ++ * and the default Linux behavior is used. The default Linux ++ * behavior is to always perform the attempt to collapse the ++ * queue to free up memory. ++ * ++ * When the receive queue is small, we want to collapse the ++ * queue. There are two reasons for this: (a) the latency of ++ * performing the collapse will be small on a small queue, and ++ * (b) we want to avoid sending a congestion signal (via a ++ * packet drop) to the sender when the receive queue is small. ++ * ++ * The result is that we avoid latency spikes caused by the ++ * time it takes to perform the collapse logic when the receive ++ * queue is large and full, while preserving existing behavior ++ * and performance for all other cases. 
++ */ ++ if (net->ipv4.sysctl_tcp_collapse_max_bytes && ++ (atomic_read(&sk->sk_rmem_alloc) > net->ipv4.sysctl_tcp_collapse_max_bytes)) { ++ /* We are dropping the packet */ ++ trace_tcp_collapse_max_bytes_exceeded(sk); ++ goto do_not_collapse; ++ } + -+static __always_inline bool -+atomic64_add_negative_release(s64 i, atomic64_t *v) -+{ -+ kcsan_release(); -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic64_add_negative_release(i, v); -+} + tcp_collapse_ofo_queue(sk); + if (!skb_queue_empty(&sk->sk_receive_queue)) + tcp_collapse(sk, &sk->sk_receive_queue, NULL, +@@ -5443,6 +5477,8 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) + return 0; + ++do_not_collapse: + -+static __always_inline bool -+atomic64_add_negative_relaxed(s64 i, atomic64_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic64_add_negative_relaxed(i, v); -+} + /* If we are really being abused, tell the caller to silently + * drop receive data on the floor. It will get retransmitted + * and hopefully then we'll have sufficient space. +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index b9d55277cb85..5e577877158b 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -3275,6 +3275,8 @@ static int __net_init tcp_sk_init(struct net *net) + else + net->ipv4.tcp_congestion_control = &tcp_reno; + ++ net->ipv4.sysctl_tcp_collapse_max_bytes = 0; + - static __always_inline s64 - atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) - { -@@ -1830,6 +1874,28 @@ atomic_long_add_negative(long i, atomic_long_t *v) - return arch_atomic_long_add_negative(i, v); + return 0; } -+static __always_inline bool -+atomic_long_add_negative_acquire(long i, atomic_long_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_long_add_negative_acquire(i, v); -+} -+ -+static __always_inline bool -+atomic_long_add_negative_release(long i, atomic_long_t *v) -+{ -+ kcsan_release(); -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_long_add_negative_release(i, v); -+} -+ -+static __always_inline bool -+atomic_long_add_negative_relaxed(long i, atomic_long_t *v) -+{ -+ instrument_atomic_read_write(v, sizeof(*v)); -+ return arch_atomic_long_add_negative_relaxed(i, v); -+} -+ - static __always_inline long - atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) - { -@@ -2083,4 +2149,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) - }) - - #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ --// 764f741eb77a7ad565dc8d99ce2837d5542e8aee -+// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a -diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h -index 800b8c35992d..2fc51ba66beb 100644 ---- a/include/linux/atomic/atomic-long.h -+++ b/include/linux/atomic/atomic-long.h -@@ -479,6 +479,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v) - return arch_atomic64_add_negative(i, v); - } +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index 100a386fcd71..a3ec7265fb57 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -542,14 +542,21 @@ quiet_cmd_xzmisc = XZMISC $@ + # decompression is used, like initramfs decompression, zstd22 should likely not + # be used because it would require zstd to allocate a 128 MB buffer. 
-+static __always_inline bool -+arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) -+{ -+ return arch_atomic64_add_negative_acquire(i, v); -+} -+ -+static __always_inline bool -+arch_atomic_long_add_negative_release(long i, atomic_long_t *v) -+{ -+ return arch_atomic64_add_negative_release(i, v); -+} -+ -+static __always_inline bool -+arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) -+{ -+ return arch_atomic64_add_negative_relaxed(i, v); -+} ++ifdef CONFIG_ZSTD_COMPRESSION_LEVEL ++zstd_comp_val := $(CONFIG_ZSTD_COMPRESSION_LEVEL) ++ifeq ($(shell test $(zstd_comp_val) -gt 19; echo $$?),0) ++zstd_comp_val += --ultra ++endif ++endif + - static __always_inline long - arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) - { -@@ -973,6 +991,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v) - return arch_atomic_add_negative(i, v); - } + quiet_cmd_zstd = ZSTD $@ +- cmd_zstd = cat $(real-prereqs) | $(ZSTD) -19 > $@ ++ cmd_zstd = cat $(real-prereqs) | $(ZSTD) -T0 -19 > $@ -+static __always_inline bool -+arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) -+{ -+ return arch_atomic_add_negative_acquire(i, v); -+} -+ -+static __always_inline bool -+arch_atomic_long_add_negative_release(long i, atomic_long_t *v) -+{ -+ return arch_atomic_add_negative_release(i, v); -+} -+ -+static __always_inline bool -+arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) -+{ -+ return arch_atomic_add_negative_relaxed(i, v); -+} -+ - static __always_inline long - arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) - { -@@ -1011,4 +1047,4 @@ arch_atomic_long_dec_if_positive(atomic_long_t *v) + quiet_cmd_zstd22 = ZSTD22 $@ +- cmd_zstd22 = cat $(real-prereqs) | $(ZSTD) -22 --ultra > $@ ++ cmd_zstd22 = cat $(real-prereqs) | $(ZSTD) -T0 -22 --ultra > $@ - #endif /* CONFIG_64BIT */ - #endif /* _LINUX_ATOMIC_LONG_H */ --// e8f0e08ff072b74d180eabe2ad001282b38c2c88 -+// a194c07d7d2f4b0e178d3c118c919775d5d65f50 -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index a57e6ae78e65..22b2ac82bffd 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -740,7 +740,7 @@ struct mm_struct { - #ifdef CONFIG_KSM - /* - * Represent how many pages of this process are involved in KSM -- * merging. -+ * merging (not including ksm_zero_pages_sharing). - */ - unsigned long ksm_merging_pages; - /* -@@ -748,6 +748,11 @@ struct mm_struct { - * including merged and not merged. - */ - unsigned long ksm_rmap_items; -+ /* -+ * Represent how many empty pages are merged with kernel zero -+ * pages when enabling KSM use_zero_pages. 
-+ */ -+ unsigned long ksm_zero_pages_sharing; - #endif - #ifdef CONFIG_LRU_GEN - struct { -diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h -index 5f1ae07d724b..97cda629c9e9 100644 ---- a/include/linux/pageblock-flags.h -+++ b/include/linux/pageblock-flags.h -@@ -48,7 +48,7 @@ extern unsigned int pageblock_order; - #else /* CONFIG_HUGETLB_PAGE */ + quiet_cmd_zstd22_with_size = ZSTD22 $@ +- cmd_zstd22_with_size = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@ ++ cmd_zstd22_with_size = { cat $(real-prereqs) | $(ZSTD) -T0 -$(zstd_comp_val); $(size_append); } > $@ - /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ --#define pageblock_order (MAX_ORDER-1) -+#define pageblock_order PAGE_ALLOC_COSTLY_ORDER + # ASM offsets + # --------------------------------------------------------------------------- +diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst +index ab0c5bd1a60f..f4989f706d7f 100644 +--- a/scripts/Makefile.modinst ++++ b/scripts/Makefile.modinst +@@ -100,8 +100,13 @@ quiet_cmd_gzip = GZIP $@ + cmd_gzip = $(KGZIP) -n -f $< + quiet_cmd_xz = XZ $@ + cmd_xz = $(XZ) --lzma2=dict=2MiB -f $< ++ifdef CONFIG_MODULE_COMPRESS_ZSTD_ULTRA + quiet_cmd_zstd = ZSTD $@ +- cmd_zstd = $(ZSTD) -T0 --rm -f -q $< ++ cmd_zstd = $(ZSTD) -$(CONFIG_MODULE_COMPRESS_ZSTD_LEVEL_ULTRA) --ultra --zstd=wlog=21 -T0 --rm -f -q $< ++else ++quiet_cmd_zstd = ZSTD $@ ++ cmd_zstd = $(ZSTD) -$(CONFIG_MODULE_COMPRESS_ZSTD_LEVEL) --zstd=wlog=21 -T0 --rm -f -q $< ++endif - #endif /* CONFIG_HUGETLB_PAGE */ + $(dst)/%.ko.gz: $(dst)/%.ko FORCE + $(call cmd,gzip) +-- +2.40.0 + +From 5157ccf4ab5b2429eb11e33b006ff0110baf0625 Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:43:21 +0200 +Subject: [PATCH 04/13] fixes + +Signed-off-by: Peter Jung +--- + Documentation/ABI/stable/sysfs-block | 10 + + .../testing/sysfs-class-led-trigger-blkdev | 78 ++ + Documentation/admin-guide/mm/ksm.rst | 7 + + Documentation/leds/index.rst | 1 + + Documentation/leds/ledtrig-blkdev.rst | 158 +++ + Documentation/x86/topology.rst | 26 + + arch/x86/include/asm/cacheinfo.h | 1 + + arch/x86/kernel/cpu/amd.c | 1 + + arch/x86/kernel/cpu/cacheinfo.c | 36 + + arch/x86/kernel/cpu/hygon.c | 1 + + arch/x86/net/bpf_jit_comp.c | 5 +- + drivers/bluetooth/btusb.c | 2 +- + .../drm/amd/display/dc/bios/bios_parser2.c | 7 +- + .../drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +- + .../drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +- + .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +- + drivers/gpu/drm/scheduler/sched_main.c | 3 +- + drivers/leds/trigger/Kconfig | 9 + + drivers/leds/trigger/Makefile | 1 + + drivers/leds/trigger/ledtrig-blkdev.c | 1221 +++++++++++++++++ + fs/eventpoll.c | 188 ++- + fs/proc/base.c | 1 + + include/linux/atomic/atomic-arch-fallback.h | 208 ++- + include/linux/atomic/atomic-instrumented.h | 68 +- + include/linux/atomic/atomic-long.h | 38 +- + include/linux/mm_types.h | 7 +- + include/linux/pageblock-flags.h | 2 +- + include/linux/rcuref.h | 155 +++ + include/linux/types.h | 6 + + include/net/dst.h | 30 +- + include/net/ip6_fib.h | 3 - + include/net/ip6_route.h | 2 +- + include/net/route.h | 3 - + include/net/sock.h | 2 +- + kernel/fork.c | 1 + + kernel/kheaders.c | 10 +- + kernel/padata.c | 4 +- + lib/Makefile | 2 +- + lib/rcuref.c | 281 ++++ + mm/ksm.c | 185 ++- + mm/mempolicy.c | 104 +- + mm/mprotect.c | 2 +- + net/bridge/br_nf_core.c | 2 +- + net/core/dst.c | 26 +- + net/core/rtnetlink.c | 2 +- + net/ipv4/route.c | 20 +- + net/ipv4/xfrm4_policy.c | 
4 +- + net/ipv6/route.c | 32 +- + net/ipv6/xfrm6_policy.c | 4 +- + net/netfilter/ipvs/ip_vs_xmit.c | 4 +- + scripts/Makefile.vmlinux_o | 2 +- + scripts/atomic/atomics.tbl | 2 +- + scripts/atomic/fallbacks/add_negative | 11 +- + sound/pci/hda/cs35l41_hda.c | 2 +- + .../selftests/mm/ksm_functional_tests.c | 96 +- + 55 files changed, 2804 insertions(+), 279 deletions(-) + create mode 100644 Documentation/ABI/testing/sysfs-class-led-trigger-blkdev + create mode 100644 Documentation/leds/ledtrig-blkdev.rst + create mode 100644 drivers/leds/trigger/ledtrig-blkdev.c + create mode 100644 include/linux/rcuref.h + create mode 100644 lib/rcuref.c + +diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block +index 282de3680367..ac1dd2fbd855 100644 +--- a/Documentation/ABI/stable/sysfs-block ++++ b/Documentation/ABI/stable/sysfs-block +@@ -101,6 +101,16 @@ Description: + devices that support receiving integrity metadata. -diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h + ++What: /sys/block//linked_leds ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Directory that contains symbolic links to all LEDs that ++ are associated with (linked to) this block device by the ++ blkdev LED trigger. Only present when at least one LED ++ is linked. (See Documentation/leds/ledtrig-blkdev.rst.) ++ ++ + What: /sys/block///alignment_offset + Date: April 2009 + Contact: Martin K. Petersen +diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev b/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev new file mode 100644 -index 000000000000..2c8bfd0f1b6b +index 000000000000..28ce8c814fb7 --- /dev/null -+++ b/include/linux/rcuref.h -@@ -0,0 +1,155 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+#ifndef _LINUX_RCUREF_H -+#define _LINUX_RCUREF_H -+ -+#include -+#include -+#include -+#include -+#include -+#include ++++ b/Documentation/ABI/testing/sysfs-class-led-trigger-blkdev +@@ -0,0 +1,78 @@ ++What: /sys/class/leds//blink_time ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Time (in milliseconds) that the LED will be on during a single ++ "blink". + -+#define RCUREF_ONEREF 0x00000000U -+#define RCUREF_MAXREF 0x7FFFFFFFU -+#define RCUREF_SATURATED 0xA0000000U -+#define RCUREF_RELEASED 0xC0000000U -+#define RCUREF_DEAD 0xE0000000U -+#define RCUREF_NOREF 0xFFFFFFFFU ++What: /sys/class/leds//check_interval ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Interval (in milliseconds) between checks of the block devices ++ linked to this LED. The LED will be blinked if the correct type ++ of activity (see blink_on_{read,write,discard,flush} attributes) ++ has occurred on any of the linked devices since the previous ++ check. + -+/** -+ * rcuref_init - Initialize a rcuref reference count with the given reference count -+ * @ref: Pointer to the reference count -+ * @cnt: The initial reference count typically '1' -+ */ -+static inline void rcuref_init(rcuref_t *ref, unsigned int cnt) -+{ -+ atomic_set(&ref->refcnt, cnt - 1); -+} ++What: /sys/class/leds//blink_on_read ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Boolean that determines whether the LED will blink in response ++ to read activity on any of its linked block devices. + -+/** -+ * rcuref_read - Read the number of held reference counts of a rcuref -+ * @ref: Pointer to the reference count -+ * -+ * Return: The number of held references (0 ... 
N) -+ */ -+static inline unsigned int rcuref_read(rcuref_t *ref) -+{ -+ unsigned int c = atomic_read(&ref->refcnt); ++What: /sys/class/leds//blink_on_write ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Boolean that determines whether the LED will blink in response ++ to write activity on any of its linked block devices. + -+ /* Return 0 if within the DEAD zone. */ -+ return c >= RCUREF_RELEASED ? 0 : c + 1; -+} ++What: /sys/class/leds//blink_on_discard ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Boolean that determines whether the LED will blink in response ++ to discard activity on any of its linked block devices. + -+extern __must_check bool rcuref_get_slowpath(rcuref_t *ref); ++What: /sys/class/leds//blink_on_flush ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Boolean that determines whether the LED will blink in response ++ to cache flush activity on any of its linked block devices. + -+/** -+ * rcuref_get - Acquire one reference on a rcuref reference count -+ * @ref: Pointer to the reference count -+ * -+ * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF. -+ * -+ * Provides no memory ordering, it is assumed the caller has guaranteed the -+ * object memory to be stable (RCU, etc.). It does provide a control dependency -+ * and thereby orders future stores. See documentation in lib/rcuref.c -+ * -+ * Return: -+ * False if the attempt to acquire a reference failed. This happens -+ * when the last reference has been put already -+ * -+ * True if a reference was successfully acquired -+ */ -+static inline __must_check bool rcuref_get(rcuref_t *ref) -+{ -+ /* -+ * Unconditionally increase the reference count. The saturation and -+ * dead zones provide enough tolerance for this. -+ */ -+ if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt))) -+ return true; ++What: /sys/class/leds//link_dev_by_path ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Associate a block device with this LED by writing the path to ++ the device special file (e.g. /dev/sda) to this attribute. ++ Symbolic links are followed. + -+ /* Handle the cases inside the saturation and dead zones */ -+ return rcuref_get_slowpath(ref); -+} ++What: /sys/class/leds//unlink_dev_by_path ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Remove the association between this LED and a block device by ++ writing the path to the device special file (e.g. /dev/sda) to ++ this attribute. Symbolic links are followed. + -+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); ++What: /sys/class/leds//unlink_dev_by_name ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Remove the association between this LED and a block device by ++ writing the kernel name of the device (e.g. sda) to this ++ attribute. + -+/* -+ * Internal helper. Do not invoke directly. -+ */ -+static __always_inline __must_check bool __rcuref_put(rcuref_t *ref) -+{ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(), -+ "suspicious rcuref_put_rcusafe() usage"); -+ /* -+ * Unconditionally decrease the reference count. The saturation and -+ * dead zones provide enough tolerance for this. -+ */ -+ if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) -+ return false; ++What: /sys/class/leds//linked_devices ++Date: January 2023 ++Contact: Ian Pilcher ++Description: ++ Directory containing links to all block devices that are ++ associated with this LED. 
(Note that the names of the ++ symbolic links in this directory are *kernel* names, which ++ may not match the device special file paths written to ++ link_device and unlink_device.) +diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst +index eed51a910c94..270560fef3b2 100644 +--- a/Documentation/admin-guide/mm/ksm.rst ++++ b/Documentation/admin-guide/mm/ksm.rst +@@ -171,6 +171,13 @@ stable_node_chains + the number of KSM pages that hit the ``max_page_sharing`` limit + stable_node_dups + number of duplicated KSM pages ++zero_pages_sharing ++ how many empty pages are sharing kernel zero page(s) instead of ++ with each other as it would happen normally. Only effective when ++ enabling ``use_zero_pages`` knob. + -+ /* -+ * Handle the last reference drop and cases inside the saturation -+ * and dead zones. -+ */ -+ return rcuref_put_slowpath(ref); -+} ++When enabling ``use_zero_pages``, the sum of ``pages_sharing`` + ++``zero_pages_sharing`` represents how much really saved by KSM. + + A high ratio of ``pages_sharing`` to ``pages_shared`` indicates good + sharing, but a high ratio of ``pages_unshared`` to ``pages_sharing`` +diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst +index b9ca081fac71..5e37d8e7bd28 100644 +--- a/Documentation/leds/index.rst ++++ b/Documentation/leds/index.rst +@@ -10,6 +10,7 @@ LEDs + leds-class + leds-class-flash + leds-class-multicolor ++ ledtrig-blkdev + ledtrig-oneshot + ledtrig-transient + ledtrig-usbport +diff --git a/Documentation/leds/ledtrig-blkdev.rst b/Documentation/leds/ledtrig-blkdev.rst +new file mode 100644 +index 000000000000..9ff5b99de451 +--- /dev/null ++++ b/Documentation/leds/ledtrig-blkdev.rst +@@ -0,0 +1,158 @@ ++.. SPDX-License-Identifier: GPL-2.0 + -+/** -+ * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe -+ * @ref: Pointer to the reference count -+ * -+ * Provides release memory ordering, such that prior loads and stores are done -+ * before, and provides an acquire ordering on success such that free() -+ * must come after. -+ * -+ * Can be invoked from contexts, which guarantee that no grace period can -+ * happen which would free the object concurrently if the decrement drops -+ * the last reference and the slowpath races against a concurrent get() and -+ * put() pair. rcu_read_lock()'ed and atomic contexts qualify. -+ * -+ * Return: -+ * True if this was the last reference with no future references -+ * possible. This signals the caller that it can safely release the -+ * object which is protected by the reference counter. -+ * -+ * False if there are still active references or the put() raced -+ * with a concurrent get()/put() pair. Caller is not allowed to -+ * release the protected object. -+ */ -+static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref) -+{ -+ return __rcuref_put(ref); -+} ++================================= ++Block Device (blkdev) LED Trigger ++================================= + -+/** -+ * rcuref_put -- Release one reference for a rcuref reference count -+ * @ref: Pointer to the reference count -+ * -+ * Can be invoked from any context. -+ * -+ * Provides release memory ordering, such that prior loads and stores are done -+ * before, and provides an acquire ordering on success such that free() -+ * must come after. -+ * -+ * Return: -+ * -+ * True if this was the last reference with no future references -+ * possible. 
This signals the caller that it can safely schedule the -+ * object, which is protected by the reference counter, for -+ * deconstruction. -+ * -+ * False if there are still active references or the put() raced -+ * with a concurrent get()/put() pair. Caller is not allowed to -+ * deconstruct the protected object. -+ */ -+static inline __must_check bool rcuref_put(rcuref_t *ref) -+{ -+ bool released; ++Available when ``CONFIG_LEDS_TRIGGER_BLKDEV=y`` or ++``CONFIG_LEDS_TRIGGER_BLKDEV=m``. + -+ preempt_disable(); -+ released = __rcuref_put(ref); -+ preempt_enable(); -+ return released; -+} ++See also: + -+#endif -diff --git a/include/linux/types.h b/include/linux/types.h -index ea8cf60a8a79..688fb943556a 100644 ---- a/include/linux/types.h -+++ b/include/linux/types.h -@@ -175,6 +175,12 @@ typedef struct { - } atomic64_t; - #endif - -+typedef struct { -+ atomic_t refcnt; -+} rcuref_t; ++* ``Documentation/ABI/testing/sysfs-class-led-trigger-blkdev`` ++* ``Documentation/ABI/stable/sysfs-block`` (``/sys/block//linked_leds``) + -+#define RCUREF_INIT(i) { .refcnt = ATOMIC_INIT(i - 1) } ++Overview ++======== + - struct list_head { - struct list_head *next, *prev; - }; -diff --git a/include/net/dst.h b/include/net/dst.h -index d67fda89cd0f..78884429deed 100644 ---- a/include/net/dst.h -+++ b/include/net/dst.h -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -61,23 +62,36 @@ struct dst_entry { - unsigned short trailer_len; /* space to reserve at tail */ - - /* -- * __refcnt wants to be on a different cache line from -+ * __rcuref wants to be on a different cache line from - * input/output/ops or performance tanks badly - */ - #ifdef CONFIG_64BIT -- atomic_t __refcnt; /* 64-bit offset 64 */ -+ rcuref_t __rcuref; /* 64-bit offset 64 */ - #endif - int __use; - unsigned long lastuse; -- struct lwtunnel_state *lwtstate; - struct rcu_head rcu_head; - short error; - short __pad; - __u32 tclassid; - #ifndef CONFIG_64BIT -- atomic_t __refcnt; /* 32-bit offset 64 */ -+ struct lwtunnel_state *lwtstate; -+ rcuref_t __rcuref; /* 32-bit offset 64 */ - #endif - netdevice_tracker dev_tracker; ++.. note:: ++ The examples below use ```` to refer to the name of a ++ system-specific LED. If no suitable LED is available on a test ++ system (in a virtual machine, for example), it is possible to ++ use a userspace LED. (See ``Documentation/leds/uleds.rst``.) + -+ /* -+ * Used by rtable and rt6_info. Moves lwtstate into the next cache -+ * line on 64bit so that lwtstate does not cause false sharing with -+ * __rcuref under contention of __rcuref. This also puts the -+ * frequently accessed members of rtable and rt6_info out of the -+ * __rcuref cache line. -+ */ -+ struct list_head rt_uncached; -+ struct uncached_list *rt_uncached_list; -+#ifdef CONFIG_64BIT -+ struct lwtunnel_state *lwtstate; -+#endif - }; ++Verify that the ``blkdev`` LED trigger is available:: ++ ++ # grep blkdev /sys/class/leds//trigger ++ ... rfkill-none blkdev ++ ++(If the previous command produces no output, you may need to load the trigger ++module - ``modprobe ledtrig_blkdev``. If the module is not available, check ++the value of ``CONFIG_LEDS_TRIGGER_BLKDEV`` in your kernel configuration.) ++ ++Associate the LED with the ``blkdev`` LED trigger:: ++ ++ # echo blkdev > /sys/class/leds//trigger ++ ++ # cat /sys/class/leds//trigger ++ ... rfkill-none [blkdev] ++ ++Note that several new device attributes are available in the ++``/sys/class/leds/`` directory. 
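++
++For example (a hedged illustration - the exact listing varies, and other
++standard LED class attributes such as ``brightness`` will also be present)::
++
++	# ls /sys/class/leds/<led>
++	blink_on_discard  blink_on_read   blink_time      link_dev_by_path
++	blink_on_flush    blink_on_write  check_interval  linked_devices
++	trigger           unlink_dev_by_name  unlink_dev_by_path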
++
++* ``link_dev_by_path``, ``unlink_dev_by_path``, and ``unlink_dev_by_name`` are
++  used to manage the set of block devices associated with this LED.  The LED
++  will blink when activity occurs on any of its linked devices.
++
++* ``blink_on_read``, ``blink_on_write``, ``blink_on_discard``, and
++  ``blink_on_flush`` are boolean values that determine whether the LED will
++  blink when a particular type of activity is detected on one of its linked
++  block devices.
++
++* ``blink_time`` is the duration (in milliseconds) of each blink of this LED.
++  (The minimum value is 10 milliseconds.)
++
++* ``check_interval`` is the interval (in milliseconds) at which block devices
++  linked to this LED will be checked for activity, with the LED blinked if the
++  correct type of activity has occurred.
++
++* The ``linked_devices`` directory will contain a symbolic link to every device
++  that is associated with this LED.
++
++Link a block device to the LED::
++
++	# echo /dev/sda > /sys/class/leds/<led>/link_dev_by_path
++
++	# ls /sys/class/leds/<led>/linked_devices
++	sda
++
++(The value written to ``link_dev_by_path`` must be the path of the device
++special file, such as ``/dev/sda``, that represents the block device - or the
++path of a symbolic link to such a device special file.)
++
++Activity on the device will now cause the LED to blink.  The duration of each
++blink (in milliseconds) can be adjusted by setting
++``/sys/class/leds/<led>/blink_time``.  (But see **check_interval and
++blink_time** below.)
++
++Associate a second device with the LED::
++
++	# echo /dev/sdb > /sys/class/leds/<led>/link_dev_by_path
++
++	# ls /sys/class/leds/<led>/linked_devices
++	sda	sdb
++
++When a block device is linked to one or more LEDs, the LEDs are linked from
++the device's ``linked_leds`` directory::
++
++	# ls /sys/class/block/sd{a,b}/linked_leds
++	/sys/class/block/sda/linked_leds:
++	<led>
++
++	/sys/class/block/sdb/linked_leds:
++	<led>
++
++(The ``linked_leds`` directory only exists when the block device is linked to
++at least one LED.)
++
++``check_interval`` and ``blink_time``
++=====================================
++
++* By default, linked block devices are checked for activity every 100
++  milliseconds.  This interval can be changed for an LED via the
++  ``/sys/class/leds/<led>/check_interval`` attribute.  (The minimum value is 25
++  milliseconds.)
++
++* All block devices associated with an LED are checked for activity every
++  ``check_interval`` milliseconds, and a blink is triggered if the correct type
++  of activity (as determined by the LED's ``blink_on_*`` attributes) is
++  detected.  The duration of an LED's blink is determined by its ``blink_time``
++  attribute.  Thus (when the correct type of activity is detected), the LED will
++  be on for ``blink_time`` milliseconds and off for
++  ``check_interval - blink_time`` milliseconds.
++
++* The LED subsystem ignores new blink requests for an LED that is already in
++  the process of blinking, so setting a ``blink_time`` greater than or equal
++  to ``check_interval`` will cause some blinks to be missed.
++
++* Because of processing times, scheduling latencies, etc., avoiding missed
++  blinks actually requires a difference of at least a few milliseconds between
++  the ``blink_time`` and ``check_interval``.  The required difference is likely
++  to vary from system to system.  As a reference, a Thecus N5550 NAS requires a
++  difference of 7 milliseconds (e.g. ``check_interval == 100``,
++  ``blink_time == 93``).
++
++* The default values (``check_interval == 100``, ``blink_time == 75``) cause the
++  LED associated with a continuously active device to blink rapidly.  For a more
++  "always on" effect, increase the ``blink_time`` (but not too much; see the
++  previous bullet).
++
++Other Notes
++===========
++
++* Many (possibly all) types of block devices work with this trigger, including:
++
++  * SCSI (including SATA and USB) hard disk drives and SSDs
++  * SCSI (including SATA and USB) optical drives
++  * NVMe SSDs
++  * SD cards
++  * loopback block devices (``/dev/loop*``)
++  * device mapper devices, such as LVM logical volumes
++  * MD RAID devices
++  * zRAM compressed RAM-disks
++  * partitions on block devices that support them
++
++* The names of the symbolic links in ``/sys/class/leds/<led>/linked_devices``
++  are **kernel** names, which may not match the paths used for
++  ``link_dev_by_path`` and ``unlink_dev_by_path``.  This is most likely when a
++  symbolic link is used to refer to the device (as is common with logical
++  volumes), but it can be true for any device, because nothing prevents the
++  creation of device special files with arbitrary names (e.g.
++  ``sudo mknod /foo b 8 0``).
++
++  Kernel names can be used to unlink block devices from LEDs by writing them to
++  the LED's ``unlink_dev_by_name`` attribute.
++
++* The ``blkdev`` LED trigger supports many-to-many device/LED associations.
++  A device can be associated with multiple LEDs, and an LED can be associated
++  with multiple devices.
+diff --git a/Documentation/x86/topology.rst b/Documentation/x86/topology.rst
+index 7f58010ea86a..9de14f3f7783 100644
+--- a/Documentation/x86/topology.rst
++++ b/Documentation/x86/topology.rst
+@@ -33,6 +33,7 @@ historical nature and should be cleaned up.
+ The topology of a system is described in the units of:
+ 
+     - packages
++    - cluster
+     - cores
+     - threads
+ 
+@@ -90,6 +91,22 @@ Package-related topology information in the kernel:
+   Cache. In general, it is a number identifying an LLC uniquely on the
+   system.
+ 
++Clusters
++========
++A cluster consists of threads of one or more cores sharing the same L2 cache.
++
++Cluster-related topology information in the kernel:
++
++  - cluster_id:
++
++    A per-CPU variable containing:
++
++    - Upper bits extracted from the APIC ID.  CPUs which have the same value
++      in these bits share an L2 and have the same cluster_id.
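++
++      As a rough sketch (variable names are illustrative only; this mirrors
++      cacheinfo_topoext_init_l2c_id() in arch/x86/kernel/cpu/cacheinfo.c,
++      added later in this patch)::
++
++	bits = get_count_order(num_threads_sharing_l2);
++	cluster_id = apicid >> bits;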
++ ++ CPUs for which cluster information is unavailable will show 65535 ++ (BAD_APICID) as the cluster_id. ++ + Cores + ===== + A core consists of 1 or more threads. It does not matter whether the threads +@@ -125,6 +142,11 @@ Thread-related topology information in the kernel: -- struct list_head rt6i_uncached; -- struct uncached_list *rt6i_uncached_list; -- - /* more non-fragment space at head required */ - unsigned short rt6i_nfheader_len; - }; -diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h -index 81ee387a1fc4..3556595ce59a 100644 ---- a/include/net/ip6_route.h -+++ b/include/net/ip6_route.h -@@ -100,7 +100,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net, - static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags) - { - if (!(flags & RT6_LOOKUP_F_DST_NOREF) || -- !list_empty(&rt->rt6i_uncached)) -+ !list_empty(&rt->dst.rt_uncached)) - ip6_rt_put(rt); - } + The number of online threads is also printed in /proc/cpuinfo "siblings." -diff --git a/include/net/route.h b/include/net/route.h -index fe00b0a2e475..bcc367cf3aa2 100644 ---- a/include/net/route.h -+++ b/include/net/route.h -@@ -78,9 +78,6 @@ struct rtable { - /* Miscellaneous cached information */ - u32 rt_mtu_locked:1, - rt_pmtu:31; -- -- struct list_head rt_uncached; -- struct uncached_list *rt_uncached_list; - }; ++ - topology_cluster_cpumask(): ++ ++ The cpumask contains all online threads in the cluster to which a thread ++ belongs. ++ + - topology_sibling_cpumask(): - static inline bool rt_is_input_route(const struct rtable *rt) -diff --git a/include/net/sock.h b/include/net/sock.h -index 573f2bf7e0de..5edf0038867c 100644 ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -2131,7 +2131,7 @@ sk_dst_get(struct sock *sk) + The cpumask contains all online threads in the core to which a thread +@@ -138,6 +160,10 @@ Thread-related topology information in the kernel: - rcu_read_lock(); - dst = rcu_dereference(sk->sk_dst_cache); -- if (dst && !atomic_inc_not_zero(&dst->__refcnt)) -+ if (dst && !rcuref_get(&dst->__rcuref)) - dst = NULL; - rcu_read_unlock(); - return dst; -diff --git a/kernel/fork.c b/kernel/fork.c -index 49c173e367d2..349945168239 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1178,6 +1178,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, - fail_pcpu: - while (i > 0) - percpu_counter_destroy(&mm->rss_stat[--i]); -+ destroy_context(mm); - fail_nocontext: - mm_free_pgd(mm); - fail_nopgd: -diff --git a/kernel/kheaders.c b/kernel/kheaders.c -index 8f69772af77b..42163c9e94e5 100644 ---- a/kernel/kheaders.c -+++ b/kernel/kheaders.c -@@ -26,15 +26,15 @@ asm ( - " .popsection \n" - ); + The physical package ID to which a thread belongs. --extern char kernel_headers_data; --extern char kernel_headers_data_end; -+extern char kernel_headers_data[]; -+extern char kernel_headers_data_end[]; ++ - topology_cluster_id(); ++ ++ The ID of the cluster to which a thread belongs. ++ + - topology_core_id(); - static ssize_t - ikheaders_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t len) - { -- memcpy(buf, &kernel_headers_data + off, len); -+ memcpy(buf, &kernel_headers_data[off], len); - return len; + The ID of the core to which a thread belongs. 
It is also printed in /proc/cpuinfo
+diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
+index ce9685fc78d8..2034cd556c07 100644
+--- a/arch/x86/include/asm/cacheinfo.h
++++ b/arch/x86/include/asm/cacheinfo.h
+@@ -7,6 +7,7 @@ extern unsigned int memory_caching_control;
+ #define CACHE_MTRR 0x01
+ #define CACHE_PAT 0x02
+ 
++void cacheinfo_topoext_init_l2c_id(struct cpuinfo_x86 *c, int cpu);
+ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
+ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 95cdd08c4cbb..d6594727f924 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -358,6 +358,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
+ 		if (!err)
+ 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ 
++		cacheinfo_topoext_init_l2c_id(c, cpu);
+ 		cacheinfo_amd_init_llc_id(c, cpu);
+ 
+ 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
+index 4063e8991211..947a1f27278c 100644
+--- a/arch/x86/kernel/cpu/cacheinfo.c
++++ b/arch/x86/kernel/cpu/cacheinfo.c
+@@ -659,6 +659,42 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
+ 	return i;
+ }
+ 
++void cacheinfo_topoext_init_l2c_id(struct cpuinfo_x86 *c, int cpu)
++{
++	u32 eax, ebx, ecx, edx, num_sharing_cache;
++	int i = 0, bits;
++
++	/* Check if L2 cache identifiers exist. */
++	if (!cpuid_ecx(0x80000006))
++		return;
++
++	while (true) {
++		u32 level;
++
++		cpuid_count(0x8000001d, i, &eax, &ebx, &ecx, &edx);
++		if (!eax)
++			return;
++
++		/*
++		 * Check if the current leaf is for L2 cache using
++		 * eax[7:5], which describes the cache level.
++		 */
++		level = (eax >> 5) & 0x7;
++		if (level == 2)
++			break;
++
++		++i;
++	}
++
++	/*
++	 * L2 ID is calculated from the number of threads
++	 * sharing the L2 cache.
++	 */
++	num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
++	bits = get_count_order(num_sharing_cache);
++	per_cpu(cpu_l2c_id, cpu) = c->apicid >> bits;
++}
++
+ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
+ {
+ 	/*
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index 5a2962c492d3..cb0025b4a2fd 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -89,6 +89,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ 		/* Socket ID is ApicId[6] for these processors.
*/ + c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; - static int __init ikheaders_init(void) ++ cacheinfo_topoext_init_l2c_id(c, cpu); + cacheinfo_hygon_init_llc_id(c, cpu); + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { + u64 value; +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index 1056bbf55b17..212bfd1517ec 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -343,9 +343,10 @@ static int emit_call(u8 **pprog, void *func, void *ip) + + static int emit_rsb_call(u8 **pprog, void *func, void *ip) { -- kheaders_attr.size = (&kernel_headers_data_end - -- &kernel_headers_data); -+ kheaders_attr.size = (kernel_headers_data_end - -+ kernel_headers_data); - return sysfs_create_bin_file(kernel_kobj, &kheaders_attr); ++ void *adjusted_ip; + OPTIMIZER_HIDE_VAR(func); +- x86_call_depth_emit_accounting(pprog, func); +- return emit_patch(pprog, func, ip, 0xE8); ++ adjusted_ip = (u8 *)ip + x86_call_depth_emit_accounting(pprog, func); ++ return emit_patch(pprog, func, adjusted_ip, 0xE8); } -diff --git a/kernel/padata.c b/kernel/padata.c -index e007b8a4b738..7c80301ab084 100644 ---- a/kernel/padata.c -+++ b/kernel/padata.c -@@ -45,7 +45,7 @@ struct padata_mt_job_state { - }; + static int emit_jump(u8 **pprog, void *func, void *ip) +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 5c536151ef83..5a80379253a7 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -912,7 +912,7 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev) + } - static void padata_free_pd(struct parallel_data *pd); --static void __init padata_mt_helper(struct work_struct *work); -+static void padata_mt_helper(struct work_struct *work); + gpiod_set_value_cansleep(reset_gpio, 0); +- msleep(200); ++ usleep_range(USEC_PER_SEC / 2, USEC_PER_SEC); + gpiod_set_value_cansleep(reset_gpio, 1); - static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) + return; +diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +index e381de2429fa..ae3783a7d7f4 100644 +--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c ++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +@@ -515,11 +515,8 @@ static enum bp_result get_gpio_i2c_info( + info->i2c_slave_address = record->i2c_slave_addr; + + /* TODO: check how to get register offset for en, Y, etc. 
*/ +- info->gpio_info.clk_a_register_index = +- le16_to_cpu( +- header->gpio_pin[table_index].data_a_reg_index); +- info->gpio_info.clk_a_shift = +- header->gpio_pin[table_index].gpio_bitshift; ++ info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index); ++ info->gpio_info.clk_a_shift = pin->gpio_bitshift; + + return BP_RESULT_OK; + } +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +index 3af24ef9cb2d..51838bef7fb0 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +@@ -714,7 +714,7 @@ static const struct dc_debug_options debug_defaults_drv = { + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, +- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, ++ .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index 8f9244fe5c86..c10ff621cb1d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -642,7 +642,7 @@ static const struct dc_debug_options debug_defaults_drv = { + .clock_trace = true, + .disable_pplib_clock_request = true, + .min_disp_clk_khz = 100000, +- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, ++ .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +index 95da6dd1cc65..c4000518dc56 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +@@ -304,7 +304,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, + | FEATURE_MASK(FEATURE_GFX_SS_BIT) + | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) + | FEATURE_MASK(FEATURE_FW_CTF_BIT) +- | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); ++ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT) ++ | FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c +index 0e4378420271..1e08cc5a1702 100644 +--- a/drivers/gpu/drm/scheduler/sched_main.c ++++ b/drivers/gpu/drm/scheduler/sched_main.c +@@ -308,7 +308,8 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) + */ + void drm_sched_fault(struct drm_gpu_scheduler *sched) { -@@ -438,7 +438,7 @@ static int padata_setup_cpumasks(struct padata_instance *pinst) - return err; +- mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); ++ if (sched->ready) ++ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); } + EXPORT_SYMBOL(drm_sched_fault); --static void __init padata_mt_helper(struct work_struct *w) -+static void padata_mt_helper(struct work_struct *w) - { - struct padata_work *pw = container_of(w, struct padata_work, pw_work); - struct padata_mt_job_state *ps = pw->pw_data; -diff --git a/lib/Makefile b/lib/Makefile -index baf2821f7a00..31a3a257fd49 100644 ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -47,7 +47,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ - list_sort.o uuid.o iov_iter.o clz_ctz.o \ - bsearch.o find_bit.o llist.o memweight.o 
kfifo.o \ - percpu-refcount.o rhashtable.o base64.o \ -- once.o refcount.o usercopy.o errseq.o bucket_locks.o \ -+ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ - generic-radix-tree.o - obj-$(CONFIG_STRING_SELFTEST) += test_string.o - obj-y += string_helpers.o -diff --git a/lib/rcuref.c b/lib/rcuref.c -new file mode 100644 -index 000000000000..5ec00a4a64d1 ---- /dev/null -+++ b/lib/rcuref.c -@@ -0,0 +1,281 @@ -+// SPDX-License-Identifier: GPL-2.0-only +diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig +index dc6816d36d06..bda249068182 100644 +--- a/drivers/leds/trigger/Kconfig ++++ b/drivers/leds/trigger/Kconfig +@@ -154,4 +154,13 @@ config LEDS_TRIGGER_TTY + + When build as a module this driver will be called ledtrig-tty. + ++config LEDS_TRIGGER_BLKDEV ++ tristate "LED Trigger for block devices" ++ depends on BLOCK ++ help ++ The blkdev LED trigger allows LEDs to be controlled by block device ++ activity (reads and writes). + -+/* -+ * rcuref - A scalable reference count implementation for RCU managed objects -+ * -+ * rcuref is provided to replace open coded reference count implementations -+ * based on atomic_t. It protects explicitely RCU managed objects which can -+ * be visible even after the last reference has been dropped and the object -+ * is heading towards destruction. -+ * -+ * A common usage pattern is: -+ * -+ * get() -+ * rcu_read_lock(); -+ * p = get_ptr(); -+ * if (p && !atomic_inc_not_zero(&p->refcnt)) -+ * p = NULL; -+ * rcu_read_unlock(); -+ * return p; -+ * -+ * put() -+ * if (!atomic_dec_return(&->refcnt)) { -+ * remove_ptr(p); -+ * kfree_rcu((p, rcu); -+ * } -+ * -+ * atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has -+ * O(N^2) behaviour under contention with N concurrent operations. -+ * -+ * rcuref uses atomic_add_negative_relaxed() for the fast path, which scales -+ * better under contention. -+ * -+ * Why not refcount? -+ * ================= -+ * -+ * In principle it should be possible to make refcount use the rcuref -+ * scheme, but the destruction race described below cannot be prevented -+ * unless the protected object is RCU managed. -+ * -+ * Theory of operation -+ * =================== ++ See Documentation/leds/ledtrig-blkdev.rst. ++ + endif # LEDS_TRIGGERS +diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile +index 25c4db97cdd4..d53bab5d93f1 100644 +--- a/drivers/leds/trigger/Makefile ++++ b/drivers/leds/trigger/Makefile +@@ -16,3 +16,4 @@ obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o + obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o + obj-$(CONFIG_LEDS_TRIGGER_AUDIO) += ledtrig-audio.o + obj-$(CONFIG_LEDS_TRIGGER_TTY) += ledtrig-tty.o ++obj-$(CONFIG_LEDS_TRIGGER_BLKDEV) += ledtrig-blkdev.o +diff --git a/drivers/leds/trigger/ledtrig-blkdev.c b/drivers/leds/trigger/ledtrig-blkdev.c +new file mode 100644 +index 000000000000..067eedb003b5 +--- /dev/null ++++ b/drivers/leds/trigger/ledtrig-blkdev.c +@@ -0,0 +1,1221 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++/* ++ * Block device LED trigger + * -+ * rcuref uses an unsigned integer reference counter. 
As long as the -+ * counter value is greater than or equal to RCUREF_ONEREF and not larger -+ * than RCUREF_MAXREF the reference is alive: ++ * Copyright 2021-2022 Ian Pilcher ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * DOC: Overview + * -+ * ONEREF MAXREF SATURATED RELEASED DEAD NOREF -+ * 0 0x7FFFFFFF 0x8000000 0xA0000000 0xBFFFFFFF 0xC0000000 0xE0000000 0xFFFFFFFF -+ * <---valid --------> <-------saturation zone-------> <-----dead zone-----> ++ * The ``blkdev`` LED trigger works by periodically checking the activity ++ * counters of block devices that have been linked to one or more LEDs and ++ * blinking those LED(s) if the correct type of activity has occurred. The ++ * periodic check is scheduled with the Linux kernel's deferred work facility. + * -+ * The get() and put() operations do unconditional increments and -+ * decrements. The result is checked after the operation. This optimizes -+ * for the fast path. ++ * Trigger-specific data about block devices and LEDs is stored in two data ++ * structures --- &struct blkdev_trig_bdev (a "BTB") and &struct blkdev_trig_led ++ * (a "BTL"). Each structure contains a &struct xarray that holds links to any ++ * linked devices of the other type. I.e. &blkdev_trig_bdev.linked_btls ++ * contains links to all BTLs whose LEDs have been linked to the BTB's block ++ * device, and &blkdev_trig_led.linked_btbs contains links to all BTBs whose ++ * block devices have been linked to the BTL's LED. Thus, a block device can ++ * be linked to more than one LED, and an LED can be linked to more than one ++ * block device. ++ */ ++ ++/* Default, minimum & maximum blink duration (milliseconds) */ ++#define BLKDEV_TRIG_BLINK_DEF 75 ++#define BLKDEV_TRIG_BLINK_MIN 10 ++#define BLKDEV_TRIG_BLINK_MAX 86400000 /* 24 hours */ ++ ++/* Default, minimum & maximum activity check interval (milliseconds) */ ++#define BLKDEV_TRIG_CHECK_DEF 100 ++#define BLKDEV_TRIG_CHECK_MIN 25 ++#define BLKDEV_TRIG_CHECK_MAX 86400000 /* 24 hours */ ++ ++/* ++ * If blkdev_trig_check() can't lock the mutex, how long to wait before trying ++ * again (milliseconds) ++ */ ++#define BLKDEV_TRIG_CHECK_RETRY 5 ++ ++/* Mode argument for calls to blkdev_get_by_path() and blkdev_put() */ ++#define BLKDEV_TRIG_FMODE 0 ++ ++/** ++ * struct blkdev_trig_bdev - Trigger-specific data about a block device. ++ * @last_checked: Time (in jiffies) at which the trigger last checked this ++ * block device for activity. ++ * @last_activity: Time (in jiffies) at which the trigger last detected ++ * activity of each type. ++ * @ios: Activity counter values for each type, corresponding to ++ * the timestamps in &last_activity. ++ * @index: &xarray index, so the BTB can be included in one or more ++ * &blkdev_trig_led.linked_btbs. ++ * @bdev: The block device. ++ * @linked_btls: The BTLs that represent the LEDs linked to the BTB's ++ * block device. + * -+ * If the reference count is saturated or dead, then the increments and -+ * decrements are not harmful as the reference count still stays in the -+ * respective zones and is always set back to STATURATED resp. DEAD. The -+ * zones have room for 2^28 racing operations in each direction, which -+ * makes it practically impossible to escape the zones. ++ * Every block device linked to at least one LED gets a "BTB." A BTB is created ++ * when a block device that is not currently linked to any LEDs is linked to an ++ * LED. 
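++ *
++ * (A hedged sketch of the device-resource pattern this implies - the
++ * allocation itself is done when a device is first linked, along the lines
++ * of:
++ *
++ *	btb = devres_alloc(blkdev_trig_btb_release, sizeof(*btb), GFP_KERNEL);
++ *	...
++ *	devres_add(&bdev->bd_device, btb);
++ *
++ * so that the driver core calls blkdev_trig_btb_release() and frees the BTB
++ * when the block device is removed.)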
+ *
++ * A BTB is freed when one of the following occurs:
++ *
++ * * The number of LEDs linked to the block device becomes zero, because it has
++ *   been unlinked from its last LED using the trigger's &sysfs interface.
++ *
++ * * The number of LEDs linked to the block device becomes zero, because the
++ *   last LED to which it was linked has been disassociated from the trigger
++ *   (which happens automatically if the LED device is removed from the system).
++ *
++ * * The BTB's block device is removed from the system.  To accommodate this
++ *   scenario, BTBs are created as device resources, so that the release
++ *   function will be called by the driver core when the device is removed.
++ */
++struct blkdev_trig_bdev {
++	unsigned long		last_checked;
++	unsigned long		last_activity[NR_STAT_GROUPS];
++	unsigned long		ios[NR_STAT_GROUPS];
++	unsigned long		index;
++	struct block_device	*bdev;
++	struct xarray		linked_btls;
++};
++
++/**
++ * struct blkdev_trig_led - Trigger-specific data about an LED.
++ * @last_checked:	Time (in jiffies) at which the trigger last checked the
++ *			block devices linked to this LED for activity.
++ * @index:		&xarray index, so the BTL can be included in one or more
++ *			&blkdev_trig_bdev.linked_btls.
++ * @mode:		Bitmask for types of block device activity that will
++ *			cause this LED to blink --- reads, writes, discards,
++ *			etc.
++ * @led:		The LED device.
++ * @blink_msec:		Duration of a blink (milliseconds).
++ * @check_jiffies:	Frequency with which block devices linked to this LED
++ *			should be checked for activity (jiffies).
++ * @linked_btbs:	The BTBs that represent the block devices linked to the
++ *			BTL's LED.
++ * @all_btls_node:	The BTL's node in the module's list of all BTLs.
++ *
++ * Every LED associated with the block device trigger gets a "BTL."  A BTL is
++ * created when the trigger is "activated" on an LED (usually by writing
++ * ``blkdev`` to the LED's &sysfs &trigger attribute).  A BTL is freed when its
++ * LED is disassociated from the trigger, either through the trigger's &sysfs
++ * interface or because the LED device is removed from the system.
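++ *
++ * A minimal sketch of the linking bookkeeping, mirroring blkdev_trig_link()
++ * and _blkdev_trig_unlink_always() below:
++ *
++ *	xa_insert(&btb->linked_btls, btl->index, btl, GFP_KERNEL);
++ *	xa_insert(&btl->linked_btbs, btb->index, btb, GFP_KERNEL);
++ *	...
++ *	xa_erase(&btb->linked_btls, btl->index);
++ *	xa_erase(&btl->linked_btbs, btb->index);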
++ */ ++struct blkdev_trig_led { ++ unsigned long last_checked; ++ unsigned long index; ++ unsigned long mode; /* must be ulong for atomic bit ops */ ++ struct led_classdev *led; ++ unsigned int blink_msec; ++ unsigned int check_jiffies; ++ struct xarray linked_btbs; ++ struct hlist_node all_btls_node; ++}; ++ ++/* Protects everything except atomic LED attributes */ ++static DEFINE_MUTEX(blkdev_trig_mutex); ++ ++/* BTB device resource release function */ ++static void blkdev_trig_btb_release(struct device *dev, void *res); ++ ++/* Index for next BTB or BTL */ ++static unsigned long blkdev_trig_next_index; ++ ++/* All LEDs associated with the trigger */ ++static HLIST_HEAD(blkdev_trig_all_btls); ++ ++/* Delayed work to periodically check for activity & blink LEDs */ ++static void blkdev_trig_check(struct work_struct *work); ++static DECLARE_DELAYED_WORK(blkdev_trig_work, blkdev_trig_check); ++ ++/* When is the delayed work scheduled to run next (jiffies) */ ++static unsigned long blkdev_trig_next_check; ++ ++/* Total number of BTB-to-BTL links */ ++static unsigned int blkdev_trig_link_count; ++ ++/* Empty sysfs attribute list for next 2 declarations */ ++static struct attribute *blkdev_trig_attrs_empty[] = { NULL }; ++ ++/* linked_leds sysfs directory for block devs linked to 1 or more LEDs */ ++static const struct attribute_group blkdev_trig_linked_leds = { ++ .name = "linked_leds", ++ .attrs = blkdev_trig_attrs_empty, ++}; ++ ++/* linked_devices sysfs directory for each LED associated with the trigger */ ++static const struct attribute_group blkdev_trig_linked_devs = { ++ .name = "linked_devices", ++ .attrs = blkdev_trig_attrs_empty, ++}; ++ ++ ++/* + * -+ * As the result of T1's add is negative, the get() goes into the slow path -+ * and observes refcnt being in the dead zone which makes the operation fail. ++ * Delayed work to check for activity & blink LEDs + * -+ * Possible critical states: ++ */ ++ ++/** ++ * blkdev_trig_blink() - Blink an LED, if the correct type of activity has ++ * occurred on the block device. ++ * @btl: The BTL that represents the LED ++ * @btb: The BTB that represents the block device + * -+ * Context Counter References Operation -+ * T1 0 1 init() -+ * T2 1 2 get() -+ * T1 0 1 put() -+ * T2 -1 0 put() tries to mark dead -+ * T1 0 1 get() -+ * T2 0 1 put() mark dead fails -+ * T1 -1 0 put() tries to mark dead -+ * T1 DEAD 0 put() mark dead succeeds -+ * T2 DEAD+1 0 get() fails and puts it back to DEAD -+ * -+ * Of course there are more complex scenarios, but the above illustrates -+ * the working principle. The rest is left to the imagination of the -+ * reader. 
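+ *
+ * A worked numeric example of the biased encoding (a sketch derived from
+ * the helpers in include/linux/rcuref.h; the stored counter is always the
+ * number of held references minus one):
+ *
+ *	rcuref_t ref;
+ *	rcuref_init(&ref, 1);	// ->refcnt == 0x00000000 (RCUREF_ONEREF)
+ *	rcuref_get(&ref);	// ->refcnt == 0x00000001, two references held
+ *	rcuref_read(&ref);	// returns 2
+ *	rcuref_put(&ref);	// ->refcnt back to RCUREF_ONEREF, returns false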
-+ * -+ * Deconstruction race -+ * =================== -+ * -+ * The release operation must be protected by prohibiting a grace period in -+ * order to prevent a possible use after free: -+ * -+ * T1 T2 -+ * put() get() -+ * // ref->refcnt = ONEREF -+ * if (!atomic_add_negative(-1, &ref->refcnt)) -+ * return false; <- Not taken -+ * -+ * // ref->refcnt == NOREF -+ * --> preemption -+ * // Elevates ref->refcnt to ONEREF -+ * if (!atomic_add_negative(1, &ref->refcnt)) -+ * return true; <- taken -+ * -+ * if (put(&p->ref)) { <-- Succeeds -+ * remove_pointer(p); -+ * kfree_rcu(p, rcu); -+ * } -+ * -+ * RCU grace period ends, object is freed -+ * -+ * atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); <- UAF -+ * -+ * This is prevented by disabling preemption around the put() operation as -+ * that's in most kernel configurations cheaper than a rcu_read_lock() / -+ * rcu_read_unlock() pair and in many cases even a NOOP. In any case it -+ * prevents the grace period which keeps the object alive until all put() -+ * operations complete. -+ * -+ * Saturation protection -+ * ===================== -+ * -+ * The reference count has a saturation limit RCUREF_MAXREF (INT_MAX). -+ * Once this is exceedded the reference count becomes stale by setting it -+ * to RCUREF_SATURATED, which will cause a memory leak, but it prevents -+ * wrap arounds which obviously cause worse problems than a memory -+ * leak. When saturation is reached a warning is emitted. -+ * -+ * Race conditions -+ * =============== -+ * -+ * All reference count increment/decrement operations are unconditional and -+ * only verified after the fact. This optimizes for the good case and takes -+ * the occasional race vs. a dead or already saturated refcount into -+ * account. The saturation and dead zones are large enough to accomodate -+ * for that. -+ * -+ * Memory ordering -+ * =============== -+ * -+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions -+ * and provide only what is strictly required for refcounts. -+ * -+ * The increments are fully relaxed; these will not provide ordering. The -+ * rationale is that whatever is used to obtain the object to increase the -+ * reference count on will provide the ordering. For locked data -+ * structures, its the lock acquire, for RCU/lockless data structures its -+ * the dependent load. -+ * -+ * rcuref_get() provides a control dependency ordering future stores which -+ * ensures that the object is not modified when acquiring a reference -+ * fails. -+ * -+ * rcuref_put() provides release order, i.e. all prior loads and stores -+ * will be issued before. It also provides a control dependency ordering -+ * against the subsequent destruction of the object. ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. ++ * Return: &true if the LED is blinked, &false if not. ++ */ ++static bool blkdev_trig_blink(const struct blkdev_trig_led *btl, ++ const struct blkdev_trig_bdev *btb) ++{ ++ unsigned long mode, mask, delay_on, delay_off; ++ enum stat_group i; ++ ++ mode = READ_ONCE(btl->mode); ++ ++ for (i = STAT_READ, mask = 1; i <= STAT_FLUSH; ++i, mask <<= 1) { ++ ++ if (!(mode & mask)) ++ continue; ++ ++ if (time_before_eq(btb->last_activity[i], btl->last_checked)) ++ continue; ++ ++ delay_on = READ_ONCE(btl->blink_msec); ++ delay_off = 1; /* 0 leaves LED turned on */ ++ ++ led_blink_set_oneshot(btl->led, &delay_on, &delay_off, 0); ++ return true; ++ } ++ ++ return false; ++} ++ ++/** ++ * blkdev_trig_update_btb() - Update a BTB's activity counters and timestamps. 
++ * @btb: The BTB ++ * @now: Timestamp (in jiffies) + * -+ * If rcuref_put() successfully dropped the last reference and marked the -+ * object DEAD it also provides acquire ordering. ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. + */ ++static void blkdev_trig_update_btb(struct blkdev_trig_bdev *btb, ++ unsigned long now) ++{ ++ unsigned long new_ios; ++ enum stat_group i; + -+#include -+#include ++ for (i = STAT_READ; i <= STAT_FLUSH; ++i) { ++ ++ new_ios = part_stat_read(btb->bdev, ios[i]); ++ ++ if (new_ios != btb->ios[i]) { ++ btb->ios[i] = new_ios; ++ btb->last_activity[i] = now; ++ } ++ } ++ ++ btb->last_checked = now; ++} + +/** -+ * rcuref_get_slowpath - Slowpath of rcuref_get() -+ * @ref: Pointer to the reference count ++ * blkdev_trig_check() - Check linked devices for activity and blink LEDs. ++ * @work: Delayed work (&blkdev_trig_work) + * -+ * Invoked when the reference count is outside of the valid zone. ++ * Context: Process context. Takes and releases &blkdev_trig_mutex. ++ */ ++static void blkdev_trig_check(struct work_struct *work) ++{ ++ struct blkdev_trig_led *btl; ++ struct blkdev_trig_bdev *btb; ++ unsigned long index, delay, now, led_check, led_delay; ++ bool blinked; ++ ++ if (!mutex_trylock(&blkdev_trig_mutex)) { ++ delay = msecs_to_jiffies(BLKDEV_TRIG_CHECK_RETRY); ++ goto exit_reschedule; ++ } ++ ++ now = jiffies; ++ delay = ULONG_MAX; ++ ++ hlist_for_each_entry (btl, &blkdev_trig_all_btls, all_btls_node) { ++ ++ led_check = btl->last_checked + btl->check_jiffies; ++ ++ if (time_before_eq(led_check, now)) { ++ ++ blinked = false; ++ ++ xa_for_each (&btl->linked_btbs, index, btb) { ++ ++ if (btb->last_checked != now) ++ blkdev_trig_update_btb(btb, now); ++ if (!blinked) ++ blinked = blkdev_trig_blink(btl, btb); ++ } ++ ++ btl->last_checked = now; ++ led_delay = btl->check_jiffies; ++ ++ } else { ++ led_delay = led_check - now; ++ } ++ ++ if (led_delay < delay) ++ delay = led_delay; ++ } ++ ++ mutex_unlock(&blkdev_trig_mutex); ++ ++exit_reschedule: ++ WARN_ON_ONCE(delay == ULONG_MAX); ++ WARN_ON_ONCE(!schedule_delayed_work(&blkdev_trig_work, delay)); ++} ++ ++/** ++ * blkdev_trig_sched_led() - Set the schedule of the delayed work when a new ++ * LED is added to the schedule. ++ * @btl: The BTL that represents the LED + * -+ * Return: -+ * False if the reference count was already marked dead ++ * Called when the number of block devices to which an LED is linked becomes ++ * non-zero. + * -+ * True if the reference count is saturated, which prevents the -+ * object from being deconstructed ever. ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. + */ -+bool rcuref_get_slowpath(rcuref_t *ref) ++static void blkdev_trig_sched_led(const struct blkdev_trig_led *btl) +{ -+ unsigned int cnt = atomic_read(&ref->refcnt); ++ unsigned long delay = READ_ONCE(btl->check_jiffies); ++ unsigned long check_by = jiffies + delay; + + /* -+ * If the reference count was already marked dead, undo the -+ * increment so it stays in the middle of the dead zone and return -+ * fail. ++ * If no other LED-to-block device links exist, simply schedule the ++ * delayed work according to this LED's check_interval attribute ++ * (check_jiffies). + */ -+ if (cnt >= RCUREF_RELEASED) { -+ atomic_set(&ref->refcnt, RCUREF_DEAD); -+ return false; ++ if (blkdev_trig_link_count == 0) { ++ WARN_ON(!schedule_delayed_work(&blkdev_trig_work, delay)); ++ blkdev_trig_next_check = check_by; ++ return; + } + + /* -+ * If it was saturated, warn and mark it so. 
In case the increment -+ * was already on a saturated value restore the saturation -+ * marker. This keeps it in the middle of the saturation zone and -+ * prevents the reference count from overflowing. This leaks the -+ * object memory, but prevents the obvious reference count overflow -+ * damage. ++ * If the next check is already scheduled to occur soon enough to ++ * accomodate this LED's check_interval, the schedule doesn't need ++ * to be changed. + */ -+ if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory")) -+ atomic_set(&ref->refcnt, RCUREF_SATURATED); -+ return true; ++ if (time_after_eq(check_by, blkdev_trig_next_check)) ++ return; ++ ++ /* ++ * Modify the schedule, so that the delayed work runs soon enough for ++ * this LED. ++ */ ++ WARN_ON(!mod_delayed_work(system_wq, &blkdev_trig_work, delay)); ++ blkdev_trig_next_check = check_by; +} -+EXPORT_SYMBOL_GPL(rcuref_get_slowpath); + -+/** -+ * rcuref_put_slowpath - Slowpath of __rcuref_put() -+ * @ref: Pointer to the reference count -+ * -+ * Invoked when the reference count is outside of the valid zone. ++ ++/* + * -+ * Return: -+ * True if this was the last reference with no future references -+ * possible. This signals the caller that it can safely schedule the -+ * object, which is protected by the reference counter, for -+ * deconstruction. ++ * Linking and unlinking LEDs and block devices + * -+ * False if there are still active references or the put() raced -+ * with a concurrent get()/put() pair. Caller is not allowed to -+ * deconstruct the protected object. + */ -+bool rcuref_put_slowpath(rcuref_t *ref) -+{ -+ unsigned int cnt = atomic_read(&ref->refcnt); + -+ /* Did this drop the last reference? */ -+ if (likely(cnt == RCUREF_NOREF)) { -+ /* -+ * Carefully try to set the reference count to RCUREF_DEAD. -+ * -+ * This can fail if a concurrent get() operation has -+ * elevated it again or the corresponding put() even marked -+ * it dead already. Both are valid situations and do not -+ * require a retry. If this fails the caller is not -+ * allowed to deconstruct the object. -+ */ -+ if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF) -+ return false; ++/** ++ * blkdev_trig_link() - Link a block device to an LED. ++ * @btl: The BTL that represents the LED ++ * @btb: The BTB that represents the block device ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. ++ * Return: &0 on success, negative &errno on error. ++ */ ++static int blkdev_trig_link(struct blkdev_trig_led *btl, ++ struct blkdev_trig_bdev *btb) ++{ ++ bool led_first_link; ++ int err; + -+ /* -+ * The caller can safely schedule the object for -+ * deconstruction. Provide acquire ordering. -+ */ -+ smp_acquire__after_ctrl_dep(); -+ return true; -+ } ++ led_first_link = xa_empty(&btl->linked_btbs); + -+ /* -+ * If the reference count was already in the dead zone, then this -+ * put() operation is imbalanced. Warn, put the reference count back to -+ * DEAD and tell the caller to not deconstruct the object. 
-+ */ -+ if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) { -+ atomic_set(&ref->refcnt, RCUREF_DEAD); -+ return false; -+ } ++ err = xa_insert(&btb->linked_btls, btl->index, btl, GFP_KERNEL); ++ if (err) ++ return err; ++ ++ err = xa_insert(&btl->linked_btbs, btb->index, btb, GFP_KERNEL); ++ if (err) ++ goto error_erase_btl; ++ ++ /* Create /sys/class/block//linked_leds/ symlink */ ++ err = sysfs_add_link_to_group(bdev_kobj(btb->bdev), ++ blkdev_trig_linked_leds.name, ++ &btl->led->dev->kobj, btl->led->name); ++ if (err) ++ goto error_erase_btb; ++ ++ /* Create /sys/class/leds//linked_devices/ symlink */ ++ err = sysfs_add_link_to_group(&btl->led->dev->kobj, ++ blkdev_trig_linked_devs.name, ++ bdev_kobj(btb->bdev), ++ dev_name(&btb->bdev->bd_device)); ++ if (err) ++ goto error_remove_symlink; + + /* -+ * This is a put() operation on a saturated refcount. Restore the -+ * mean saturation value and tell the caller to not deconstruct the -+ * object. ++ * If this is the first block device linked to this LED, the delayed ++ * work schedule may need to be changed. + */ -+ if (cnt > RCUREF_MAXREF) -+ atomic_set(&ref->refcnt, RCUREF_SATURATED); -+ return false; -+} -+EXPORT_SYMBOL_GPL(rcuref_put_slowpath); -diff --git a/mm/ksm.c b/mm/ksm.c -index 2b8d30068cbb..82029f1d454b 100644 ---- a/mm/ksm.c -+++ b/mm/ksm.c -@@ -214,6 +214,7 @@ struct ksm_rmap_item { - #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ - #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ - #define STABLE_FLAG 0x200 /* is listed from the stable tree */ -+#define ZERO_PAGE_FLAG 0x400 /* is zero page placed by KSM */ - - /* The stable and unstable tree heads */ - static struct rb_root one_stable_tree[1] = { RB_ROOT }; -@@ -275,6 +276,9 @@ static unsigned int zero_checksum __read_mostly; - /* Whether to merge empty (zeroed) pages with actual zero pages */ - static bool ksm_use_zero_pages __read_mostly; - -+/* The number of zero pages placed by KSM use_zero_pages */ -+static unsigned long ksm_zero_pages_sharing; ++ if (led_first_link) ++ blkdev_trig_sched_led(btl); + - #ifdef CONFIG_NUMA - /* Zeroed when merging across nodes is not allowed */ - static unsigned int ksm_merge_across_nodes = 1; -@@ -420,6 +424,11 @@ static inline bool ksm_test_exit(struct mm_struct *mm) - return atomic_read(&mm->mm_users) == 0; - } - -+enum break_ksm_pmd_entry_return_flag { -+ HAVE_KSM_PAGE = 1, -+ HAVE_ZERO_PAGE -+}; ++ ++blkdev_trig_link_count; + - static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, - struct mm_walk *walk) - { -@@ -427,6 +436,7 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex - spinlock_t *ptl; - pte_t *pte; - int ret; -+ bool is_zero_page = false; - - if (pmd_leaf(*pmd) || !pmd_present(*pmd)) - return 0; -@@ -434,6 +444,8 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - if (pte_present(*pte)) { - page = vm_normal_page(walk->vma, addr, *pte); -+ if (!page) -+ is_zero_page = is_zero_pfn(pte_pfn(*pte)); - } else if (!pte_none(*pte)) { - swp_entry_t entry = pte_to_swp_entry(*pte); - -@@ -444,7 +456,14 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex - if (is_migration_entry(entry)) - page = pfn_swap_entry_to_page(entry); - } -- ret = page && PageKsm(page); ++ return 0; + -+ if (page && PageKsm(page)) -+ ret = HAVE_KSM_PAGE; -+ else if (is_zero_page) -+ ret = HAVE_ZERO_PAGE; -+ else -+ ret = 0; 
++error_remove_symlink: ++ sysfs_remove_link_from_group(bdev_kobj(btb->bdev), ++ blkdev_trig_linked_leds.name, ++ btl->led->name); ++error_erase_btb: ++ xa_erase(&btl->linked_btbs, btb->index); ++error_erase_btl: ++ xa_erase(&btb->linked_btls, btl->index); ++ return err; ++} + - pte_unmap_unlock(pte, ptl); - return ret; - } -@@ -466,19 +485,22 @@ static const struct mm_walk_ops break_ksm_ops = { - * of the process that owns 'vma'. We also do not want to enforce - * protection keys here anyway. - */ --static int break_ksm(struct vm_area_struct *vma, unsigned long addr) -+static int break_ksm(struct vm_area_struct *vma, unsigned long addr, -+ bool unshare_zero_page) - { - vm_fault_t ret = 0; - - do { -- int ksm_page; -+ int walk_result; - - cond_resched(); -- ksm_page = walk_page_range_vma(vma, addr, addr + 1, -+ walk_result = walk_page_range_vma(vma, addr, addr + 1, - &break_ksm_ops, NULL); -- if (WARN_ON_ONCE(ksm_page < 0)) -- return ksm_page; -- if (!ksm_page) -+ if (WARN_ON_ONCE(walk_result < 0)) -+ return walk_result; -+ if (!walk_result) -+ return 0; -+ if (walk_result == HAVE_ZERO_PAGE && !unshare_zero_page) - return 0; - ret = handle_mm_fault(vma, addr, - FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, -@@ -539,7 +561,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item) - mmap_read_lock(mm); - vma = find_mergeable_vma(mm, addr); - if (vma) -- break_ksm(vma, addr); -+ break_ksm(vma, addr, false); - mmap_read_unlock(mm); - } - -@@ -764,6 +786,33 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, - return NULL; - } - -+/* -+ * Cleaning the rmap_item's ZERO_PAGE_FLAG -+ * This function will be called when unshare or writing on zero pages. ++/** ++ * blkdev_trig_put_btb() - Remove and free a BTB, if it is no longer needed. ++ * @btb: The BTB ++ * ++ * Does nothing if the BTB (block device) is still linked to at least one LED. ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. + */ -+static inline void clean_rmap_item_zero_flag(struct ksm_rmap_item *rmap_item) ++static void blkdev_trig_put_btb(struct blkdev_trig_bdev *btb) +{ -+ if (rmap_item->address & ZERO_PAGE_FLAG) { -+ ksm_zero_pages_sharing--; -+ rmap_item->mm->ksm_zero_pages_sharing--; -+ rmap_item->address &= PAGE_MASK; ++ struct block_device *bdev = btb->bdev; ++ int err; ++ ++ if (xa_empty(&btb->linked_btls)) { ++ ++ sysfs_remove_group(bdev_kobj(bdev), &blkdev_trig_linked_leds); ++ err = devres_destroy(&bdev->bd_device, blkdev_trig_btb_release, ++ NULL, NULL); ++ WARN_ON(err); + } +} + -+/* Only called when rmap_item is going to be freed */ -+static inline void unshare_zero_pages(struct ksm_rmap_item *rmap_item) ++/** ++ * _blkdev_trig_unlink_always() - Perform the unconditionally required steps of ++ * unlinking a block device from an LED. ++ * @btl: The BTL that represents the LED ++ * @btb: The BTB that represents the block device ++ * ++ * When a block device is unlinked from an LED, certain steps must be performed ++ * only if the block device is **not** being released. This function performs ++ * those steps that are **always** required, whether or not the block device is ++ * being released. ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. 
++ */ ++static void _blkdev_trig_unlink_always(struct blkdev_trig_led *btl, ++ struct blkdev_trig_bdev *btb) +{ -+ struct vm_area_struct *vma; ++ --blkdev_trig_link_count; + -+ if (rmap_item->address & ZERO_PAGE_FLAG) { -+ vma = vma_lookup(rmap_item->mm, rmap_item->address); -+ if (vma && !ksm_test_exit(rmap_item->mm)) -+ break_ksm(vma, rmap_item->address, true); -+ } -+ /* Put at last. */ -+ clean_rmap_item_zero_flag(rmap_item); ++ if (blkdev_trig_link_count == 0) ++ WARN_ON(!cancel_delayed_work_sync(&blkdev_trig_work)); ++ ++ xa_erase(&btb->linked_btls, btl->index); ++ xa_erase(&btl->linked_btbs, btb->index); ++ ++ /* Remove /sys/class/leds//linked_devices/ symlink */ ++ sysfs_remove_link_from_group(&btl->led->dev->kobj, ++ blkdev_trig_linked_devs.name, ++ dev_name(&btb->bdev->bd_device)); +} + - /* - * Removing rmap_item from stable or unstable tree. - * This function will clean the information from the stable/unstable tree. -@@ -824,6 +873,7 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) - struct ksm_rmap_item *rmap_item = *rmap_list; - *rmap_list = rmap_item->rmap_list; - remove_rmap_item_from_tree(rmap_item); -+ unshare_zero_pages(rmap_item); - free_rmap_item(rmap_item); - } - } -@@ -853,7 +903,7 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma, - if (signal_pending(current)) - err = -ERESTARTSYS; - else -- err = break_ksm(vma, addr); -+ err = break_ksm(vma, addr, false); - } - return err; - } -@@ -2050,6 +2100,42 @@ static void stable_tree_append(struct ksm_rmap_item *rmap_item, - rmap_item->mm->ksm_merging_pages++; - } - -+static int try_to_merge_with_kernel_zero_page(struct ksm_rmap_item *rmap_item, -+ struct page *page) ++/** ++ * blkdev_trig_unlink_norelease() - Unlink an LED from a block device that is ++ * **not** being released. ++ * @btl: The BTL that represents the LED. ++ * @btb: The BTB that represents the block device. ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. ++ */ ++static void blkdev_trig_unlink_norelease(struct blkdev_trig_led *btl, ++ struct blkdev_trig_bdev *btb) +{ -+ struct mm_struct *mm = rmap_item->mm; -+ int err = 0; ++ _blkdev_trig_unlink_always(btl, btb); ++ ++ /* Remove /sys/class/block//linked_leds/ symlink */ ++ sysfs_remove_link_from_group(bdev_kobj(btb->bdev), ++ blkdev_trig_linked_leds.name, ++ btl->led->name); ++ ++ blkdev_trig_put_btb(btb); ++} ++ ++/** ++ * blkdev_trig_unlink_release() - Unlink an LED from a block device that is ++ * being released. ++ * @btl: The BTL that represents the LED ++ * @btb: The BTB that represents the block device ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. ++ */ ++static void blkdev_trig_unlink_release(struct blkdev_trig_led *btl, ++ struct blkdev_trig_bdev *btb) ++{ ++ _blkdev_trig_unlink_always(btl, btb); + + /* -+ * It should not take ZERO_PAGE_FLAG because on one hand, -+ * get_next_rmap_item don't return zero pages' rmap_item. -+ * On the other hand, even if zero page was writen as -+ * anonymous page, rmap_item has been cleaned after -+ * stable_tree_search ++ * If the BTB is being released, the driver core has already removed the ++ * device's attribute groups, and the BTB will be freed automatically, ++ * so there's nothing else to do. 
+ */
-+ if (!WARN_ON_ONCE(rmap_item->address & ZERO_PAGE_FLAG)) {
-+ struct vm_area_struct *vma;
++}
+
-+ mmap_read_lock(mm);
-+ vma = find_mergeable_vma(mm, rmap_item->address);
-+ if (vma) {
-+ err = try_to_merge_one_page(vma, page,
-+ ZERO_PAGE(rmap_item->address));
-+ if (!err) {
-+ rmap_item->address |= ZERO_PAGE_FLAG;
-+ ksm_zero_pages_sharing++;
-+ rmap_item->mm->ksm_zero_pages_sharing++;
-+ }
-+ } else {
-+ /* If the vma is out of date, we do not need to continue. */
-+ err = 0;
-+ }
-+ mmap_read_unlock(mm);
-+ }
++/*
++ *
++ * BTB creation
++ *
++ */
++
++/**
++ * blkdev_trig_btb_release() - BTB device resource release function.
++ * @dev: The block device
++ * @res: The BTB
++ *
++ * Called by the driver core when a block device with a BTB is removed.
++ *
++ * Context: Process context. Takes and releases &blkdev_trig_mutex.
++ */
++static void blkdev_trig_btb_release(struct device *dev, void *res)
++{
++ struct blkdev_trig_bdev *btb = res;
++ struct blkdev_trig_led *btl;
++ unsigned long index;
++
++ mutex_lock(&blkdev_trig_mutex);
++
++ xa_for_each (&btb->linked_btls, index, btl)
++ blkdev_trig_unlink_release(btl, btb);
++
++ mutex_unlock(&blkdev_trig_mutex);
+}
+
-+ return err;
++/**
++ * blkdev_trig_get_bdev() - Get a block device by path.
++ * @path: The value written to an LED's &link_dev_by_path or
++ * &unlink_dev_by_path attribute, which should be the path to a
++ * special file that represents a block device
++ * @len: The number of characters in &path (not including its
++ * terminating null)
++ *
++ * The caller must call blkdev_put() when finished with the device.
++ *
++ * Context: Process context.
++ * Return: The block device, or an error pointer.
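blkdev_trig_get_bdev() reports failure through the pointer itself, per the kernel's error-pointer convention named in the Return line above. A rough userspace illustration of how ERR_PTR()/IS_ERR()/PTR_ERR() encode a negative errno in a pointer value (simplified from include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Error pointers occupy the top 4095 values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *bdev = ERR_PTR(-2);	/* e.g. -ENOENT from a failed lookup */

	if (IS_ERR(bdev))
		printf("lookup failed: %ld\n", PTR_ERR(bdev));
	return 0;
}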
++ */ ++static struct block_device *blkdev_trig_get_bdev(const char *path, size_t len) ++{ ++ struct block_device *bdev; ++ char *buf; + - tree_rmap_item = - unstable_tree_search_insert(rmap_item, page, &tree_page); - if (tree_rmap_item) { -@@ -2220,23 +2293,39 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite - } - } - --static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, -- struct ksm_rmap_item **rmap_list, -- unsigned long addr) -+static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr, -+ struct ksm_rmap_item **rmap_list) - { -- struct ksm_rmap_item *rmap_item; -- - while (*rmap_list) { -- rmap_item = *rmap_list; -+ struct ksm_rmap_item *rmap_item = *rmap_list; ++ buf = kmemdup(path, len + 1, GFP_KERNEL); /* +1 to include null */ ++ if (buf == NULL) ++ return ERR_PTR(-ENOMEM); + - if ((rmap_item->address & PAGE_MASK) == addr) - return rmap_item; - if (rmap_item->address > addr) - break; - *rmap_list = rmap_item->rmap_list; -+ /* -+ * If we end up here, the VMA is MADV_UNMERGEABLE or its page -+ * is ineligible or discarded, e.g. MADV_DONTNEED. -+ */ - remove_rmap_item_from_tree(rmap_item); -+ unshare_zero_pages(rmap_item); - free_rmap_item(rmap_item); - } - -+ return NULL; ++ bdev = blkdev_get_by_path(strim(buf), BLKDEV_TRIG_FMODE, THIS_MODULE); ++ kfree(buf); ++ return bdev; +} + -+static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, -+ struct ksm_rmap_item **rmap_list, -+ unsigned long addr) ++/** ++ * blkdev_trig_get_btb() - Find or create the BTB for a block device. ++ * @path: The value written to an LED's &link_dev_by_path attribute, ++ * which should be the path to a special file that represents a ++ * block device ++ * @len: The number of characters in &path ++ * ++ * If a new BTB is created, because the block device was not previously linked ++ * to any LEDs, the block device's &linked_leds &sysfs directory is created. ++ * ++ * Context: Process context. Caller must hold &blkdev_trig_mutex. ++ * Return: Pointer to the BTB, error pointer on error. ++ */ ++static struct blkdev_trig_bdev *blkdev_trig_get_btb(const char *path, ++ size_t len) +{ -+ struct ksm_rmap_item *rmap_item; ++ struct block_device *bdev; ++ struct blkdev_trig_bdev *btb; ++ int err; + -+ rmap_item = try_to_get_old_rmap_item(addr, rmap_list); -+ if (rmap_item) -+ return rmap_item; ++ bdev = blkdev_trig_get_bdev(path, len); ++ if (IS_ERR(bdev)) ++ return ERR_CAST(bdev); + - rmap_item = alloc_rmap_item(); - if (rmap_item) { - /* It has already been zeroed */ -@@ -2343,6 +2432,22 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) - } - if (is_zone_device_page(*page)) - goto next_page; -+ if (is_zero_pfn(page_to_pfn(*page))) { -+ /* -+ * To monitor ksm zero pages which becomes non-anonymous, -+ * we have to save each rmap_item of zero pages by -+ * try_to_get_old_rmap_item() walking on -+ * ksm_scan.rmap_list, otherwise their rmap_items will be -+ * freed by the next turn of get_next_rmap_item(). The -+ * function get_next_rmap_item() will free all "skipped" -+ * rmap_items because it thinks its areas as UNMERGEABLE. 
-+ */ -+ rmap_item = try_to_get_old_rmap_item(ksm_scan.address, -+ ksm_scan.rmap_list); -+ if (rmap_item && (rmap_item->address & ZERO_PAGE_FLAG)) -+ ksm_scan.rmap_list = &rmap_item->rmap_list; -+ goto next_page; -+ } - if (PageAnon(*page)) { - flush_anon_page(vma, *page, ksm_scan.address); - flush_dcache_page(*page); -@@ -3139,6 +3244,13 @@ static ssize_t pages_volatile_show(struct kobject *kobj, - } - KSM_ATTR_RO(pages_volatile); - -+static ssize_t zero_pages_sharing_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "%ld\n", ksm_zero_pages_sharing); -+} -+KSM_ATTR_RO(zero_pages_sharing); -+ - static ssize_t stable_node_dups_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) - { -@@ -3194,6 +3306,7 @@ static struct attribute *ksm_attrs[] = { - &pages_sharing_attr.attr, - &pages_unshared_attr.attr, - &pages_volatile_attr.attr, -+ &zero_pages_sharing_attr.attr, - &full_scans_attr.attr, - #ifdef CONFIG_NUMA - &merge_across_nodes_attr.attr, -diff --git a/mm/mempolicy.c b/mm/mempolicy.c -index a256a241fd1d..2068b594dc88 100644 ---- a/mm/mempolicy.c -+++ b/mm/mempolicy.c -@@ -790,61 +790,50 @@ static int vma_replace_policy(struct vm_area_struct *vma, - return err; - } - --/* Step 2: apply policy to a range and do splits. */ --static int mbind_range(struct mm_struct *mm, unsigned long start, -- unsigned long end, struct mempolicy *new_pol) -+/* Split or merge the VMA (if required) and apply the new policy */ -+static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, -+ struct vm_area_struct **prev, unsigned long start, -+ unsigned long end, struct mempolicy *new_pol) - { -- VMA_ITERATOR(vmi, mm, start); -- struct vm_area_struct *prev; -- struct vm_area_struct *vma; -- int err = 0; -+ struct vm_area_struct *merged; -+ unsigned long vmstart, vmend; - pgoff_t pgoff; -+ int err; - -- prev = vma_prev(&vmi); -- vma = vma_find(&vmi, end); -- if (WARN_ON(!vma)) -+ vmend = min(end, vma->vm_end); -+ if (start > vma->vm_start) { -+ *prev = vma; -+ vmstart = start; -+ } else { -+ vmstart = vma->vm_start; ++ btb = devres_find(&bdev->bd_device, blkdev_trig_btb_release, ++ NULL, NULL); ++ if (btb != NULL) { ++ err = 0; ++ goto exit_put_bdev; + } + -+ if (mpol_equal(vma_policy(vma), new_pol)) - return 0; - -- if (start > vma->vm_start) -- prev = vma; -- -- do { -- unsigned long vmstart = max(start, vma->vm_start); -- unsigned long vmend = min(end, vma->vm_end); -- -- if (mpol_equal(vma_policy(vma), new_pol)) -- goto next; -- -- pgoff = vma->vm_pgoff + -- ((vmstart - vma->vm_start) >> PAGE_SHIFT); -- prev = vma_merge(&vmi, mm, prev, vmstart, vmend, vma->vm_flags, -- vma->anon_vma, vma->vm_file, pgoff, -- new_pol, vma->vm_userfaultfd_ctx, -- anon_vma_name(vma)); -- if (prev) { -- vma = prev; -- goto replace; -- } -- if (vma->vm_start != vmstart) { -- err = split_vma(&vmi, vma, vmstart, 1); -- if (err) -- goto out; -- } -- if (vma->vm_end != vmend) { -- err = split_vma(&vmi, vma, vmend, 0); -- if (err) -- goto out; -- } --replace: -- err = vma_replace_policy(vma, new_pol); -+ pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); -+ merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags, -+ vma->anon_vma, vma->vm_file, pgoff, new_pol, -+ vma->vm_userfaultfd_ctx, anon_vma_name(vma)); -+ if (merged) { -+ *prev = merged; -+ return vma_replace_policy(merged, new_pol); ++ if (blkdev_trig_next_index == ULONG_MAX) { ++ err = -EOVERFLOW; ++ goto exit_put_bdev; + } + -+ if (vma->vm_start != vmstart) { -+ 
err = split_vma(vmi, vma, vmstart, 1); - if (err) -- goto out; --next: -- prev = vma; -- } for_each_vma_range(vmi, vma, end); -+ return err; -+ } - --out: -- return err; -+ if (vma->vm_end != vmend) { -+ err = split_vma(vmi, vma, vmend, 0); -+ if (err) -+ return err; ++ btb = devres_alloc(blkdev_trig_btb_release, sizeof(*btb), GFP_KERNEL); ++ if (btb == NULL) { ++ err = -ENOMEM; ++ goto exit_put_bdev; + } + -+ *prev = vma; -+ return vma_replace_policy(vma, new_pol); - } - - /* Set the process memory policy */ -@@ -1259,6 +1248,8 @@ static long do_mbind(unsigned long start, unsigned long len, - nodemask_t *nmask, unsigned long flags) - { - struct mm_struct *mm = current->mm; -+ struct vm_area_struct *vma, *prev; -+ struct vma_iterator vmi; - struct mempolicy *new; - unsigned long end; - int err; -@@ -1328,7 +1319,13 @@ static long do_mbind(unsigned long start, unsigned long len, - goto up_out; - } - -- err = mbind_range(mm, start, end, new); -+ vma_iter_init(&vmi, mm, start); -+ prev = vma_prev(&vmi); -+ for_each_vma_range(vmi, vma, end) { -+ err = mbind_range(&vmi, vma, &prev, start, end, new); -+ if (err) -+ break; ++ err = sysfs_create_group(bdev_kobj(bdev), &blkdev_trig_linked_leds); ++ if (err) ++ goto exit_free_btb; ++ ++ btb->index = blkdev_trig_next_index++; ++ btb->bdev = bdev; ++ xa_init(&btb->linked_btls); ++ ++ /* Populate BTB activity counters */ ++ blkdev_trig_update_btb(btb, jiffies); ++ ++ devres_add(&bdev->bd_device, btb); ++ ++exit_free_btb: ++ if (err) ++ devres_free(btb); ++exit_put_bdev: ++ blkdev_put(bdev, BLKDEV_TRIG_FMODE); ++ return err ? ERR_PTR(err) : btb; ++} ++ ++ ++/* ++ * ++ * Activating and deactivating the trigger on an LED ++ * ++ */ ++ ++/** ++ * blkdev_trig_activate() - Called by the LEDs subsystem when an LED is ++ * associated with the trigger. ++ * @led: The LED ++ * ++ * Context: Process context. Takes and releases &blkdev_trig_mutex. ++ * Return: &0 on success, negative &errno on error. 
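Both BTLs and BTBs draw their keys from the shared blkdev_trig_next_index counter, which is never reused and is guarded against wraparound with -EOVERFLOW, as in blkdev_trig_activate(), whose body follows. A compact userspace sketch of that allocation pattern (names are illustrative only):

#include <limits.h>
#include <stdio.h>

static unsigned long next_index;	/* mirrors blkdev_trig_next_index */

static int alloc_index(unsigned long *out)
{
	if (next_index == ULONG_MAX)
		return -75;		/* -EOVERFLOW on Linux */
	*out = next_index++;		/* indices are never reused */
	return 0;
}

int main(void)
{
	unsigned long idx;

	if (alloc_index(&idx) == 0)
		printf("allocated index %lu\n", idx);
	return 0;
}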
++ */
++static int blkdev_trig_activate(struct led_classdev *led)
++{
++ struct blkdev_trig_led *btl;
++ int err;
++
++ btl = kzalloc(sizeof(*btl), GFP_KERNEL);
++ if (btl == NULL)
++ return -ENOMEM;
++
++ err = mutex_lock_interruptible(&blkdev_trig_mutex);
++ if (err)
++ goto exit_free;
++
++ if (blkdev_trig_next_index == ULONG_MAX) {
++ err = -EOVERFLOW;
++ goto exit_unlock;
+ }
++
++ btl->index = blkdev_trig_next_index++;
++ btl->last_checked = jiffies;
++ btl->mode = -1; /* set all bits */
++ btl->led = led;
++ btl->blink_msec = BLKDEV_TRIG_BLINK_DEF;
++ btl->check_jiffies = msecs_to_jiffies(BLKDEV_TRIG_CHECK_DEF);
++ xa_init(&btl->linked_btbs);
++
++ hlist_add_head(&btl->all_btls_node, &blkdev_trig_all_btls);
++ led_set_trigger_data(led, btl);
++
++exit_unlock:
++ mutex_unlock(&blkdev_trig_mutex);
++exit_free:
++ if (err)
++ kfree(btl);
++ return err;
++}
++
++/**
++ * blkdev_trig_deactivate() - Called by the LEDs subsystem when an LED is
++ * disassociated from the trigger.
++ * @led: The LED
++ *
++ * The LEDs subsystem also calls this function when an LED associated with the
++ * trigger is removed or when the trigger is unregistered (if the module is
++ * unloaded).
++ *
++ * Context: Process context. Takes and releases &blkdev_trig_mutex.
++ */
++static void blkdev_trig_deactivate(struct led_classdev *led)
++{
++ struct blkdev_trig_led *btl = led_get_trigger_data(led);
++ struct blkdev_trig_bdev *btb;
++ unsigned long index;
++
++ mutex_lock(&blkdev_trig_mutex);
++
++ xa_for_each (&btl->linked_btbs, index, btb)
++ blkdev_trig_unlink_norelease(btl, btb);
++
++ hlist_del(&btl->all_btls_node);
++ kfree(btl);
++
++ mutex_unlock(&blkdev_trig_mutex);
++}
++
++
++/*
++ *
++ * Link-related attribute store functions
++ *
++ */
++
++/**
++ * link_dev_by_path_store() - &link_dev_by_path device attribute store function.
++ * @dev: The LED device
++ * @attr: The &link_dev_by_path attribute (&dev_attr_link_dev_by_path)
++ * @buf: The value written to the attribute, which should be the path to
++ * a special file that represents a block device to be linked to
++ * the LED (e.g. ``/dev/sda``)
++ * @count: The number of characters in &buf
++ *
++ * Context: Process context. Takes and releases &blkdev_trig_mutex.
++ * Return: &count on success, negative &errno on error.
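From userspace, the trigger is attached by writing its name to an LED's trigger attribute, which lands in blkdev_trig_activate(); a device is then linked through the store function documented above. A hedged C sketch of that flow (the LED name is a made-up example):

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f == NULL)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Invokes blkdev_trig_activate() for this LED */
	write_attr("/sys/class/leds/example:green:disk/trigger", "blkdev");
	/* Handled by link_dev_by_path_store() */
	write_attr("/sys/class/leds/example:green:disk/link_dev_by_path",
		   "/dev/sda");
	return 0;
}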
++ */
++static ssize_t link_dev_by_path_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++ struct blkdev_trig_bdev *btb;
++ int err;
++
++ err = mutex_lock_interruptible(&blkdev_trig_mutex);
++ if (err)
++ return err;
++
++ btb = blkdev_trig_get_btb(buf, count);
++ if (IS_ERR(btb)) {
++ err = PTR_ERR(btb);
++ goto exit_unlock;
++ }
++
++ if (xa_load(&btb->linked_btls, btl->index) != NULL) {
++ err = -EEXIST;
++ goto exit_put_btb;
++ }
++
++ err = blkdev_trig_link(btl, btb);
++
++exit_put_btb:
++ if (err)
++ blkdev_trig_put_btb(btb);
++exit_unlock:
++ mutex_unlock(&blkdev_trig_mutex);
++ return err ? : count;
++}
++
++/**
++ * unlink_dev_by_path_store() - &unlink_dev_by_path device attribute store
++ * function.
++ * @dev: The LED device
++ * @attr: The &unlink_dev_by_path attribute (&dev_attr_unlink_dev_by_path)
++ * @buf: The value written to the attribute, which should be the path to
++ * a special file that represents a block device to be unlinked
++ * from the LED (e.g. ``/dev/sda``)
++ * @count: The number of characters in &buf
++ *
++ * Context: Process context. Takes and releases &blkdev_trig_mutex.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t unlink_dev_by_path_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++ struct block_device *bdev;
++ struct blkdev_trig_bdev *btb;
++ int err;
++
++ bdev = blkdev_trig_get_bdev(buf, count);
++ if (IS_ERR(bdev))
++ return PTR_ERR(bdev);
++
++ err = mutex_lock_interruptible(&blkdev_trig_mutex);
++ if (err)
++ goto exit_put_bdev;
++
++ btb = devres_find(&bdev->bd_device, blkdev_trig_btb_release,
++ NULL, NULL);
++ if (btb == NULL) {
++ err = -EUNATCH; /* bdev isn't linked to any LED */
++ goto exit_unlock;
++ }
++
++ if (xa_load(&btb->linked_btls, btl->index) == NULL) {
++ err = -EUNATCH; /* bdev isn't linked to this LED */
++ goto exit_unlock;
++ }
++
++ blkdev_trig_unlink_norelease(btl, btb);
++
++exit_unlock:
++ mutex_unlock(&blkdev_trig_mutex);
++exit_put_bdev:
++ blkdev_put(bdev, BLKDEV_TRIG_FMODE);
++ return err ? : count;
++}
++
++/**
++ * unlink_dev_by_name_store() - &unlink_dev_by_name device attribute store
++ * function.
++ * @dev: The LED device
++ * @attr: The &unlink_dev_by_name attribute (&dev_attr_unlink_dev_by_name)
++ * @buf: The value written to the attribute, which should be the kernel
++ * name of a block device to be unlinked from the LED (e.g.
++ * ``sda``)
++ * @count: The number of characters in &buf
++ *
++ * Context: Process context. Takes and releases &blkdev_trig_mutex.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t unlink_dev_by_name_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++ struct blkdev_trig_bdev *btb;
++ unsigned long index;
++ int err;
++
++ err = mutex_lock_interruptible(&blkdev_trig_mutex);
++ if (err)
++ return err;
++
++ err = -EUNATCH;
++
++ xa_for_each (&btl->linked_btbs, index, btb) {
++
++ if (sysfs_streq(dev_name(&btb->bdev->bd_device), buf)) {
++ blkdev_trig_unlink_norelease(btl, btb);
++ err = 0;
++ break;
++ }
++ }
++
++ mutex_unlock(&blkdev_trig_mutex);
++ return err ? : count;
++}
++
++
++/*
++ *
++ * Atomic attribute show & store functions
++ *
++ */
++
++/**
++ * blink_time_show() - &blink_time device attribute show function.
++ * @dev: The LED device
++ * @attr: The &blink_time attribute (&dev_attr_blink_time)
++ * @buf: Output buffer
++ *
++ * Writes the value of &blkdev_trig_led.blink_msec to &buf.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t blink_time_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ const struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%u\n", READ_ONCE(btl->blink_msec));
++}
++
++/**
++ * blink_time_store() - &blink_time device attribute store function.
++ * @dev: The LED device
++ * @attr: The &blink_time attribute (&dev_attr_blink_time)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets &blkdev_trig_led.blink_msec to the value in &buf.
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t blink_time_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++ unsigned int value;
++ int err;
++
++ err = kstrtouint(buf, 0, &value);
++ if (err)
++ return err;
++
++ if (value < BLKDEV_TRIG_BLINK_MIN || value > BLKDEV_TRIG_BLINK_MAX)
++ return -ERANGE;
++
++ WRITE_ONCE(btl->blink_msec, value);
++ return count;
++}
++
++/**
++ * check_interval_show() - &check_interval device attribute show function.
++ * @dev: The LED device
++ * @attr: The &check_interval attribute (&dev_attr_check_interval)
++ * @buf: Output buffer
++ *
++ * Writes the value of &blkdev_trig_led.check_jiffies (converted to
++ * milliseconds) to &buf.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t check_interval_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct blkdev_trig_led *btl = led_trigger_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%u\n",
++ jiffies_to_msecs(READ_ONCE(btl->check_jiffies)));
++}
++
++/**
++ * check_interval_store() - &check_interval device attribute store function.
++ * @dev: The LED device
++ * @attr: The &check_interval attribute (&dev_attr_check_interval)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets &blkdev_trig_led.check_jiffies to the value in &buf (after converting
++ * from milliseconds).
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t check_interval_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct blkdev_trig_led *led = led_trigger_get_drvdata(dev);
++ unsigned int value;
++ int err;
++
++ err = kstrtouint(buf, 0, &value);
++ if (err)
++ return err;
++
++ if (value < BLKDEV_TRIG_CHECK_MIN || value > BLKDEV_TRIG_CHECK_MAX)
++ return -ERANGE;
++
++ WRITE_ONCE(led->check_jiffies, msecs_to_jiffies(value));
++
++ return count;
++}
++
++/**
++ * blkdev_trig_mode_show() - Helper for boolean attribute show functions.
++ * @led: The LED
++ * @buf: Output buffer
++ * @bit: Which bit to show
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
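blink_time and check_interval are read and written without taking blkdev_trig_mutex; READ_ONCE()/WRITE_ONCE() keep those unsynchronized word-sized accesses tear-free. A userspace approximation of the two macros, roughly what the kernel versions expand to for word-sized types (see include/asm-generic/rwonce.h):

#include <stdio.h>

#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static unsigned int blink_msec = 75;	/* like blkdev_trig_led.blink_msec */

int main(void)
{
	WRITE_ONCE(blink_msec, 100);		/* store side, no lock held */
	printf("%u\n", READ_ONCE(blink_msec));	/* load side, no lock held */
	return 0;
}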
++ */
++static int blkdev_trig_mode_show(const struct blkdev_trig_led *led, char *buf,
++ enum stat_group bit)
++{
++ return sysfs_emit(buf,
++ READ_ONCE(led->mode) & (1 << bit) ? "Y\n" : "N\n");
++}
++
++/**
++ * blkdev_trig_mode_store() - Helper for boolean attribute store functions.
++ * @led: The LED
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ * @bit: Which bit to set
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static int blkdev_trig_mode_store(struct blkdev_trig_led *led,
++ const char *buf, size_t count,
++ enum stat_group bit)
++{
++ bool set;
++ int err;
++
++ err = kstrtobool(buf, &set);
++ if (err)
++ return err;
++
++ if (set)
++ set_bit(bit, &led->mode);
++ else
++ clear_bit(bit, &led->mode);
++
++ return count;
++}
++
++/**
++ * blink_on_read_show() - &blink_on_read device attribute show function.
++ * @dev: The LED device
++ * @attr: The &blink_on_read attribute (&dev_attr_blink_on_read)
++ * @buf: Output buffer
++ *
++ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_READ bit in
++ * &blkdev_trig_led.mode is set or cleared.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t blink_on_read_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
++ buf, STAT_READ);
++}
++
++/**
++ * blink_on_read_store() - &blink_on_read device attribute store function.
++ * @dev: The LED device
++ * @attr: The &blink_on_read attribute (&dev_attr_blink_on_read)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets the &STAT_READ bit in &blkdev_trig_led.mode to the value in &buf
++ * (interpreted as a boolean).
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t blink_on_read_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
++ buf, count, STAT_READ);
++}
++
++/**
++ * blink_on_write_show() - &blink_on_write device attribute show function.
++ * @dev: The LED device
++ * @attr: The &blink_on_write attribute (&dev_attr_blink_on_write)
++ * @buf: Output buffer
++ *
++ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_WRITE bit in
++ * &blkdev_trig_led.mode is set or cleared.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t blink_on_write_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
++ buf, STAT_WRITE);
++}
++
++/**
++ * blink_on_write_store() - &blink_on_write device attribute store function.
++ * @dev: The LED device
++ * @attr: The &blink_on_write attribute (&dev_attr_blink_on_write)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets the &STAT_WRITE bit in &blkdev_trig_led.mode to the value in &buf
++ * (interpreted as a boolean).
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
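Each blink_on_* attribute drives one bit of blkdev_trig_led.mode, and the helpers above set or clear it atomically with set_bit()/clear_bit(). A plain C sketch of the same bookkeeping with ordinary bit operations (the enum mirrors, but does not reproduce, the kernel's enum stat_group):

#include <stdbool.h>
#include <stdio.h>

enum stat_group { STAT_READ, STAT_WRITE, STAT_FLUSH, STAT_DISCARD };

static unsigned long mode = -1UL;	/* all bits set, as after activate() */

static void mode_store(enum stat_group bit, bool set)
{
	if (set)
		mode |= 1UL << bit;	/* the driver uses atomic set_bit() */
	else
		mode &= ~(1UL << bit);	/* the driver uses atomic clear_bit() */
}

int main(void)
{
	mode_store(STAT_DISCARD, false);
	printf("blink_on_discard: %c\n",
	       mode & (1UL << STAT_DISCARD) ? 'Y' : 'N');
	return 0;
}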
++ */
++static ssize_t blink_on_write_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
++ buf, count, STAT_WRITE);
++}
++
++/**
++ * blink_on_flush_show() - &blink_on_flush device attribute show function.
++ * @dev: The LED device
++ * @attr: The &blink_on_flush attribute (&dev_attr_blink_on_flush)
++ * @buf: Output buffer
++ *
++ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_FLUSH bit in
++ * &blkdev_trig_led.mode is set or cleared.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t blink_on_flush_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
++ buf, STAT_FLUSH);
++}
++
++/**
++ * blink_on_flush_store() - &blink_on_flush device attribute store function.
++ * @dev: The LED device
++ * @attr: The &blink_on_flush attribute (&dev_attr_blink_on_flush)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets the &STAT_FLUSH bit in &blkdev_trig_led.mode to the value in &buf
++ * (interpreted as a boolean).
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t blink_on_flush_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
++ buf, count, STAT_FLUSH);
++}
++
++/**
++ * blink_on_discard_show() - &blink_on_discard device attribute show function.
++ * @dev: The LED device
++ * @attr: The &blink_on_discard attribute (&dev_attr_blink_on_discard)
++ * @buf: Output buffer
++ *
++ * Writes ``Y`` or ``N`` to &buf, depending on whether the &STAT_DISCARD bit in
++ * &blkdev_trig_led.mode is set or cleared.
++ *
++ * Context: Process context.
++ * Return: The number of characters written to &buf.
++ */
++static ssize_t blink_on_discard_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return blkdev_trig_mode_show(led_trigger_get_drvdata(dev),
++ buf, STAT_DISCARD);
++}
++
++/**
++ * blink_on_discard_store() - &blink_on_discard device attribute store function.
++ * @dev: The LED device
++ * @attr: The &blink_on_discard attribute (&dev_attr_blink_on_discard)
++ * @buf: The new value (as written to the &sysfs attribute)
++ * @count: The number of characters in &buf
++ *
++ * Sets the &STAT_DISCARD bit in &blkdev_trig_led.mode to the value in &buf
++ * (interpreted as a boolean).
++ *
++ * Context: Process context.
++ * Return: &count on success, negative &errno on error.
++ */
++static ssize_t blink_on_discard_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ return blkdev_trig_mode_store(led_trigger_get_drvdata(dev),
++ buf, count, STAT_DISCARD);
++}
++
++/* Device attributes */
++static DEVICE_ATTR_WO(link_dev_by_path);
++static DEVICE_ATTR_WO(unlink_dev_by_path);
++static DEVICE_ATTR_WO(unlink_dev_by_name);
++static DEVICE_ATTR_RW(blink_time);
++static DEVICE_ATTR_RW(check_interval);
++static DEVICE_ATTR_RW(blink_on_read);
++static DEVICE_ATTR_RW(blink_on_write);
++static DEVICE_ATTR_RW(blink_on_flush);
++static DEVICE_ATTR_RW(blink_on_discard);
++
++/* Device attributes in LED directory (/sys/class/leds//...) */
++static struct attribute *blkdev_trig_attrs[] = {
++ &dev_attr_link_dev_by_path.attr,
++ &dev_attr_unlink_dev_by_path.attr,
++ &dev_attr_unlink_dev_by_name.attr,
++ &dev_attr_blink_time.attr,
++ &dev_attr_check_interval.attr,
++ &dev_attr_blink_on_read.attr,
++ &dev_attr_blink_on_write.attr,
++ &dev_attr_blink_on_flush.attr,
++ &dev_attr_blink_on_discard.attr,
++ NULL
++};
++
++/* Unnamed attribute group == no subdirectory */
++static const struct attribute_group blkdev_trig_attr_group = {
++ .attrs = blkdev_trig_attrs,
++};
++
++/* Attribute groups for the trigger */
++static const struct attribute_group *blkdev_trig_attr_groups[] = {
++ &blkdev_trig_attr_group, /* /sys/class/leds//... */
++ &blkdev_trig_linked_devs, /* /sys/class/leds//linked_devices/ */
++ NULL
++};
++
++/* Trigger registration data */
++static struct led_trigger blkdev_trig_trigger = {
++ .name = "blkdev",
++ .activate = blkdev_trig_activate,
++ .deactivate = blkdev_trig_deactivate,
++ .groups = blkdev_trig_attr_groups,
++};
++
++/**
++ * blkdev_trig_init() - Block device LED trigger initialization.
++ *
++ * Registers the ``blkdev`` LED trigger.
++ *
++ * Return: &0 on success, negative &errno on failure.
++ */
++static int __init blkdev_trig_init(void)
++{
++ return led_trigger_register(&blkdev_trig_trigger);
++}
++module_init(blkdev_trig_init);
++
++/**
++ * blkdev_trig_exit() - Block device LED trigger module exit.
++ *
++ * Unregisters the ``blkdev`` LED trigger.
++ */
++static void __exit blkdev_trig_exit(void)
++{
++ led_trigger_unregister(&blkdev_trig_trigger);
++}
++module_exit(blkdev_trig_exit);
++
++MODULE_DESCRIPTION("Block device LED trigger");
++MODULE_AUTHOR("Ian Pilcher ");
++MODULE_LICENSE("GPL v2");
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 64659b110973..4cad490028ab 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -57,13 +57,7 @@
+ * we need a lock that will allow us to sleep. This lock is a
+ * mutex (ep->mtx). It is acquired during the event transfer loop,
+ * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
+- * Then we also need a global mutex to serialize eventpoll_release_file()
+- * and ep_free().
+- * This mutex is acquired by ep_free() during the epoll file
+- * cleanup path and it is also acquired by eventpoll_release_file()
+- * if a file has been pushed inside an epoll set and it is then
+- * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+- * It is also acquired when inserting an epoll fd onto another epoll
++ * The epmutex is acquired when inserting an epoll fd onto another epoll
+ * fd. We do this so that we walk the epoll tree and ensure that this
+ * insertion does not create a cycle of epoll file descriptors, which
+ * could lead to deadlock. We need a global mutex to prevent two
+@@ -153,6 +147,13 @@ struct epitem {
+ /* The file descriptor information this item refers to */
+ struct epoll_filefd ffd;
+
++ /*
++ * Protected by file->f_lock, true for to-be-released epitem already
++ * removed from the "struct file" items list; together with
++ * eventpoll->refcount orchestrates "struct eventpoll" disposal
++ */
++ bool dying;
++
+ /* List containing poll wait queues */
+ struct eppoll_entry *pwqlist;
+
+@@ -217,6 +218,12 @@ struct eventpoll {
+ u64 gen;
+ struct hlist_head refs;
+
++ /*
++ * usage count, used together with epitem->dying to
++ * orchestrate the disposal of this struct
++ */
++ refcount_t refcount;
++
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ /* used to track busy poll napi_id */
+ unsigned int napi_id;
+@@ -240,9 +247,7 @@ struct ep_pqueue {
+ /* Maximum number of epoll watched descriptors, per user */
+ static long max_user_watches __read_mostly;
+
+-/*
+- * This mutex is used to serialize ep_free() and eventpoll_release_file().
+- */
++/* Used for cycles detection */
+ static DEFINE_MUTEX(epmutex);
+
+ static u64 loop_check_gen = 0;
+@@ -557,8 +562,7 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+
+ /*
+ * This function unregisters poll callbacks from the associated file
+- * descriptor. Must be called with "mtx" held (or "epmutex" if called from
+- * ep_free).
++ * descriptor. Must be called with "mtx" held.
+ */
+ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
+ {
+@@ -681,11 +685,40 @@ static void epi_rcu_free(struct rcu_head *head)
+ kmem_cache_free(epi_cache, epi);
+ }
+
++static void ep_get(struct eventpoll *ep)
++{
++ refcount_inc(&ep->refcount);
++}
++
++/*
++ * Returns true if the event poll can be disposed
++ */
++static bool ep_refcount_dec_and_test(struct eventpoll *ep)
++{
++ if (!refcount_dec_and_test(&ep->refcount))
++ return false;
++
++ WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
++ return true;
++}
++
++static void ep_free(struct eventpoll *ep)
++{
++ mutex_destroy(&ep->mtx);
++ free_uid(ep->user);
++ wakeup_source_unregister(ep->ws);
++ kfree(ep);
++}
++
+ /*
+ * Removes a "struct epitem" from the eventpoll RB tree and deallocates
+ * all the associated resources. Must be called with "mtx" held.
++ * If the dying flag is set, do the removal only if force is true.
++ * This prevents ep_clear_and_put() from dropping all the ep references
++ * while running concurrently with eventpoll_release_file().
++ * Returns true if the eventpoll can be disposed.
+ */
+-static int ep_remove(struct eventpoll *ep, struct epitem *epi)
++static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
+ {
+ struct file *file = epi->ffd.file;
+ struct epitems_head *to_free;
+@@ -700,6 +733,11 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+
+ /* Remove the current item from the list of epoll hooks */
+ spin_lock(&file->f_lock);
++ if (epi->dying && !force) {
++ spin_unlock(&file->f_lock);
++ return false;
++ }
++
+ to_free = NULL;
+ head = file->f_ep;
+ if (head->first == &epi->fllink && !epi->fllink.next) {
+@@ -733,28 +771,28 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+ call_rcu(&epi->rcu, epi_rcu_free);
+
+ percpu_counter_dec(&ep->user->epoll_watches);
++ return ep_refcount_dec_and_test(ep);
++}
+
+- return 0;
++/*
++ * ep_remove variant for callers owning an additional reference to the ep
++ */
++static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
++{
++ WARN_ON_ONCE(__ep_remove(ep, epi, false));
+ }
+
+-static void ep_free(struct eventpoll *ep)
++static void ep_clear_and_put(struct eventpoll *ep)
+ {
+ struct rb_node *rbp;
+ struct epitem *epi;
++ bool dispose;
+
+ /* We need to release all tasks waiting for these file */
+ if (waitqueue_active(&ep->poll_wait))
+ ep_poll_safewake(ep, NULL, 0);
+
+- /*
+- * We need to lock this because we could be hit by
+- * eventpoll_release_file() while we're freeing the "struct eventpoll".
+- * We do not need to hold "ep->mtx" here because the epoll file
+- * is on the way to be removed and no one has references to it
+- * anymore. The only hit might come from eventpoll_release_file() but
+- * holding "epmutex" is sufficient here.
+- */
+- mutex_lock(&epmutex);
++ mutex_lock(&ep->mtx);
+
+ /*
+ * Walks through the whole tree by unregistering poll callbacks.
+@@ -768,25 +806,21 @@ static void ep_free(struct eventpoll *ep)
+
+ /*
+ * Walks through the whole tree by freeing each "struct epitem". At this
+- * point we are sure no poll callbacks will be lingering around, and also by
+- * holding "epmutex" we can be sure that no file cleanup code will hit
+- * us during this operation. So we can avoid the lock on "ep->lock".
+- * We do not need to lock ep->mtx, either, we only do it to prevent
+- * a lockdep warning.
++ * point we are sure no poll callbacks will be lingering around.
++ * Since we still own a reference to the eventpoll struct, the loop can't
++ * dispose it.
+ */
+- mutex_lock(&ep->mtx);
+ while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+- ep_remove(ep, epi);
++ ep_remove_safe(ep, epi);
+ cond_resched();
+ }
++
++ dispose = ep_refcount_dec_and_test(ep);
+ mutex_unlock(&ep->mtx);
+
+- mutex_unlock(&epmutex);
+- mutex_destroy(&ep->mtx);
+- free_uid(ep->user);
+- wakeup_source_unregister(ep->ws);
+- kfree(ep);
++ if (dispose)
++ ep_free(ep);
+ }
+
+ static int ep_eventpoll_release(struct inode *inode, struct file *file)
+@@ -794,7 +828,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
+ struct eventpoll *ep = file->private_data;
+
+ if (ep)
+- ep_free(ep);
++ ep_clear_and_put(ep);
+
+ return 0;
+ }
+@@ -906,33 +940,34 @@ void eventpoll_release_file(struct file *file)
+ {
+ struct eventpoll *ep;
+ struct epitem *epi;
+- struct hlist_node *next;
++ bool dispose;
+
+ /*
+- * We don't want to get "file->f_lock" because it is not
+- * necessary. It is not necessary because we're in the "struct file"
+- * cleanup path, and this means that no one is using this file anymore.
+- * So, for example, epoll_ctl() cannot hit here since if we reach this
+- * point, the file counter already went to zero and fget() would fail.
+- * The only hit might come from ep_free() but by holding the mutex
+- * will correctly serialize the operation. We do need to acquire
+- * "ep->mtx" after "epmutex" because ep_remove() requires it when called
+- * from anywhere but ep_free().
+- *
+- * Besides, ep_remove() acquires the lock, so we can't hold it here.
++ * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
++ * touching the epitems list before eventpoll_release_file() can access
++ * the ep->mtx.
+ */
+- mutex_lock(&epmutex);
+- if (unlikely(!file->f_ep)) {
+- mutex_unlock(&epmutex);
+- return;
+- }
+- hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) {
++again:
++ spin_lock(&file->f_lock);
++ if (file->f_ep && file->f_ep->first) {
++ epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
++ epi->dying = true;
++ spin_unlock(&file->f_lock);
++
++ /*
++ * ep access is safe as we still own a reference to the ep
++ * struct
++ */
+ ep = epi->ep;
+- mutex_lock_nested(&ep->mtx, 0);
+- ep_remove(ep, epi);
++ mutex_lock(&ep->mtx);
++ dispose = __ep_remove(ep, epi, true);
+ mutex_unlock(&ep->mtx);
++
++ if (dispose)
++ ep_free(ep);
++ goto again;
+ }
+- mutex_unlock(&epmutex);
++ spin_unlock(&file->f_lock);
+ }
+
+ static int ep_alloc(struct eventpoll **pep)
+@@ -955,6 +990,7 @@ static int ep_alloc(struct eventpoll **pep)
+ ep->rbr = RB_ROOT_CACHED;
+ ep->ovflist = EP_UNACTIVE_PTR;
+ ep->user = user;
++ refcount_set(&ep->refcount, 1);
+
+ *pep = ep;
+
+@@ -1223,10 +1259,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ */
+ list_del_init(&wait->entry);
+ /*
+- * ->whead != NULL protects us from the race with ep_free()
+- * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+- * held by the caller. Once we nullify it, nothing protects
+- * ep/epi or even wait.
++ * ->whead != NULL protects us from the race with
++ * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
++ * takes whead->lock held by the caller. Once we nullify it,
++ * nothing protects ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+@@ -1496,16 +1532,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ if (tep)
+ mutex_unlock(&tep->mtx);
+
++ /*
++ * ep_remove_safe() calls in the later error paths can't lead to
++ * ep_free() as the ep file itself still holds an ep reference.
++ */
++ ep_get(ep);
++
+ /* now check if we've created too many backpaths */
+ if (unlikely(full_check && reverse_path_check())) {
+- ep_remove(ep, epi);
++ ep_remove_safe(ep, epi);
+ return -EINVAL;
+ }
+
+ if (epi->event.events & EPOLLWAKEUP) {
+ error = ep_create_wakeup_source(epi);
+ if (error) {
+- ep_remove(ep, epi);
++ ep_remove_safe(ep, epi);
+ return error;
+ }
+ }
+@@ -1529,7 +1571,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ * high memory pressure.
+ */
+ if (unlikely(!epq.epi)) {
+- ep_remove(ep, epi);
++ ep_remove_safe(ep, epi);
+ return -ENOMEM;
+ }
+
+@@ -1760,7 +1802,7 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+ {
+ int ret = default_wake_function(wq_entry, mode, sync, key);
+
+- list_del_init(&wq_entry->entry);
++ list_del_init_careful(&wq_entry->entry);
+ return ret;
+ }
+
+@@ -2025,7 +2067,7 @@ static int do_epoll_create(int flags)
+ out_free_fd:
+ put_unused_fd(fd);
+ out_free_ep:
+- ep_free(ep);
++ ep_clear_and_put(ep);
+ return error;
+ }
+
+@@ -2167,10 +2209,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ error = -EEXIST;
+ break;
+ case EPOLL_CTL_DEL:
+- if (epi)
+- error = ep_remove(ep, epi);
+- else
++ if (epi) {
++ /*
++ * The eventpoll itself is still alive: the refcount
++ * can't go to zero here.
++ */
++ ep_remove_safe(ep, epi);
++ error = 0;
++ } else {
+ error = -ENOENT;
++ }
+ break;
+ case EPOLL_CTL_MOD:
+ if (epi) {
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 5e0e0ccd47aa..07463ad4a70a 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
+ mm = get_task_mm(task);
+ if (mm) {
+ seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
++ seq_printf(m, "zero_pages_sharing %lu\n", mm->ksm_zero_pages_sharing);
+ mmput(mm);
+ }
+
+diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
+index 77bc5522e61c..4226379a232d 100644
+--- a/include/linux/atomic/atomic-arch-fallback.h
++++ b/include/linux/atomic/atomic-arch-fallback.h
+@@ -1208,15 +1208,21 @@ arch_atomic_inc_and_test(atomic_t *v)
+ #define arch_atomic_inc_and_test arch_atomic_inc_and_test
+ #endif
+
++#ifndef arch_atomic_add_negative_relaxed
++#ifdef arch_atomic_add_negative
++#define arch_atomic_add_negative_acquire arch_atomic_add_negative
++#define arch_atomic_add_negative_release arch_atomic_add_negative
++#define arch_atomic_add_negative_relaxed arch_atomic_add_negative
++#endif /* arch_atomic_add_negative */
++
+ #ifndef arch_atomic_add_negative
+ /**
+- * arch_atomic_add_negative - add and test if negative
++ * arch_atomic_add_negative - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+- * Atomically adds @i to @v and returns true
+- * if the result is negative, or false when
+- * result is greater than or equal to zero.
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
+ */
+ static __always_inline bool
+ arch_atomic_add_negative(int i, atomic_t *v)
+@@ -1226,6 +1232,95 @@ arch_atomic_add_negative(int i, atomic_t *v)
+ #define arch_atomic_add_negative arch_atomic_add_negative
+ #endif
+
++#ifndef arch_atomic_add_negative_acquire
++/**
++ * arch_atomic_add_negative_acquire - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
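The add_negative() family exists mainly for reference counts that treat "result went negative" as the release condition; the rcuref code later in this patch is the motivating user. A userspace sketch of the same test built on a GCC/Clang atomic builtin rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* add_negative(i, v): atomically add i and report whether the result is < 0 */
static bool add_negative_relaxed(int i, int *v)
{
	return __atomic_add_fetch(v, i, __ATOMIC_RELAXED) < 0;
}

int main(void)
{
	int refcnt = 0;	/* rcuref convention: 0 means one reference held */

	if (add_negative_relaxed(-1, &refcnt))
		puts("last reference dropped, take the slow path");
	return 0;
}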
++ */
++static __always_inline bool
++arch_atomic_add_negative_acquire(int i, atomic_t *v)
++{
++ return arch_atomic_add_return_acquire(i, v) < 0;
++}
++#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
++#endif
++
++#ifndef arch_atomic_add_negative_release
++/**
++ * arch_atomic_add_negative_release - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
++ */
++static __always_inline bool
++arch_atomic_add_negative_release(int i, atomic_t *v)
++{
++ return arch_atomic_add_return_release(i, v) < 0;
++}
++#define arch_atomic_add_negative_release arch_atomic_add_negative_release
++#endif
++
++#ifndef arch_atomic_add_negative_relaxed
++/**
++ * arch_atomic_add_negative_relaxed - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
++ */
++static __always_inline bool
++arch_atomic_add_negative_relaxed(int i, atomic_t *v)
++{
++ return arch_atomic_add_return_relaxed(i, v) < 0;
++}
++#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed
++#endif
++
++#else /* arch_atomic_add_negative_relaxed */
++
++#ifndef arch_atomic_add_negative_acquire
++static __always_inline bool
++arch_atomic_add_negative_acquire(int i, atomic_t *v)
++{
++ bool ret = arch_atomic_add_negative_relaxed(i, v);
++ __atomic_acquire_fence();
++ return ret;
++}
++#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
++#endif
++
++#ifndef arch_atomic_add_negative_release
++static __always_inline bool
++arch_atomic_add_negative_release(int i, atomic_t *v)
++{
++ __atomic_release_fence();
++ return arch_atomic_add_negative_relaxed(i, v);
++}
++#define arch_atomic_add_negative_release arch_atomic_add_negative_release
++#endif
++
++#ifndef arch_atomic_add_negative
++static __always_inline bool
++arch_atomic_add_negative(int i, atomic_t *v)
++{
++ bool ret;
++ __atomic_pre_full_fence();
++ ret = arch_atomic_add_negative_relaxed(i, v);
++ __atomic_post_full_fence();
++ return ret;
++}
++#define arch_atomic_add_negative arch_atomic_add_negative
++#endif
++
++#endif /* arch_atomic_add_negative_relaxed */
++
+ #ifndef arch_atomic_fetch_add_unless
+ /**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+@@ -2329,15 +2424,21 @@ arch_atomic64_inc_and_test(atomic64_t *v)
+ #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+ #endif
+
++#ifndef arch_atomic64_add_negative_relaxed
++#ifdef arch_atomic64_add_negative
++#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative
++#define arch_atomic64_add_negative_release arch_atomic64_add_negative
++#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative
++#endif /* arch_atomic64_add_negative */
++
+ #ifndef arch_atomic64_add_negative
+ /**
+- * arch_atomic64_add_negative - add and test if negative
++ * arch_atomic64_add_negative - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+- * Atomically adds @i to @v and returns true
+- * if the result is negative, or false when
+- * result is greater than or equal to zero.
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
+ */
+ static __always_inline bool
+ arch_atomic64_add_negative(s64 i, atomic64_t *v)
+@@ -2347,6 +2448,95 @@ arch_atomic64_add_negative(s64 i, atomic64_t *v)
+ #define arch_atomic64_add_negative arch_atomic64_add_negative
+ #endif
+
++#ifndef arch_atomic64_add_negative_acquire
++/**
++ * arch_atomic64_add_negative_acquire - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
++ */
++static __always_inline bool
++arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
++{
++ return arch_atomic64_add_return_acquire(i, v) < 0;
++}
++#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
++#endif
++
++#ifndef arch_atomic64_add_negative_release
++/**
++ * arch_atomic64_add_negative_release - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
++ */
++static __always_inline bool
++arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
++{
++ return arch_atomic64_add_return_release(i, v) < 0;
++}
++#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
++#endif
++
++#ifndef arch_atomic64_add_negative_relaxed
++/**
++ * arch_atomic64_add_negative_relaxed - Add and test if negative
++ * @i: integer value to add
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically adds @i to @v and returns true if the result is negative,
++ * or false when the result is greater than or equal to zero.
++ */
++static __always_inline bool
++arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
++{
++ return arch_atomic64_add_return_relaxed(i, v) < 0;
++}
++#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed
++#endif
++
++#else /* arch_atomic64_add_negative_relaxed */
++
++#ifndef arch_atomic64_add_negative_acquire
++static __always_inline bool
++arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
++{
++ bool ret = arch_atomic64_add_negative_relaxed(i, v);
++ __atomic_acquire_fence();
++ return ret;
++}
++#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
++#endif
++
++#ifndef arch_atomic64_add_negative_release
++static __always_inline bool
++arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
++{
++ __atomic_release_fence();
++ return arch_atomic64_add_negative_relaxed(i, v);
++}
++#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
++#endif
++
++#ifndef arch_atomic64_add_negative
++static __always_inline bool
++arch_atomic64_add_negative(s64 i, atomic64_t *v)
++{
++ bool ret;
++ __atomic_pre_full_fence();
++ ret = arch_atomic64_add_negative_relaxed(i, v);
++ __atomic_post_full_fence();
++ return ret;
++}
++#define arch_atomic64_add_negative arch_atomic64_add_negative
++#endif
++
++#endif /* arch_atomic64_add_negative_relaxed */
++
+ #ifndef arch_atomic64_fetch_add_unless
+ /**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+@@ -2456,4 +2646,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
+ #endif
+
+ #endif /* _LINUX_ATOMIC_FALLBACK_H */
+-// b5e87bdd5ede61470c29f7a7e4de781af3770f09
++// 00071fffa021cec66f6290d706d69c91df87bade
+diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
+index 7a139ec030b0..0496816738ca 100644
+--- a/include/linux/atomic/atomic-instrumented.h
++++ b/include/linux/atomic/atomic-instrumented.h
+@@ -592,6 +592,28 @@ atomic_add_negative(int i, atomic_t *v)
+ return arch_atomic_add_negative(i, v);
+ }
+
++static __always_inline bool
++atomic_add_negative_acquire(int i, atomic_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_add_negative_acquire(i, v);
++}
++
++static __always_inline bool
++atomic_add_negative_release(int i, atomic_t *v)
++{
++ kcsan_release();
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_add_negative_release(i, v);
++}
++
++static __always_inline bool
++atomic_add_negative_relaxed(int i, atomic_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_add_negative_relaxed(i, v);
++}
++
+ static __always_inline int
+ atomic_fetch_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -1211,6 +1233,28 @@ atomic64_add_negative(s64 i, atomic64_t *v)
+ return arch_atomic64_add_negative(i, v);
+ }
+
++static __always_inline bool
++atomic64_add_negative_acquire(s64 i, atomic64_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic64_add_negative_acquire(i, v);
++}
++
++static __always_inline bool
++atomic64_add_negative_release(s64 i, atomic64_t *v)
++{
++ kcsan_release();
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic64_add_negative_release(i, v);
++}
++
++static __always_inline bool
++atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic64_add_negative_relaxed(i, v);
++}
++
+ static __always_inline s64
+ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+ {
+@@ -1830,6 +1874,28 @@ atomic_long_add_negative(long i, atomic_long_t *v)
+ return arch_atomic_long_add_negative(i, v);
+ }
+
++static __always_inline bool
++atomic_long_add_negative_acquire(long i, atomic_long_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_long_add_negative_acquire(i, v);
++}
++
++static __always_inline bool
++atomic_long_add_negative_release(long i, atomic_long_t *v)
++{
++ kcsan_release();
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_long_add_negative_release(i, v);
++}
++
++static __always_inline bool
++atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
++{
++ instrument_atomic_read_write(v, sizeof(*v));
++ return arch_atomic_long_add_negative_relaxed(i, v);
++}
++
+ static __always_inline long
+ atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+ {
+@@ -2083,4 +2149,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
+ })
+
+ #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+-// 764f741eb77a7ad565dc8d99ce2837d5542e8aee
++// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a
+diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
+index 800b8c35992d..2fc51ba66beb 100644
+--- a/include/linux/atomic/atomic-long.h
++++ b/include/linux/atomic/atomic-long.h
+@@ -479,6 +479,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v)
+ return arch_atomic64_add_negative(i, v);
+ }
+
++static __always_inline bool
++arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
++{
++ return arch_atomic64_add_negative_acquire(i, v);
++}
++
++static __always_inline bool
++arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
++{
++ return arch_atomic64_add_negative_release(i, v);
++}
++
++static __always_inline bool
++arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
++{
++ return arch_atomic64_add_negative_relaxed(i, v);
++}
++
+ static __always_inline long
+ arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+ {
+@@ -973,6 +991,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v)
+ return arch_atomic_add_negative(i, v);
+ }
+
++static __always_inline bool
++arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
++{
++ return arch_atomic_add_negative_acquire(i, v);
++}
++
++static __always_inline bool
++arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
++{
++ return arch_atomic_add_negative_release(i, v);
++}
++
++static __always_inline bool
++arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
++{
++ return arch_atomic_add_negative_relaxed(i, v);
++}
++
+ static __always_inline long
+ arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+ {
+@@ -1011,4 +1047,4 @@ arch_atomic_long_dec_if_positive(atomic_long_t *v)
+
+ #endif /* CONFIG_64BIT */
+ #endif /* _LINUX_ATOMIC_LONG_H */
+-// e8f0e08ff072b74d180eabe2ad001282b38c2c88
++// a194c07d7d2f4b0e178d3c118c919775d5d65f50
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index a57e6ae78e65..22b2ac82bffd 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -740,7 +740,7 @@ struct mm_struct {
+ #ifdef CONFIG_KSM
+ /*
+ * Represent how many pages of this process are involved in KSM
+- * merging.
++ * merging (not including ksm_zero_pages_sharing).
+ */
+ unsigned long ksm_merging_pages;
+ /*
+@@ -748,6 +748,11 @@ struct mm_struct {
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
++ /*
++ * Represent how many empty pages are merged with kernel zero
++ * pages when enabling KSM use_zero_pages.
++ */
++ unsigned long ksm_zero_pages_sharing;
+ #endif
+ #ifdef CONFIG_LRU_GEN
+ struct {
+diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
+index 5f1ae07d724b..97cda629c9e9 100644
+--- a/include/linux/pageblock-flags.h
++++ b/include/linux/pageblock-flags.h
+@@ -48,7 +48,7 @@ extern unsigned int pageblock_order;
+ #else /* CONFIG_HUGETLB_PAGE */
+
+ /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
+-#define pageblock_order (MAX_ORDER-1)
++#define pageblock_order PAGE_ALLOC_COSTLY_ORDER
+
+ #endif /* CONFIG_HUGETLB_PAGE */
+
+diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
+new file mode 100644
+index 000000000000..2c8bfd0f1b6b
+--- /dev/null
++++ b/include/linux/rcuref.h
+@@ -0,0 +1,155 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++#ifndef _LINUX_RCUREF_H
++#define _LINUX_RCUREF_H
++
++#include
++#include
++#include
++#include
++#include
++#include
++
++#define RCUREF_ONEREF 0x00000000U
++#define RCUREF_MAXREF 0x7FFFFFFFU
++#define RCUREF_SATURATED 0xA0000000U
++#define RCUREF_RELEASED 0xC0000000U
++#define RCUREF_DEAD 0xE0000000U
++#define RCUREF_NOREF 0xFFFFFFFFU
++
++/**
++ * rcuref_init - Initialize a rcuref reference count with the given reference count
++ * @ref: Pointer to the reference count
++ * @cnt: The initial reference count typically '1'
++ */
++static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
++{
++ atomic_set(&ref->refcnt, cnt - 1);
++}
++
++/**
++ * rcuref_read - Read the number of held reference counts of a rcuref
++ * @ref: Pointer to the reference count
++ *
++ * Return: The number of held references (0 ... N)
++ */
++static inline unsigned int rcuref_read(rcuref_t *ref)
++{
++ unsigned int c = atomic_read(&ref->refcnt);
++
++ /* Return 0 if within the DEAD zone. */
++ return c >= RCUREF_RELEASED ? 0 : c + 1;
0 : c + 1; ++} ++ ++extern __must_check bool rcuref_get_slowpath(rcuref_t *ref); ++ ++/** ++ * rcuref_get - Acquire one reference on a rcuref reference count ++ * @ref: Pointer to the reference count ++ * ++ * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF. ++ * ++ * Provides no memory ordering, it is assumed the caller has guaranteed the ++ * object memory to be stable (RCU, etc.). It does provide a control dependency ++ * and thereby orders future stores. See documentation in lib/rcuref.c ++ * ++ * Return: ++ * False if the attempt to acquire a reference failed. This happens ++ * when the last reference has been put already ++ * ++ * True if a reference was successfully acquired ++ */ ++static inline __must_check bool rcuref_get(rcuref_t *ref) ++{ ++ /* ++ * Unconditionally increase the reference count. The saturation and ++ * dead zones provide enough tolerance for this. ++ */ ++ if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt))) ++ return true; ++ ++ /* Handle the cases inside the saturation and dead zones */ ++ return rcuref_get_slowpath(ref); ++} ++ ++extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); ++ ++/* ++ * Internal helper. Do not invoke directly. ++ */ ++static __always_inline __must_check bool __rcuref_put(rcuref_t *ref) ++{ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(), ++ "suspicious rcuref_put_rcusafe() usage"); ++ /* ++ * Unconditionally decrease the reference count. The saturation and ++ * dead zones provide enough tolerance for this. ++ */ ++ if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) ++ return false; ++ ++ /* ++ * Handle the last reference drop and cases inside the saturation ++ * and dead zones. ++ */ ++ return rcuref_put_slowpath(ref); ++} ++ ++/** ++ * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe ++ * @ref: Pointer to the reference count ++ * ++ * Provides release memory ordering, such that prior loads and stores are done ++ * before, and provides an acquire ordering on success such that free() ++ * must come after. ++ * ++ * Can be invoked from contexts, which guarantee that no grace period can ++ * happen which would free the object concurrently if the decrement drops ++ * the last reference and the slowpath races against a concurrent get() and ++ * put() pair. rcu_read_lock()'ed and atomic contexts qualify. ++ * ++ * Return: ++ * True if this was the last reference with no future references ++ * possible. This signals the caller that it can safely release the ++ * object which is protected by the reference counter. ++ * ++ * False if there are still active references or the put() raced ++ * with a concurrent get()/put() pair. Caller is not allowed to ++ * release the protected object. ++ */ ++static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref) ++{ ++ return __rcuref_put(ref); ++} ++ ++/** ++ * rcuref_put -- Release one reference for a rcuref reference count ++ * @ref: Pointer to the reference count ++ * ++ * Can be invoked from any context. ++ * ++ * Provides release memory ordering, such that prior loads and stores are done ++ * before, and provides an acquire ordering on success such that free() ++ * must come after. ++ * ++ * Return: ++ * ++ * True if this was the last reference with no future references ++ * possible. This signals the caller that it can safely schedule the ++ * object, which is protected by the reference counter, for ++ * deconstruction. 
++ * ++ * False if there are still active references or the put() raced ++ * with a concurrent get()/put() pair. Caller is not allowed to ++ * deconstruct the protected object. ++ */ ++static inline __must_check bool rcuref_put(rcuref_t *ref) ++{ ++ bool released; ++ ++ preempt_disable(); ++ released = __rcuref_put(ref); ++ preempt_enable(); ++ return released; ++} ++ ++#endif +diff --git a/include/linux/types.h b/include/linux/types.h +index ea8cf60a8a79..688fb943556a 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -175,6 +175,12 @@ typedef struct { + } atomic64_t; + #endif + ++typedef struct { ++ atomic_t refcnt; ++} rcuref_t; ++ ++#define RCUREF_INIT(i) { .refcnt = ATOMIC_INIT(i - 1) } ++ + struct list_head { + struct list_head *next, *prev; + }; +diff --git a/include/net/dst.h b/include/net/dst.h +index d67fda89cd0f..78884429deed 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -61,23 +62,36 @@ struct dst_entry { + unsigned short trailer_len; /* space to reserve at tail */ + + /* +- * __refcnt wants to be on a different cache line from ++ * __rcuref wants to be on a different cache line from + * input/output/ops or performance tanks badly + */ + #ifdef CONFIG_64BIT +- atomic_t __refcnt; /* 64-bit offset 64 */ ++ rcuref_t __rcuref; /* 64-bit offset 64 */ + #endif + int __use; + unsigned long lastuse; +- struct lwtunnel_state *lwtstate; + struct rcu_head rcu_head; + short error; + short __pad; + __u32 tclassid; + #ifndef CONFIG_64BIT +- atomic_t __refcnt; /* 32-bit offset 64 */ ++ struct lwtunnel_state *lwtstate; ++ rcuref_t __rcuref; /* 32-bit offset 64 */ + #endif + netdevice_tracker dev_tracker; ++ ++ /* ++ * Used by rtable and rt6_info. Moves lwtstate into the next cache ++ * line on 64bit so that lwtstate does not cause false sharing with ++ * __rcuref under contention of __rcuref. This also puts the ++ * frequently accessed members of rtable and rt6_info out of the ++ * __rcuref cache line. 
++ */ ++ struct list_head rt_uncached; ++ struct uncached_list *rt_uncached_list; ++#ifdef CONFIG_64BIT ++ struct lwtunnel_state *lwtstate; ++#endif + }; + + struct dst_metrics { +@@ -225,10 +239,10 @@ static inline void dst_hold(struct dst_entry *dst) + { + /* + * If your kernel compilation stops here, please check +- * the placement of __refcnt in struct dst_entry ++ * the placement of __rcuref in struct dst_entry + */ +- BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); +- WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0); ++ BUILD_BUG_ON(offsetof(struct dst_entry, __rcuref) & 63); ++ WARN_ON(!rcuref_get(&dst->__rcuref)); + } + + static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) +@@ -292,7 +306,7 @@ static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb + */ + static inline bool dst_hold_safe(struct dst_entry *dst) + { +- return atomic_inc_not_zero(&dst->__refcnt); ++ return rcuref_get(&dst->__rcuref); + } + + /** +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h +index 6268963d9599..79570cb4ea9c 100644 +--- a/include/net/ip6_fib.h ++++ b/include/net/ip6_fib.h +@@ -217,9 +217,6 @@ struct rt6_info { + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + +- struct list_head rt6i_uncached; +- struct uncached_list *rt6i_uncached_list; +- + /* more non-fragment space at head required */ + unsigned short rt6i_nfheader_len; + }; +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h +index 81ee387a1fc4..3556595ce59a 100644 +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -100,7 +100,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net, + static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags) + { + if (!(flags & RT6_LOOKUP_F_DST_NOREF) || +- !list_empty(&rt->rt6i_uncached)) ++ !list_empty(&rt->dst.rt_uncached)) + ip6_rt_put(rt); + } + +diff --git a/include/net/route.h b/include/net/route.h +index fe00b0a2e475..bcc367cf3aa2 100644 +--- a/include/net/route.h ++++ b/include/net/route.h +@@ -78,9 +78,6 @@ struct rtable { + /* Miscellaneous cached information */ + u32 rt_mtu_locked:1, + rt_pmtu:31; +- +- struct list_head rt_uncached; +- struct uncached_list *rt_uncached_list; + }; + + static inline bool rt_is_input_route(const struct rtable *rt) +diff --git a/include/net/sock.h b/include/net/sock.h +index 573f2bf7e0de..5edf0038867c 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -2131,7 +2131,7 @@ sk_dst_get(struct sock *sk) + + rcu_read_lock(); + dst = rcu_dereference(sk->sk_dst_cache); +- if (dst && !atomic_inc_not_zero(&dst->__refcnt)) ++ if (dst && !rcuref_get(&dst->__rcuref)) + dst = NULL; + rcu_read_unlock(); + return dst; +diff --git a/kernel/fork.c b/kernel/fork.c +index 49c173e367d2..349945168239 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1178,6 +1178,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, + fail_pcpu: + while (i > 0) + percpu_counter_destroy(&mm->rss_stat[--i]); ++ destroy_context(mm); + fail_nocontext: + mm_free_pgd(mm); + fail_nopgd: +diff --git a/kernel/kheaders.c b/kernel/kheaders.c +index 8f69772af77b..42163c9e94e5 100644 +--- a/kernel/kheaders.c ++++ b/kernel/kheaders.c +@@ -26,15 +26,15 @@ asm ( + " .popsection \n" + ); + +-extern char kernel_headers_data; +-extern char kernel_headers_data_end; ++extern char kernel_headers_data[]; ++extern char kernel_headers_data_end[]; + + static ssize_t + ikheaders_read(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, 
loff_t off, size_t len)
+ {
+-	memcpy(buf, &kernel_headers_data + off, len);
++	memcpy(buf, &kernel_headers_data[off], len);
+ 	return len;
+ }
+ 
+@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
+ 
+ static int __init ikheaders_init(void)
+ {
+-	kheaders_attr.size = (&kernel_headers_data_end -
+-			      &kernel_headers_data);
++	kheaders_attr.size = (kernel_headers_data_end -
++			      kernel_headers_data);
+ 	return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
+ }
+ 
+diff --git a/kernel/padata.c b/kernel/padata.c
+index e007b8a4b738..7c80301ab084 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -45,7 +45,7 @@ struct padata_mt_job_state {
+ };
+ 
+ static void padata_free_pd(struct parallel_data *pd);
+-static void __init padata_mt_helper(struct work_struct *work);
++static void padata_mt_helper(struct work_struct *work);
+ 
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+@@ -438,7 +438,7 @@ static int padata_setup_cpumasks(struct padata_instance *pinst)
+ 	return err;
+ }
+ 
+-static void __init padata_mt_helper(struct work_struct *w)
++static void padata_mt_helper(struct work_struct *w)
+ {
+ 	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
+ 	struct padata_mt_job_state *ps = pw->pw_data;
+diff --git a/lib/Makefile b/lib/Makefile
+index baf2821f7a00..31a3a257fd49 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -47,7 +47,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
+ 	 list_sort.o uuid.o iov_iter.o clz_ctz.o \
+ 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
+ 	 percpu-refcount.o rhashtable.o base64.o \
+-	 once.o refcount.o usercopy.o errseq.o bucket_locks.o \
++	 once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
+ 	 generic-radix-tree.o
+ obj-$(CONFIG_STRING_SELFTEST) += test_string.o
+ obj-y += string_helpers.o
+diff --git a/lib/rcuref.c b/lib/rcuref.c
+new file mode 100644
+index 000000000000..5ec00a4a64d1
+--- /dev/null
++++ b/lib/rcuref.c
+@@ -0,0 +1,281 @@
++// SPDX-License-Identifier: GPL-2.0-only
++
++/*
++ * rcuref - A scalable reference count implementation for RCU managed objects
++ *
++ * rcuref is provided to replace open coded reference count implementations
++ * based on atomic_t. It protects explicitly RCU managed objects which can
++ * be visible even after the last reference has been dropped and the object
++ * is heading towards destruction.
++ *
++ * A common usage pattern is:
++ *
++ *	get()
++ *		rcu_read_lock();
++ *		p = get_ptr();
++ *		if (p && !atomic_inc_not_zero(&p->refcnt))
++ *			p = NULL;
++ *		rcu_read_unlock();
++ *		return p;
++ *
++ *	put()
++ *		if (!atomic_dec_return(&p->refcnt)) {
++ *			remove_ptr(p);
++ *			kfree_rcu(p, rcu);
++ *		}
++ *
++ * atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has
++ * O(N^2) behaviour under contention with N concurrent operations.
++ *
++ * rcuref uses atomic_add_negative_relaxed() for the fast path, which scales
++ * better under contention.
++ *
++ * Why not refcount?
++ * =================
++ *
++ * In principle it should be possible to make refcount use the rcuref
++ * scheme, but the destruction race described below cannot be prevented
++ * unless the protected object is RCU managed.
++ *
++ * Theory of operation
++ * ===================
++ *
++ * rcuref uses an unsigned integer reference counter. 
As long as the
++ * counter value is greater than or equal to RCUREF_ONEREF and not larger
++ * than RCUREF_MAXREF the reference is alive:
++ *
++ *	 ONEREF   MAXREF   SATURATED   RELEASED   DEAD    NOREF
++ *	 0        0x7FFFFFFF 0x80000000  0xA0000000 0xBFFFFFFF  0xC0000000  0xE0000000  0xFFFFFFFF
++ *	 <---valid --------> <-------saturation zone-------> <-----dead zone----->
++ *
++ * The get() and put() operations do unconditional increments and
++ * decrements. The result is checked after the operation. This optimizes
++ * for the fast path.
++ *
++ * If the reference count is saturated or dead, then the increments and
++ * decrements are not harmful as the reference count still stays in the
++ * respective zones and is always set back to SATURATED resp. DEAD. The
++ * zones have room for 2^28 racing operations in each direction, which
++ * makes it practically impossible to escape the zones.
++ *
++ * Once the last reference is dropped the reference count becomes
++ * RCUREF_NOREF which forces rcuref_put() into the slowpath operation. The
++ * slowpath then tries to set the reference count from RCUREF_NOREF to
++ * RCUREF_DEAD via a cmpxchg(). This opens a small window where a
++ * concurrent rcuref_get() can acquire the reference count and bring it
++ * back to RCUREF_ONEREF or even drop the reference again and mark it DEAD.
++ *
++ * If the cmpxchg() succeeds then a concurrent rcuref_get() will result in
++ * DEAD + 1, which is inside the dead zone. If that happens the reference
++ * count is put back to DEAD.
++ *
++ * The actual race is possible due to the unconditional increment and
++ * decrements in rcuref_get() and rcuref_put():
++ *
++ *	T1				T2
++ *	get()				put()
++ *					if (atomic_add_negative(-1, &ref->refcnt))
++ *		succeeds->		atomic_cmpxchg(&ref->refcnt, NOREF, DEAD);
++ *
++ *	atomic_add_negative(1, &ref->refcnt);	<- Elevates refcount to DEAD + 1
++ *
++ * As the result of T1's add is negative, the get() goes into the slow path
++ * and observes refcnt being in the dead zone which makes the operation fail.
++ *
++ * Possible critical states:
++ *
++ *	Context		Counter	References	Operation
++ *	T1		0	1		init()
++ *	T2		1	2		get()
++ *	T1		0	1		put()
++ *	T2		-1	0		put() tries to mark dead
++ *	T1		0	1		get()
++ *	T2		0	1		put() mark dead fails
++ *	T1		-1	0		put() tries to mark dead
++ *	T1		DEAD	0		put() mark dead succeeds
++ *	T2		DEAD+1	0		get() fails and puts it back to DEAD
++ *
++ * Of course there are more complex scenarios, but the above illustrates
++ * the working principle. The rest is left to the imagination of the
++ * reader.
++ *
++ * Deconstruction race
++ * ===================
++ *
++ * The release operation must be protected by prohibiting a grace period in
++ * order to prevent a possible use after free:
++ *
++ *	T1				T2
++ *	put()				get()
++ *	// ref->refcnt = ONEREF
++ *	if (!atomic_add_negative(-1, &ref->refcnt))
++ *		return false;				<- Not taken
++ *
++ *	// ref->refcnt == NOREF
++ *	--> preemption
++ *					// Elevates ref->refcnt to ONEREF
++ *					if (!atomic_add_negative(1, &ref->refcnt))
++ *						return true;		<- taken
++ *
++ *					if (put(&p->ref)) { <-- Succeeds
++ *						remove_pointer(p);
++ *						kfree_rcu(p, rcu);
++ *					}
++ *
++ *		RCU grace period ends, object is freed
++ *
++ *	atomic_cmpxchg(&ref->refcnt, NOREF, DEAD);	<- UAF
++ *
++ * This is prevented by disabling preemption around the put() operation as
++ * that's in most kernel configurations cheaper than a rcu_read_lock() /
++ * rcu_read_unlock() pair and in many cases even a NOOP. 
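++ *
++ * For illustration, the caller-side shape of the protected put() is the
++ * following sketch (struct obj and its member names are hypothetical and
++ * only stand in for a real user):
++ *
++ *	if (rcuref_put(&obj->ref)) {
++ *		remove_pointer(obj);
++ *		kfree_rcu(obj, rcu);
++ *	}
++ *
++ * rcuref_put() performs the preempt_disable()/preempt_enable() pair
++ * internally, so the caller needs no extra annotation.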
In any case it
++ * prevents the grace period which keeps the object alive until all put()
++ * operations complete.
++ *
++ * Saturation protection
++ * =====================
++ *
++ * The reference count has a saturation limit RCUREF_MAXREF (INT_MAX).
++ * Once this is exceeded the reference count becomes stale by setting it
++ * to RCUREF_SATURATED, which will cause a memory leak, but it prevents
++ * wraparounds which obviously cause worse problems than a memory
++ * leak. When saturation is reached a warning is emitted.
++ *
++ * Race conditions
++ * ===============
++ *
++ * All reference count increment/decrement operations are unconditional and
++ * only verified after the fact. This optimizes for the good case and takes
++ * the occasional race vs. a dead or already saturated refcount into
++ * account. The saturation and dead zones are large enough to accommodate
++ * for that.
++ *
++ * Memory ordering
++ * ===============
++ *
++ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
++ * and provide only what is strictly required for refcounts.
++ *
++ * The increments are fully relaxed; these will not provide ordering. The
++ * rationale is that whatever is used to obtain the object to increase the
++ * reference count on will provide the ordering. For locked data
++ * structures, it's the lock acquire, for RCU/lockless data structures it's
++ * the dependent load.
++ *
++ * rcuref_get() provides a control dependency ordering future stores which
++ * ensures that the object is not modified when acquiring a reference
++ * fails.
++ *
++ * rcuref_put() provides release order, i.e. all prior loads and stores
++ * will be issued before. It also provides a control dependency ordering
++ * against the subsequent destruction of the object.
++ *
++ * If rcuref_put() successfully dropped the last reference and marked the
++ * object DEAD it also provides acquire ordering.
++ */
++
++#include <linux/export.h>
++#include <linux/rcuref.h>
++
++/**
++ * rcuref_get_slowpath - Slowpath of rcuref_get()
++ * @ref:	Pointer to the reference count
++ *
++ * Invoked when the reference count is outside of the valid zone.
++ *
++ * Return:
++ *	False if the reference count was already marked dead
++ *
++ *	True if the reference count is saturated, which prevents the
++ *	object from being deconstructed ever.
++ */
++bool rcuref_get_slowpath(rcuref_t *ref)
++{
++	unsigned int cnt = atomic_read(&ref->refcnt);
++
++	/*
++	 * If the reference count was already marked dead, undo the
++	 * increment so it stays in the middle of the dead zone and return
++	 * fail.
++	 */
++	if (cnt >= RCUREF_RELEASED) {
++		atomic_set(&ref->refcnt, RCUREF_DEAD);
++		return false;
++	}
++
++	/*
++	 * If it was saturated, warn and mark it so. In case the increment
++	 * was already on a saturated value restore the saturation
++	 * marker. This keeps it in the middle of the saturation zone and
++	 * prevents the reference count from overflowing. This leaks the
++	 * object memory, but prevents the obvious reference count overflow
++	 * damage.
++	 */
++	if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory"))
++		atomic_set(&ref->refcnt, RCUREF_SATURATED);
++	return true;
++}
++EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
++
++/**
++ * rcuref_put_slowpath - Slowpath of __rcuref_put()
++ * @ref:	Pointer to the reference count
++ *
++ * Invoked when the reference count is outside of the valid zone.
++ *
++ * Return:
++ *	True if this was the last reference with no future references
++ *	possible. 
This signals the caller that it can safely schedule the ++ * object, which is protected by the reference counter, for ++ * deconstruction. ++ * ++ * False if there are still active references or the put() raced ++ * with a concurrent get()/put() pair. Caller is not allowed to ++ * deconstruct the protected object. ++ */ ++bool rcuref_put_slowpath(rcuref_t *ref) ++{ ++ unsigned int cnt = atomic_read(&ref->refcnt); ++ ++ /* Did this drop the last reference? */ ++ if (likely(cnt == RCUREF_NOREF)) { ++ /* ++ * Carefully try to set the reference count to RCUREF_DEAD. ++ * ++ * This can fail if a concurrent get() operation has ++ * elevated it again or the corresponding put() even marked ++ * it dead already. Both are valid situations and do not ++ * require a retry. If this fails the caller is not ++ * allowed to deconstruct the object. ++ */ ++ if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF) ++ return false; ++ ++ /* ++ * The caller can safely schedule the object for ++ * deconstruction. Provide acquire ordering. ++ */ ++ smp_acquire__after_ctrl_dep(); ++ return true; ++ } ++ ++ /* ++ * If the reference count was already in the dead zone, then this ++ * put() operation is imbalanced. Warn, put the reference count back to ++ * DEAD and tell the caller to not deconstruct the object. ++ */ ++ if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) { ++ atomic_set(&ref->refcnt, RCUREF_DEAD); ++ return false; ++ } ++ ++ /* ++ * This is a put() operation on a saturated refcount. Restore the ++ * mean saturation value and tell the caller to not deconstruct the ++ * object. ++ */ ++ if (cnt > RCUREF_MAXREF) ++ atomic_set(&ref->refcnt, RCUREF_SATURATED); ++ return false; ++} ++EXPORT_SYMBOL_GPL(rcuref_put_slowpath); +diff --git a/mm/ksm.c b/mm/ksm.c +index 2b8d30068cbb..82029f1d454b 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -214,6 +214,7 @@ struct ksm_rmap_item { + #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ + #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ + #define STABLE_FLAG 0x200 /* is listed from the stable tree */ ++#define ZERO_PAGE_FLAG 0x400 /* is zero page placed by KSM */ + + /* The stable and unstable tree heads */ + static struct rb_root one_stable_tree[1] = { RB_ROOT }; +@@ -275,6 +276,9 @@ static unsigned int zero_checksum __read_mostly; + /* Whether to merge empty (zeroed) pages with actual zero pages */ + static bool ksm_use_zero_pages __read_mostly; + ++/* The number of zero pages placed by KSM use_zero_pages */ ++static unsigned long ksm_zero_pages_sharing; ++ + #ifdef CONFIG_NUMA + /* Zeroed when merging across nodes is not allowed */ + static unsigned int ksm_merge_across_nodes = 1; +@@ -420,6 +424,11 @@ static inline bool ksm_test_exit(struct mm_struct *mm) + return atomic_read(&mm->mm_users) == 0; + } + ++enum break_ksm_pmd_entry_return_flag { ++ HAVE_KSM_PAGE = 1, ++ HAVE_ZERO_PAGE ++}; ++ + static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, + struct mm_walk *walk) + { +@@ -427,6 +436,7 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex + spinlock_t *ptl; + pte_t *pte; + int ret; ++ bool is_zero_page = false; + + if (pmd_leaf(*pmd) || !pmd_present(*pmd)) + return 0; +@@ -434,6 +444,8 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex + pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (pte_present(*pte)) { + page = vm_normal_page(walk->vma, addr, *pte); ++ if (!page) ++ is_zero_page = 
is_zero_pfn(pte_pfn(*pte));
+ 	} else if (!pte_none(*pte)) {
+ 		swp_entry_t entry = pte_to_swp_entry(*pte);
+ 
+@@ -444,7 +456,14 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex
+ 		if (is_migration_entry(entry))
+ 			page = pfn_swap_entry_to_page(entry);
+ 	}
+-	ret = page && PageKsm(page);
++
++	if (page && PageKsm(page))
++		ret = HAVE_KSM_PAGE;
++	else if (is_zero_page)
++		ret = HAVE_ZERO_PAGE;
++	else
++		ret = 0;
++
+ 	pte_unmap_unlock(pte, ptl);
+ 	return ret;
+ }
+@@ -466,19 +485,22 @@ static const struct mm_walk_ops break_ksm_ops = {
+  * of the process that owns 'vma'.  We also do not want to enforce
+  * protection keys here anyway.
+  */
+-static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
++static int break_ksm(struct vm_area_struct *vma, unsigned long addr,
++		     bool unshare_zero_page)
+ {
+ 	vm_fault_t ret = 0;
+ 
+ 	do {
+-		int ksm_page;
++		int walk_result;
+ 
+ 		cond_resched();
+-		ksm_page = walk_page_range_vma(vma, addr, addr + 1,
++		walk_result = walk_page_range_vma(vma, addr, addr + 1,
+ 					       &break_ksm_ops, NULL);
+-		if (WARN_ON_ONCE(ksm_page < 0))
+-			return ksm_page;
+-		if (!ksm_page)
++		if (WARN_ON_ONCE(walk_result < 0))
++			return walk_result;
++		if (!walk_result)
++			return 0;
++		if (walk_result == HAVE_ZERO_PAGE && !unshare_zero_page)
+ 			return 0;
+ 		ret = handle_mm_fault(vma, addr,
+ 				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
+@@ -539,7 +561,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item)
+ 	mmap_read_lock(mm);
+ 	vma = find_mergeable_vma(mm, addr);
+ 	if (vma)
+-		break_ksm(vma, addr);
++		break_ksm(vma, addr, false);
+ 	mmap_read_unlock(mm);
+ }
+ 
+@@ -764,6 +786,33 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
+ 	return NULL;
+ }
+ 
++/*
++ * Clean the rmap_item's ZERO_PAGE_FLAG.
++ * This function is called when unsharing or writing to zero pages.
++ */
++static inline void clean_rmap_item_zero_flag(struct ksm_rmap_item *rmap_item)
++{
++	if (rmap_item->address & ZERO_PAGE_FLAG) {
++		ksm_zero_pages_sharing--;
++		rmap_item->mm->ksm_zero_pages_sharing--;
++		rmap_item->address &= PAGE_MASK;
++	}
++}
++
++/* Only called when rmap_item is going to be freed */
++static inline void unshare_zero_pages(struct ksm_rmap_item *rmap_item)
++{
++	struct vm_area_struct *vma;
++
++	if (rmap_item->address & ZERO_PAGE_FLAG) {
++		vma = vma_lookup(rmap_item->mm, rmap_item->address);
++		if (vma && !ksm_test_exit(rmap_item->mm))
++			break_ksm(vma, rmap_item->address, true);
++	}
++	/* Clear the flag last. */
++	clean_rmap_item_zero_flag(rmap_item);
++}
++
+ /*
+  * Removing rmap_item from stable or unstable tree.
+  * This function will clean the information from the stable/unstable tree. 
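clean_rmap_item_zero_flag() above keeps two counters in sync: the per-process
mm->ksm_zero_pages_sharing and the global ksm_zero_pages_sharing, which this
patch exposes as /sys/kernel/mm/ksm/zero_pages_sharing (see the sysfs
attribute added further below). A minimal userspace sketch for reading it -
only the sysfs path comes from this series, the rest is illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned long zero_pages;
		FILE *f = fopen("/sys/kernel/mm/ksm/zero_pages_sharing", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &zero_pages) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		/* One entry per empty page currently merged with the kernel zero page. */
		printf("ksm zero pages sharing: %lu\n", zero_pages);
		return 0;
	}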
+@@ -824,6 +873,7 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
+ 		struct ksm_rmap_item *rmap_item = *rmap_list;
+ 		*rmap_list = rmap_item->rmap_list;
+ 		remove_rmap_item_from_tree(rmap_item);
++		unshare_zero_pages(rmap_item);
+ 		free_rmap_item(rmap_item);
+ 	}
+ }
+@@ -853,7 +903,7 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
+ 		if (signal_pending(current))
+ 			err = -ERESTARTSYS;
+ 		else
+-			err = break_ksm(vma, addr);
++			err = break_ksm(vma, addr, false);
+ 	}
+ 	return err;
+ }
+@@ -2050,6 +2100,42 @@ static void stable_tree_append(struct ksm_rmap_item *rmap_item,
+ 	rmap_item->mm->ksm_merging_pages++;
+ }
+ 
++static int try_to_merge_with_kernel_zero_page(struct ksm_rmap_item *rmap_item,
++					      struct page *page)
++{
++	struct mm_struct *mm = rmap_item->mm;
++	int err = 0;
++
++	/*
++	 * It should not take ZERO_PAGE_FLAG because on one hand,
++	 * get_next_rmap_item() doesn't return zero pages' rmap_items.
++	 * On the other hand, even if a zero page was written as an
++	 * anonymous page, the rmap_item has been cleaned after
++	 * stable_tree_search().
++	 */
++	if (!WARN_ON_ONCE(rmap_item->address & ZERO_PAGE_FLAG)) {
++		struct vm_area_struct *vma;
++
++		mmap_read_lock(mm);
++		vma = find_mergeable_vma(mm, rmap_item->address);
++		if (vma) {
++			err = try_to_merge_one_page(vma, page,
++						    ZERO_PAGE(rmap_item->address));
++			if (!err) {
++				rmap_item->address |= ZERO_PAGE_FLAG;
++				ksm_zero_pages_sharing++;
++				rmap_item->mm->ksm_zero_pages_sharing++;
++			}
++		} else {
++			/* If the vma is out of date, we do not need to continue. */
++			err = 0;
++		}
++		mmap_read_unlock(mm);
++	}
++
++	return err;
++}
++
+ /*
+  * cmp_and_merge_page - first see if page can be merged into the stable tree;
+  * if not, compare checksum to previous and if it's the same, see if page can
+@@ -2061,7 +2147,6 @@ static void stable_tree_append(struct ksm_rmap_item *rmap_item,
+  */
+ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
+ {
+-	struct mm_struct *mm = rmap_item->mm;
+ 	struct ksm_rmap_item *tree_rmap_item;
+ 	struct page *tree_page = NULL;
+ 	struct ksm_stable_node *stable_node;
+@@ -2098,6 +2183,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
+ 	}
+ 
+ 	remove_rmap_item_from_tree(rmap_item);
++	clean_rmap_item_zero_flag(rmap_item);
+ 
+ 	if (kpage) {
+ 		if (PTR_ERR(kpage) == -EBUSY)
+@@ -2134,29 +2220,16 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
+ 	 * Same checksum as an empty page. We attempt to merge it with the
+ 	 * appropriate zero page if the user enabled this via sysfs.
+ 	 */
+-	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
+-		struct vm_area_struct *vma;
+-
+-		mmap_read_lock(mm);
+-		vma = find_mergeable_vma(mm, rmap_item->address);
+-		if (vma) {
+-			err = try_to_merge_one_page(vma, page,
+-					ZERO_PAGE(rmap_item->address));
+-		} else {
++	if (ksm_use_zero_pages) {
++		if (checksum == zero_checksum)
+ 			/*
+-			 * If the vma is out of date, we do not need to
+-			 * continue.
++			 * In case of failure, the page was not really empty, so we
++			 * need to continue. Otherwise we're done.
+-			 */
+-			err = 0;
+-		}
+-		mmap_read_unlock(mm);
+-		/*
+-		 * In case of failure, the page was not really empty, so we
+-		 * need to continue. Otherwise we're done. 
+-		 */
+-		if (!err)
+-			return;
++		if (!try_to_merge_with_kernel_zero_page(rmap_item, page))
++			return;
+ 	}
+ 
+ 	tree_rmap_item =
+ 		unstable_tree_search_insert(rmap_item, page, &tree_page);
+ 	if (tree_rmap_item) {
+@@ -2220,23 +2293,39 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
+ 	}
+ }
+ 
+-static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+-					  struct ksm_rmap_item **rmap_list,
+-					  unsigned long addr)
++static struct ksm_rmap_item *try_to_get_old_rmap_item(unsigned long addr,
++					 struct ksm_rmap_item **rmap_list)
+ {
+-	struct ksm_rmap_item *rmap_item;
+-
+ 	while (*rmap_list) {
+-		rmap_item = *rmap_list;
++		struct ksm_rmap_item *rmap_item = *rmap_list;
++
+ 		if ((rmap_item->address & PAGE_MASK) == addr)
+ 			return rmap_item;
+ 		if (rmap_item->address > addr)
+ 			break;
+ 		*rmap_list = rmap_item->rmap_list;
++		/*
++		 * If we end up here, the VMA is MADV_UNMERGEABLE or its page
++		 * is ineligible or discarded, e.g. MADV_DONTNEED.
++		 */
+ 		remove_rmap_item_from_tree(rmap_item);
++		unshare_zero_pages(rmap_item);
+ 		free_rmap_item(rmap_item);
+ 	}
+ 
++	return NULL;
++}
++
++static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
++					  struct ksm_rmap_item **rmap_list,
++					  unsigned long addr)
++{
++	struct ksm_rmap_item *rmap_item;
++
++	rmap_item = try_to_get_old_rmap_item(addr, rmap_list);
++	if (rmap_item)
++		return rmap_item;
++
+ 	rmap_item = alloc_rmap_item();
+ 	if (rmap_item) {
+ 		/* It has already been zeroed */
+@@ -2343,6 +2432,22 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
+ 		}
+ 		if (is_zone_device_page(*page))
+ 			goto next_page;
++		if (is_zero_pfn(page_to_pfn(*page))) {
++			/*
++			 * To monitor KSM zero pages which become non-anonymous,
++			 * we have to save each rmap_item of zero pages by
++			 * try_to_get_old_rmap_item() walking on
++			 * ksm_scan.rmap_list, otherwise their rmap_items will be
++			 * freed by the next turn of get_next_rmap_item(). The
++			 * function get_next_rmap_item() will free all "skipped"
++			 * rmap_items because it treats their areas as UNMERGEABLE.
++			 */
++			rmap_item = try_to_get_old_rmap_item(ksm_scan.address,
++							     ksm_scan.rmap_list);
++			if (rmap_item && (rmap_item->address & ZERO_PAGE_FLAG))
++				ksm_scan.rmap_list = &rmap_item->rmap_list;
++			goto next_page;
++		}
+ 		if (PageAnon(*page)) {
+ 			flush_anon_page(vma, *page, ksm_scan.address);
+ 			flush_dcache_page(*page);
+@@ -3139,6 +3244,13 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
+ }
+ KSM_ATTR_RO(pages_volatile);
+ 
++static ssize_t zero_pages_sharing_show(struct kobject *kobj,
++				       struct kobj_attribute *attr, char *buf)
++{
++	return sysfs_emit(buf, "%ld\n", ksm_zero_pages_sharing);
++}
++KSM_ATTR_RO(zero_pages_sharing);
++
+ static ssize_t stable_node_dups_show(struct kobject *kobj,
+ 				     struct kobj_attribute *attr, char *buf)
+ {
+@@ -3194,6 +3306,7 @@ static struct attribute *ksm_attrs[] = {
+ 	&pages_sharing_attr.attr,
+ 	&pages_unshared_attr.attr,
+ 	&pages_volatile_attr.attr,
++	&zero_pages_sharing_attr.attr,
+ 	&full_scans_attr.attr,
+ #ifdef CONFIG_NUMA
+ 	&merge_across_nodes_attr.attr,
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index a256a241fd1d..2068b594dc88 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -790,61 +790,50 @@ static int vma_replace_policy(struct vm_area_struct *vma,
+ 	return err;
+ }
+ 
+-/* Step 2: apply policy to a range and do splits. 
*/ +-static int mbind_range(struct mm_struct *mm, unsigned long start, +- unsigned long end, struct mempolicy *new_pol) ++/* Split or merge the VMA (if required) and apply the new policy */ ++static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, ++ struct vm_area_struct **prev, unsigned long start, ++ unsigned long end, struct mempolicy *new_pol) + { +- VMA_ITERATOR(vmi, mm, start); +- struct vm_area_struct *prev; +- struct vm_area_struct *vma; +- int err = 0; ++ struct vm_area_struct *merged; ++ unsigned long vmstart, vmend; + pgoff_t pgoff; ++ int err; + +- prev = vma_prev(&vmi); +- vma = vma_find(&vmi, end); +- if (WARN_ON(!vma)) ++ vmend = min(end, vma->vm_end); ++ if (start > vma->vm_start) { ++ *prev = vma; ++ vmstart = start; ++ } else { ++ vmstart = vma->vm_start; ++ } ++ ++ if (mpol_equal(vma_policy(vma), new_pol)) + return 0; + +- if (start > vma->vm_start) +- prev = vma; +- +- do { +- unsigned long vmstart = max(start, vma->vm_start); +- unsigned long vmend = min(end, vma->vm_end); +- +- if (mpol_equal(vma_policy(vma), new_pol)) +- goto next; +- +- pgoff = vma->vm_pgoff + +- ((vmstart - vma->vm_start) >> PAGE_SHIFT); +- prev = vma_merge(&vmi, mm, prev, vmstart, vmend, vma->vm_flags, +- vma->anon_vma, vma->vm_file, pgoff, +- new_pol, vma->vm_userfaultfd_ctx, +- anon_vma_name(vma)); +- if (prev) { +- vma = prev; +- goto replace; +- } +- if (vma->vm_start != vmstart) { +- err = split_vma(&vmi, vma, vmstart, 1); +- if (err) +- goto out; +- } +- if (vma->vm_end != vmend) { +- err = split_vma(&vmi, vma, vmend, 0); +- if (err) +- goto out; +- } +-replace: +- err = vma_replace_policy(vma, new_pol); ++ pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); ++ merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags, ++ vma->anon_vma, vma->vm_file, pgoff, new_pol, ++ vma->vm_userfaultfd_ctx, anon_vma_name(vma)); ++ if (merged) { ++ *prev = merged; ++ return vma_replace_policy(merged, new_pol); ++ } ++ ++ if (vma->vm_start != vmstart) { ++ err = split_vma(vmi, vma, vmstart, 1); + if (err) +- goto out; +-next: +- prev = vma; +- } for_each_vma_range(vmi, vma, end); ++ return err; ++ } + +-out: +- return err; ++ if (vma->vm_end != vmend) { ++ err = split_vma(vmi, vma, vmend, 0); ++ if (err) ++ return err; ++ } ++ ++ *prev = vma; ++ return vma_replace_policy(vma, new_pol); + } + + /* Set the process memory policy */ +@@ -1259,6 +1248,8 @@ static long do_mbind(unsigned long start, unsigned long len, + nodemask_t *nmask, unsigned long flags) + { + struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma, *prev; ++ struct vma_iterator vmi; + struct mempolicy *new; + unsigned long end; + int err; +@@ -1328,7 +1319,13 @@ static long do_mbind(unsigned long start, unsigned long len, + goto up_out; + } + +- err = mbind_range(mm, start, end, new); ++ vma_iter_init(&vmi, mm, start); ++ prev = vma_prev(&vmi); ++ for_each_vma_range(vmi, vma, end) { ++ err = mbind_range(&vmi, vma, &prev, start, end, new); ++ if (err) ++ break; ++ } + + if (!err) { + int nr_failed = 0; +@@ -1489,10 +1486,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le + unsigned long, home_node, unsigned long, flags) + { + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct mempolicy *new, *old; +- unsigned long vmstart; +- unsigned long vmend; + unsigned long end; + int err = -ENOENT; + VMA_ITERATOR(vmi, mm, start); +@@ -1521,6 +1516,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, 
unsigned long, start, unsigned long, le + if (end == start) + return 0; + mmap_write_lock(mm); ++ prev = vma_prev(&vmi); + for_each_vma_range(vmi, vma, end) { + /* + * If any vma in the range got policy other than MPOL_BIND +@@ -1541,9 +1537,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le + } + + new->home_node = home_node; +- vmstart = max(start, vma->vm_start); +- vmend = min(end, vma->vm_end); +- err = mbind_range(mm, vmstart, vmend, new); ++ err = mbind_range(&vmi, vma, &prev, start, end, new); + mpol_put(new); + if (err) + break; +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 13e84d8c0797..36351a00c0e8 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -838,7 +838,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, + } + tlb_finish_mmu(&tlb); + +- if (vma_iter_end(&vmi) < end) ++ if (!error && vma_iter_end(&vmi) < end) + error = -ENOMEM; + + out: +diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c +index 8c69f0c95a8e..98aea5485aae 100644 +--- a/net/bridge/br_nf_core.c ++++ b/net/bridge/br_nf_core.c +@@ -73,7 +73,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) + { + struct rtable *rt = &br->fake_rtable; + +- atomic_set(&rt->dst.__refcnt, 1); ++ rcuref_init(&rt->dst.__rcuref, 1); + rt->dst.dev = br->dev; + dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; +diff --git a/net/core/dst.c b/net/core/dst.c +index 31c08a3386d3..3247e84045ca 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -66,7 +66,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, + dst->tclassid = 0; + #endif + dst->lwtstate = NULL; +- atomic_set(&dst->__refcnt, initial_ref); ++ rcuref_init(&dst->__rcuref, initial_ref); + dst->__use = 0; + dst->lastuse = jiffies; + dst->flags = flags; +@@ -162,31 +162,15 @@ EXPORT_SYMBOL(dst_dev_put); + + void dst_release(struct dst_entry *dst) + { +- if (dst) { +- int newrefcnt; +- +- newrefcnt = atomic_dec_return(&dst->__refcnt); +- if (WARN_ONCE(newrefcnt < 0, "dst_release underflow")) +- net_warn_ratelimited("%s: dst:%p refcnt:%d\n", +- __func__, dst, newrefcnt); +- if (!newrefcnt) +- call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); +- } ++ if (dst && rcuref_put(&dst->__rcuref)) ++ call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); + } + EXPORT_SYMBOL(dst_release); + + void dst_release_immediate(struct dst_entry *dst) + { +- if (dst) { +- int newrefcnt; +- +- newrefcnt = atomic_dec_return(&dst->__refcnt); +- if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow")) +- net_warn_ratelimited("%s: dst:%p refcnt:%d\n", +- __func__, dst, newrefcnt); +- if (!newrefcnt) +- dst_destroy(dst); +- } ++ if (dst && rcuref_put(&dst->__rcuref)) ++ dst_destroy(dst); + } + EXPORT_SYMBOL(dst_release_immediate); + +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 6e44e92ebdf5..a6380dd47e7f 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -840,7 +840,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, + if (dst) { + ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse); + ci.rta_used = dst->__use; +- ci.rta_clntref = atomic_read(&dst->__refcnt); ++ ci.rta_clntref = rcuref_read(&dst->__rcuref); + } + if (expires) { + unsigned long clock; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index de6e3515ab4f..0f0cb629e0ad 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1508,20 +1508,20 @@ void rt_add_uncached_list(struct rtable *rt) + { + struct uncached_list *ul = 
raw_cpu_ptr(&rt_uncached_list); + +- rt->rt_uncached_list = ul; ++ rt->dst.rt_uncached_list = ul; + + spin_lock_bh(&ul->lock); +- list_add_tail(&rt->rt_uncached, &ul->head); ++ list_add_tail(&rt->dst.rt_uncached, &ul->head); + spin_unlock_bh(&ul->lock); + } + + void rt_del_uncached_list(struct rtable *rt) + { +- if (!list_empty(&rt->rt_uncached)) { +- struct uncached_list *ul = rt->rt_uncached_list; ++ if (!list_empty(&rt->dst.rt_uncached)) { ++ struct uncached_list *ul = rt->dst.rt_uncached_list; + + spin_lock_bh(&ul->lock); +- list_del_init(&rt->rt_uncached); ++ list_del_init(&rt->dst.rt_uncached); + spin_unlock_bh(&ul->lock); + } + } +@@ -1546,13 +1546,13 @@ void rt_flush_dev(struct net_device *dev) + continue; + + spin_lock_bh(&ul->lock); +- list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) { ++ list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) { + if (rt->dst.dev != dev) + continue; + rt->dst.dev = blackhole_netdev; + netdev_ref_replace(dev, blackhole_netdev, + &rt->dst.dev_tracker, GFP_ATOMIC); +- list_move(&rt->rt_uncached, &ul->quarantine); ++ list_move(&rt->dst.rt_uncached, &ul->quarantine); + } + spin_unlock_bh(&ul->lock); + } +@@ -1644,7 +1644,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, + rt->rt_uses_gateway = 0; + rt->rt_gw_family = 0; + rt->rt_gw4 = 0; +- INIT_LIST_HEAD(&rt->rt_uncached); ++ INIT_LIST_HEAD(&rt->dst.rt_uncached); + + rt->dst.output = ip_output; + if (flags & RTCF_LOCAL) +@@ -1675,7 +1675,7 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) + new_rt->rt_gw4 = rt->rt_gw4; + else if (rt->rt_gw_family == AF_INET6) + new_rt->rt_gw6 = rt->rt_gw6; +- INIT_LIST_HEAD(&new_rt->rt_uncached); ++ INIT_LIST_HEAD(&new_rt->dst.rt_uncached); + + new_rt->dst.input = rt->dst.input; + new_rt->dst.output = rt->dst.output; +@@ -2859,7 +2859,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or + else if (rt->rt_gw_family == AF_INET6) + rt->rt_gw6 = ort->rt_gw6; + +- INIT_LIST_HEAD(&rt->rt_uncached); ++ INIT_LIST_HEAD(&rt->dst.rt_uncached); + } + + dst_release(dst_orig); +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c +index 3d0dfa6cf9f9..47861c8b7340 100644 +--- a/net/ipv4/xfrm4_policy.c ++++ b/net/ipv4/xfrm4_policy.c +@@ -91,7 +91,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, + xdst->u.rt.rt_gw6 = rt->rt_gw6; + xdst->u.rt.rt_pmtu = rt->rt_pmtu; + xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked; +- INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); ++ INIT_LIST_HEAD(&xdst->u.rt.dst.rt_uncached); + rt_add_uncached_list(&xdst->u.rt); + + return 0; +@@ -121,7 +121,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) + struct xfrm_dst *xdst = (struct xfrm_dst *)dst; + + dst_destroy_metrics_generic(dst); +- if (xdst->u.rt.rt_uncached_list) ++ if (xdst->u.rt.dst.rt_uncached_list) + rt_del_uncached_list(&xdst->u.rt); + xfrm_dst_destroy(xdst); + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 0fdb03df2287..b9d22a0a6c09 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -139,20 +139,20 @@ void rt6_uncached_list_add(struct rt6_info *rt) + { + struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); + +- rt->rt6i_uncached_list = ul; ++ rt->dst.rt_uncached_list = ul; + + spin_lock_bh(&ul->lock); +- list_add_tail(&rt->rt6i_uncached, &ul->head); ++ list_add_tail(&rt->dst.rt_uncached, &ul->head); + spin_unlock_bh(&ul->lock); + } + + void rt6_uncached_list_del(struct rt6_info *rt) + { +- if (!list_empty(&rt->rt6i_uncached)) { +- struct uncached_list 
*ul = rt->rt6i_uncached_list; ++ if (!list_empty(&rt->dst.rt_uncached)) { ++ struct uncached_list *ul = rt->dst.rt_uncached_list; + + spin_lock_bh(&ul->lock); +- list_del_init(&rt->rt6i_uncached); ++ list_del_init(&rt->dst.rt_uncached); + spin_unlock_bh(&ul->lock); + } + } +@@ -169,7 +169,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) + continue; + + spin_lock_bh(&ul->lock); +- list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) { ++ list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) { + struct inet6_dev *rt_idev = rt->rt6i_idev; + struct net_device *rt_dev = rt->dst.dev; + bool handled = false; +@@ -188,7 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) + handled = true; + } + if (handled) +- list_move(&rt->rt6i_uncached, ++ list_move(&rt->dst.rt_uncached, + &ul->quarantine); + } + spin_unlock_bh(&ul->lock); +@@ -293,7 +293,7 @@ static const struct fib6_info fib6_null_entry_template = { + + static const struct rt6_info ip6_null_entry_template = { + .dst = { +- .__refcnt = ATOMIC_INIT(1), ++ .__rcuref = RCUREF_INIT(1), + .__use = 1, + .obsolete = DST_OBSOLETE_FORCE_CHK, + .error = -ENETUNREACH, +@@ -307,7 +307,7 @@ static const struct rt6_info ip6_null_entry_template = { + + static const struct rt6_info ip6_prohibit_entry_template = { + .dst = { +- .__refcnt = ATOMIC_INIT(1), ++ .__rcuref = RCUREF_INIT(1), + .__use = 1, + .obsolete = DST_OBSOLETE_FORCE_CHK, + .error = -EACCES, +@@ -319,7 +319,7 @@ static const struct rt6_info ip6_prohibit_entry_template = { + + static const struct rt6_info ip6_blk_hole_entry_template = { + .dst = { +- .__refcnt = ATOMIC_INIT(1), ++ .__rcuref = RCUREF_INIT(1), + .__use = 1, + .obsolete = DST_OBSOLETE_FORCE_CHK, + .error = -EINVAL, +@@ -334,7 +334,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { + static void rt6_info_init(struct rt6_info *rt) + { + memset_after(rt, 0, dst); +- INIT_LIST_HEAD(&rt->rt6i_uncached); ++ INIT_LIST_HEAD(&rt->dst.rt_uncached); + } + + /* allocate dst with ip6_dst_ops */ +@@ -2638,7 +2638,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net, + dst = ip6_route_output_flags_noref(net, sk, fl6, flags); + rt6 = (struct rt6_info *)dst; + /* For dst cached in uncached_list, refcnt is already taken. 
*/ +- if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) { ++ if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) { + dst = &net->ipv6.ip6_null_entry->dst; + dst_hold(dst); + } +@@ -2748,7 +2748,7 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst, + from = rcu_dereference(rt->from); + + if (from && (rt->rt6i_flags & RTF_PCPU || +- unlikely(!list_empty(&rt->rt6i_uncached)))) ++ unlikely(!list_empty(&rt->dst.rt_uncached)))) + dst_ret = rt6_dst_from_check(rt, from, cookie); + else + dst_ret = rt6_check(rt, from, cookie); +@@ -6477,7 +6477,7 @@ static int __net_init ip6_route_net_init(struct net *net) + net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; + dst_init_metrics(&net->ipv6.ip6_null_entry->dst, + ip6_template_metrics, true); +- INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached); ++ INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached); + + #ifdef CONFIG_IPV6_MULTIPLE_TABLES + net->ipv6.fib6_has_custom_rules = false; +@@ -6489,7 +6489,7 @@ static int __net_init ip6_route_net_init(struct net *net) + net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; + dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst, + ip6_template_metrics, true); +- INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached); ++ INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached); + + net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, + sizeof(*net->ipv6.ip6_blk_hole_entry), +@@ -6499,7 +6499,7 @@ static int __net_init ip6_route_net_init(struct net *net) + net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; + dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, + ip6_template_metrics, true); +- INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached); ++ INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached); + #ifdef CONFIG_IPV6_SUBTREES + net->ipv6.fib6_routes_require_src = 0; + #endif +diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c +index ea435eba3053..2b493f8d0091 100644 +--- a/net/ipv6/xfrm6_policy.c ++++ b/net/ipv6/xfrm6_policy.c +@@ -89,7 +89,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, + xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; + xdst->u.rt6.rt6i_dst = rt->rt6i_dst; + xdst->u.rt6.rt6i_src = rt->rt6i_src; +- INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached); ++ INIT_LIST_HEAD(&xdst->u.rt6.dst.rt_uncached); + rt6_uncached_list_add(&xdst->u.rt6); + + return 0; +@@ -121,7 +121,7 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) + if (likely(xdst->u.rt6.rt6i_idev)) + in6_dev_put(xdst->u.rt6.rt6i_idev); + dst_destroy_metrics_generic(dst); +- if (xdst->u.rt6.rt6i_uncached_list) ++ if (xdst->u.rt6.dst.rt_uncached_list) + rt6_uncached_list_del(&xdst->u.rt6); + xfrm_dst_destroy(xdst); + } +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 80448885c3d7..99c349c0d968 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -339,7 +339,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, + spin_unlock_bh(&dest->dst_lock); + IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n", + &dest->addr.ip, &dest_dst->dst_saddr.ip, +- atomic_read(&rt->dst.__refcnt)); ++ rcuref_read(&rt->dst.__rcuref)); + } + if (ret_saddr) + *ret_saddr = dest_dst->dst_saddr.ip; +@@ -507,7 +507,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, + spin_unlock_bh(&dest->dst_lock); + IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n", + 
&dest->addr.in6, &dest_dst->dst_saddr.in6, +- atomic_read(&rt->dst.__refcnt)); ++ rcuref_read(&rt->dst.__rcuref)); + } + if (ret_saddr) + *ret_saddr = dest_dst->dst_saddr.in6; +diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o +index 0edfdb40364b..ae52d3b3f063 100644 +--- a/scripts/Makefile.vmlinux_o ++++ b/scripts/Makefile.vmlinux_o +@@ -19,7 +19,7 @@ quiet_cmd_gen_initcalls_lds = GEN $@ + + .tmp_initcalls.lds: $(srctree)/scripts/generate_initcall_order.pl \ + vmlinux.a $(KBUILD_VMLINUX_LIBS) FORCE +- $(call if_changed,gen_initcalls_lds) ++ +$(call if_changed,gen_initcalls_lds) + + targets := .tmp_initcalls.lds + +diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl +index fbee2f6190d9..85ca8d9b5c27 100644 +--- a/scripts/atomic/atomics.tbl ++++ b/scripts/atomic/atomics.tbl +@@ -33,7 +33,7 @@ try_cmpxchg B v p:old i:new + sub_and_test b i v + dec_and_test b v + inc_and_test b v +-add_negative b i v ++add_negative B i v + add_unless fb v i:a i:u + inc_not_zero b v + inc_unless_negative b v +diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative +index 15caa2eb2371..e5980abf5904 100755 +--- a/scripts/atomic/fallbacks/add_negative ++++ b/scripts/atomic/fallbacks/add_negative +@@ -1,16 +1,15 @@ + cat <bst_type = CS35L41_EXT_BOOST_NO_VSPK_SWITCH; +- } else if (strncmp(hid, "CLSA0101", 8) == 0) { ++ } else if (strncmp(hid, "CLSA0101", 8) == 0 || strncmp(hid, "CSC3551", 7) == 0) { + hw_cfg->bst_type = CS35L41_EXT_BOOST; + hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH; + hw_cfg->gpio1.valid = true; +diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c +index d8b5b4930412..05048ebc24d8 100644 +--- a/tools/testing/selftests/mm/ksm_functional_tests.c ++++ b/tools/testing/selftests/mm/ksm_functional_tests.c +@@ -24,9 +24,12 @@ + + #define KiB 1024u + #define MiB (1024 * KiB) ++#define PageSize (4 * KiB) + + static int ksm_fd; + static int ksm_full_scans_fd; ++static int ksm_zero_pages_fd; ++static int ksm_use_zero_pages_fd; + static int pagemap_fd; + static size_t pagesize; + +@@ -57,6 +60,21 @@ static bool range_maps_duplicates(char *addr, unsigned long size) + return false; + } + ++static long ksm_get_zero_pages(void) ++{ ++ char buf[20]; ++ ssize_t read_size; ++ unsigned long ksm_zero_pages; ++ ++ read_size = pread(ksm_zero_pages_fd, buf, sizeof(buf) - 1, 0); ++ if (read_size < 0) ++ return -errno; ++ buf[read_size] = 0; ++ ksm_zero_pages = strtol(buf, NULL, 10); ++ ++ return ksm_zero_pages; ++} ++ + static long ksm_get_full_scans(void) + { + char buf[10]; +@@ -70,15 +88,12 @@ static long ksm_get_full_scans(void) + return strtol(buf, NULL, 10); + } + +-static int ksm_merge(void) ++static int wait_two_full_scans(void) + { + long start_scans, end_scans; + +- /* Wait for two full scans such that any possible merging happened. */ + start_scans = ksm_get_full_scans(); + if (start_scans < 0) +- return start_scans; +- if (write(ksm_fd, "1", 1) != 1) + return -errno; + do { + end_scans = ksm_get_full_scans(); +@@ -89,6 +104,34 @@ static int ksm_merge(void) + return 0; + } + ++static inline int ksm_merge(void) ++{ ++ /* Wait for two full scans such that any possible merging happened. 
*/
++	if (write(ksm_fd, "1", 1) != 1)
++		return -errno;
++
++	return wait_two_full_scans();
++}
++
++static int unmerge_zero_page(char *start, unsigned long size)
++{
++	int ret;
++
++	ret = madvise(start, size, MADV_UNMERGEABLE);
++	if (ret) {
++		ksft_test_result_fail("MADV_UNMERGEABLE failed\n");
++		return ret;
++	}
++
++	/*
++	 * Wait for two full scans such that any possible unmerging of zero
++	 * pages happened. Why? Because the unmerge action of zero pages is not
++	 * done in the context of madvise(), but in the context of
++	 * unshare_zero_pages() of the ksmd thread.
++	 */
++	return wait_two_full_scans();
++}
++
+ static char *mmap_and_merge_range(char val, unsigned long size)
+ {
+ 	char *map;
+@@ -146,6 +189,48 @@ static void test_unmerge(void)
+ 	munmap(map, size);
+ }
+ 
++static void test_unmerge_zero_pages(void)
++{
++	const unsigned int size = 2 * MiB;
++	char *map;
++	unsigned long pages_expected;
++
++	ksft_print_msg("[RUN] %s\n", __func__);
++
++	/* Confirm the interfaces */
++	if (ksm_zero_pages_fd < 0) {
++		ksft_test_result_skip("open(\"/sys/kernel/mm/ksm/zero_pages_sharing\") failed\n");
++		return;
++	}
++	if (ksm_use_zero_pages_fd < 0) {
++		ksft_test_result_skip("open \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
++		return;
++	}
++	if (write(ksm_use_zero_pages_fd, "1", 1) != 1) {
++		ksft_test_result_skip("write \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
++		return;
++	}
++
++	/* Mmap zero pages */
++	map = mmap_and_merge_range(0x00, size);
++	if (map == MAP_FAILED)
++		return;
++
++	if (unmerge_zero_page(map + size / 2, size / 2))
++		goto unmap;
++
++	/* Check if zero_pages_sharing is updated correctly when unmerging */
++	pages_expected = (size / 2) / PageSize;
++	ksft_test_result(pages_expected == ksm_get_zero_pages(),
++			 "zero page count reacts to unmerge\n");
++
++	/* Check if ksm zero pages are really unmerged */
++	ksft_test_result(!range_maps_duplicates(map + size / 2, size / 2),
++			 "KSM zero pages were unmerged\n");
++unmap:
++	munmap(map, size);
++}
++
+ static void test_unmerge_discarded(void)
+ {
+ 	const unsigned int size = 2 * MiB;
+@@ -264,8 +349,11 @@ int main(int argc, char **argv)
+ 	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ 	if (pagemap_fd < 0)
+ 		ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
++	ksm_zero_pages_fd = open("/sys/kernel/mm/ksm/zero_pages_sharing", O_RDONLY);
++	ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+ 
+ 	test_unmerge();
++	test_unmerge_zero_pages();
+ 	test_unmerge_discarded();
+ #ifdef __NR_userfaultfd
+ 	test_unmerge_uffd_wp();
+-- 
+2.40.0
+
+From 44c369d8ceca091040f847df8e2e9e15df9dc300 Mon Sep 17 00:00:00 2001
+From: Peter Jung
+Date: Sat, 22 Apr 2023 11:43:38 +0200
+Subject: [PATCH 05/13] fs-patches
+
+Signed-off-by: Peter Jung
+---
+ fs/btrfs/block-group.c    |   7 +-
+ fs/btrfs/block-group.h    |   2 +-
+ fs/btrfs/block-rsv.c      |  21 +++---
+ fs/btrfs/block-rsv.h      |   2 +-
+ fs/btrfs/btrfs_inode.h    |  33 ++++++++--
+ fs/btrfs/ctree.c          | 131 +++++++++++++++++++++---------------
+ fs/btrfs/ctree.h          |   1 -
+ fs/btrfs/delalloc-space.c |   2 +-
+ fs/btrfs/delayed-ref.c    |  49 ++------------
+ fs/btrfs/delayed-ref.h    |  22 ++++++-
+ fs/btrfs/discard.c        |  18 +++---
+ fs/btrfs/disk-io.c        |   1 -
+ fs/btrfs/extent-tree.c    |  27 +-------
+ fs/btrfs/file.c           |  11 +++-
+ fs/btrfs/fs.h             |  17 ++++-
+ fs/btrfs/inode-item.c     |  15 ++---
+ fs/btrfs/inode.c          | 117 ++++++++++++++++++++------------
+ fs/btrfs/ordered-data.c   |  46 ++++++++++---
+ fs/btrfs/ordered-data.h   |   7 +-
+ fs/btrfs/space-info.c     |  32 ++++++++--
fs/btrfs/space-info.h | 1 + + fs/btrfs/transaction.c | 28 ++++---- + fs/btrfs/tree-log.c | 76 ++++++++++++++-------- + fs/btrfs/volumes.c | 2 +- + 24 files changed, 399 insertions(+), 269 deletions(-) + +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 5fc670c27f86..bececc6f0a87 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -2672,7 +2672,7 @@ static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) + } + + struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, +- u64 bytes_used, u64 type, ++ u64 type, + u64 chunk_offset, u64 size) + { + struct btrfs_fs_info *fs_info = trans->fs_info; +@@ -2687,7 +2687,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran + + cache->length = size; + set_free_space_tree_thresholds(cache); +- cache->used = bytes_used; + cache->flags = type; + cache->cached = BTRFS_CACHE_FINISHED; + cache->global_root_id = calculate_global_root_id(fs_info, cache->start); +@@ -2738,9 +2737,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran + + #ifdef CONFIG_BTRFS_DEBUG + if (btrfs_should_fragment_free_space(cache)) { +- u64 new_bytes_used = size - bytes_used; +- +- cache->space_info->bytes_used += new_bytes_used >> 1; ++ cache->space_info->bytes_used += size >> 1; + fragment_free_space(cache); + } + #endif +diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h +index 6e4a0b429ac3..db729ad7315b 100644 +--- a/fs/btrfs/block-group.h ++++ b/fs/btrfs/block-group.h +@@ -302,7 +302,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); + void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); + int btrfs_read_block_groups(struct btrfs_fs_info *info); + struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, +- u64 bytes_used, u64 type, ++ u64 type, + u64 chunk_offset, u64 size); + void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); + int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, +diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c +index 5367a14d44d2..3ab707e26fa2 100644 +--- a/fs/btrfs/block-rsv.c ++++ b/fs/btrfs/block-rsv.c +@@ -232,9 +232,6 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent) + u64 num_bytes = 0; + int ret = -ENOSPC; + +- if (!block_rsv) +- return 0; +- + spin_lock(&block_rsv->lock); + num_bytes = mult_perc(block_rsv->size, min_percent); + if (block_rsv->reserved >= num_bytes) +@@ -245,17 +242,15 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent) + } + + int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info, +- struct btrfs_block_rsv *block_rsv, u64 min_reserved, ++ struct btrfs_block_rsv *block_rsv, u64 num_bytes, + enum btrfs_reserve_flush_enum flush) + { +- u64 num_bytes = 0; + int ret = -ENOSPC; + + if (!block_rsv) + return 0; + + spin_lock(&block_rsv->lock); +- num_bytes = min_reserved; + if (block_rsv->reserved >= num_bytes) + ret = 0; + else +@@ -355,17 +350,19 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) + + /* + * But we also want to reserve enough space so we can do the fallback +- * global reserve for an unlink, which is an additional 5 items (see the +- * comment in __unlink_start_trans for what we're modifying.) ++ * global reserve for an unlink, which is an additional ++ * BTRFS_UNLINK_METADATA_UNITS items. 
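++ * (Assuming the conventional per-unlink breakdown behind the old
++ * hardcoded value of 5: one unit each for the dir item, the dir index,
++ * the inode ref, the inode and the orphan item. For scale, with a 16K
++ * nodesize and BTRFS_MAX_LEVEL == 8, btrfs_calc_insert_metadata_size()
++ * comes to 16K * 8 * 2 = 256K per unit.)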
+ * + * But we also need space for the delayed ref updates from the unlink, +- * so its 10, 5 for the actual operation, and 5 for the delayed ref +- * updates. ++ * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for ++ * each unlink metadata item. + */ +- min_items += 10; ++ min_items += BTRFS_UNLINK_METADATA_UNITS; + + num_bytes = max_t(u64, num_bytes, +- btrfs_calc_insert_metadata_size(fs_info, min_items)); ++ btrfs_calc_insert_metadata_size(fs_info, min_items) + ++ btrfs_calc_delayed_ref_bytes(fs_info, ++ BTRFS_UNLINK_METADATA_UNITS)); + + spin_lock(&sinfo->lock); + spin_lock(&block_rsv->lock); +diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h +index 4cc41c9aaa82..6dc781709aca 100644 +--- a/fs/btrfs/block-rsv.h ++++ b/fs/btrfs/block-rsv.h +@@ -65,7 +65,7 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info, + enum btrfs_reserve_flush_enum flush); + int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent); + int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info, +- struct btrfs_block_rsv *block_rsv, u64 min_reserved, ++ struct btrfs_block_rsv *block_rsv, u64 num_bytes, + enum btrfs_reserve_flush_enum flush); + int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, + struct btrfs_block_rsv *dst_rsv, u64 num_bytes, +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index 9dc21622806e..fd1a7436e694 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -142,11 +142,22 @@ struct btrfs_inode { + /* a local copy of root's last_log_commit */ + int last_log_commit; + +- /* +- * Total number of bytes pending delalloc, used by stat to calculate the +- * real block usage of the file. This is used only for files. +- */ +- u64 delalloc_bytes; ++ union { ++ /* ++ * Total number of bytes pending delalloc, used by stat to ++ * calculate the real block usage of the file. This is used ++ * only for files. ++ */ ++ u64 delalloc_bytes; ++ /* ++ * The lowest possible index of the next dir index key which ++ * points to an inode that needs to be logged. ++ * This is used only for directories. ++ * Use the helpers btrfs_get_first_dir_index_to_log() and ++ * btrfs_set_first_dir_index_to_log() to access this field. ++ */ ++ u64 first_dir_index_to_log; ++ }; + + union { + /* +@@ -247,6 +258,17 @@ struct btrfs_inode { + struct inode vfs_inode; + }; + ++static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode) ++{ ++ return READ_ONCE(inode->first_dir_index_to_log); ++} ++ ++static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode, ++ u64 index) ++{ ++ WRITE_ONCE(inode->first_dir_index_to_log, index); ++} ++ + static inline struct btrfs_inode *BTRFS_I(const struct inode *inode) + { + return container_of(inode, struct btrfs_inode, vfs_inode); +@@ -516,6 +538,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, + ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, + size_t done_before); + struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, ++ struct btrfs_ordered_extent **ordered_extent, + size_t done_before); + + extern const struct dentry_operations btrfs_dentry_operations; +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index a5b6bb54545f..b5b73ed8b86b 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -2370,6 +2370,87 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, + return ret; + } + ++/* ++ * Search the tree again to find a leaf with smaller keys. 
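++ * The predecessor key is computed by decrementing the lowest non-zero
++ * field of the (objectid, type, offset) triple and saturating every
++ * field below it, e.g. the key right before (256, BTRFS_DIR_INDEX_KEY, 0)
++ * is (256, BTRFS_DIR_INDEX_KEY - 1, (u64)-1).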
++ * Returns 0 if it found something.
++ * Returns 1 if there are no smaller keys.
++ * Returns < 0 on error.
++ *
++ * This may release the path, and so you may lose any locks held at the
++ * time you call it.
++ */
++static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
++{
++ struct btrfs_key key;
++ struct btrfs_key orig_key;
++ struct btrfs_disk_key found_key;
++ int ret;
++
++ btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
++ orig_key = key;
++
++ if (key.offset > 0) {
++ key.offset--;
++ } else if (key.type > 0) {
++ key.type--;
++ key.offset = (u64)-1;
++ } else if (key.objectid > 0) {
++ key.objectid--;
++ key.type = (u8)-1;
++ key.offset = (u64)-1;
++ } else {
++ return 1;
++ }
++
++ btrfs_release_path(path);
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++ if (ret <= 0)
++ return ret;
++
++ /*
++ * Previous key not found. Even if we were at slot 0 of the leaf we had
++ * before releasing the path and calling btrfs_search_slot(), we now may
++ * be in a slot pointing to the same original key - this can happen if
++ * after we released the path, one or more items were moved from a
++ * sibling leaf into the front of the leaf we had due to an insertion
++ * (see push_leaf_right()).
++ * If we hit this case and our slot is > 0, just decrement the slot
++ * so that the caller does not process the same key again, which may or
++ * may not break the caller, depending on its logic.
++ */
++ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
++ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
++ ret = comp_keys(&found_key, &orig_key);
++ if (ret == 0) {
++ if (path->slots[0] > 0) {
++ path->slots[0]--;
++ return 0;
++ }
++ /*
++ * At slot 0, same key as before, it means orig_key is
++ * the lowest, leftmost, key in the tree. We're done.
++ */
++ return 1;
++ }
++ }
++
++ btrfs_item_key(path->nodes[0], &found_key, 0);
++ ret = comp_keys(&found_key, &key);
++ /*
++ * We might have had an item with the previous key in the tree right
++ * before we released our path. And after we released our path, that
++ * item might have been pushed to the first slot (0) of the leaf we
++ * were holding due to a tree balance. Alternatively, an item with the
++ * previous key can exist as the only element of a leaf (big fat item).
++ * Therefore account for these 2 cases, so that our callers (like
++ * btrfs_previous_item) don't miss an existing item with a key matching
++ * the previous key we computed above.
++ */
++ if (ret <= 0)
++ return 0;
++ return 1;
++}
++
+ /*
+ * helper to use instead of search slot if no exact match is needed but
+ * instead the next or previous item should be returned.
+@@ -4478,56 +4559,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ return ret;
+ }
+
+-/*
+- * search the tree again to find a leaf with lesser keys
+- * returns 0 if it found something or 1 if there are no lesser leaves.
+- * returns < 0 on io errors.
+- *
+- * This may release the path, and so you may lose any locks held at the
+- * time you call it.
+- */ +-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) +-{ +- struct btrfs_key key; +- struct btrfs_disk_key found_key; +- int ret; +- +- btrfs_item_key_to_cpu(path->nodes[0], &key, 0); +- +- if (key.offset > 0) { +- key.offset--; +- } else if (key.type > 0) { +- key.type--; +- key.offset = (u64)-1; +- } else if (key.objectid > 0) { +- key.objectid--; +- key.type = (u8)-1; +- key.offset = (u64)-1; +- } else { +- return 1; +- } +- +- btrfs_release_path(path); +- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); +- if (ret < 0) +- return ret; +- btrfs_item_key(path->nodes[0], &found_key, 0); +- ret = comp_keys(&found_key, &key); +- /* +- * We might have had an item with the previous key in the tree right +- * before we released our path. And after we released our path, that +- * item might have been pushed to the first slot (0) of the leaf we +- * were holding due to a tree balance. Alternatively, an item with the +- * previous key can exist as the only element of a leaf (big fat item). +- * Therefore account for these 2 cases, so that our callers (like +- * btrfs_previous_item) don't miss an existing item with a key matching +- * the previous key we computed above. +- */ +- if (ret <= 0) +- return 0; +- return 1; +-} +- + /* + * A helper function to walk down the tree starting at min_key, and looking + * for nodes or leaves that are have a minimum transaction id. +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 97897107fab5..406f90508f7e 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -646,7 +646,6 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, + return btrfs_insert_empty_items(trans, root, path, &batch); + } + +-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); + int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, + u64 time_seq); + +diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c +index 7ddb1d104e8e..427abaf608b8 100644 +--- a/fs/btrfs/delalloc-space.c ++++ b/fs/btrfs/delalloc-space.c +@@ -358,8 +358,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, + * racing with an ordered completion or some such that would think it + * needs to free the reservation we just made. + */ +- spin_lock(&inode->lock); + nr_extents = count_max_extents(fs_info, num_bytes); ++ spin_lock(&inode->lock); + btrfs_mod_outstanding_extents(inode, nr_extents); + inode->csum_bytes += disk_num_bytes; + btrfs_calculate_inode_block_rsv_size(fs_info, inode); +diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c +index 886ffb232eac..0b32432d7d56 100644 +--- a/fs/btrfs/delayed-ref.c ++++ b/fs/btrfs/delayed-ref.c +@@ -53,24 +53,6 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info) + return ret; + } + +-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) +-{ +- u64 num_entries = +- atomic_read(&trans->transaction->delayed_refs.num_entries); +- u64 avg_runtime; +- u64 val; +- +- smp_mb(); +- avg_runtime = trans->fs_info->avg_delayed_ref_runtime; +- val = num_entries * avg_runtime; +- if (val >= NSEC_PER_SEC) +- return 1; +- if (val >= NSEC_PER_SEC / 2) +- return 2; +- +- return btrfs_check_space_for_delayed_refs(trans->fs_info); +-} +- + /* + * Release a ref head's reservation. 
+ * +@@ -83,20 +65,9 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) + void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr) + { + struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; +- u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr); ++ const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr); + u64 released = 0; + +- /* +- * We have to check the mount option here because we could be enabling +- * the free space tree for the first time and don't have the compat_ro +- * option set yet. +- * +- * We need extra reservations if we have the free space tree because +- * we'll have to modify that tree as well. +- */ +- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) +- num_bytes *= 2; +- + released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL); + if (released) + trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", +@@ -118,18 +89,8 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) + if (!trans->delayed_ref_updates) + return; + +- num_bytes = btrfs_calc_insert_metadata_size(fs_info, +- trans->delayed_ref_updates); +- /* +- * We have to check the mount option here because we could be enabling +- * the free space tree for the first time and don't have the compat_ro +- * option set yet. +- * +- * We need extra reservations if we have the free space tree because +- * we'll have to modify that tree as well. +- */ +- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) +- num_bytes *= 2; ++ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, ++ trans->delayed_ref_updates); + + spin_lock(&delayed_rsv->lock); + delayed_rsv->size += num_bytes; +@@ -200,7 +161,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, + enum btrfs_reserve_flush_enum flush) + { + struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; +- u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1); ++ u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1); + u64 num_bytes = 0; + int ret = -ENOSPC; + +@@ -217,7 +178,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, + ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush); + if (ret) + return ret; +- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0); ++ btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false); + trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", + 0, num_bytes, 1); + return 0; +diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h +index 2eb34abf700f..b54261fe509b 100644 +--- a/fs/btrfs/delayed-ref.h ++++ b/fs/btrfs/delayed-ref.h +@@ -253,6 +253,27 @@ extern struct kmem_cache *btrfs_delayed_extent_op_cachep; + int __init btrfs_delayed_ref_init(void); + void __cold btrfs_delayed_ref_exit(void); + ++static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info, ++ int num_delayed_refs) ++{ ++ u64 num_bytes; ++ ++ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs); ++ ++ /* ++ * We have to check the mount option here because we could be enabling ++ * the free space tree for the first time and don't have the compat_ro ++ * option set yet. ++ * ++ * We need extra reservations if we have the free space tree because ++ * we'll have to modify that tree as well. 
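++ *
++ * For example, with the default 16K nodesize this comes to
++ * 16K * BTRFS_MAX_LEVEL * 2 = 256K per delayed ref, and double that
++ * when the free space tree is enabled.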
++ */ ++ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) ++ num_bytes *= 2; ++ ++ return num_bytes; ++} ++ + static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, + int action, u64 bytenr, u64 len, u64 parent) + { +@@ -385,7 +406,6 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, + void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *src, + u64 num_bytes); +-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans); + bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); + + /* +diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c +index 317aeff6c1da..c48abc817ed2 100644 +--- a/fs/btrfs/discard.c ++++ b/fs/btrfs/discard.c +@@ -56,11 +56,9 @@ + #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) + #define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC) + +-/* Target completion latency of discarding all discardable extents */ +-#define BTRFS_DISCARD_TARGET_MSEC (6 * 60 * 60UL * MSEC_PER_SEC) + #define BTRFS_DISCARD_MIN_DELAY_MSEC (1UL) + #define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL) +-#define BTRFS_DISCARD_MAX_IOPS (10U) ++#define BTRFS_DISCARD_MAX_IOPS (1000U) + + /* Monotonically decreasing minimum length filters after index 0 */ + static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = { +@@ -577,6 +575,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) + s32 discardable_extents; + s64 discardable_bytes; + u32 iops_limit; ++ unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC; + unsigned long delay; + + discardable_extents = atomic_read(&discard_ctl->discardable_extents); +@@ -607,13 +606,16 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) + } + + iops_limit = READ_ONCE(discard_ctl->iops_limit); +- if (iops_limit) ++ ++ if (iops_limit) { + delay = MSEC_PER_SEC / iops_limit; +- else +- delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents; ++ } else { ++ /* unset iops_limit means go as fast as possible, so allow a delay of 0 */ ++ delay = 0; ++ min_delay = 0; ++ } + +- delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC, +- BTRFS_DISCARD_MAX_DELAY_MSEC); ++ delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC); + discard_ctl->delay_ms = delay; + + spin_unlock(&discard_ctl->lock); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 9e1596bb208d..6cf064f41bec 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2966,7 +2966,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) + atomic64_set(&fs_info->free_chunk_space, 0); + fs_info->tree_mod_log = RB_ROOT; + fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; +- fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ + btrfs_init_ref_verify(fs_info); + + fs_info->thread_pool_size = min_t(unsigned long, +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 824c657f59e8..cf1f7e901337 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( + } + + static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, +- struct btrfs_delayed_ref_head *locked_ref, +- unsigned long *run_refs) ++ struct btrfs_delayed_ref_head *locked_ref) + { + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_delayed_ref_root *delayed_refs; +@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, + return -EAGAIN; + } + +- (*run_refs)++; + ref->in_tree = 0; + 
rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); + RB_CLEAR_NODE(&ref->ref_node); +@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_delayed_ref_head *locked_ref = NULL; +- ktime_t start = ktime_get(); + int ret; + unsigned long count = 0; +- unsigned long actual_count = 0; + + delayed_refs = &trans->transaction->delayed_refs; + do { +@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, + spin_lock(&locked_ref->lock); + btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); + +- ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, +- &actual_count); ++ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref); + if (ret < 0 && ret != -EAGAIN) { + /* + * Error, btrfs_run_delayed_refs_for_head already +@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, + cond_resched(); + } while ((nr != -1 && count < nr) || locked_ref); + +- /* +- * We don't want to include ref heads since we can have empty ref heads +- * and those will drastically skew our runtime down since we just do +- * accounting, no actual extent tree updates. +- */ +- if (actual_count > 0) { +- u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); +- u64 avg; +- +- /* +- * We weigh the current average higher than our current runtime +- * to avoid large swings in the average. +- */ +- spin_lock(&delayed_refs->lock); +- avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; +- fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */ +- spin_unlock(&delayed_refs->lock); +- } + return 0; + } + +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 5cc5a1faaef5..ec5c5355906b 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1465,6 +1465,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + ssize_t err; + unsigned int ilock_flags = 0; + struct iomap_dio *dio; ++ struct btrfs_ordered_extent *ordered_extent = NULL; + + if (iocb->ki_flags & IOCB_NOWAIT) + ilock_flags |= BTRFS_ILOCK_TRY; +@@ -1526,7 +1527,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + * got -EFAULT, faulting in the pages before the retry. + */ + from->nofault = true; +- dio = btrfs_dio_write(iocb, from, written); ++ dio = btrfs_dio_write(iocb, from, &ordered_extent, written); + from->nofault = false; + + /* +@@ -1569,6 +1570,14 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) + goto relock; + } + } ++ /* ++ * We can't loop back to btrfs_dio_write, so we can drop the cached ++ * ordered extent. Typically btrfs_dio_iomap_end will run and put the ++ * ordered_extent, but this is needed to clean up in case of an error ++ * path breaking out of iomap_iter before the final iomap_end call. 
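++ * (The reference being dropped here is the extra one that
++ * btrfs_alloc_ordered_extent() takes on the returned ordered extent,
++ * which btrfs_dio_write() hands back through its out parameter.)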
++ */ ++ if (ordered_extent) ++ btrfs_put_ordered_extent(ordered_extent); + + /* + * If 'err' is -ENOTBLK or we have not written all data, then it means +diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h +index 24cd49229408..23f05ba8f5c5 100644 +--- a/fs/btrfs/fs.h ++++ b/fs/btrfs/fs.h +@@ -24,6 +24,18 @@ + #define BTRFS_SUPER_INFO_SIZE 4096 + static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE); + ++/* ++ * Number of metadata items necessary for an unlink operation: ++ * ++ * 1 for the possible orphan item ++ * 1 for the dir item ++ * 1 for the dir index ++ * 1 for the inode ref ++ * 1 for the inode ++ * 1 for the parent inode ++ */ ++#define BTRFS_UNLINK_METADATA_UNITS 6 ++ + /* + * The reserved space at the beginning of each device. It covers the primary + * super block and leaves space for potential use by other tools like +@@ -412,7 +424,6 @@ struct btrfs_fs_info { + * Must be written and read while holding btrfs_fs_info::commit_root_sem. + */ + u64 last_reloc_trans; +- u64 avg_delayed_ref_runtime; + + /* + * This is updated to the current trans every time a full commit is +@@ -828,7 +839,7 @@ static inline u64 btrfs_csum_bytes_to_leaves( + * Use this if we would be adding new items, as we could split nodes as we cow + * down the tree. + */ +-static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, ++static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info, + unsigned num_items) + { + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; +@@ -838,7 +849,7 @@ static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, + * Doing a truncate or a modification won't result in new nodes or leaves, just + * what we need for COW. + */ +-static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, ++static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info, + unsigned num_items) + { + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; +diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c +index b65c45b5d681..4c322b720a80 100644 +--- a/fs/btrfs/inode-item.c ++++ b/fs/btrfs/inode-item.c +@@ -527,7 +527,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + + while (1) { + u64 clear_start = 0, clear_len = 0, extent_start = 0; +- bool should_throttle = false; ++ bool refill_delayed_refs_rsv = false; + + fi = NULL; + leaf = path->nodes[0]; +@@ -660,8 +660,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + /* No pending yet, add ourselves */ + pending_del_slot = path->slots[0]; + pending_del_nr = 1; +- } else if (pending_del_nr && +- path->slots[0] + 1 == pending_del_slot) { ++ } else if (path->slots[0] + 1 == pending_del_slot) { + /* Hop on the pending chunk */ + pending_del_nr++; + pending_del_slot = path->slots[0]; +@@ -686,10 +685,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + btrfs_abort_transaction(trans, ret); + break; + } +- if (be_nice) { +- if (btrfs_should_throttle_delayed_refs(trans)) +- should_throttle = true; +- } ++ if (be_nice && btrfs_check_space_for_delayed_refs(fs_info)) ++ refill_delayed_refs_rsv = true; + } + + if (found_type == BTRFS_INODE_ITEM_KEY) +@@ -697,7 +694,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + + if (path->slots[0] == 0 || + path->slots[0] != pending_del_slot || +- should_throttle) { ++ refill_delayed_refs_rsv) { + if (pending_del_nr) { + ret = btrfs_del_items(trans, root, path, + pending_del_slot, +@@ -720,7 +717,7 @@ int 
btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + * actually allocate, so just bail if we're short and + * let the normal reservation dance happen higher up. + */ +- if (should_throttle) { ++ if (refill_delayed_refs_rsv) { + ret = btrfs_delayed_refs_rsv_refill(fs_info, + BTRFS_RESERVE_NO_FLUSH); + if (ret) { +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 957e4d76a7b6..0dbb1c1cc851 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -81,6 +81,7 @@ struct btrfs_dio_data { + struct extent_changeset *data_reserved; + bool data_space_reserved; + bool nocow_done; ++ struct btrfs_ordered_extent *ordered; + }; + + struct btrfs_dio_private { +@@ -4261,15 +4262,8 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) + { + struct btrfs_root *root = dir->root; + +- /* +- * 1 for the possible orphan item +- * 1 for the dir item +- * 1 for the dir index +- * 1 for the inode ref +- * 1 for the inode +- * 1 for the parent inode +- */ +- return btrfs_start_transaction_fallback_global_rsv(root, 6); ++ return btrfs_start_transaction_fallback_global_rsv(root, ++ BTRFS_UNLINK_METADATA_UNITS); + } + + static int btrfs_unlink(struct inode *dir, struct dentry *dentry) +@@ -5243,7 +5237,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, + { + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_trans_handle *trans; +- u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); ++ u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); + int ret; + + /* +@@ -5281,7 +5275,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, + trans->block_rsv = &fs_info->trans_block_rsv; + trans->bytes_reserved = delayed_refs_extra; + btrfs_block_rsv_migrate(rsv, trans->block_rsv, +- delayed_refs_extra, 1); ++ delayed_refs_extra, true); + } + return trans; + } +@@ -5291,7 +5285,7 @@ void btrfs_evict_inode(struct inode *inode) + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(inode)->root; +- struct btrfs_block_rsv *rsv; ++ struct btrfs_block_rsv *rsv = NULL; + int ret; + + trace_btrfs_inode_evict(inode); +@@ -5308,18 +5302,18 @@ void btrfs_evict_inode(struct inode *inode) + ((btrfs_root_refs(&root->root_item) != 0 && + root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || + btrfs_is_free_space_inode(BTRFS_I(inode)))) +- goto no_delete; ++ goto out; + + if (is_bad_inode(inode)) +- goto no_delete; ++ goto out; + + if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) +- goto no_delete; ++ goto out; + + if (inode->i_nlink > 0) { + BUG_ON(btrfs_root_refs(&root->root_item) != 0 && + root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); +- goto no_delete; ++ goto out; + } + + /* +@@ -5328,7 +5322,7 @@ void btrfs_evict_inode(struct inode *inode) + */ + ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); + if (ret) +- goto no_delete; ++ goto out; + + /* + * This drops any pending insert or delete operations we have for this +@@ -5340,7 +5334,7 @@ void btrfs_evict_inode(struct inode *inode) + + rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); + if (!rsv) +- goto no_delete; ++ goto out; + rsv->size = btrfs_calc_metadata_size(fs_info, 1); + rsv->failfast = true; + +@@ -5356,16 +5350,21 @@ void btrfs_evict_inode(struct inode *inode) + + trans = evict_refill_and_join(root, rsv); + if (IS_ERR(trans)) +- goto free_rsv; ++ goto out; + + trans->block_rsv = rsv; + + ret = 
btrfs_truncate_inode_items(trans, root, &control); + trans->block_rsv = &fs_info->trans_block_rsv; + btrfs_end_transaction(trans); +- btrfs_btree_balance_dirty(fs_info); ++ /* ++ * We have not added new delayed items for our inode after we ++ * have flushed its delayed items, so no need to throttle on ++ * delayed items. However we have modified extent buffers. ++ */ ++ btrfs_btree_balance_dirty_nodelay(fs_info); + if (ret && ret != -ENOSPC && ret != -EAGAIN) +- goto free_rsv; ++ goto out; + else if (!ret) + break; + } +@@ -5387,9 +5386,8 @@ void btrfs_evict_inode(struct inode *inode) + btrfs_end_transaction(trans); + } + +-free_rsv: ++out: + btrfs_free_block_rsv(fs_info, rsv); +-no_delete: + /* + * If we didn't successfully delete, the orphan item will still be in + * the tree and we'll retry on the next mount. Again, we might also want +@@ -6981,6 +6979,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + } + + static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, ++ struct btrfs_dio_data *dio_data, + const u64 start, + const u64 len, + const u64 orig_start, +@@ -6991,7 +6990,7 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, + const int type) + { + struct extent_map *em = NULL; +- int ret; ++ struct btrfs_ordered_extent *ordered; + + if (type != BTRFS_ORDERED_NOCOW) { + em = create_io_em(inode, start, len, orig_start, block_start, +@@ -7001,18 +7000,21 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, + if (IS_ERR(em)) + goto out; + } +- ret = btrfs_add_ordered_extent(inode, start, len, len, block_start, +- block_len, 0, +- (1 << type) | +- (1 << BTRFS_ORDERED_DIRECT), +- BTRFS_COMPRESS_NONE); +- if (ret) { ++ ordered = btrfs_alloc_ordered_extent(inode, start, len, len, ++ block_start, block_len, 0, ++ (1 << type) | ++ (1 << BTRFS_ORDERED_DIRECT), ++ BTRFS_COMPRESS_NONE); ++ if (IS_ERR(ordered)) { + if (em) { + free_extent_map(em); + btrfs_drop_extent_map_range(inode, start, + start + len - 1, false); + } +- em = ERR_PTR(ret); ++ em = ERR_PTR(PTR_ERR(ordered)); ++ } else { ++ ASSERT(!dio_data->ordered); ++ dio_data->ordered = ordered; + } + out: + +@@ -7020,6 +7022,7 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, + } + + static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, ++ struct btrfs_dio_data *dio_data, + u64 start, u64 len) + { + struct btrfs_root *root = inode->root; +@@ -7035,7 +7038,8 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, + if (ret) + return ERR_PTR(ret); + +- em = btrfs_create_dio_extent(inode, start, ins.offset, start, ++ em = btrfs_create_dio_extent(inode, dio_data, ++ start, ins.offset, start, + ins.objectid, ins.offset, ins.offset, + ins.offset, BTRFS_ORDERED_REGULAR); + btrfs_dec_block_group_reservations(fs_info, ins.objectid); +@@ -7380,7 +7384,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, + } + space_reserved = true; + +- em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, ++ em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len, + orig_start, block_start, + len, orig_block_len, + ram_bytes, type); +@@ -7422,7 +7426,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, + goto out; + space_reserved = true; + +- em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); ++ em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out; +@@ -7526,6 +7530,17 @@ 
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + } + } + ++ if (dio_data->ordered) { ++ ASSERT(write); ++ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, ++ dio_data->ordered->file_offset, ++ dio_data->ordered->bytes_left); ++ if (IS_ERR(em)) { ++ ret = PTR_ERR(em); ++ goto err; ++ } ++ goto map_iomap; ++ } + memset(dio_data, 0, sizeof(*dio_data)); + + /* +@@ -7667,6 +7682,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + else + free_extent_state(cached_state); + ++map_iomap: + /* + * Translate extent map information to iomap. + * We trim the extents (and move the addr) even though iomap code does +@@ -7720,13 +7736,25 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, + if (submitted < length) { + pos += submitted; + length -= submitted; +- if (write) +- btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL, +- pos, length, false); +- else ++ if (write) { ++ if (submitted == 0) { ++ btrfs_mark_ordered_io_finished(BTRFS_I(inode), ++ NULL, pos, ++ length, false); ++ btrfs_put_ordered_extent(dio_data->ordered); ++ dio_data->ordered = NULL; ++ } ++ } else { + unlock_extent(&BTRFS_I(inode)->io_tree, pos, + pos + length - 1, NULL); ++ } + ret = -ENOTBLK; ++ } else { ++ /* On the last bio, release our cached ordered_extent. */ ++ if (write) { ++ btrfs_put_ordered_extent(dio_data->ordered); ++ dio_data->ordered = NULL; ++ } + } + + if (write) +@@ -7789,19 +7817,24 @@ static const struct iomap_dio_ops btrfs_dio_ops = { + + ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) + { +- struct btrfs_dio_data data; ++ struct btrfs_dio_data data = { 0 }; + + return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, + IOMAP_DIO_PARTIAL, &data, done_before); + } + + struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, ++ struct btrfs_ordered_extent **ordered_extent, + size_t done_before) + { +- struct btrfs_dio_data data; ++ struct btrfs_dio_data dio_data = { .ordered = *ordered_extent }; ++ struct iomap_dio *dio; + +- return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, +- IOMAP_DIO_PARTIAL, &data, done_before); ++ dio = __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, ++ IOMAP_DIO_PARTIAL, &dio_data, done_before); ++ if (!IS_ERR_OR_NULL(dio)) ++ *ordered_extent = dio_data.ordered; ++ return dio; + } + + static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c +index 6c24b69e2d0a..1848d0d1a9c4 100644 +--- a/fs/btrfs/ordered-data.c ++++ b/fs/btrfs/ordered-data.c +@@ -160,14 +160,16 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, + * @compress_type: Compression algorithm used for data. + * + * Most of these parameters correspond to &struct btrfs_file_extent_item. The +- * tree is given a single reference on the ordered extent that was inserted. ++ * tree is given a single reference on the ordered extent that was inserted, and ++ * the returned pointer is given a second reference. + * +- * Return: 0 or -ENOMEM. ++ * Return: the new ordered extent or ERR_PTR(-ENOMEM). 
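++ * The caller is expected to drop that second reference with
++ * btrfs_put_ordered_extent() once it is done with the ordered extent.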
+ */ +-int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, +- u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, +- u64 disk_num_bytes, u64 offset, unsigned flags, +- int compress_type) ++struct btrfs_ordered_extent *btrfs_alloc_ordered_extent( ++ struct btrfs_inode *inode, u64 file_offset, ++ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, ++ u64 disk_num_bytes, u64 offset, unsigned long flags, ++ int compress_type) + { + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; +@@ -181,7 +183,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, + /* For nocow write, we can release the qgroup rsv right now */ + ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes); + if (ret < 0) +- return ret; ++ return ERR_PTR(ret); + ret = 0; + } else { + /* +@@ -190,11 +192,11 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, + */ + ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes); + if (ret < 0) +- return ret; ++ return ERR_PTR(ret); + } + entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); + if (!entry) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + + entry->file_offset = file_offset; + entry->num_bytes = num_bytes; +@@ -256,6 +258,32 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, + btrfs_mod_outstanding_extents(inode, 1); + spin_unlock(&inode->lock); + ++ /* One ref for the returned entry to match semantics of lookup. */ ++ refcount_inc(&entry->refs); ++ ++ return entry; ++} ++ ++/* ++ * Add a new btrfs_ordered_extent for the range, but drop the reference instead ++ * of returning it to the caller. ++ */ ++int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, ++ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, ++ u64 disk_num_bytes, u64 offset, unsigned long flags, ++ int compress_type) ++{ ++ struct btrfs_ordered_extent *ordered; ++ ++ ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes, ++ ram_bytes, disk_bytenr, ++ disk_num_bytes, offset, flags, ++ compress_type); ++ ++ if (IS_ERR(ordered)) ++ return PTR_ERR(ordered); ++ btrfs_put_ordered_extent(ordered); ++ + return 0; + } + +diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h +index eb40cb39f842..18007f9c00ad 100644 +--- a/fs/btrfs/ordered-data.h ++++ b/fs/btrfs/ordered-data.h +@@ -178,9 +178,14 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode, + bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode, + struct btrfs_ordered_extent **cached, + u64 file_offset, u64 io_size); ++struct btrfs_ordered_extent *btrfs_alloc_ordered_extent( ++ struct btrfs_inode *inode, u64 file_offset, ++ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, ++ u64 disk_num_bytes, u64 offset, unsigned long flags, ++ int compress_type); + int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, + u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, +- u64 disk_num_bytes, u64 offset, unsigned flags, ++ u64 disk_num_bytes, u64 offset, unsigned long flags, + int compress_type); + void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry, + struct btrfs_ordered_sum *sum); +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c +index 3eecce86f63f..75e7fa337e66 100644 +--- a/fs/btrfs/space-info.c ++++ b/fs/btrfs/space-info.c +@@ -537,7 +537,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, + up_read(&info->groups_sem); + } + +-static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, ++static inline u64 
calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
+ u64 to_reclaim)
+ {
+ u64 bytes;
+@@ -550,6 +550,18 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+ return nr;
+ }
+
++static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info,
++ u64 to_reclaim)
++{
++ const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1);
++ u64 nr;
++
++ nr = div64_u64(to_reclaim, bytes);
++ if (!nr)
++ nr = 1;
++ return nr;
++}
++
+ #define EXTENT_SIZE_PER_ITEM SZ_256K
+
+ /*
+@@ -727,7 +739,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
+ break;
+ }
+ if (state == FLUSH_DELAYED_REFS_NR)
+- nr = calc_reclaim_items_nr(fs_info, num_bytes);
++ nr = calc_delayed_refs_nr(fs_info, num_bytes);
+ else
+ nr = 0;
+ btrfs_run_delayed_refs(trans, nr);
+@@ -1599,11 +1611,22 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
+ struct reserve_ticket ticket;
+ u64 start_ns = 0;
+ u64 used;
+- int ret = 0;
++ int ret = -ENOSPC;
+ bool pending_tickets;
+
+ ASSERT(orig_bytes);
+- ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
++ /*
++ * If we have a transaction handle (current->journal_info != NULL), then
++ * the flush method can be neither BTRFS_RESERVE_FLUSH_ALL* nor
++ * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
++ * flushing methods can trigger transaction commits.
++ */
++ if (current->journal_info) {
++ /* One assert per line for easier debugging. */
++ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
++ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
++ ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
++ }
+
+ if (flush == BTRFS_RESERVE_FLUSH_DATA)
+ async_work = &fs_info->async_data_reclaim_work;
+@@ -1611,7 +1634,6 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
+ async_work = &fs_info->async_reclaim_work;
+
+ spin_lock(&space_info->lock);
+- ret = -ENOSPC;
+ used = btrfs_space_info_used(space_info, true);
+
+ /*
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index 2033b71b18ce..0bb9d14e60a8 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -27,6 +27,7 @@ enum btrfs_reserve_flush_enum {
+ * - Running delayed refs
+ * - Running delalloc and waiting for ordered extents
+ * - Allocating a new chunk
++ * - Committing transaction
+ */
+ BTRFS_RESERVE_FLUSH_EVICT,
+
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index b8d5b1fa9a03..8b6a99b8d7f6 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -601,15 +601,16 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ /*
+ * We want to reserve all the bytes we may need all at once, so
+ * we only do 1 enospc flushing cycle per transaction start. We
+- * accomplish this by simply assuming we'll do 2 x num_items
+- * worth of delayed refs updates in this trans handle, and
+- * refill that amount for whatever is missing in the reserve.
++ * accomplish this by simply assuming we'll do num_items worth
++ * of delayed refs updates in this trans handle, and refill that
++ * amount for whatever is missing in the reserve.
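++ * For example, starting a transaction with num_items = 5 reserves the
++ * insert size for 5 items plus btrfs_calc_delayed_ref_bytes(fs_info, 5)
++ * when flushing is allowed and the delayed refs rsv is not already full.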
+ */ + num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); + if (flush == BTRFS_RESERVE_FLUSH_ALL && +- btrfs_block_rsv_full(delayed_refs_rsv) == 0) { +- delayed_refs_bytes = num_bytes; +- num_bytes <<= 1; ++ !btrfs_block_rsv_full(delayed_refs_rsv)) { ++ delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, ++ num_items); ++ num_bytes += delayed_refs_bytes; + } + + /* +@@ -942,16 +943,6 @@ void btrfs_throttle(struct btrfs_fs_info *fs_info) + wait_current_trans(fs_info); + } + +-static bool should_end_transaction(struct btrfs_trans_handle *trans) +-{ +- struct btrfs_fs_info *fs_info = trans->fs_info; +- +- if (btrfs_check_space_for_delayed_refs(fs_info)) +- return true; +- +- return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 50); +-} +- + bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) + { + struct btrfs_transaction *cur_trans = trans->transaction; +@@ -960,7 +951,10 @@ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) + test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) + return true; + +- return should_end_transaction(trans); ++ if (btrfs_check_space_for_delayed_refs(trans->fs_info)) ++ return true; ++ ++ return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50); + } + + static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans) +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 200cea6e49e5..df3d0753618f 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -3648,6 +3648,9 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans, + ret = BTRFS_LOG_FORCE_COMMIT; + else + inode->last_dir_index_offset = last_index; ++ ++ if (btrfs_get_first_dir_index_to_log(inode) == 0) ++ btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset); + out: + kfree(ins_data); + +@@ -5406,6 +5409,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + LIST_HEAD(dir_list); + struct btrfs_dir_list *dir_elem; + u64 ino = btrfs_ino(start_inode); ++ struct btrfs_inode *curr_inode = start_inode; + int ret = 0; + + /* +@@ -5420,43 +5424,38 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + if (!path) + return -ENOMEM; + ++ ihold(&curr_inode->vfs_inode); ++ + while (true) { +- struct extent_buffer *leaf; +- struct btrfs_key min_key; ++ struct inode *vfs_inode; ++ struct btrfs_key key; ++ struct btrfs_key found_key; ++ u64 next_index; + bool continue_curr_inode = true; +- int nritems; +- int i; ++ int iter_ret; + +- min_key.objectid = ino; +- min_key.type = BTRFS_DIR_INDEX_KEY; +- min_key.offset = 0; ++ key.objectid = ino; ++ key.type = BTRFS_DIR_INDEX_KEY; ++ key.offset = btrfs_get_first_dir_index_to_log(curr_inode); ++ next_index = key.offset; + again: +- btrfs_release_path(path); +- ret = btrfs_search_forward(root, &min_key, path, trans->transid); +- if (ret < 0) { +- break; +- } else if (ret > 0) { +- ret = 0; +- goto next; +- } +- +- leaf = path->nodes[0]; +- nritems = btrfs_header_nritems(leaf); +- for (i = path->slots[0]; i < nritems; i++) { ++ btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) { ++ struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_dir_item *di; + struct btrfs_key di_key; + struct inode *di_inode; + int log_mode = LOG_INODE_EXISTS; + int type; + +- btrfs_item_key_to_cpu(leaf, &min_key, i); +- if (min_key.objectid != ino || +- min_key.type != BTRFS_DIR_INDEX_KEY) { ++ if (found_key.objectid != ino || ++ found_key.type != BTRFS_DIR_INDEX_KEY) { + continue_curr_inode = false; + break; 
+ } + +- di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); ++ next_index = found_key.offset + 1; ++ ++ di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); + type = btrfs_dir_ftype(leaf, di); + if (btrfs_dir_transid(leaf, di) < trans->transid) + continue; +@@ -5496,12 +5495,24 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, break; -diff --git a/mm/mprotect.c b/mm/mprotect.c -index 13e84d8c0797..36351a00c0e8 100644 ---- a/mm/mprotect.c -+++ b/mm/mprotect.c -@@ -838,7 +838,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, + } + +- if (continue_curr_inode && min_key.offset < (u64)-1) { +- min_key.offset++; ++ btrfs_release_path(path); ++ ++ if (iter_ret < 0) { ++ ret = iter_ret; ++ goto out; ++ } else if (iter_ret > 0) { ++ continue_curr_inode = false; ++ } else { ++ key = found_key; ++ } ++ ++ if (continue_curr_inode && key.offset < (u64)-1) { ++ key.offset++; + goto again; + } + +-next: ++ btrfs_set_first_dir_index_to_log(curr_inode, next_index); ++ + if (list_empty(&dir_list)) + break; + +@@ -5509,9 +5520,22 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + ino = dir_elem->ino; + list_del(&dir_elem->list); + kfree(dir_elem); ++ ++ btrfs_add_delayed_iput(curr_inode); ++ curr_inode = NULL; ++ ++ vfs_inode = btrfs_iget(fs_info->sb, ino, root); ++ if (IS_ERR(vfs_inode)) { ++ ret = PTR_ERR(vfs_inode); ++ break; ++ } ++ curr_inode = BTRFS_I(vfs_inode); + } + out: + btrfs_free_path(path); ++ if (curr_inode) ++ btrfs_add_delayed_iput(curr_inode); ++ + if (ret) { + struct btrfs_dir_list *next; + +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index c6d592870400..236b6fdb9e92 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -5438,7 +5438,7 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, + } + write_unlock(&em_tree->lock); + +- block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); ++ block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); + if (IS_ERR(block_group)) + goto error_del_extent; + +-- +2.40.0 + +From f20814462752f28649096c4de6604e56ca1091dc Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Mon, 17 Apr 2023 18:32:06 +0200 +Subject: [PATCH 06/13] Implement amd-pstate guided driver + +Signed-off-by: Peter Jung +--- + .../admin-guide/kernel-parameters.txt | 40 ++-- + Documentation/admin-guide/pm/amd-pstate.rst | 31 ++- + drivers/acpi/cppc_acpi.c | 121 ++++++++++- + drivers/cpufreq/amd-pstate.c | 199 ++++++++++++------ + include/acpi/cppc_acpi.h | 11 + + include/linux/amd-pstate.h | 2 + + 6 files changed, 312 insertions(+), 92 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 4f6761a93715..bf2a402af231 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -339,6 +339,29 @@ + This mode requires kvm-amd.avic=1. + (Default when IOMMU HW support is present.) + ++ amd_pstate= [X86] ++ disable ++ Do not enable amd_pstate as the default ++ scaling driver for the supported processors ++ passive ++ Use amd_pstate with passive mode as a scaling driver. ++ In this mode autonomous selection is disabled. ++ Driver requests a desired performance level and platform ++ tries to match the same performance level if it is ++ satisfied by guaranteed performance level. 
++ active ++ Use amd_pstate_epp driver instance as the scaling driver, ++ driver provides a hint to the hardware if software wants ++ to bias toward performance (0x0) or energy efficiency (0xff) ++ to the CPPC firmware. then CPPC power algorithm will ++ calculate the runtime workload and adjust the realtime cores ++ frequency. ++ guided ++ Activate guided autonomous mode. Driver requests minimum and ++ maximum performance level and the platform autonomously ++ selects a performance level in this range and appropriate ++ to the current workload. ++ + amijoy.map= [HW,JOY] Amiga joystick support + Map of devices attached to JOY0DAT and JOY1DAT + Format: , +@@ -7068,20 +7091,3 @@ + xmon commands. + off xmon is disabled. + +- amd_pstate= [X86] +- disable +- Do not enable amd_pstate as the default +- scaling driver for the supported processors +- passive +- Use amd_pstate as a scaling driver, driver requests a +- desired performance on this abstract scale and the power +- management firmware translates the requests into actual +- hardware states (core frequency, data fabric and memory +- clocks etc.) +- active +- Use amd_pstate_epp driver instance as the scaling driver, +- driver provides a hint to the hardware if software wants +- to bias toward performance (0x0) or energy efficiency (0xff) +- to the CPPC firmware. then CPPC power algorithm will +- calculate the runtime workload and adjust the realtime cores +- frequency. +diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst +index 6e5298b521b1..1cf40f69278c 100644 +--- a/Documentation/admin-guide/pm/amd-pstate.rst ++++ b/Documentation/admin-guide/pm/amd-pstate.rst +@@ -303,13 +303,18 @@ efficiency frequency management method on AMD processors. + AMD Pstate Driver Operation Modes + ================================= + +-``amd_pstate`` CPPC has two operation modes: CPPC Autonomous(active) mode and +-CPPC non-autonomous(passive) mode. +-active mode and passive mode can be chosen by different kernel parameters. +-When in Autonomous mode, CPPC ignores requests done in the Desired Performance +-Target register and takes into account only the values set to the Minimum requested +-performance, Maximum requested performance, and Energy Performance Preference +-registers. When Autonomous is disabled, it only considers the Desired Performance Target. ++``amd_pstate`` CPPC has 3 operation modes: autonomous (active) mode, ++non-autonomous (passive) mode and guided autonomous (guided) mode. ++Active/passive/guided mode can be chosen by different kernel parameters. ++ ++- In autonomous mode, platform ignores the desired performance level request ++ and takes into account only the values set to the minimum, maximum and energy ++ performance preference registers. ++- In non-autonomous mode, platform gets desired performance level ++ from OS directly through Desired Performance Register. ++- In guided-autonomous mode, platform sets operating performance level ++ autonomously according to the current workload and within the limits set by ++ OS through min and max performance registers. + + Active Mode + ------------ +@@ -338,6 +343,15 @@ to the Performance Reduction Tolerance register. Above the nominal performance l + processor must provide at least nominal performance requested and go higher if current + operating conditions allow. + ++Guided Mode ++----------- ++ ++``amd_pstate=guided`` ++ ++If ``amd_pstate=guided`` is passed to kernel command line option then this mode ++is activated. 
In this mode, the driver requests minimum and maximum performance
++level and the platform autonomously selects a performance level in this range
++that is appropriate to the current workload.
+
+ User Space Interface in ``sysfs`` - General
+ ===========================================
+@@ -358,6 +372,9 @@ control its functionality at the system level. They are located in the
+ "passive"
+ The driver is functional and in the ``passive mode``
+
++ "guided"
++ The driver is functional and in the ``guided mode``
++
+ "disable"
+ The driver is unregistered and not functional now.
+
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index c51d3ccb4cca..02a4bfb54967 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -1433,6 +1433,103 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
+
++/*
++ * cppc_get_auto_sel_caps - Read autonomous selection register.
++ * @cpunum : CPU from which to read register.
++ * @perf_caps : struct where autonomous selection register value is updated.
++ */
++int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
++{
++ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
++ struct cpc_register_resource *auto_sel_reg;
++ u64 auto_sel;
++
++ if (!cpc_desc) {
++ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
++ return -ENODEV;
++ }
++
++ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
++
++ if (!CPC_SUPPORTED(auto_sel_reg))
++ pr_warn_once("Autonomous mode is not supported!\n");
++
++ if (CPC_IN_PCC(auto_sel_reg)) {
++ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
++ struct cppc_pcc_data *pcc_ss_data = NULL;
++ int ret = 0;
++
++ if (pcc_ss_id < 0)
++ return -ENODEV;
++
++ pcc_ss_data = pcc_data[pcc_ss_id];
++
++ down_write(&pcc_ss_data->pcc_lock);
++
++ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
++ cpc_read(cpunum, auto_sel_reg, &auto_sel);
++ perf_caps->auto_sel = (bool)auto_sel;
++ } else {
++ ret = -EIO;
++ }
++
++ up_write(&pcc_ss_data->pcc_lock);
++
++ return ret;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
++
++/*
++ * cppc_set_auto_sel - Write autonomous selection register.
++ * @cpu : CPU to which to write register.
++ * @enable : the desired value of the autonomous selection register to be updated.
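++ *
++ * Return: 0 on success, a negative errno on failure.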
++ */ ++int cppc_set_auto_sel(int cpu, bool enable) ++{ ++ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); ++ struct cpc_register_resource *auto_sel_reg; ++ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); ++ struct cppc_pcc_data *pcc_ss_data = NULL; ++ int ret = -EINVAL; ++ ++ if (!cpc_desc) { ++ pr_debug("No CPC descriptor for CPU:%d\n", cpu); ++ return -ENODEV; ++ } ++ ++ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; ++ ++ if (CPC_IN_PCC(auto_sel_reg)) { ++ if (pcc_ss_id < 0) { ++ pr_debug("Invalid pcc_ss_id\n"); ++ return -ENODEV; ++ } ++ ++ if (CPC_SUPPORTED(auto_sel_reg)) { ++ ret = cpc_write(cpu, auto_sel_reg, enable); ++ if (ret) ++ return ret; ++ } ++ ++ pcc_ss_data = pcc_data[pcc_ss_id]; ++ ++ down_write(&pcc_ss_data->pcc_lock); ++ /* after writing CPC, transfer the ownership of PCC to platform */ ++ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); ++ up_write(&pcc_ss_data->pcc_lock); ++ } else { ++ ret = -ENOTSUPP; ++ pr_debug("_CPC in PCC is not supported\n"); ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(cppc_set_auto_sel); ++ ++ + /** + * cppc_set_enable - Set to enable CPPC on the processor by writing the + * Continuous Performance Control package EnableRegister field. +@@ -1488,7 +1585,7 @@ EXPORT_SYMBOL_GPL(cppc_set_enable); + int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + { + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); +- struct cpc_register_resource *desired_reg; ++ struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg; + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); + struct cppc_pcc_data *pcc_ss_data = NULL; + int ret = 0; +@@ -1499,6 +1596,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + } + + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; ++ min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF]; ++ max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF]; + + /* + * This is Phase-I where we want to write to CPC registers +@@ -1507,7 +1606,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + * Since read_lock can be acquired by multiple CPUs simultaneously we + * achieve that goal here + */ +- if (CPC_IN_PCC(desired_reg)) { ++ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { + if (pcc_ss_id < 0) { + pr_debug("Invalid pcc_ss_id\n"); + return -ENODEV; +@@ -1530,13 +1629,19 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + cpc_desc->write_cmd_status = 0; + } + +- /* +- * Skip writing MIN/MAX until Linux knows how to come up with +- * useful values. +- */ + cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); + +- if (CPC_IN_PCC(desired_reg)) ++ /** ++ * Only write if min_perf and max_perf not zero. Some drivers pass zero ++ * value to min and max perf, but they don't mean to set the zero value, ++ * they just don't want to write to those registers. 
++ */ ++ if (perf_ctrls->min_perf) ++ cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf); ++ if (perf_ctrls->max_perf) ++ cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf); ++ ++ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) + up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ + /* + * This is Phase-II where we transfer the ownership of PCC to Platform +@@ -1584,7 +1689,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + * case during a CMD_READ and if there are pending writes it delivers + * the write command before servicing the read command + */ +- if (CPC_IN_PCC(desired_reg)) { ++ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { + if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ + /* Update only if there are pending write commands */ + if (pcc_ss_data->pending_pcc_write_cmd) +diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c +index 8dd46fad151e..5a3d4aa0f45a 100644 +--- a/drivers/cpufreq/amd-pstate.c ++++ b/drivers/cpufreq/amd-pstate.c +@@ -63,7 +63,6 @@ static struct cpufreq_driver *current_pstate_driver; + static struct cpufreq_driver amd_pstate_driver; + static struct cpufreq_driver amd_pstate_epp_driver; + static int cppc_state = AMD_PSTATE_DISABLE; +-struct kobject *amd_pstate_kobj; + + /* + * AMD Energy Preference Performance (EPP) +@@ -106,6 +105,8 @@ static unsigned int epp_values[] = { + [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE, + }; + ++typedef int (*cppc_mode_transition_fn)(int); ++ + static inline int get_mode_idx_from_str(const char *str, size_t size) + { + int i; +@@ -308,7 +309,22 @@ static int cppc_init_perf(struct amd_cpudata *cpudata) + cppc_perf.lowest_nonlinear_perf); + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); + +- return 0; ++ if (cppc_state == AMD_PSTATE_ACTIVE) ++ return 0; ++ ++ ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf); ++ if (ret) { ++ pr_warn("failed to get auto_sel, ret: %d\n", ret); ++ return 0; ++ } ++ ++ ret = cppc_set_auto_sel(cpudata->cpu, ++ (cppc_state == AMD_PSTATE_PASSIVE) ? 
0 : 1); ++ ++ if (ret) ++ pr_warn("failed to set auto_sel, ret: %d\n", ret); ++ ++ return ret; + } + + DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); +@@ -385,12 +401,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) + } + + static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, +- u32 des_perf, u32 max_perf, bool fast_switch) ++ u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) + { + u64 prev = READ_ONCE(cpudata->cppc_req_cached); + u64 value = prev; + + des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); ++ ++ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { ++ min_perf = des_perf; ++ des_perf = 0; ++ } ++ + value &= ~AMD_CPPC_MIN_PERF(~0L); + value |= AMD_CPPC_MIN_PERF(min_perf); + +@@ -445,7 +467,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy, + + cpufreq_freq_transition_begin(policy, &freqs); + amd_pstate_update(cpudata, min_perf, des_perf, +- max_perf, false); ++ max_perf, false, policy->governor->flags); + cpufreq_freq_transition_end(policy, &freqs, false); + + return 0; +@@ -479,7 +501,8 @@ static void amd_pstate_adjust_perf(unsigned int cpu, + if (max_perf < min_perf) + max_perf = min_perf; + +- amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); ++ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, ++ policy->governor->flags); + cpufreq_cpu_put(policy); + } + +@@ -816,6 +839,98 @@ static ssize_t show_energy_performance_preference( + return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]); + } + ++static void amd_pstate_driver_cleanup(void) ++{ ++ amd_pstate_enable(false); ++ cppc_state = AMD_PSTATE_DISABLE; ++ current_pstate_driver = NULL; ++} ++ ++static int amd_pstate_register_driver(int mode) ++{ ++ int ret; ++ ++ if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED) ++ current_pstate_driver = &amd_pstate_driver; ++ else if (mode == AMD_PSTATE_ACTIVE) ++ current_pstate_driver = &amd_pstate_epp_driver; ++ else ++ return -EINVAL; ++ ++ cppc_state = mode; ++ ret = cpufreq_register_driver(current_pstate_driver); ++ if (ret) { ++ amd_pstate_driver_cleanup(); ++ return ret; ++ } ++ return 0; ++} ++ ++static int amd_pstate_unregister_driver(int dummy) ++{ ++ cpufreq_unregister_driver(current_pstate_driver); ++ amd_pstate_driver_cleanup(); ++ return 0; ++} ++ ++static int amd_pstate_change_mode_without_dvr_change(int mode) ++{ ++ int cpu = 0; ++ ++ cppc_state = mode; ++ ++ if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) ++ return 0; ++ ++ for_each_present_cpu(cpu) { ++ cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 
0 : 1); ++ } ++ ++ return 0; ++} ++ ++static int amd_pstate_change_driver_mode(int mode) ++{ ++ int ret; ++ ++ ret = amd_pstate_unregister_driver(0); ++ if (ret) ++ return ret; ++ ++ ret = amd_pstate_register_driver(mode); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = { ++ [AMD_PSTATE_DISABLE] = { ++ [AMD_PSTATE_DISABLE] = NULL, ++ [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver, ++ [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver, ++ [AMD_PSTATE_GUIDED] = amd_pstate_register_driver, ++ }, ++ [AMD_PSTATE_PASSIVE] = { ++ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, ++ [AMD_PSTATE_PASSIVE] = NULL, ++ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, ++ [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change, ++ }, ++ [AMD_PSTATE_ACTIVE] = { ++ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, ++ [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode, ++ [AMD_PSTATE_ACTIVE] = NULL, ++ [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode, ++ }, ++ [AMD_PSTATE_GUIDED] = { ++ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, ++ [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change, ++ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, ++ [AMD_PSTATE_GUIDED] = NULL, ++ }, ++}; ++ + static ssize_t amd_pstate_show_status(char *buf) + { + if (!current_pstate_driver) +@@ -824,55 +939,22 @@ static ssize_t amd_pstate_show_status(char *buf) + return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]); + } + +-static void amd_pstate_driver_cleanup(void) +-{ +- current_pstate_driver = NULL; +-} +- + static int amd_pstate_update_status(const char *buf, size_t size) + { +- int ret = 0; + int mode_idx; + +- if (size > 7 || size < 6) ++ if (size > strlen("passive") || size < strlen("active")) + return -EINVAL; ++ + mode_idx = get_mode_idx_from_str(buf, size); + +- switch(mode_idx) { +- case AMD_PSTATE_DISABLE: +- if (current_pstate_driver) { +- cpufreq_unregister_driver(current_pstate_driver); +- amd_pstate_driver_cleanup(); +- } +- break; +- case AMD_PSTATE_PASSIVE: +- if (current_pstate_driver) { +- if (current_pstate_driver == &amd_pstate_driver) +- return 0; +- cpufreq_unregister_driver(current_pstate_driver); +- } ++ if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX) ++ return -EINVAL; + +- current_pstate_driver = &amd_pstate_driver; +- cppc_state = AMD_PSTATE_PASSIVE; +- ret = cpufreq_register_driver(current_pstate_driver); +- break; +- case AMD_PSTATE_ACTIVE: +- if (current_pstate_driver) { +- if (current_pstate_driver == &amd_pstate_epp_driver) +- return 0; +- cpufreq_unregister_driver(current_pstate_driver); +- } ++ if (mode_state_machine[cppc_state][mode_idx]) ++ return mode_state_machine[cppc_state][mode_idx](mode_idx); + +- current_pstate_driver = &amd_pstate_epp_driver; +- cppc_state = AMD_PSTATE_ACTIVE; +- ret = cpufreq_register_driver(current_pstate_driver); +- break; +- default: +- ret = -EINVAL; +- break; +- } +- +- return ret; ++ return 0; + } + + static ssize_t show_status(struct kobject *kobj, +@@ -930,6 +1012,7 @@ static struct attribute *pstate_global_attributes[] = { + }; + + static const struct attribute_group amd_pstate_global_attr_group = { ++ .name = "amd_pstate", + .attrs = pstate_global_attributes, + }; + +@@ -1251,6 +1334,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = { + + static int __init amd_pstate_init(void) + { ++ struct device *dev_root; + int ret; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) +@@ -1277,7 +1361,7 @@ static int 
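The mode_state_machine table above keeps the sysfs store path trivial: it looks up mode_state_machine[cppc_state][mode_idx] and invokes the handler when one exists (NULL marks a same-mode no-op), and the passive<->guided cells deliberately avoid re-registering the driver, only toggling autonomous selection. With the attribute group named "amd_pstate" and attached to the CPU subsystem root, the control file should appear as /sys/devices/system/cpu/amd_pstate/status; a minimal user-space sketch (illustrative only, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *mode = "guided";	/* or "disable", "passive", "active" */
	int fd = open("/sys/devices/system/cpu/amd_pstate/status", O_WRONLY);

	if (fd < 0 || write(fd, mode, strlen(mode)) < 0)
		perror("amd_pstate status");
	if (fd >= 0)
		close(fd);
	return 0;
}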
__init amd_pstate_init(void) + /* capability check */ + if (boot_cpu_has(X86_FEATURE_CPPC)) { + pr_debug("AMD CPPC MSR based functionality is supported\n"); +- if (cppc_state == AMD_PSTATE_PASSIVE) ++ if (cppc_state != AMD_PSTATE_ACTIVE) + current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; + } else { + pr_debug("AMD CPPC shared memory based functionality is supported\n"); +@@ -1297,24 +1381,19 @@ static int __init amd_pstate_init(void) + if (ret) + pr_err("failed to register with return %d\n", ret); + +- amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj); +- if (!amd_pstate_kobj) { +- ret = -EINVAL; +- pr_err("global sysfs registration failed.\n"); +- goto kobject_free; +- } +- +- ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group); +- if (ret) { +- pr_err("sysfs attribute export failed with error %d.\n", ret); +- goto global_attr_free; ++ dev_root = bus_get_dev_root(&cpu_subsys); ++ if (dev_root) { ++ ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group); ++ put_device(dev_root); ++ if (ret) { ++ pr_err("sysfs attribute export failed with error %d.\n", ret); ++ goto global_attr_free; ++ } } - tlb_finish_mmu(&tlb); -- if (vma_iter_end(&vmi) < end) -+ if (!error && vma_iter_end(&vmi) < end) - error = -ENOMEM; + return ret; + + global_attr_free: +- kobject_put(amd_pstate_kobj); +-kobject_free: + cpufreq_unregister_driver(current_pstate_driver); + return ret; + } +@@ -1339,7 +1418,7 @@ static int __init amd_pstate_param(char *str) + if (cppc_state == AMD_PSTATE_ACTIVE) + current_pstate_driver = &amd_pstate_epp_driver; + +- if (cppc_state == AMD_PSTATE_PASSIVE) ++ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) + current_pstate_driver = &amd_pstate_driver; + + return 0; +diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h +index 6b487a5bd638..6126c977ece0 100644 +--- a/include/acpi/cppc_acpi.h ++++ b/include/acpi/cppc_acpi.h +@@ -109,6 +109,7 @@ struct cppc_perf_caps { + u32 lowest_freq; + u32 nominal_freq; + u32 energy_perf; ++ bool auto_sel; + }; + + struct cppc_perf_ctrls { +@@ -153,6 +154,8 @@ extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); + extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); + extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf); + extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable); ++extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps); ++extern int cppc_set_auto_sel(int cpu, bool enable); + #else /* !CONFIG_ACPI_CPPC_LIB */ + static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf) + { +@@ -214,6 +217,14 @@ static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf) + { + return -ENOTSUPP; + } ++static inline int cppc_set_auto_sel(int cpu, bool enable) ++{ ++ return -ENOTSUPP; ++} ++static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps) ++{ ++ return -ENOTSUPP; ++} + #endif /* !CONFIG_ACPI_CPPC_LIB */ + + #endif /* _CPPC_ACPI_H*/ +diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h +index f5f22418e64b..c10ebf8c42e6 100644 +--- a/include/linux/amd-pstate.h ++++ b/include/linux/amd-pstate.h +@@ -97,6 +97,7 @@ enum amd_pstate_mode { + AMD_PSTATE_DISABLE = 0, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, ++ AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, + }; - out: -diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c -index 8c69f0c95a8e..98aea5485aae 100644 ---- a/net/bridge/br_nf_core.c -+++ 
b/net/bridge/br_nf_core.c -@@ -73,7 +73,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) - { - struct rtable *rt = &br->fake_rtable; +@@ -104,6 +105,7 @@ static const char * const amd_pstate_mode_string[] = { + [AMD_PSTATE_DISABLE] = "disable", + [AMD_PSTATE_PASSIVE] = "passive", + [AMD_PSTATE_ACTIVE] = "active", ++ [AMD_PSTATE_GUIDED] = "guided", + NULL, + }; + #endif /* _LINUX_AMD_PSTATE_H */ +-- +2.40.0 + +From a4c03062611ae066405d1ec08eed628e8d1640f2 Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Mon, 17 Apr 2023 18:28:52 +0200 +Subject: [PATCH 07/13] ksm + +Signed-off-by: Peter Jung +--- + arch/alpha/kernel/syscalls/syscall.tbl | 1 + + arch/arm/tools/syscall.tbl | 1 + + arch/arm64/include/asm/unistd.h | 2 +- + arch/arm64/include/asm/unistd32.h | 2 + + arch/ia64/kernel/syscalls/syscall.tbl | 1 + + arch/m68k/kernel/syscalls/syscall.tbl | 1 + + arch/microblaze/kernel/syscalls/syscall.tbl | 1 + + arch/mips/kernel/syscalls/syscall_n32.tbl | 1 + + arch/mips/kernel/syscalls/syscall_n64.tbl | 1 + + arch/mips/kernel/syscalls/syscall_o32.tbl | 1 + + arch/parisc/kernel/syscalls/syscall.tbl | 1 + + arch/powerpc/kernel/syscalls/syscall.tbl | 1 + + arch/s390/kernel/syscalls/syscall.tbl | 1 + + arch/sh/kernel/syscalls/syscall.tbl | 1 + + arch/sparc/kernel/syscalls/syscall.tbl | 1 + + arch/x86/entry/syscalls/syscall_32.tbl | 1 + + arch/x86/entry/syscalls/syscall_64.tbl | 1 + + arch/xtensa/kernel/syscalls/syscall.tbl | 1 + + include/linux/ksm.h | 4 + + include/linux/syscalls.h | 1 + + include/uapi/asm-generic/unistd.h | 5 +- + kernel/sys_ni.c | 1 + + mm/ksm.c | 82 +++++++++----- + mm/madvise.c | 117 ++++++++++++++++++++ + 24 files changed, 199 insertions(+), 31 deletions(-) + +diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl +index 8ebacf37a8cf..c9d25f85d86d 100644 +--- a/arch/alpha/kernel/syscalls/syscall.tbl ++++ b/arch/alpha/kernel/syscalls/syscall.tbl +@@ -490,3 +490,4 @@ + 558 common process_mrelease sys_process_mrelease + 559 common futex_waitv sys_futex_waitv + 560 common set_mempolicy_home_node sys_ni_syscall ++561 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl +index ac964612d8b0..90933eabe115 100644 +--- a/arch/arm/tools/syscall.tbl ++++ b/arch/arm/tools/syscall.tbl +@@ -464,3 +464,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h +index 037feba03a51..64a514f90131 100644 +--- a/arch/arm64/include/asm/unistd.h ++++ b/arch/arm64/include/asm/unistd.h +@@ -39,7 +39,7 @@ + #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) + #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -- atomic_set(&rt->dst.__refcnt, 1); -+ rcuref_init(&rt->dst.__rcuref, 1); - rt->dst.dev = br->dev; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; -diff --git a/net/core/dst.c b/net/core/dst.c -index 31c08a3386d3..3247e84045ca 100644 ---- a/net/core/dst.c -+++ b/net/core/dst.c -@@ -66,7 +66,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, - dst->tclassid = 0; +-#define __NR_compat_syscalls 451 ++#define __NR_compat_syscalls 452 #endif - dst->lwtstate = NULL; -- atomic_set(&dst->__refcnt, initial_ref); -+ rcuref_init(&dst->__rcuref, initial_ref); - dst->__use = 0; - dst->lastuse = jiffies; - dst->flags = 
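The conversions above swap the open-coded atomic __refcnt for the rcuref API: rcuref_init() seeds the count (as in the br_netfilter and dst_init() changes), and the matching rcuref_put() in the release paths returns true exactly once, for the thread that drops the final reference, so open-coded underflow checks become unnecessary. The generic pattern, as a hedged sketch on a made-up struct obj:

#include <linux/rcuref.h>
#include <linux/slab.h>

struct obj {
	rcuref_t ref;
	/* ... payload ... */
};

static void obj_put(struct obj *o)
{
	/* true only when the last reference is gone */
	if (o && rcuref_put(&o->ref))
		kfree(o);
}

Allocation pairs with rcuref_init(&o->ref, 1), mirroring the dst_init() change above.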
flags; -@@ -162,31 +162,15 @@ EXPORT_SYMBOL(dst_dev_put); - void dst_release(struct dst_entry *dst) - { -- if (dst) { -- int newrefcnt; -- -- newrefcnt = atomic_dec_return(&dst->__refcnt); -- if (WARN_ONCE(newrefcnt < 0, "dst_release underflow")) -- net_warn_ratelimited("%s: dst:%p refcnt:%d\n", -- __func__, dst, newrefcnt); -- if (!newrefcnt) -- call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); -- } -+ if (dst && rcuref_put(&dst->__rcuref)) -+ call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); - } - EXPORT_SYMBOL(dst_release); + #define __ARCH_WANT_SYS_CLONE +diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h +index 604a2053d006..91f2bb7199af 100644 +--- a/arch/arm64/include/asm/unistd32.h ++++ b/arch/arm64/include/asm/unistd32.h +@@ -907,6 +907,8 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) + __SYSCALL(__NR_futex_waitv, sys_futex_waitv) + #define __NR_set_mempolicy_home_node 450 + __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) ++#define __NR_pmadv_ksm 451 ++__SYSCALL(__NR_pmadv_ksm, sys_pmadv_ksm) + + /* + * Please add new compat syscalls above this comment and update +diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl +index 72c929d9902b..0d5b1d14b2b5 100644 +--- a/arch/ia64/kernel/syscalls/syscall.tbl ++++ b/arch/ia64/kernel/syscalls/syscall.tbl +@@ -371,3 +371,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl +index b1f3940bc298..5ccf925567da 100644 +--- a/arch/m68k/kernel/syscalls/syscall.tbl ++++ b/arch/m68k/kernel/syscalls/syscall.tbl +@@ -450,3 +450,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl +index 820145e47350..6b76208597f3 100644 +--- a/arch/microblaze/kernel/syscalls/syscall.tbl ++++ b/arch/microblaze/kernel/syscalls/syscall.tbl +@@ -456,3 +456,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl +index 253ff994ed2e..e4aeedb17c38 100644 +--- a/arch/mips/kernel/syscalls/syscall_n32.tbl ++++ b/arch/mips/kernel/syscalls/syscall_n32.tbl +@@ -389,3 +389,4 @@ + 448 n32 process_mrelease sys_process_mrelease + 449 n32 futex_waitv sys_futex_waitv + 450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node ++451 n32 pmadv_ksm sys_pmadv_ksm +diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl +index 3f1886ad9d80..fe88db51efa0 100644 +--- a/arch/mips/kernel/syscalls/syscall_n64.tbl ++++ b/arch/mips/kernel/syscalls/syscall_n64.tbl +@@ -365,3 +365,4 @@ + 448 n64 process_mrelease sys_process_mrelease + 449 n64 futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 n64 pmadv_ksm sys_pmadv_ksm +diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl +index 8f243e35a7b2..674cb940bd15 100644 +--- 
a/arch/mips/kernel/syscalls/syscall_o32.tbl ++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl +@@ -438,3 +438,4 @@ + 448 o32 process_mrelease sys_process_mrelease + 449 o32 futex_waitv sys_futex_waitv + 450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node ++451 o32 pmadv_ksm sys_pmadv_ksm +diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl +index 0e42fceb2d5e..5914aa460255 100644 +--- a/arch/parisc/kernel/syscalls/syscall.tbl ++++ b/arch/parisc/kernel/syscalls/syscall.tbl +@@ -448,3 +448,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl +index a0be127475b1..347894da4eb6 100644 +--- a/arch/powerpc/kernel/syscalls/syscall.tbl ++++ b/arch/powerpc/kernel/syscalls/syscall.tbl +@@ -537,3 +537,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl +index 799147658dee..1cd523748bd2 100644 +--- a/arch/s390/kernel/syscalls/syscall.tbl ++++ b/arch/s390/kernel/syscalls/syscall.tbl +@@ -453,3 +453,4 @@ + 448 common process_mrelease sys_process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm sys_pmadv_ksm +diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl +index 2de85c977f54..cfc75fa43eae 100644 +--- a/arch/sh/kernel/syscalls/syscall.tbl ++++ b/arch/sh/kernel/syscalls/syscall.tbl +@@ -453,3 +453,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl +index 4398cc6fb68d..d2c0a6426f6b 100644 +--- a/arch/sparc/kernel/syscalls/syscall.tbl ++++ b/arch/sparc/kernel/syscalls/syscall.tbl +@@ -496,3 +496,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl +index 320480a8db4f..331aaf1a782f 100644 +--- a/arch/x86/entry/syscalls/syscall_32.tbl ++++ b/arch/x86/entry/syscalls/syscall_32.tbl +@@ -455,3 +455,4 @@ + 448 i386 process_mrelease sys_process_mrelease + 449 i386 futex_waitv sys_futex_waitv + 450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node ++451 i386 pmadv_ksm sys_pmadv_ksm +diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl +index c84d12608cd2..14902db4c01f 100644 +--- a/arch/x86/entry/syscalls/syscall_64.tbl ++++ b/arch/x86/entry/syscalls/syscall_64.tbl +@@ -372,6 +372,7 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm - void dst_release_immediate(struct dst_entry *dst) - { -- if (dst) { -- int newrefcnt; -- -- 
newrefcnt = atomic_dec_return(&dst->__refcnt); -- if (WARN_ONCE(newrefcnt < 0, "dst_release_immediate underflow")) -- net_warn_ratelimited("%s: dst:%p refcnt:%d\n", -- __func__, dst, newrefcnt); -- if (!newrefcnt) -- dst_destroy(dst); -- } -+ if (dst && rcuref_put(&dst->__rcuref)) -+ dst_destroy(dst); - } - EXPORT_SYMBOL(dst_release_immediate); + # + # Due to a historical design error, certain syscalls are numbered differently +diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl +index 52c94ab5c205..1518e261d882 100644 +--- a/arch/xtensa/kernel/syscalls/syscall.tbl ++++ b/arch/xtensa/kernel/syscalls/syscall.tbl +@@ -421,3 +421,4 @@ + 448 common process_mrelease sys_process_mrelease + 449 common futex_waitv sys_futex_waitv + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node ++451 common pmadv_ksm sys_pmadv_ksm +diff --git a/include/linux/ksm.h b/include/linux/ksm.h +index 7e232ba59b86..57ed92987717 100644 +--- a/include/linux/ksm.h ++++ b/include/linux/ksm.h +@@ -16,6 +16,10 @@ + #include -diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c -index 6e44e92ebdf5..a6380dd47e7f 100644 ---- a/net/core/rtnetlink.c -+++ b/net/core/rtnetlink.c -@@ -840,7 +840,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, - if (dst) { - ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse); - ci.rta_used = dst->__use; -- ci.rta_clntref = atomic_read(&dst->__refcnt); -+ ci.rta_clntref = rcuref_read(&dst->__rcuref); - } - if (expires) { - unsigned long clock; -diff --git a/net/ipv4/route.c b/net/ipv4/route.c -index de6e3515ab4f..0f0cb629e0ad 100644 ---- a/net/ipv4/route.c -+++ b/net/ipv4/route.c -@@ -1508,20 +1508,20 @@ void rt_add_uncached_list(struct rtable *rt) - { - struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); + #ifdef CONFIG_KSM ++int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, ++ const vm_flags_t *vm_flags); ++int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, const vm_flags_t *vm_flags); + int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); + int __ksm_enter(struct mm_struct *mm); +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h +index 33a0ee3bcb2e..62f14e800839 100644 +--- a/include/linux/syscalls.h ++++ b/include/linux/syscalls.h +@@ -919,6 +919,7 @@ asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); + asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, + size_t vlen, int behavior, unsigned int flags); + asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); ++asmlinkage long sys_pmadv_ksm(int pidfd, int behavior, unsigned int flags); + asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); +diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h +index 45fa180cc56a..40f7e6d04af0 100644 +--- a/include/uapi/asm-generic/unistd.h ++++ b/include/uapi/asm-generic/unistd.h +@@ -886,8 +886,11 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv) + #define __NR_set_mempolicy_home_node 450 + __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) -- rt->rt_uncached_list = ul; -+ rt->dst.rt_uncached_list = ul; ++#define __NR_pmadv_ksm 451 ++__SYSCALL(__NR_pmadv_ksm, sys_pmadv_ksm) ++ + #undef __NR_syscalls +-#define __NR_syscalls 451 ++#define __NR_syscalls 
452 - spin_lock_bh(&ul->lock); -- list_add_tail(&rt->rt_uncached, &ul->head); -+ list_add_tail(&rt->dst.rt_uncached, &ul->head); - spin_unlock_bh(&ul->lock); + /* + * 32 bit systems traditionally used different +diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c +index 860b2dcf3ac4..810e1fcaff94 100644 +--- a/kernel/sys_ni.c ++++ b/kernel/sys_ni.c +@@ -292,6 +292,7 @@ COND_SYSCALL(mincore); + COND_SYSCALL(madvise); + COND_SYSCALL(process_madvise); + COND_SYSCALL(process_mrelease); ++COND_SYSCALL(pmadv_ksm); + COND_SYSCALL(remap_file_pages); + COND_SYSCALL(mbind); + COND_SYSCALL(get_mempolicy); +diff --git a/mm/ksm.c b/mm/ksm.c +index 82029f1d454b..0c206bd8007d 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -2576,52 +2576,76 @@ static int ksm_scan_thread(void *nothing) + return 0; } - void rt_del_uncached_list(struct rtable *rt) +-int ksm_madvise(struct vm_area_struct *vma, unsigned long start, +- unsigned long end, int advice, unsigned long *vm_flags) ++int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, ++ const vm_flags_t *vm_flags) { -- if (!list_empty(&rt->rt_uncached)) { -- struct uncached_list *ul = rt->rt_uncached_list; -+ if (!list_empty(&rt->dst.rt_uncached)) { -+ struct uncached_list *ul = rt->dst.rt_uncached_list; - - spin_lock_bh(&ul->lock); -- list_del_init(&rt->rt_uncached); -+ list_del_init(&rt->dst.rt_uncached); - spin_unlock_bh(&ul->lock); - } - } -@@ -1546,13 +1546,13 @@ void rt_flush_dev(struct net_device *dev) - continue; - - spin_lock_bh(&ul->lock); -- list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) { -+ list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) { - if (rt->dst.dev != dev) - continue; - rt->dst.dev = blackhole_netdev; - netdev_ref_replace(dev, blackhole_netdev, - &rt->dst.dev_tracker, GFP_ATOMIC); -- list_move(&rt->rt_uncached, &ul->quarantine); -+ list_move(&rt->dst.rt_uncached, &ul->quarantine); - } - spin_unlock_bh(&ul->lock); - } -@@ -1644,7 +1644,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, - rt->rt_uses_gateway = 0; - rt->rt_gw_family = 0; - rt->rt_gw4 = 0; -- INIT_LIST_HEAD(&rt->rt_uncached); -+ INIT_LIST_HEAD(&rt->dst.rt_uncached); +- struct mm_struct *mm = vma->vm_mm; + int err; - rt->dst.output = ip_output; - if (flags & RTCF_LOCAL) -@@ -1675,7 +1675,7 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) - new_rt->rt_gw4 = rt->rt_gw4; - else if (rt->rt_gw_family == AF_INET6) - new_rt->rt_gw6 = rt->rt_gw6; -- INIT_LIST_HEAD(&new_rt->rt_uncached); -+ INIT_LIST_HEAD(&new_rt->dst.rt_uncached); +- switch (advice) { +- case MADV_MERGEABLE: +- /* +- * Be somewhat over-protective for now! +- */ +- if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | +- VM_PFNMAP | VM_IO | VM_DONTEXPAND | +- VM_HUGETLB | VM_MIXEDMAP)) +- return 0; /* just ignore the advice */ ++ /* ++ * Be somewhat over-protective for now! 
++ */ ++ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | ++ VM_PFNMAP | VM_IO | VM_DONTEXPAND | ++ VM_HUGETLB | VM_MIXEDMAP)) ++ return 0; /* just ignore the advice */ - new_rt->dst.input = rt->dst.input; - new_rt->dst.output = rt->dst.output; -@@ -2859,7 +2859,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or - else if (rt->rt_gw_family == AF_INET6) - rt->rt_gw6 = ort->rt_gw6; +- if (vma_is_dax(vma)) +- return 0; ++ if (vma_is_dax(vma)) ++ return 0; -- INIT_LIST_HEAD(&rt->rt_uncached); -+ INIT_LIST_HEAD(&rt->dst.rt_uncached); - } + #ifdef VM_SAO + if (*vm_flags & VM_SAO) + return 0; + #endif + #ifdef VM_SPARC_ADI +- if (*vm_flags & VM_SPARC_ADI) +- return 0; ++ if (*vm_flags & VM_SPARC_ADI) ++ return 0; + #endif - dst_release(dst_orig); -diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c -index 3d0dfa6cf9f9..47861c8b7340 100644 ---- a/net/ipv4/xfrm4_policy.c -+++ b/net/ipv4/xfrm4_policy.c -@@ -91,7 +91,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, - xdst->u.rt.rt_gw6 = rt->rt_gw6; - xdst->u.rt.rt_pmtu = rt->rt_pmtu; - xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked; -- INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); -+ INIT_LIST_HEAD(&xdst->u.rt.dst.rt_uncached); - rt_add_uncached_list(&xdst->u.rt); +- if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { +- err = __ksm_enter(mm); +- if (err) +- return err; +- } ++ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { ++ err = __ksm_enter(mm); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, const vm_flags_t *vm_flags) ++{ ++ int err; ++ ++ if (!(*vm_flags & VM_MERGEABLE)) ++ return 0; /* just ignore the advice */ ++ ++ if (vma->anon_vma) { ++ err = unmerge_ksm_pages(vma, start, end); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++int ksm_madvise(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, int advice, unsigned long *vm_flags) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ int err; ++ ++ switch (advice) { ++ case MADV_MERGEABLE: ++ err = ksm_madvise_merge(mm, vma, vm_flags); ++ if (err) ++ return err; - return 0; -@@ -121,7 +121,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; + *vm_flags |= VM_MERGEABLE; + break; - dst_destroy_metrics_generic(dst); -- if (xdst->u.rt.rt_uncached_list) -+ if (xdst->u.rt.dst.rt_uncached_list) - rt_del_uncached_list(&xdst->u.rt); - xfrm_dst_destroy(xdst); + case MADV_UNMERGEABLE: +- if (!(*vm_flags & VM_MERGEABLE)) +- return 0; /* just ignore the advice */ +- +- if (vma->anon_vma) { +- err = unmerge_ksm_pages(vma, start, end); +- if (err) +- return err; +- } ++ err = ksm_madvise_unmerge(vma, start, end, vm_flags); ++ if (err) ++ return err; + + *vm_flags &= ~VM_MERGEABLE; + break; +diff --git a/mm/madvise.c b/mm/madvise.c +index 340125d08c03..36e756355f04 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -1522,3 +1522,120 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, + out: + return ret; } -diff --git a/net/ipv6/route.c b/net/ipv6/route.c -index 0fdb03df2287..b9d22a0a6c09 100644 ---- a/net/ipv6/route.c -+++ b/net/ipv6/route.c -@@ -139,20 +139,20 @@ void rt6_uncached_list_add(struct rt6_info *rt) - { - struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); ++ ++SYSCALL_DEFINE3(pmadv_ksm, int, pidfd, int, behaviour, unsigned int, flags) ++{ ++#ifdef CONFIG_KSM ++ ssize_t ret; ++ struct pid *pid; ++ struct 
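Hoisting the MADV_MERGEABLE/MADV_UNMERGEABLE bodies into ksm_madvise_merge() and ksm_madvise_unmerge() lets pmadv_ksm() reuse the exact same protection checks without round-tripping through madvise() advice codes. An in-kernel caller that holds mmap_lock for writing follows the same two-step pattern (illustrative only; example_make_mergeable() is not in the patch):

#include <linux/ksm.h>
#include <linux/mm.h>

static int example_make_mergeable(struct vm_area_struct *vma)
{
	int err = ksm_madvise_merge(vma->vm_mm, vma, &vma->vm_flags);

	if (!err)
		vm_flags_set(vma, VM_MERGEABLE);	/* flag only on success */
	return err;
}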
task_struct *task; ++ struct mm_struct *mm; ++ unsigned int f_flags; ++ struct vm_area_struct *vma; ++ struct vma_iterator vmi; ++ ++ if (flags != 0) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ switch (behaviour) { ++ case MADV_MERGEABLE: ++ case MADV_UNMERGEABLE: ++ break; ++ default: ++ ret = -EINVAL; ++ goto out; ++ break; ++ } ++ ++ pid = pidfd_get_pid(pidfd, &f_flags); ++ if (IS_ERR(pid)) { ++ ret = PTR_ERR(pid); ++ goto out; ++ } ++ ++ task = get_pid_task(pid, PIDTYPE_PID); ++ if (!task) { ++ ret = -ESRCH; ++ goto put_pid; ++ } ++ ++ /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); ++ if (IS_ERR_OR_NULL(mm)) { ++ ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; ++ goto release_task; ++ } ++ ++ /* Require CAP_SYS_NICE for influencing process performance. */ ++ if (!capable(CAP_SYS_NICE)) { ++ ret = -EPERM; ++ goto release_mm; ++ } ++ ++ if (mmap_write_lock_killable(mm)) { ++ ret = -EINTR; ++ goto release_mm; ++ } ++ ++ vma_iter_init(&vmi, mm, 0); ++ for_each_vma(vmi, vma) { ++ switch (behaviour) { ++ case MADV_MERGEABLE: ++ ret = ksm_madvise_merge(vma->vm_mm, vma, &vma->vm_flags); ++ if (!ret) ++ vm_flags_set(vma, VM_MERGEABLE); ++ break; ++ case MADV_UNMERGEABLE: ++ ret = ksm_madvise_unmerge(vma, vma->vm_start, vma->vm_end, &vma->vm_flags); ++ if (!ret) ++ vm_flags_clear(vma, VM_MERGEABLE); ++ break; ++ default: ++ /* look, ma, no brain */ ++ break; ++ } ++ if (ret) ++ break; ++ } ++ ++ mmap_write_unlock(mm); ++ ++release_mm: ++ mmput(mm); ++release_task: ++ put_task_struct(task); ++put_pid: ++ put_pid(pid); ++out: ++ return ret; ++#else /* CONFIG_KSM */ ++ return -ENOSYS; ++#endif /* CONFIG_KSM */ ++} ++ ++#ifdef CONFIG_KSM ++static ssize_t ksm_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sprintf(buf, "%u\n", __NR_pmadv_ksm); ++} ++static struct kobj_attribute pmadv_ksm_attr = __ATTR_RO(ksm); ++ ++static struct attribute *pmadv_sysfs_attrs[] = { ++ &pmadv_ksm_attr.attr, ++ NULL, ++}; ++ ++static const struct attribute_group pmadv_sysfs_attr_group = { ++ .attrs = pmadv_sysfs_attrs, ++ .name = "pmadv", ++}; ++ ++static int __init pmadv_sysfs_init(void) ++{ ++ return sysfs_create_group(kernel_kobj, &pmadv_sysfs_attr_group); ++} ++subsys_initcall(pmadv_sysfs_init); ++#endif /* CONFIG_KSM */ +-- +2.40.0 + +From 94175b03e3ecc1d0bb22dc4ecea1457bc64eb5cd Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:43:59 +0200 +Subject: [PATCH 08/13] maple-lru + +Signed-off-by: Peter Jung +--- + Documentation/mm/multigen_lru.rst | 44 ++++++++- + arch/s390/mm/hugetlbpage.c | 2 +- + arch/s390/mm/mmap.c | 2 +- + fs/hugetlbfs/inode.c | 2 +- + include/linux/mmzone.h | 10 +- + lib/maple_tree.c | 158 +++++++++++++----------------- + lib/test_maple_tree.c | 27 +++-- + mm/mmap.c | 57 +++++++++-- + mm/vmscan.c | 136 ++++++++++--------------- + tools/testing/radix-tree/maple.c | 24 +++++ + 10 files changed, 258 insertions(+), 204 deletions(-) + +diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst +index 5f1f6ecbb79b..52ed5092022f 100644 +--- a/Documentation/mm/multigen_lru.rst ++++ b/Documentation/mm/multigen_lru.rst +@@ -103,7 +103,8 @@ moving across tiers only involves atomic operations on + ``folio->flags`` and therefore has a negligible cost. A feedback loop + modeled after the PID controller monitors refaults over all the tiers + from anon and file types and decides which tiers from which types to +-evict or protect. ++evict or protect. 
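There is no libc wrapper for the new pmadv_ksm() system call; its number is 451 on the ABIs wired up above, and the sysfs group registered at the end of that patch also exports it at /sys/kernel/pmadv/ksm. A hedged user-space sketch (error handling trimmed; the caller needs CAP_SYS_NICE plus ptrace-read access to the target):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pmadv_ksm
#define __NR_pmadv_ksm 451	/* verify against /sys/kernel/pmadv/ksm */
#endif

int main(int argc, char **argv)
{
	pid_t pid;
	long pidfd;

	if (argc < 2)
		return 1;

	pid = atoi(argv[1]);
	pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0 || syscall(__NR_pmadv_ksm, (int)pidfd, MADV_MERGEABLE, 0))
		perror("pmadv_ksm");
	return 0;
}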
The desired effect is to balance refault percentages ++between anon and file types proportional to the swappiness level. -- rt->rt6i_uncached_list = ul; -+ rt->dst.rt_uncached_list = ul; + There are two conceptually independent procedures: the aging and the + eviction. They form a closed-loop system, i.e., the page reclaim. +@@ -156,6 +157,27 @@ This time-based approach has the following advantages: + and memory sizes. + 2. It is more reliable because it is directly wired to the OOM killer. - spin_lock_bh(&ul->lock); -- list_add_tail(&rt->rt6i_uncached, &ul->head); -+ list_add_tail(&rt->dst.rt_uncached, &ul->head); - spin_unlock_bh(&ul->lock); - } ++``mm_struct`` list ++------------------ ++An ``mm_struct`` list is maintained for each memcg, and an ++``mm_struct`` follows its owner task to the new memcg when this task ++is migrated. ++ ++A page table walker iterates ``lruvec_memcg()->mm_list`` and calls ++``walk_page_range()`` with each ``mm_struct`` on this list to scan ++PTEs. When multiple page table walkers iterate the same list, each of ++them gets a unique ``mm_struct``, and therefore they can run in ++parallel. ++ ++Page table walkers ignore any misplaced pages, e.g., if an ++``mm_struct`` was migrated, pages left in the previous memcg will be ++ignored when the current memcg is under reclaim. Similarly, page table ++walkers will ignore pages from nodes other than the one under reclaim. ++ ++This infrastructure also tracks the usage of ``mm_struct`` between ++context switches so that page table walkers can skip processes that ++have been sleeping since the last iteration. ++ + Rmap/PT walk feedback + --------------------- + Searching the rmap for PTEs mapping each page on an LRU list (to test +@@ -170,7 +192,7 @@ promotes hot pages. If the scan was done cacheline efficiently, it + adds the PMD entry pointing to the PTE table to the Bloom filter. This + forms a feedback loop between the eviction and the aging. - void rt6_uncached_list_del(struct rt6_info *rt) - { -- if (!list_empty(&rt->rt6i_uncached)) { -- struct uncached_list *ul = rt->rt6i_uncached_list; -+ if (!list_empty(&rt->dst.rt_uncached)) { -+ struct uncached_list *ul = rt->dst.rt_uncached_list; +-Bloom Filters ++Bloom filters + ------------- + Bloom filters are a space and memory efficient data structure for set + membership test, i.e., test if an element is not in the set or may be +@@ -186,6 +208,18 @@ is false positive, the cost is an additional scan of a range of PTEs, + which may yield hot pages anyway. Parameters of the filter itself can + control the false positive rate in the limit. - spin_lock_bh(&ul->lock); -- list_del_init(&rt->rt6i_uncached); -+ list_del_init(&rt->dst.rt_uncached); - spin_unlock_bh(&ul->lock); - } - } -@@ -169,7 +169,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) - continue; ++PID controller ++-------------- ++A feedback loop modeled after the Proportional-Integral-Derivative ++(PID) controller monitors refaults over anon and file types and ++decides which type to evict when both types are available from the ++same generation. ++ ++The PID controller uses generations rather than the wall clock as the ++time domain because a CPU can scan pages at different rates under ++varying memory pressure. It calculates a moving average for each new ++generation to avoid being permanently locked in a suboptimal state. ++ + Memcg LRU + --------- + An memcg LRU is a per-node LRU of memcgs. 
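The Bloom filter description above is easier to follow with the two operations written out. A reduced sketch with two hash probes into one bitmap (the in-tree filter in mm/vmscan.c differs in its exact hashing and sizing):

#include <linux/bitops.h>
#include <linux/hash.h>

#define EXAMPLE_FILTER_SHIFT	15	/* bitmap of 2^15 bits */

static void example_filter_add(unsigned long *filter, unsigned long item)
{
	__set_bit(hash_long(item, EXAMPLE_FILTER_SHIFT), filter);
	__set_bit(hash_long(~item, EXAMPLE_FILTER_SHIFT), filter);
}

static bool example_filter_test(unsigned long *filter, unsigned long item)
{
	/* a clear bit proves absence; both bits set may be a false positive */
	return test_bit(hash_long(item, EXAMPLE_FILTER_SHIFT), filter) &&
	       test_bit(hash_long(~item, EXAMPLE_FILTER_SHIFT), filter);
}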
It is also an LRU of LRUs, +@@ -223,9 +257,9 @@ parts: - spin_lock_bh(&ul->lock); -- list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) { -+ list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) { - struct inet6_dev *rt_idev = rt->rt6i_idev; - struct net_device *rt_dev = rt->dst.dev; - bool handled = false; -@@ -188,7 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) - handled = true; - } - if (handled) -- list_move(&rt->rt6i_uncached, -+ list_move(&rt->dst.rt_uncached, - &ul->quarantine); - } - spin_unlock_bh(&ul->lock); -@@ -293,7 +293,7 @@ static const struct fib6_info fib6_null_entry_template = { + * Generations + * Rmap walks +-* Page table walks +-* Bloom filters +-* PID controller ++* Page table walks via ``mm_struct`` list ++* Bloom filters for rmap/PT walk feedback ++* PID controller for refault feedback - static const struct rt6_info ip6_null_entry_template = { - .dst = { -- .__refcnt = ATOMIC_INIT(1), -+ .__rcuref = RCUREF_INIT(1), - .__use = 1, - .obsolete = DST_OBSOLETE_FORCE_CHK, - .error = -ENETUNREACH, -@@ -307,7 +307,7 @@ static const struct rt6_info ip6_null_entry_template = { + The aging and the eviction form a producer-consumer model; + specifically, the latter drives the former by the sliding window over +diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c +index c299a18273ff..c718f2a0de94 100644 +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -273,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index 3327c47bc181..fc9a7dc26c5e 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -136,7 +136,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + if (filp || (flags & MAP_SHARED)) + info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 9062da6da567..4bff10704e7f 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -208,7 +208,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 9fb1b03b83b2..cabe7f51ea66 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -453,18 +453,14 @@ enum { + struct lru_gen_mm_state { + /* set to max_seq after each iteration */ + unsigned long seq; +- /* where the current iteration continues (inclusive) */ ++ /* where the current iteration continues after */ + struct list_head *head; +- /* where the last iteration ended (exclusive) */ ++ /* where the last iteration ended before */ + struct list_head *tail; +- /* to wait for the last page table walker to finish */ +- struct wait_queue_head wait; + /* Bloom filters flip 
after each iteration */ + unsigned long *filters[NR_BLOOM_FILTERS]; + /* the mm stats for debugging */ + unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; +- /* the number of concurrent page table walkers */ +- int nr_walkers; + }; - static const struct rt6_info ip6_prohibit_entry_template = { - .dst = { -- .__refcnt = ATOMIC_INIT(1), -+ .__rcuref = RCUREF_INIT(1), - .__use = 1, - .obsolete = DST_OBSOLETE_FORCE_CHK, - .error = -EACCES, -@@ -319,7 +319,7 @@ static const struct rt6_info ip6_prohibit_entry_template = { + struct lru_gen_mm_walk { +@@ -1369,7 +1365,7 @@ typedef struct pglist_data { - static const struct rt6_info ip6_blk_hole_entry_template = { - .dst = { -- .__refcnt = ATOMIC_INIT(1), -+ .__rcuref = RCUREF_INIT(1), - .__use = 1, - .obsolete = DST_OBSOLETE_FORCE_CHK, - .error = -EINVAL, -@@ -334,7 +334,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { - static void rt6_info_init(struct rt6_info *rt) - { - memset_after(rt, 0, dst); -- INIT_LIST_HEAD(&rt->rt6i_uncached); -+ INIT_LIST_HEAD(&rt->dst.rt_uncached); - } + #ifdef CONFIG_LRU_GEN + /* kswap mm walk data */ +- struct lru_gen_mm_walk mm_walk; ++ struct lru_gen_mm_walk mm_walk; + /* lru_gen_folio list */ + struct lru_gen_memcg memcg_lru; + #endif +diff --git a/lib/maple_tree.c b/lib/maple_tree.c +index db60edb55f2f..44f34a51afc0 100644 +--- a/lib/maple_tree.c ++++ b/lib/maple_tree.c +@@ -1303,26 +1303,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) + node = mas->alloc; + node->request_count = 0; + while (requested) { +- max_req = MAPLE_ALLOC_SLOTS; +- if (node->node_count) { +- unsigned int offset = node->node_count; +- +- slots = (void **)&node->slot[offset]; +- max_req -= offset; +- } else { +- slots = (void **)&node->slot; +- } +- ++ max_req = MAPLE_ALLOC_SLOTS - node->node_count; ++ slots = (void **)&node->slot[node->node_count]; + max_req = min(requested, max_req); + count = mt_alloc_bulk(gfp, max_req, slots); + if (!count) + goto nomem_bulk; - /* allocate dst with ip6_dst_ops */ -@@ -2638,7 +2638,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net, - dst = ip6_route_output_flags_noref(net, sk, fl6, flags); - rt6 = (struct rt6_info *)dst; - /* For dst cached in uncached_list, refcnt is already taken. 
*/ -- if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) { -+ if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) { - dst = &net->ipv6.ip6_null_entry->dst; - dst_hold(dst); ++ if (node->node_count == 0) { ++ node->slot[0]->node_count = 0; ++ node->slot[0]->request_count = 0; ++ } ++ + node->node_count += count; + allocated += count; + node = node->slot[0]; +- node->node_count = 0; +- node->request_count = 0; + requested -= count; } -@@ -2748,7 +2748,7 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst, - from = rcu_dereference(rt->from); - - if (from && (rt->rt6i_flags & RTF_PCPU || -- unlikely(!list_empty(&rt->rt6i_uncached)))) -+ unlikely(!list_empty(&rt->dst.rt_uncached)))) - dst_ret = rt6_dst_from_check(rt, from, cookie); - else - dst_ret = rt6_check(rt, from, cookie); -@@ -6477,7 +6477,7 @@ static int __net_init ip6_route_net_init(struct net *net) - net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_null_entry->dst, - ip6_template_metrics, true); -- INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached); -+ INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached); + mas->alloc->total = allocated; +@@ -2317,9 +2312,7 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) + static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) + { + struct ma_state *mas = wr_mas->mas; +- unsigned char count; +- unsigned char offset; +- unsigned long index, min, max; ++ unsigned char count, offset; - #ifdef CONFIG_IPV6_MULTIPLE_TABLES - net->ipv6.fib6_has_custom_rules = false; -@@ -6489,7 +6489,7 @@ static int __net_init ip6_route_net_init(struct net *net) - net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst, - ip6_template_metrics, true); -- INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached); -+ INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached); + if (unlikely(ma_is_dense(wr_mas->type))) { + wr_mas->r_max = wr_mas->r_min = mas->index; +@@ -2332,34 +2325,12 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) + count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, + wr_mas->pivots, mas->max); + offset = mas->offset; +- min = mas_safe_min(mas, wr_mas->pivots, offset); +- if (unlikely(offset == count)) +- goto max; +- +- max = wr_mas->pivots[offset]; +- index = mas->index; +- if (unlikely(index <= max)) +- goto done; +- +- if (unlikely(!max && offset)) +- goto max; - net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, - sizeof(*net->ipv6.ip6_blk_hole_entry), -@@ -6499,7 +6499,7 @@ static int __net_init ip6_route_net_init(struct net *net) - net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; - dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, - ip6_template_metrics, true); -- INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached); -+ INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached); - #ifdef CONFIG_IPV6_SUBTREES - net->ipv6.fib6_routes_require_src = 0; - #endif -diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c -index ea435eba3053..2b493f8d0091 100644 ---- a/net/ipv6/xfrm6_policy.c -+++ b/net/ipv6/xfrm6_policy.c -@@ -89,7 +89,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, - xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; - xdst->u.rt6.rt6i_dst = rt->rt6i_dst; - xdst->u.rt6.rt6i_src = rt->rt6i_src; -- INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached); -+ 
INIT_LIST_HEAD(&xdst->u.rt6.dst.rt_uncached); - rt6_uncached_list_add(&xdst->u.rt6); +- min = max + 1; +- while (++offset < count) { +- max = wr_mas->pivots[offset]; +- if (index <= max) +- goto done; +- else if (unlikely(!max)) +- break; +- +- min = max + 1; +- } ++ while (offset < count && mas->index > wr_mas->pivots[offset]) ++ offset++; - return 0; -@@ -121,7 +121,7 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) - if (likely(xdst->u.rt6.rt6i_idev)) - in6_dev_put(xdst->u.rt6.rt6i_idev); - dst_destroy_metrics_generic(dst); -- if (xdst->u.rt6.rt6i_uncached_list) -+ if (xdst->u.rt6.dst.rt_uncached_list) - rt6_uncached_list_del(&xdst->u.rt6); - xfrm_dst_destroy(xdst); - } -diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c -index 80448885c3d7..99c349c0d968 100644 ---- a/net/netfilter/ipvs/ip_vs_xmit.c -+++ b/net/netfilter/ipvs/ip_vs_xmit.c -@@ -339,7 +339,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, - spin_unlock_bh(&dest->dst_lock); - IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n", - &dest->addr.ip, &dest_dst->dst_saddr.ip, -- atomic_read(&rt->dst.__refcnt)); -+ rcuref_read(&rt->dst.__rcuref)); - } - if (ret_saddr) - *ret_saddr = dest_dst->dst_saddr.ip; -@@ -507,7 +507,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, - spin_unlock_bh(&dest->dst_lock); - IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n", - &dest->addr.in6, &dest_dst->dst_saddr.in6, -- atomic_read(&rt->dst.__refcnt)); -+ rcuref_read(&rt->dst.__rcuref)); - } - if (ret_saddr) - *ret_saddr = dest_dst->dst_saddr.in6; -diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o -index 0edfdb40364b..ae52d3b3f063 100644 ---- a/scripts/Makefile.vmlinux_o -+++ b/scripts/Makefile.vmlinux_o -@@ -19,7 +19,7 @@ quiet_cmd_gen_initcalls_lds = GEN $@ +-max: +- max = mas->max; +-done: +- wr_mas->r_max = max; +- wr_mas->r_min = min; ++ wr_mas->r_max = offset < count ? 
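The mas_wr_node_walk() rewrite above replaces the goto-based pivot search with a single linear scan whose post-condition is simply "offset is the first slot whose pivot is >= index". As a standalone sketch of that invariant (example_find_slot() is illustrative, not a tree-internal helper):

static unsigned char example_find_slot(const unsigned long *pivots,
				       unsigned char count,
				       unsigned long index)
{
	unsigned char offset = 0;

	/* stop at the first slot whose pivot covers index */
	while (offset < count && index > pivots[offset])
		offset++;

	return offset;	/* offset == count means "bounded by the node max" */
}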
wr_mas->pivots[offset] : mas->max; ++ wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset); + wr_mas->offset_end = mas->offset = offset; + } - .tmp_initcalls.lds: $(srctree)/scripts/generate_initcall_order.pl \ - vmlinux.a $(KBUILD_VMLINUX_LIBS) FORCE -- $(call if_changed,gen_initcalls_lds) -+ +$(call if_changed,gen_initcalls_lds) +@@ -3287,7 +3258,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end - targets := .tmp_initcalls.lds + if (tmp < max_p) + memset(pivs + tmp, 0, +- sizeof(unsigned long *) * (max_p - tmp)); ++ sizeof(unsigned long) * (max_p - tmp)); -diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl -index fbee2f6190d9..85ca8d9b5c27 100644 ---- a/scripts/atomic/atomics.tbl -+++ b/scripts/atomic/atomics.tbl -@@ -33,7 +33,7 @@ try_cmpxchg B v p:old i:new - sub_and_test b i v - dec_and_test b v - inc_and_test b v --add_negative b i v -+add_negative B i v - add_unless fb v i:a i:u - inc_not_zero b v - inc_unless_negative b v -diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative -index 15caa2eb2371..e5980abf5904 100755 ---- a/scripts/atomic/fallbacks/add_negative -+++ b/scripts/atomic/fallbacks/add_negative -@@ -1,16 +1,15 @@ - cat <bst_type = CS35L41_EXT_BOOST_NO_VSPK_SWITCH; -- } else if (strncmp(hid, "CLSA0101", 8) == 0) { -+ } else if (strncmp(hid, "CLSA0101", 8) == 0 || strncmp(hid, "CSC3551", 7) == 0) { - hw_cfg->bst_type = CS35L41_EXT_BOOST; - hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH; - hw_cfg->gpio1.valid = true; -diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c -index d8b5b4930412..05048ebc24d8 100644 ---- a/tools/testing/selftests/mm/ksm_functional_tests.c -+++ b/tools/testing/selftests/mm/ksm_functional_tests.c -@@ -24,9 +24,12 @@ + enum maple_type type = mte_node_type(mas->node); + struct maple_node *node = mas_mn(mas); +@@ -5035,8 +5007,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) - #define KiB 1024u - #define MiB (1024 * KiB) -+#define PageSize (4 * KiB) + if (unlikely(ma_is_leaf(type))) { + mas->offset = offset; +- mas->min = min; +- mas->max = min + gap - 1; ++ *gap_min = min; ++ *gap_max = min + gap - 1; + return true; + } - static int ksm_fd; - static int ksm_full_scans_fd; -+static int ksm_zero_pages_fd; -+static int ksm_use_zero_pages_fd; - static int pagemap_fd; - static size_t pagesize; +@@ -5060,10 +5032,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) + { + enum maple_type type = mte_node_type(mas->node); + unsigned long pivot, min, gap = 0; +- unsigned char offset; +- unsigned long *gaps; +- unsigned long *pivots = ma_pivots(mas_mn(mas), type); +- void __rcu **slots = ma_slots(mas_mn(mas), type); ++ unsigned char offset, data_end; ++ unsigned long *gaps, *pivots; ++ void __rcu **slots; ++ struct maple_node *node; + bool found = false; -@@ -57,6 +60,21 @@ static bool range_maps_duplicates(char *addr, unsigned long size) - return false; - } + if (ma_is_dense(type)) { +@@ -5071,13 +5043,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) + return true; + } -+static long ksm_get_zero_pages(void) -+{ -+ char buf[20]; -+ ssize_t read_size; -+ unsigned long ksm_zero_pages; -+ -+ read_size = pread(ksm_zero_pages_fd, buf, sizeof(buf) - 1, 0); -+ if (read_size < 0) -+ return -errno; -+ buf[read_size] = 0; -+ ksm_zero_pages = strtol(buf, NULL, 10); -+ -+ return ksm_zero_pages; -+} -+ - static long 
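mas_empty_area() and mas_empty_area_rev() now validate their window before doing any work: min <= max and max - min + 1 >= size must hold, otherwise -EINVAL comes back before the tree is walked. A hedged caller sketch (example_find_gap() is not part of the patch; a read-only walk under rcu_read_lock() is assumed to be sufficient here):

#include <linux/maple_tree.h>

static int example_find_gap(struct maple_tree *mt, unsigned long size,
			    unsigned long *start)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	rcu_read_lock();
	ret = mas_empty_area(&mas, 4096, ULONG_MAX, size);
	rcu_read_unlock();

	if (!ret)
		*start = mas.index;	/* the gap is [mas.index, mas.last] */
	return ret;
}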
ksm_get_full_scans(void) - { - char buf[10]; -@@ -70,15 +88,12 @@ static long ksm_get_full_scans(void) - return strtol(buf, NULL, 10); - } +- gaps = ma_gaps(mte_to_node(mas->node), type); ++ node = mas_mn(mas); ++ pivots = ma_pivots(node, type); ++ slots = ma_slots(node, type); ++ gaps = ma_gaps(node, type); + offset = mas->offset; + min = mas_safe_min(mas, pivots, offset); +- for (; offset < mt_slots[type]; offset++) { +- pivot = mas_safe_pivot(mas, pivots, offset, type); +- if (offset && !pivot) +- break; ++ data_end = ma_data_end(node, type, pivots, mas->max); ++ for (; offset <= data_end; offset++) { ++ pivot = mas_logical_pivot(mas, pivots, offset, type); --static int ksm_merge(void) -+static int wait_two_full_scans(void) + /* Not within lower bounds */ + if (mas->index > pivot) +@@ -5276,25 +5250,28 @@ static inline void mas_fill_gap(struct ma_state *mas, void *entry, + * @size: The size of the gap + * @fwd: Searching forward or back + */ +-static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, ++static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, + unsigned long max, unsigned long size, bool fwd) { - long start_scans, end_scans; +- unsigned long start = 0; +- +- if (!unlikely(mas_is_none(mas))) +- start++; ++ if (!unlikely(mas_is_none(mas)) && min == 0) { ++ min++; ++ /* ++ * At this time, min is increased, we need to recheck whether ++ * the size is satisfied. ++ */ ++ if (min > max || max - min + 1 < size) ++ return -EBUSY; ++ } + /* mas_is_ptr */ -- /* Wait for two full scans such that any possible merging happened. */ - start_scans = ksm_get_full_scans(); - if (start_scans < 0) -- return start_scans; -- if (write(ksm_fd, "1", 1) != 1) - return -errno; - do { - end_scans = ksm_get_full_scans(); -@@ -89,6 +104,34 @@ static int ksm_merge(void) - return 0; +- if (start < min) +- start = min; +- + if (fwd) { +- mas->index = start; +- mas->last = start + size - 1; +- return; ++ mas->index = min; ++ mas->last = min + size - 1; ++ } else { ++ mas->last = max; ++ mas->index = max - size + 1; + } +- +- mas->index = max; ++ return 0; } -+static inline int ksm_merge(void) -+{ -+ /* Wait for two full scans such that any possible merging happened. */ -+ if (write(ksm_fd, "1", 1) != 1) -+ return -errno; -+ -+ return wait_two_full_scans(); -+} -+ -+static int unmerge_zero_page(char *start, unsigned long size) -+{ -+ int ret; -+ -+ ret = madvise(start, size, MADV_UNMERGEABLE); -+ if (ret) { -+ ksft_test_result_fail("MADV_UNMERGEABLE failed\n"); -+ return ret; -+ } -+ -+ /* -+ * Wait for two full scans such that any possible unmerging of zero -+ * pages happened. Why? Because the unmerge action of zero pages is not -+ * done in the context of madvise(), but in the context of -+ * unshare_zero_pages() of the ksmd thread. 
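The selftest refactor above splits "request a change" from "wait until ksmd has acted on it": zero-page unsharing happens inside the ksmd scan loop rather than in the madvise() caller, so only after two full scans is every range guaranteed to have been revisited. The resulting test pattern, reusing the selftest's wait_two_full_scans() (illustrative only):

static int example_unmerge_and_settle(char *map, unsigned long size)
{
	if (madvise(map, size, MADV_UNMERGEABLE))
		return -errno;

	/* zero pages are unshared by ksmd, so wait for two full scans */
	return wait_two_full_scans();
}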
-+ */ -+ return wait_two_full_scans(); -+} -+ - static char *mmap_and_merge_range(char val, unsigned long size) - { - char *map; -@@ -146,6 +189,48 @@ static void test_unmerge(void) - munmap(map, size); - } + /* +@@ -5312,6 +5289,12 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, + unsigned long *pivots; + enum maple_type mt; -+static void test_unmerge_zero_pages(void) -+{ -+ const unsigned int size = 2 * MiB; -+ char *map; -+ unsigned long pages_expected; -+ -+ ksft_print_msg("[RUN] %s\n", __func__); -+ -+ /* Confirm the interfaces*/ -+ if (ksm_zero_pages_fd < 0) { -+ ksft_test_result_skip("open(\"/sys/kernel/mm/ksm/zero_pages_sharing\") failed\n"); -+ return; -+ } -+ if (ksm_use_zero_pages_fd < 0) { -+ ksft_test_result_skip("open \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n"); -+ return; -+ } -+ if (write(ksm_use_zero_pages_fd, "1", 1) != 1) { -+ ksft_test_result_skip("write \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n"); -+ return; -+ } -+ -+ /* Mmap zero pages*/ -+ map = mmap_and_merge_range(0x00, size); -+ if (map == MAP_FAILED) -+ return; -+ -+ if (unmerge_zero_page(map + size / 2, size / 2)) -+ goto unmap; -+ -+ /* Check if zero_pages_sharing can be update correctly when unmerge */ -+ pages_expected = (size / 2) / PageSize; -+ ksft_test_result(pages_expected == ksm_get_zero_pages(), -+ "zero page count react to unmerge\n"); ++ if (min > max) ++ return -EINVAL; + -+ /* Check if ksm zero pages are really unmerged */ -+ ksft_test_result(!range_maps_duplicates(map + size / 2, size / 2), -+ "KSM zero pages were unmerged\n"); -+unmap: -+ munmap(map, size); -+} ++ if (size == 0 || max - min < size - 1) ++ return -EINVAL; + - static void test_unmerge_discarded(void) - { - const unsigned int size = 2 * MiB; -@@ -264,8 +349,11 @@ int main(int argc, char **argv) - pagemap_fd = open("/proc/self/pagemap", O_RDONLY); - if (pagemap_fd < 0) - ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n"); -+ ksm_zero_pages_fd = open("/sys/kernel/mm/ksm/zero_pages_sharing", O_RDONLY); -+ ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR); + if (mas_is_start(mas)) + mas_start(mas); + else if (mas->offset >= 2) +@@ -5320,10 +5303,8 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, + return -EBUSY; - test_unmerge(); -+ test_unmerge_zero_pages(); - test_unmerge_discarded(); - #ifdef __NR_userfaultfd - test_unmerge_uffd_wp(); --- -2.40.0 - -From cbcd9e3198b7d93339f80452d765183b6fe5f30b Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:25:39 +0200 -Subject: [PATCH 05/12] fs-patches - -Signed-off-by: Peter Jung ---- - fs/btrfs/block-group.c | 7 +- - fs/btrfs/block-group.h | 2 +- - fs/btrfs/block-rsv.c | 21 +++--- - fs/btrfs/block-rsv.h | 2 +- - fs/btrfs/btrfs_inode.h | 33 ++++++++-- - fs/btrfs/ctree.c | 131 +++++++++++++++++++++++--------------- - fs/btrfs/ctree.h | 1 - - fs/btrfs/delalloc-space.c | 2 +- - fs/btrfs/delayed-ref.c | 49 ++------------ - fs/btrfs/delayed-ref.h | 22 ++++++- - fs/btrfs/discard.c | 18 +++--- - fs/btrfs/disk-io.c | 1 - - fs/btrfs/extent-tree.c | 27 +------- - fs/btrfs/file.c | 11 +++- - fs/btrfs/fs.h | 17 ++++- - fs/btrfs/inode-item.c | 15 ++--- - fs/btrfs/inode.c | 117 ++++++++++++++++++++++------------ - fs/btrfs/ordered-data.c | 46 ++++++++++--- - fs/btrfs/ordered-data.h | 7 +- - fs/btrfs/space-info.c | 32 ++++++++-- - fs/btrfs/space-info.h | 1 + - fs/btrfs/transaction.c | 28 ++++---- - fs/btrfs/tree-log.c | 76 ++++++++++++++-------- - fs/btrfs/volumes.c | 2 +- - 24 files changed, 399 insertions(+), 269 
deletions(-) - -diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c -index 5fc670c27f86..bececc6f0a87 100644 ---- a/fs/btrfs/block-group.c -+++ b/fs/btrfs/block-group.c -@@ -2672,7 +2672,7 @@ static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) - } + /* Empty set */ +- if (mas_is_none(mas) || mas_is_ptr(mas)) { +- mas_sparse_area(mas, min, max, size, true); +- return 0; +- } ++ if (mas_is_none(mas) || mas_is_ptr(mas)) ++ return mas_sparse_area(mas, min, max, size, true); - struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, -- u64 bytes_used, u64 type, -+ u64 type, - u64 chunk_offset, u64 size) + /* The start of the window can only be within these values */ + mas->index = min; +@@ -5366,6 +5347,12 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, { - struct btrfs_fs_info *fs_info = trans->fs_info; -@@ -2687,7 +2687,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran - - cache->length = size; - set_free_space_tree_thresholds(cache); -- cache->used = bytes_used; - cache->flags = type; - cache->cached = BTRFS_CACHE_FINISHED; - cache->global_root_id = calculate_global_root_id(fs_info, cache->start); -@@ -2738,9 +2737,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran + struct maple_enode *last = mas->node; - #ifdef CONFIG_BTRFS_DEBUG - if (btrfs_should_fragment_free_space(cache)) { -- u64 new_bytes_used = size - bytes_used; -- -- cache->space_info->bytes_used += new_bytes_used >> 1; -+ cache->space_info->bytes_used += size >> 1; - fragment_free_space(cache); ++ if (min > max) ++ return -EINVAL; ++ ++ if (size == 0 || max - min < size - 1) ++ return -EINVAL; ++ + if (mas_is_start(mas)) { + mas_start(mas); + mas->offset = mas_data_end(mas); +@@ -5376,16 +5363,14 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, } - #endif -diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h -index 6e4a0b429ac3..db729ad7315b 100644 ---- a/fs/btrfs/block-group.h -+++ b/fs/btrfs/block-group.h -@@ -302,7 +302,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); - void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); - int btrfs_read_block_groups(struct btrfs_fs_info *info); - struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, -- u64 bytes_used, u64 type, -+ u64 type, - u64 chunk_offset, u64 size); - void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); - int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, -diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c -index 5367a14d44d2..3ab707e26fa2 100644 ---- a/fs/btrfs/block-rsv.c -+++ b/fs/btrfs/block-rsv.c -@@ -232,9 +232,6 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent) - u64 num_bytes = 0; - int ret = -ENOSPC; -- if (!block_rsv) + /* Empty set. */ +- if (mas_is_none(mas) || mas_is_ptr(mas)) { +- mas_sparse_area(mas, min, max, size, false); - return 0; +- } ++ if (mas_is_none(mas) || mas_is_ptr(mas)) ++ return mas_sparse_area(mas, min, max, size, false); + + /* The start of the window can only be within these values. 
*/ + mas->index = min; + mas->last = max; + +- while (!mas_rev_awalk(mas, size)) { ++ while (!mas_rev_awalk(mas, size, &min, &max)) { + if (last == mas->node) { + if (!mas_rewind_node(mas)) + return -EBUSY; +@@ -5400,17 +5385,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, + if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) + return -EBUSY; + +- /* +- * mas_rev_awalk() has set mas->min and mas->max to the gap values. If +- * the maximum is outside the window we are searching, then use the last +- * location in the search. +- * mas->max and mas->min is the range of the gap. +- * mas->index and mas->last are currently set to the search range. +- */ - - spin_lock(&block_rsv->lock); - num_bytes = mult_perc(block_rsv->size, min_percent); - if (block_rsv->reserved >= num_bytes) -@@ -245,17 +242,15 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent) + /* Trim the upper limit to the max. */ +- if (mas->max <= mas->last) +- mas->last = mas->max; ++ if (max < mas->last) ++ mas->last = max; + + mas->index = mas->last - size + 1; + return 0; +@@ -5819,6 +5796,7 @@ int mas_preallocate(struct ma_state *mas, gfp_t gfp) + mas_reset(mas); + return ret; } ++EXPORT_SYMBOL_GPL(mas_preallocate); - int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info, -- struct btrfs_block_rsv *block_rsv, u64 min_reserved, -+ struct btrfs_block_rsv *block_rsv, u64 num_bytes, - enum btrfs_reserve_flush_enum flush) + /* + * mas_destroy() - destroy a maple state. +@@ -6391,7 +6369,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, { -- u64 num_bytes = 0; - int ret = -ENOSPC; + int ret = 0; - if (!block_rsv) - return 0; +- MA_STATE(mas, mt, min, max - size); ++ MA_STATE(mas, mt, min, min); + if (!mt_is_alloc(mt)) + return -EINVAL; - spin_lock(&block_rsv->lock); -- num_bytes = min_reserved; - if (block_rsv->reserved >= num_bytes) - ret = 0; - else -@@ -355,17 +350,19 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) +@@ -6411,7 +6389,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, + retry: + mas.offset = 0; + mas.index = min; +- mas.last = max - size; ++ mas.last = max - size + 1; + ret = mas_alloc(&mas, entry, size, startp); + if (mas_nomem(&mas, gfp)) + goto retry; +@@ -6427,14 +6405,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, + { + int ret = 0; - /* - * But we also want to reserve enough space so we can do the fallback -- * global reserve for an unlink, which is an additional 5 items (see the -- * comment in __unlink_start_trans for what we're modifying.) -+ * global reserve for an unlink, which is an additional -+ * BTRFS_UNLINK_METADATA_UNITS items. - * - * But we also need space for the delayed ref updates from the unlink, -- * so its 10, 5 for the actual operation, and 5 for the delayed ref -- * updates. -+ * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for -+ * each unlink metadata item. 
- */ -- min_items += 10; -+ min_items += BTRFS_UNLINK_METADATA_UNITS; +- MA_STATE(mas, mt, min, max - size); ++ MA_STATE(mas, mt, min, max - size + 1); + if (!mt_is_alloc(mt)) + return -EINVAL; - num_bytes = max_t(u64, num_bytes, -- btrfs_calc_insert_metadata_size(fs_info, min_items)); -+ btrfs_calc_insert_metadata_size(fs_info, min_items) + -+ btrfs_calc_delayed_ref_bytes(fs_info, -+ BTRFS_UNLINK_METADATA_UNITS)); + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; - spin_lock(&sinfo->lock); - spin_lock(&block_rsv->lock); -diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h -index 4cc41c9aaa82..6dc781709aca 100644 ---- a/fs/btrfs/block-rsv.h -+++ b/fs/btrfs/block-rsv.h -@@ -65,7 +65,7 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info, - enum btrfs_reserve_flush_enum flush); - int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent); - int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info, -- struct btrfs_block_rsv *block_rsv, u64 min_reserved, -+ struct btrfs_block_rsv *block_rsv, u64 num_bytes, - enum btrfs_reserve_flush_enum flush); - int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, - struct btrfs_block_rsv *dst_rsv, u64 num_bytes, -diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h -index 9dc21622806e..fd1a7436e694 100644 ---- a/fs/btrfs/btrfs_inode.h -+++ b/fs/btrfs/btrfs_inode.h -@@ -142,11 +142,22 @@ struct btrfs_inode { - /* a local copy of root's last_log_commit */ - int last_log_commit; +- if (min >= max) ++ if (min > max) + return -EINVAL; -- /* -- * Total number of bytes pending delalloc, used by stat to calculate the -- * real block usage of the file. This is used only for files. -- */ -- u64 delalloc_bytes; -+ union { -+ /* -+ * Total number of bytes pending delalloc, used by stat to -+ * calculate the real block usage of the file. This is used -+ * only for files. -+ */ -+ u64 delalloc_bytes; -+ /* -+ * The lowest possible index of the next dir index key which -+ * points to an inode that needs to be logged. -+ * This is used only for directories. -+ * Use the helpers btrfs_get_first_dir_index_to_log() and -+ * btrfs_set_first_dir_index_to_log() to access this field. -+ */ -+ u64 first_dir_index_to_log; -+ }; + if (max < size - 1) +diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c +index f1db333270e9..4d85d04b26f8 100644 +--- a/lib/test_maple_tree.c ++++ b/lib/test_maple_tree.c +@@ -102,7 +102,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, + unsigned long result = expected + 1; + int ret; - union { - /* -@@ -247,6 +258,17 @@ struct btrfs_inode { - struct inode vfs_inode; - }; +- ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1, ++ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end, + GFP_KERNEL); + MT_BUG_ON(mt, ret != eret); + if (ret) +@@ -680,7 +680,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0, /* Return value success. */ + + 0x0, /* Min */ +- 0x565234AF1 << 12, /* Max */ ++ 0x565234AF0 << 12, /* Max */ + 0x3000, /* Size */ + 0x565234AEE << 12, /* max - 3. */ + 0, /* Return value success. */ +@@ -692,14 +692,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0, /* Return value success. */ + + 0x0, /* Min */ +- 0x7F36D510A << 12, /* Max */ ++ 0x7F36D5109 << 12, /* Max */ + 0x4000, /* Size */ + 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */ + 0, /* Return value success. */ + + /* Ascend test. 
*/ + 0x0, +- 34148798629 << 12, ++ 34148798628 << 12, + 19 << 12, + 34148797418 << 12, + 0x0, +@@ -711,6 +711,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + 0x0, + -EBUSY, + ++ /* Single space test. */ ++ 34148798725 << 12, ++ 34148798725 << 12, ++ 1 << 12, ++ 34148798725 << 12, ++ 0, + }; -+static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode) -+{ -+ return READ_ONCE(inode->first_dir_index_to_log); -+} -+ -+static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode, -+ u64 index) -+{ -+ WRITE_ONCE(inode->first_dir_index_to_log, index); -+} -+ - static inline struct btrfs_inode *BTRFS_I(const struct inode *inode) + int i, range_count = ARRAY_SIZE(range); +@@ -759,9 +765,9 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) + mas_unlock(&mas); + for (i = 0; i < req_range_count; i += 5) { + #if DEBUG_REV_RANGE +- pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n", +- req_range[i] >> 12, +- (req_range[i + 1] >> 12) - 1, ++ pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n", ++ i, req_range[i] >> 12, ++ (req_range[i + 1] >> 12), + req_range[i+2] >> 12, + req_range[i+3] >> 12); + #endif +@@ -880,6 +886,13 @@ static noinline void check_alloc_range(struct maple_tree *mt) + 4503599618982063UL << 12, /* Size */ + 34359052178 << 12, /* Expected location */ + -EBUSY, /* Return failure. */ ++ ++ /* Test a single entry */ ++ 34148798648 << 12, /* Min */ ++ 34148798648 << 12, /* Max */ ++ 4096, /* Size of 1 */ ++ 34148798648 << 12, /* Location is the same as min/max */ ++ 0, /* Success */ + }; + int i, range_count = ARRAY_SIZE(range); + int req_range_count = ARRAY_SIZE(req_range); +diff --git a/mm/mmap.c b/mm/mmap.c +index ff68a67a2a7c..6819eb2b77d7 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -1519,6 +1519,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) + static unsigned long unmapped_area(struct vm_unmapped_area_info *info) { - return container_of(inode, struct btrfs_inode, vfs_inode); -@@ -516,6 +538,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, - ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, - size_t done_before); - struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, -+ struct btrfs_ordered_extent **ordered_extent, - size_t done_before); + unsigned long length, gap; ++ unsigned long low_limit, high_limit; ++ struct vm_area_struct *tmp; - extern const struct dentry_operations btrfs_dentry_operations; -diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c -index a5b6bb54545f..b5b73ed8b86b 100644 ---- a/fs/btrfs/ctree.c -+++ b/fs/btrfs/ctree.c -@@ -2370,6 +2370,87 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, - return ret; - } + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); -+/* -+ * Search the tree again to find a leaf with smaller keys. -+ * Returns 0 if it found something. -+ * Returns 1 if there are no smaller keys. -+ * Returns < 0 on error. -+ * -+ * This may release the path, and so you may lose any locks held at the -+ * time you call it. 
-+ */ -+static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) -+{ -+ struct btrfs_key key; -+ struct btrfs_key orig_key; -+ struct btrfs_disk_key found_key; -+ int ret; -+ -+ btrfs_item_key_to_cpu(path->nodes[0], &key, 0); -+ orig_key = key; -+ -+ if (key.offset > 0) { -+ key.offset--; -+ } else if (key.type > 0) { -+ key.type--; -+ key.offset = (u64)-1; -+ } else if (key.objectid > 0) { -+ key.objectid--; -+ key.type = (u8)-1; -+ key.offset = (u64)-1; +@@ -1527,12 +1529,32 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) + if (length < info->length) + return -ENOMEM; + +- if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1, +- length)) ++ low_limit = info->low_limit; ++ if (low_limit < mmap_min_addr) ++ low_limit = mmap_min_addr; ++ high_limit = info->high_limit; ++retry: ++ if (mas_empty_area(&mas, low_limit, high_limit - 1, length)) + return -ENOMEM; + + gap = mas.index; + gap += (info->align_offset - gap) & info->align_mask; ++ tmp = mas_next(&mas, ULONG_MAX); ++ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */ ++ if (vm_start_gap(tmp) < gap + length - 1) { ++ low_limit = tmp->vm_end; ++ mas_reset(&mas); ++ goto retry; ++ } + } else { -+ return 1; ++ tmp = mas_prev(&mas, 0); ++ if (tmp && vm_end_gap(tmp) > gap) { ++ low_limit = vm_end_gap(tmp); ++ mas_reset(&mas); ++ goto retry; ++ } + } + -+ btrfs_release_path(path); -+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); -+ if (ret <= 0) -+ return ret; -+ -+ /* -+ * Previous key not found. Even if we were at slot 0 of the leaf we had -+ * before releasing the path and calling btrfs_search_slot(), we now may -+ * be in a slot pointing to the same original key - this can happen if -+ * after we released the path, one of more items were moved from a -+ * sibbling leaf into the front of the leaf we had due to an insertion -+ * (see push_leaf_right()). -+ * If we hit this case and our slot is > 0 and just decrement the slot -+ * so that the caller does not process the same key again, which may or -+ * may not break the caller, depending on its logic. -+ */ -+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { -+ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); -+ ret = comp_keys(&found_key, &orig_key); -+ if (ret == 0) { -+ if (path->slots[0] > 0) { -+ path->slots[0]--; -+ return 0; -+ } -+ /* -+ * At slot 0, same key as before, it means orig_key is -+ * the lowest, leftmost, key in the tree. We're done. 
-+ */ -+ return 1; + return gap; + } + +@@ -1548,7 +1570,9 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) + */ + static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + { +- unsigned long length, gap; ++ unsigned long length, gap, gap_end; ++ unsigned long low_limit, high_limit; ++ struct vm_area_struct *tmp; + + MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); + /* Adjust search length to account for worst case alignment overhead */ +@@ -1556,12 +1580,33 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + if (length < info->length) + return -ENOMEM; + +- if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, +- length)) ++ low_limit = info->low_limit; ++ if (low_limit < mmap_min_addr) ++ low_limit = mmap_min_addr; ++ high_limit = info->high_limit; ++retry: ++ if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length)) + return -ENOMEM; + + gap = mas.last + 1 - info->length; + gap -= (gap - info->align_offset) & info->align_mask; ++ gap_end = mas.last; ++ tmp = mas_next(&mas, ULONG_MAX); ++ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */ ++ if (vm_start_gap(tmp) <= gap_end) { ++ high_limit = vm_start_gap(tmp); ++ mas_reset(&mas); ++ goto retry; ++ } ++ } else { ++ tmp = mas_prev(&mas, 0); ++ if (tmp && vm_end_gap(tmp) > gap) { ++ high_limit = tmp->vm_start; ++ mas_reset(&mas); ++ goto retry; + } + } + -+ btrfs_item_key(path->nodes[0], &found_key, 0); -+ ret = comp_keys(&found_key, &key); -+ /* -+ * We might have had an item with the previous key in the tree right -+ * before we released our path. And after we released our path, that -+ * item might have been pushed to the first slot (0) of the leaf we -+ * were holding due to a tree balance. Alternatively, an item with the -+ * previous key can exist as the only element of a leaf (big fat item). -+ * Therefore account for these 2 cases, so that our callers (like -+ * btrfs_previous_item) don't miss an existing item with a key matching -+ * the previous key we computed above. -+ */ -+ if (ret <= 0) -+ return 0; -+ return 1; -+} -+ - /* - * helper to use instead of search slot if no exact match is needed but - * instead the next or previous item should be returned. -@@ -4478,56 +4559,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, - return ret; + return gap; } --/* -- * search the tree again to find a leaf with lesser keys -- * returns 0 if it found something or 1 if there are no lesser leaves. -- * returns < 0 on io errors. -- * -- * This may release the path, and so you may lose any locks held at the -- * time you call it. 
-- */ --int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) --{ -- struct btrfs_key key; -- struct btrfs_disk_key found_key; -- int ret; +@@ -1675,7 +1720,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; +- info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.low_limit = PAGE_SIZE; + info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); + info.align_mask = 0; + info.align_offset = 0; +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 71a7f4517e5a..ae60ddff831a 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -3398,18 +3398,13 @@ void lru_gen_del_mm(struct mm_struct *mm) + for_each_node(nid) { + struct lruvec *lruvec = get_lruvec(memcg, nid); + +- /* where the last iteration ended (exclusive) */ ++ /* where the current iteration continues after */ ++ if (lruvec->mm_state.head == &mm->lru_gen.list) ++ lruvec->mm_state.head = lruvec->mm_state.head->prev; ++ ++ /* where the last iteration ended before */ + if (lruvec->mm_state.tail == &mm->lru_gen.list) + lruvec->mm_state.tail = lruvec->mm_state.tail->next; - -- btrfs_item_key_to_cpu(path->nodes[0], &key, 0); +- /* where the current iteration continues (inclusive) */ +- if (lruvec->mm_state.head != &mm->lru_gen.list) +- continue; - -- if (key.offset > 0) { -- key.offset--; -- } else if (key.type > 0) { -- key.type--; -- key.offset = (u64)-1; -- } else if (key.objectid > 0) { -- key.objectid--; -- key.type = (u8)-1; -- key.offset = (u64)-1; -- } else { -- return 1; +- lruvec->mm_state.head = lruvec->mm_state.head->next; +- /* the deletion ends the current iteration */ +- if (lruvec->mm_state.head == &mm_list->fifo) +- WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1); + } + + list_del_init(&mm->lru_gen.list); +@@ -3505,68 +3500,54 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, + struct mm_struct **iter) + { + bool first = false; +- bool last = true; ++ bool last = false; + struct mm_struct *mm = NULL; + struct mem_cgroup *memcg = lruvec_memcg(lruvec); + struct lru_gen_mm_list *mm_list = get_mm_list(memcg); + struct lru_gen_mm_state *mm_state = &lruvec->mm_state; + + /* +- * There are four interesting cases for this page table walker: +- * 1. It tries to start a new iteration of mm_list with a stale max_seq; +- * there is nothing left to do. +- * 2. It's the first of the current generation, and it needs to reset +- * the Bloom filter for the next generation. +- * 3. It reaches the end of mm_list, and it needs to increment +- * mm_state->seq; the iteration is done. +- * 4. It's the last of the current generation, and it needs to reset the +- * mm stats counters for the next generation. ++ * mm_state->seq is incremented after each iteration of mm_list. There ++ * are three interesting cases for this page table walker: ++ * 1. It tries to start a new iteration with a stale max_seq: there is ++ * nothing left to do. ++ * 2. It started the next iteration: it needs to reset the Bloom filter ++ * so that a fresh set of PTE tables can be recorded. ++ * 3. It ended the current iteration: it needs to reset the mm stats ++ * counters and tell its caller to increment max_seq. 
+ */ + spin_lock(&mm_list->lock); + + VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); +- VM_WARN_ON_ONCE(*iter && mm_state->seq > walk->max_seq); +- VM_WARN_ON_ONCE(*iter && !mm_state->nr_walkers); + +- if (walk->max_seq <= mm_state->seq) { +- if (!*iter) +- last = false; ++ if (walk->max_seq <= mm_state->seq) + goto done; +- } + +- if (!mm_state->nr_walkers) { +- VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); ++ if (!mm_state->head) ++ mm_state->head = &mm_list->fifo; + +- mm_state->head = mm_list->fifo.next; ++ if (mm_state->head == &mm_list->fifo) + first = true; - } - -- btrfs_release_path(path); -- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); -- if (ret < 0) -- return ret; -- btrfs_item_key(path->nodes[0], &found_key, 0); -- ret = comp_keys(&found_key, &key); -- /* -- * We might have had an item with the previous key in the tree right -- * before we released our path. And after we released our path, that -- * item might have been pushed to the first slot (0) of the leaf we -- * were holding due to a tree balance. Alternatively, an item with the -- * previous key can exist as the only element of a leaf (big fat item). -- * Therefore account for these 2 cases, so that our callers (like -- * btrfs_previous_item) don't miss an existing item with a key matching -- * the previous key we computed above. -- */ -- if (ret <= 0) -- return 0; -- return 1; --} -- - /* - * A helper function to walk down the tree starting at min_key, and looking - * for nodes or leaves that are have a minimum transaction id. -diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index 97897107fab5..406f90508f7e 100644 ---- a/fs/btrfs/ctree.h -+++ b/fs/btrfs/ctree.h -@@ -646,7 +646,6 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, - return btrfs_insert_empty_items(trans, root, path, &batch); - } +- while (!mm && mm_state->head != &mm_list->fifo) { +- mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); --int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); - int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, - u64 time_seq); ++ do { + mm_state->head = mm_state->head->next; ++ if (mm_state->head == &mm_list->fifo) { ++ WRITE_ONCE(mm_state->seq, mm_state->seq + 1); ++ last = true; ++ break; ++ } -diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c -index 7ddb1d104e8e..427abaf608b8 100644 ---- a/fs/btrfs/delalloc-space.c -+++ b/fs/btrfs/delalloc-space.c -@@ -358,8 +358,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, - * racing with an ordered completion or some such that would think it - * needs to free the reservation we just made. 
- */ -- spin_lock(&inode->lock); - nr_extents = count_max_extents(fs_info, num_bytes); -+ spin_lock(&inode->lock); - btrfs_mod_outstanding_extents(inode, nr_extents); - inode->csum_bytes += disk_num_bytes; - btrfs_calculate_inode_block_rsv_size(fs_info, inode); -diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c -index 886ffb232eac..0b32432d7d56 100644 ---- a/fs/btrfs/delayed-ref.c -+++ b/fs/btrfs/delayed-ref.c -@@ -53,24 +53,6 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info) - return ret; - } + /* force scan for those added after the last iteration */ +- if (!mm_state->tail || mm_state->tail == &mm->lru_gen.list) { +- mm_state->tail = mm_state->head; ++ if (!mm_state->tail || mm_state->tail == mm_state->head) { ++ mm_state->tail = mm_state->head->next; + walk->force_scan = true; + } --int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) --{ -- u64 num_entries = -- atomic_read(&trans->transaction->delayed_refs.num_entries); -- u64 avg_runtime; -- u64 val; ++ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); + if (should_skip_mm(mm, walk)) + mm = NULL; +- } - -- smp_mb(); -- avg_runtime = trans->fs_info->avg_delayed_ref_runtime; -- val = num_entries * avg_runtime; -- if (val >= NSEC_PER_SEC) -- return 1; -- if (val >= NSEC_PER_SEC / 2) -- return 2; +- if (mm_state->head == &mm_list->fifo) +- WRITE_ONCE(mm_state->seq, mm_state->seq + 1); ++ } while (!mm); + done: +- if (*iter && !mm) +- mm_state->nr_walkers--; +- if (!*iter && mm) +- mm_state->nr_walkers++; - -- return btrfs_check_space_for_delayed_refs(trans->fs_info); --} +- if (mm_state->nr_walkers) +- last = false; - - /* - * Release a ref head's reservation. - * -@@ -83,20 +65,9 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) - void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr) - { - struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; -- u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr); -+ const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr); - u64 released = 0; + if (*iter || last) + reset_mm_stats(lruvec, walk, last); -- /* -- * We have to check the mount option here because we could be enabling -- * the free space tree for the first time and don't have the compat_ro -- * option set yet. -- * -- * We need extra reservations if we have the free space tree because -- * we'll have to modify that tree as well. 
-- */ -- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) -- num_bytes *= 2; +@@ -3594,9 +3575,9 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) + + VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); + +- if (max_seq > mm_state->seq && !mm_state->nr_walkers) { +- VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); - - released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL); - if (released) - trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", -@@ -118,18 +89,8 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) - if (!trans->delayed_ref_updates) - return; ++ if (max_seq > mm_state->seq) { ++ mm_state->head = NULL; ++ mm_state->tail = NULL; + WRITE_ONCE(mm_state->seq, mm_state->seq + 1); + reset_mm_stats(lruvec, NULL, true); + success = true; +@@ -3608,7 +3589,7 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) + } -- num_bytes = btrfs_calc_insert_metadata_size(fs_info, -- trans->delayed_ref_updates); -- /* -- * We have to check the mount option here because we could be enabling -- * the free space tree for the first time and don't have the compat_ro -- * option set yet. -- * -- * We need extra reservations if we have the free space tree because -- * we'll have to modify that tree as well. -- */ -- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) -- num_bytes *= 2; -+ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, -+ trans->delayed_ref_updates); + /****************************************************************************** +- * refault feedback loop ++ * PID controller + ******************************************************************************/ - spin_lock(&delayed_rsv->lock); - delayed_rsv->size += num_bytes; -@@ -200,7 +161,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, - enum btrfs_reserve_flush_enum flush) - { - struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; -- u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1); -+ u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1); - u64 num_bytes = 0; - int ret = -ENOSPC; + /* +@@ -4196,10 +4177,6 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, -@@ -217,7 +178,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, - ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush); - if (ret) - return ret; -- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0); -+ btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false); - trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", - 0, num_bytes, 1); - return 0; -diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h -index 2eb34abf700f..b54261fe509b 100644 ---- a/fs/btrfs/delayed-ref.h -+++ b/fs/btrfs/delayed-ref.h -@@ -253,6 +253,27 @@ extern struct kmem_cache *btrfs_delayed_extent_op_cachep; - int __init btrfs_delayed_ref_init(void); - void __cold btrfs_delayed_ref_exit(void); + walk_pmd_range(&val, addr, next, args); -+static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info, -+ int num_delayed_refs) -+{ -+ u64 num_bytes; -+ -+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs); -+ -+ /* -+ * We have to check the mount option here because we could be enabling -+ * the free space tree for the first time and don't have the compat_ro -+ * option set yet. -+ * -+ * We need extra reservations if we have the free space tree because -+ * we'll have to modify that tree as well. 
-+ */ -+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) -+ num_bytes *= 2; +- /* a racy check to curtail the waiting time */ +- if (wq_has_sleeper(&walk->lruvec->mm_state.wait)) +- return 1; +- + if (need_resched() || walk->batched >= MAX_LRU_BATCH) { + end = (addr | ~PUD_MASK) + 1; + goto done; +@@ -4232,8 +4209,14 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_ + walk->next_addr = FIRST_USER_ADDRESS; + + do { ++ DEFINE_MAX_SEQ(lruvec); + -+ return num_bytes; -+} + err = -EBUSY; + ++ /* another thread might have called inc_max_seq() */ ++ if (walk->max_seq != max_seq) ++ break; + - static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, - int action, u64 bytenr, u64 len, u64 parent) - { -@@ -385,7 +406,6 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, - void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, - struct btrfs_block_rsv *src, - u64 num_bytes); --int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans); - bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); + /* folio_update_gen() requires stable folio_memcg() */ + if (!mem_cgroup_trylock_pages(memcg)) + break; +@@ -4466,25 +4449,12 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, + success = iterate_mm_list(lruvec, walk, &mm); + if (mm) + walk_mm(lruvec, mm, walk); +- +- cond_resched(); + } while (mm); + done: +- if (!success) { +- if (sc->priority <= DEF_PRIORITY - 2) +- wait_event_killable(lruvec->mm_state.wait, +- max_seq < READ_ONCE(lrugen->max_seq)); +- return false; +- } +- +- VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); ++ if (success) ++ inc_max_seq(lruvec, can_swap, force_scan); - /* -diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c -index 317aeff6c1da..c48abc817ed2 100644 ---- a/fs/btrfs/discard.c -+++ b/fs/btrfs/discard.c -@@ -56,11 +56,9 @@ - #define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC) - #define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC) +- inc_max_seq(lruvec, can_swap, force_scan); +- /* either this sees any waiters or they will see updated max_seq */ +- if (wq_has_sleeper(&lruvec->mm_state.wait)) +- wake_up_all(&lruvec->mm_state.wait); +- +- return true; ++ return success; + } --/* Target completion latency of discarding all discardable extents */ --#define BTRFS_DISCARD_TARGET_MSEC (6 * 60 * 60UL * MSEC_PER_SEC) - #define BTRFS_DISCARD_MIN_DELAY_MSEC (1UL) - #define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL) --#define BTRFS_DISCARD_MAX_IOPS (10U) -+#define BTRFS_DISCARD_MAX_IOPS (1000U) + /****************************************************************************** +@@ -5671,14 +5641,14 @@ static void lru_gen_change_state(bool enabled) + * sysfs interface + ******************************************************************************/ - /* Monotonically decreasing minimum length filters after index 0 */ - static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = { -@@ -577,6 +575,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) - s32 discardable_extents; - s64 discardable_bytes; - u32 iops_limit; -+ unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC; - unsigned long delay; +-static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf) ++static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) + { +- return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); ++ return sysfs_emit(buf, "%u\n", 
jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); + } - discardable_extents = atomic_read(&discard_ctl->discardable_extents); -@@ -607,13 +606,16 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl) - } + /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ +-static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, +- const char *buf, size_t len) ++static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t len) + { + unsigned int msecs; - iops_limit = READ_ONCE(discard_ctl->iops_limit); -- if (iops_limit) -+ -+ if (iops_limit) { - delay = MSEC_PER_SEC / iops_limit; -- else -- delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents; -+ } else { -+ /* unset iops_limit means go as fast as possible, so allow a delay of 0 */ -+ delay = 0; -+ min_delay = 0; -+ } +@@ -5690,11 +5660,9 @@ static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, + return len; + } -- delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC, -- BTRFS_DISCARD_MAX_DELAY_MSEC); -+ delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC); - discard_ctl->delay_ms = delay; +-static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR( +- min_ttl_ms, 0644, show_min_ttl, store_min_ttl +-); ++static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); - spin_unlock(&discard_ctl->lock); -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index 9e1596bb208d..6cf064f41bec 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -2966,7 +2966,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) - atomic64_set(&fs_info->free_chunk_space, 0); - fs_info->tree_mod_log = RB_ROOT; - fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; -- fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ - btrfs_init_ref_verify(fs_info); +-static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) ++static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) + { + unsigned int caps = 0; - fs_info->thread_pool_size = min_t(unsigned long, -diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c -index 824c657f59e8..cf1f7e901337 100644 ---- a/fs/btrfs/extent-tree.c -+++ b/fs/btrfs/extent-tree.c -@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( +@@ -5711,7 +5679,7 @@ static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, c } - static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, -- struct btrfs_delayed_ref_head *locked_ref, -- unsigned long *run_refs) -+ struct btrfs_delayed_ref_head *locked_ref) + /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ +-static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr, ++static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t len) { - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_delayed_ref_root *delayed_refs; -@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, - return -EAGAIN; - } + int i; +@@ -5738,9 +5706,7 @@ static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr, + return len; + } -- (*run_refs)++; - ref->in_tree = 0; - rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); - RB_CLEAR_NODE(&ref->ref_node); -@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, - 
struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_delayed_ref_root *delayed_refs; - struct btrfs_delayed_ref_head *locked_ref = NULL; -- ktime_t start = ktime_get(); - int ret; - unsigned long count = 0; -- unsigned long actual_count = 0; +-static struct kobj_attribute lru_gen_enabled_attr = __ATTR( +- enabled, 0644, show_enabled, store_enabled +-); ++static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); - delayed_refs = &trans->transaction->delayed_refs; - do { -@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, - spin_lock(&locked_ref->lock); - btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); + static struct attribute *lru_gen_attrs[] = { + &lru_gen_min_ttl_attr.attr, +@@ -5748,7 +5714,7 @@ static struct attribute *lru_gen_attrs[] = { + NULL + }; -- ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, -- &actual_count); -+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref); - if (ret < 0 && ret != -EAGAIN) { - /* - * Error, btrfs_run_delayed_refs_for_head already -@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, - cond_resched(); - } while ((nr != -1 && count < nr) || locked_ref); +-static struct attribute_group lru_gen_attr_group = { ++static const struct attribute_group lru_gen_attr_group = { + .name = "lru_gen", + .attrs = lru_gen_attrs, + }; +@@ -6130,7 +6096,6 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) + INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); -- /* -- * We don't want to include ref heads since we can have empty ref heads -- * and those will drastically skew our runtime down since we just do -- * accounting, no actual extent tree updates. -- */ -- if (actual_count > 0) { -- u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); -- u64 avg; -- -- /* -- * We weigh the current average higher than our current runtime -- * to avoid large swings in the average. -- */ -- spin_lock(&delayed_refs->lock); -- avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; -- fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */ -- spin_unlock(&delayed_refs->lock); -- } - return 0; + lruvec->mm_state.seq = MIN_NR_GENS; +- init_waitqueue_head(&lruvec->mm_state.wait); } -diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c -index 5cc5a1faaef5..ec5c5355906b 100644 ---- a/fs/btrfs/file.c -+++ b/fs/btrfs/file.c -@@ -1465,6 +1465,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) - ssize_t err; - unsigned int ilock_flags = 0; - struct iomap_dio *dio; -+ struct btrfs_ordered_extent *ordered_extent = NULL; - - if (iocb->ki_flags & IOCB_NOWAIT) - ilock_flags |= BTRFS_ILOCK_TRY; -@@ -1526,7 +1527,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) - * got -EFAULT, faulting in the pages before the retry. - */ - from->nofault = true; -- dio = btrfs_dio_write(iocb, from, written); -+ dio = btrfs_dio_write(iocb, from, &ordered_extent, written); - from->nofault = false; + #ifdef CONFIG_MEMCG +@@ -6163,7 +6128,6 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg) + for_each_node(nid) { + struct lruvec *lruvec = get_lruvec(memcg, nid); - /* -@@ -1569,6 +1570,14 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) - goto relock; - } - } -+ /* -+ * We can't loop back to btrfs_dio_write, so we can drop the cached -+ * ordered extent. 
Typically btrfs_dio_iomap_end will run and put the -+ * ordered_extent, but this is needed to clean up in case of an error -+ * path breaking out of iomap_iter before the final iomap_end call. -+ */ -+ if (ordered_extent) -+ btrfs_put_ordered_extent(ordered_extent); +- VM_WARN_ON_ONCE(lruvec->mm_state.nr_walkers); + VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, + sizeof(lruvec->lrugen.nr_pages))); - /* - * If 'err' is -ENOTBLK or we have not written all data, then it means -diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h -index 24cd49229408..23f05ba8f5c5 100644 ---- a/fs/btrfs/fs.h -+++ b/fs/btrfs/fs.h -@@ -24,6 +24,18 @@ - #define BTRFS_SUPER_INFO_SIZE 4096 - static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE); +diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c +index 4c89ff333f6f..9286d3baa12d 100644 +--- a/tools/testing/radix-tree/maple.c ++++ b/tools/testing/radix-tree/maple.c +@@ -55,6 +55,28 @@ struct rcu_reader_struct { + struct rcu_test_struct2 *test; + }; -+/* -+ * Number of metadata items necessary for an unlink operation: -+ * -+ * 1 for the possible orphan item -+ * 1 for the dir item -+ * 1 for the dir index -+ * 1 for the inode ref -+ * 1 for the inode -+ * 1 for the parent inode -+ */ -+#define BTRFS_UNLINK_METADATA_UNITS 6 ++static int get_alloc_node_count(struct ma_state *mas) ++{ ++ int count = 1; ++ struct maple_alloc *node = mas->alloc; ++ ++ if (!node || ((unsigned long)node & 0x1)) ++ return 0; ++ while (node->node_count) { ++ count += node->node_count; ++ node = node->slot[0]; ++ } ++ return count; ++} ++ ++static void check_mas_alloc_node_count(struct ma_state *mas) ++{ ++ mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL); ++ mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL); ++ MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total); ++ mas_destroy(mas); ++} + /* - * The reserved space at the beginning of each device. It covers the primary - * super block and leaves space for potential use by other tools like -@@ -412,7 +424,6 @@ struct btrfs_fs_info { - * Must be written and read while holding btrfs_fs_info::commit_root_sem. - */ - u64 last_reloc_trans; -- u64 avg_delayed_ref_runtime; - - /* - * This is updated to the current trans every time a full commit is -@@ -828,7 +839,7 @@ static inline u64 btrfs_csum_bytes_to_leaves( - * Use this if we would be adding new items, as we could split nodes as we cow - * down the tree. - */ --static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, -+static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info, - unsigned num_items) - { - return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; -@@ -838,7 +849,7 @@ static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, - * Doing a truncate or a modification won't result in new nodes or leaves, just - * what we need for COW. 
- */ --static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, -+static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info, - unsigned num_items) - { - return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; -diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c -index b65c45b5d681..4c322b720a80 100644 ---- a/fs/btrfs/inode-item.c -+++ b/fs/btrfs/inode-item.c -@@ -527,7 +527,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, - - while (1) { - u64 clear_start = 0, clear_len = 0, extent_start = 0; -- bool should_throttle = false; -+ bool refill_delayed_refs_rsv = false; + * check_new_node() - Check the creation of new nodes and error path + * verification. +@@ -69,6 +91,8 @@ static noinline void check_new_node(struct maple_tree *mt) - fi = NULL; - leaf = path->nodes[0]; -@@ -660,8 +660,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, - /* No pending yet, add ourselves */ - pending_del_slot = path->slots[0]; - pending_del_nr = 1; -- } else if (pending_del_nr && -- path->slots[0] + 1 == pending_del_slot) { -+ } else if (path->slots[0] + 1 == pending_del_slot) { - /* Hop on the pending chunk */ - pending_del_nr++; - pending_del_slot = path->slots[0]; -@@ -686,10 +685,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, - btrfs_abort_transaction(trans, ret); - break; - } -- if (be_nice) { -- if (btrfs_should_throttle_delayed_refs(trans)) -- should_throttle = true; -- } -+ if (be_nice && btrfs_check_space_for_delayed_refs(fs_info)) -+ refill_delayed_refs_rsv = true; - } + MA_STATE(mas, mt, 0, 0); - if (found_type == BTRFS_INODE_ITEM_KEY) -@@ -697,7 +694,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ++ check_mas_alloc_node_count(&mas); ++ + /* Try allocating 3 nodes */ + mtree_lock(mt); + mt_set_non_kernel(0); +-- +2.40.0 + +From 1a6be495a542f35f888b756b3fe71baec0acd980 Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:44:23 +0200 +Subject: [PATCH 09/13] Per-VMA locks +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Previous versions: +v3: https://lore.kernel.org/all/20230216051750.3125598-1-surenb@google.com/ +v2: https://lore.kernel.org/lkml/20230127194110.533103-1-surenb@google.com/ +v1: https://lore.kernel.org/all/20230109205336.3665937-1-surenb@google.com/ +RFC: https://lore.kernel.org/all/20220901173516.702122-1-surenb@google.com/ + +LWN article describing the feature: +https://lwn.net/Articles/906852/ + +Per-vma locks idea that was discussed during SPF [1] discussion at LSF/MM +last year [2], which concluded with suggestion that “a reader/writer +semaphore could be put into the VMA itself; that would have the effect of +using the VMA as a sort of range lock. There would still be contention at +the VMA level, but it would be an improvement.” This patchset implements +this suggested approach. + +When handling page faults we lookup the VMA that contains the faulting +page under RCU protection and try to acquire its lock. If that fails we +fall back to using mmap_lock, similar to how SPF handled this situation. + +One notable way the implementation deviates from the proposal is the way +VMAs are read-locked. During some of mm updates, multiple VMAs need to be +locked until the end of the update (e.g. vma_merge, split_vma, etc). +Tracking all the locked VMAs, avoiding recursive locks, figuring out when +it's safe to unlock previously locked VMAs would make the code more +complex. 
+So, instead of the usual lock/unlock pattern, the proposed
+solution marks a VMA as locked and provides an efficient way to:
+1. Identify locked VMAs.
+2. Unlock all locked VMAs in bulk.
+We also postpone unlocking the locked VMAs until the end of the update,
+when we do mmap_write_unlock. Potentially this keeps a VMA locked for
+longer than is absolutely necessary, but it results in a big reduction of
+code complexity.
+Read-locking a VMA is done using two sequence numbers - one in the
+vm_area_struct and one in the mm_struct. A VMA is considered read-locked
+when these sequence numbers are equal. To read-lock a VMA we set the
+sequence number in vm_area_struct to be equal to the sequence number in
+mm_struct. To unlock all VMAs we increment mm_struct's seq number. This
+allows for an efficient way to track locked VMAs and to drop the locks on
+all VMAs at the end of the update (a toy model of this scheme appears
+after the Google Fibers notes below).
+
+The patchset implements per-VMA locking only for anonymous pages which
+are not in swap and avoids userfaultfd as its implementation is more
+complex. Additional support for file-backed page faults, swapped and user
+pages can be added incrementally.
+
+Performance benchmarks show similar although slightly smaller benefits as
+with the SPF patchset (~75% of SPF benefits). Still, with lower complexity
+this approach might be more desirable.
+
+Since the RFC was posted in September 2022, two separate Google teams
+outside of Android evaluated the patchset and confirmed positive results.
+Here are the known use cases where per-VMA locks show benefits:
+
+Android:
+For apps with a high number of threads (~100), launch times improve by
+up to 20%.
+Each thread mmaps several areas upon startup (Stack and Thread-local
+storage (TLS), thread signal stack, indirect ref table), which requires
+taking mmap_lock in write mode. Page faults take mmap_lock in read mode.
+During app launch, both thread creation and page faults establishing the
+active working set are happening in parallel, and that causes lock
+contention between mm writers and readers even if updates and page faults
+are happening in different VMAs. Per-VMA locks prevent this contention by
+providing a more granular lock.
+
+Google Fibers:
+We have several dynamically sized thread pools that spawn new threads
+under increased load and reduce their number when idling. For example,
+Google's in-process scheduling/threading framework, UMCG/Fibers, is backed
+by such a thread pool. When idling, only a small number of idle worker
+threads are available; when a spike of incoming requests arrives, each
+request is handled in its own "fiber", which is a work item posted onto a
+UMCG worker thread; quite often these spikes lead to a number of new
+threads spawning. Each new thread needs to allocate and register an RSEQ
+section on its TLS, then register itself with the kernel as a UMCG worker
+thread, and only after that it can be considered by the in-process
+UMCG/Fiber scheduler as available to do useful work. In short, during an
+incoming workload spike new threads have to be spawned, and they perform
+several syscalls (RSEQ registration, UMCG worker registration, memory
+allocations) before they can actually start doing useful work. Removing
+any bottlenecks on this thread startup path will greatly improve our
+services' latencies when faced with request/workload spikes.
+At high scale, mmap_lock contention during thread creation and stack page
+faults leads to user-visible multi-second serving latencies in a similar
+pattern to Android app startup.
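+
+As a concrete illustration of the read-locking scheme described above,
+here is a toy userspace model. It is only a sketch: the names
+(vma_mark_locked(), mm_unlock_all()) and the struct layout are
+illustrative, and the real memory ordering and rwsem details are
+omitted; it is not the kernel's actual API:
+
+	/* Toy model: one sequence number in the mm, one per VMA. */
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	struct mm { unsigned long mm_lock_seq; };
+	struct vma { struct mm *mm; unsigned long vm_lock_seq; };
+
+	/* Lock a VMA: copy the mm's current sequence number into it. */
+	static void vma_mark_locked(struct vma *v)
+	{
+		v->vm_lock_seq = v->mm->mm_lock_seq;
+	}
+
+	/* A VMA counts as locked while the two numbers are equal. */
+	static bool vma_is_locked(const struct vma *v)
+	{
+		return v->vm_lock_seq == v->mm->mm_lock_seq;
+	}
+
+	/* Unlock every locked VMA in bulk by bumping the mm number. */
+	static void mm_unlock_all(struct mm *mm)
+	{
+		mm->mm_lock_seq++;
+	}
+
+	int main(void)
+	{
+		struct mm mm = { .mm_lock_seq = 1 };
+		struct vma a = { &mm, 0 }, b = { &mm, 0 };
+
+		vma_mark_locked(&a);
+		vma_mark_locked(&b);
+		printf("a=%d b=%d\n", vma_is_locked(&a), vma_is_locked(&b));
+		mm_unlock_all(&mm);	/* one increment unlocks both */
+		printf("a=%d b=%d\n", vma_is_locked(&a), vma_is_locked(&b));
+		return 0;
+	}
+
+The real implementation performs the comparison under proper
+synchronization and, as described above, a page-fault handler that fails
+to lock the VMA simply falls back to mmap_lock; the bulk-unlock property
+is the same.
+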
+The per-VMA locking patchset has been run
+successfully in limited experiments with user-facing production workloads.
+In these experiments, we observed that the peak thread creation rate was
+high enough that thread creation is no longer a bottleneck.
+
+TCP zerocopy receive:
+From the point of view of TCP zerocopy receive, the per-vma lock patch is
+massively beneficial.
+In today's implementation, a process with N threads where N - 1 are
+performing zerocopy receive and 1 thread is performing madvise() with the
+write lock taken (e.g. needs to change vm_flags) will result in all N - 1
+receive threads blocking until the madvise is done. Conversely, on a busy
+process receiving a lot of data, an madvise operation that does need to
+take the mmap lock in write mode will need to wait for all of the receives
+to be done - a lose:lose proposition. Per-VMA locking _removes_ by
+definition this source of contention entirely.
+There are other benefits for receive as well, chiefly a reduction in
+cacheline bouncing across receiving threads for locking/unlocking the
+single mmap lock. On an RPC style synthetic workload with 4KB RPCs:
+1a) The find+lock+unlock VMA path in the base case, without the per-vma
+lock patchset, is about 0.7% of cycles as measured by perf.
+1b) mmap_read_lock + mmap_read_unlock in the base case is about 0.5%
+cycles overall - most of this is within the TCP read hotpath (a small
+fraction is 'other' usage in the system).
+2a) The find+lock+unlock VMA path, with the per-vma patchset and a trivial
+patch written to take advantage of it in TCP, is about 0.4% of cycles
+(down from 0.7% above).
+2b) mmap_read_lock + mmap_read_unlock in the per-vma patchset is < 0.1%
+cycles and is out of the TCP read hotpath entirely (down from 0.5% before,
+the remaining usage is the 'other' usage in the system).
+So, in addition to entirely removing an onerous source of contention, it
+also reduces the CPU cycles of TCP receive zerocopy by about 0.5%+
+(compared to overall cycles in perf) for the 'small' RPC scenario.
+
+The patchset structure is:
+0001-0008: Enable maple-tree RCU mode
+0009-0031: Main per-vma locks patchset
+0032-0033: Performance optimizations
+
+Changes since v3:
+- Changed patch [3] to move vma_prepare before vma_adjust_trans_huge
+- Dropped patch [4] from the set as unnecessary, per Hyeonggon Yoo
+- Changed patch [5] to do VMA locking inside vma_prepare, per Liam Howlett
+- Dropped patch [6] from the set as unnecessary, per Liam Howlett
+
+[1] https://lore.kernel.org/all/20220128131006.67712-1-michel@lespinasse.org/
+[2] https://lwn.net/Articles/893906/
+[3] https://lore.kernel.org/all/20230216051750.3125598-15-surenb@google.com/
+[4] https://lore.kernel.org/all/20230216051750.3125598-17-surenb@google.com/
+[5] https://lore.kernel.org/all/20230216051750.3125598-18-surenb@google.com/
+[6] https://lore.kernel.org/all/20230216051750.3125598-22-surenb@google.com/
+
+The patchset applies cleanly over the mm-unstable branch.
+
+Laurent Dufour (1):
+ powerc/mm: try VMA lock-based page fault handling first
+
+Liam Howlett (4):
+ maple_tree: Be more cautious about dead nodes
+ maple_tree: Detect dead nodes in mas_start()
+ maple_tree: Fix freeing of nodes in rcu mode
+ maple_tree: remove extra smp_wmb() from mas_dead_leaves()
+
+Liam R. Howlett (4):
+ maple_tree: Fix write memory barrier of nodes once dead for RCU mode
+ maple_tree: Add smp_rmb() to dead node detection
+ maple_tree: Add RCU lock checking to rcu callback functions
+ mm: Enable maple tree RCU mode by default.
+ +Michel Lespinasse (1): + mm: rcu safe VMA freeing + +Suren Baghdasaryan (23): + mm: introduce CONFIG_PER_VMA_LOCK + mm: move mmap_lock assert function definitions + mm: add per-VMA lock and helper functions to control it + mm: mark VMA as being written when changing vm_flags + mm/mmap: move vma_prepare before vma_adjust_trans_huge + mm/khugepaged: write-lock VMA while collapsing a huge page + mm/mmap: write-lock VMAs in vma_prepare before modifying them + mm/mremap: write-lock VMA while remapping it to a new address range + mm: write-lock VMAs before removing them from VMA tree + mm: conditionally write-lock VMA in free_pgtables + kernel/fork: assert no VMA readers during its destruction + mm/mmap: prevent pagefault handler from racing with mmu_notifier + registration + mm: introduce vma detached flag + mm: introduce lock_vma_under_rcu to be used from arch-specific code + mm: fall back to mmap_lock if vma->anon_vma is not yet set + mm: add FAULT_FLAG_VMA_LOCK flag + mm: prevent do_swap_page from handling page faults under VMA lock + mm: prevent userfaults to be handled under per-vma lock + mm: introduce per-VMA lock statistics + x86/mm: try VMA lock-based page fault handling first + arm64/mm: try VMA lock-based page fault handling first + mm/mmap: free vm_area_struct without call_rcu in exit_mmap + mm: separate vma->lock from vm_area_struct + +Signed-off-by: Peter Jung +--- + Documentation/admin-guide/mm/userfaultfd.rst | 17 ++ + arch/arm64/Kconfig | 1 + + arch/arm64/mm/fault.c | 36 ++++ + arch/powerpc/mm/fault.c | 37 ++++ + arch/powerpc/platforms/powernv/Kconfig | 1 + + arch/powerpc/platforms/pseries/Kconfig | 1 + + arch/s390/Kconfig | 1 + + arch/s390/mm/fault.c | 24 +++ + arch/x86/Kconfig | 1 + + arch/x86/mm/fault.c | 36 ++++ + fs/userfaultfd.c | 16 ++ + include/linux/mm.h | 127 ++++++++++++- + include/linux/mm_inline.h | 6 + + include/linux/mm_types.h | 30 ++- + include/linux/mmap_lock.h | 37 ++-- + include/linux/userfaultfd_k.h | 23 +++ + include/linux/vm_event_item.h | 6 + + include/linux/vmstat.h | 6 + + include/uapi/linux/userfaultfd.h | 10 +- + kernel/fork.c | 96 ++++++++-- + mm/Kconfig | 12 ++ + mm/Kconfig.debug | 6 + + mm/filemap.c | 6 + + mm/hugetlb.c | 4 + + mm/init-mm.c | 3 + + mm/internal.h | 2 +- + mm/khugepaged.c | 10 +- + mm/memory.c | 187 +++++++++++++++---- + mm/mmap.c | 48 +++-- + mm/mprotect.c | 51 ++++- + mm/mremap.c | 1 + + mm/rmap.c | 31 +-- + mm/vmstat.c | 6 + + tools/testing/selftests/mm/userfaultfd.c | 45 ++++- + 34 files changed, 811 insertions(+), 113 deletions(-) + +diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst +index 7dc823b56ca4..bd2226299583 100644 +--- a/Documentation/admin-guide/mm/userfaultfd.rst ++++ b/Documentation/admin-guide/mm/userfaultfd.rst +@@ -219,6 +219,23 @@ former will have ``UFFD_PAGEFAULT_FLAG_WP`` set, the latter + you still need to supply a page when ``UFFDIO_REGISTER_MODE_MISSING`` was + used. - if (path->slots[0] == 0 || - path->slots[0] != pending_del_slot || -- should_throttle) { -+ refill_delayed_refs_rsv) { - if (pending_del_nr) { - ret = btrfs_del_items(trans, root, path, - pending_del_slot, -@@ -720,7 +717,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, - * actually allocate, so just bail if we're short and - * let the normal reservation dance happen higher up. 
- */ -- if (should_throttle) { -+ if (refill_delayed_refs_rsv) { - ret = btrfs_delayed_refs_rsv_refill(fs_info, - BTRFS_RESERVE_NO_FLUSH); - if (ret) { -diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c -index 957e4d76a7b6..0dbb1c1cc851 100644 ---- a/fs/btrfs/inode.c -+++ b/fs/btrfs/inode.c -@@ -81,6 +81,7 @@ struct btrfs_dio_data { - struct extent_changeset *data_reserved; - bool data_space_reserved; - bool nocow_done; -+ struct btrfs_ordered_extent *ordered; - }; ++Userfaultfd write-protect mode currently behave differently on none ptes ++(when e.g. page is missing) over different types of memories. ++ ++For anonymous memory, ``ioctl(UFFDIO_WRITEPROTECT)`` will ignore none ptes ++(e.g. when pages are missing and not populated). For file-backed memories ++like shmem and hugetlbfs, none ptes will be write protected just like a ++present pte. In other words, there will be a userfaultfd write fault ++message generated when writing to a missing page on file typed memories, ++as long as the page range was write-protected before. Such a message will ++not be generated on anonymous memories by default. ++ ++If the application wants to be able to write protect none ptes on anonymous ++memory, one can pre-populate the memory with e.g. MADV_POPULATE_READ. On ++newer kernels, one can also detect the feature UFFD_FEATURE_WP_UNPOPULATED ++and set the feature bit in advance to make sure none ptes will also be ++write protected even upon anonymous memory. ++ + QEMU/KVM + ======== - struct btrfs_dio_private { -@@ -4261,15 +4262,8 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) - { - struct btrfs_root *root = dir->root; +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 1023e896d46b..6f104c829731 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -95,6 +95,7 @@ config ARM64 + select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_PAGE_TABLE_CHECK ++ select ARCH_SUPPORTS_PER_VMA_LOCK + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT + select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index f4cb0f85ccf4..9e0db5c387e3 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, + unsigned long vm_flags; + unsigned int mm_flags = FAULT_FLAG_DEFAULT; + unsigned long addr = untagged_addr(far); ++#ifdef CONFIG_PER_VMA_LOCK ++ struct vm_area_struct *vma; ++#endif -- /* -- * 1 for the possible orphan item -- * 1 for the dir item -- * 1 for the dir index -- * 1 for the inode ref -- * 1 for the inode -- * 1 for the parent inode -- */ -- return btrfs_start_transaction_fallback_global_rsv(root, 6); -+ return btrfs_start_transaction_fallback_global_rsv(root, -+ BTRFS_UNLINK_METADATA_UNITS); - } + if (kprobe_page_fault(regs, esr)) + return 0; +@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, - static int btrfs_unlink(struct inode *dir, struct dentry *dentry) -@@ -5243,7 +5237,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, - { - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_trans_handle *trans; -- u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); -+ u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); - int ret; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); ++#ifdef 
CONFIG_PER_VMA_LOCK ++ if (!(mm_flags & FAULT_FLAG_USER)) ++ goto lock_mmap; ++ ++ vma = lock_vma_under_rcu(mm, addr); ++ if (!vma) ++ goto lock_mmap; ++ ++ if (!(vma->vm_flags & vm_flags)) { ++ vma_end_read(vma); ++ goto lock_mmap; ++ } ++ fault = handle_mm_fault(vma, addr & PAGE_MASK, ++ mm_flags | FAULT_FLAG_VMA_LOCK, regs); ++ vma_end_read(vma); ++ ++ if (!(fault & VM_FAULT_RETRY)) { ++ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); ++ goto done; ++ } ++ count_vm_vma_lock_event(VMA_LOCK_RETRY); ++ ++ /* Quick path to respond to signals */ ++ if (fault_signal_pending(fault, regs)) { ++ if (!user_mode(regs)) ++ goto no_context; ++ return 0; ++ } ++lock_mmap: ++#endif /* CONFIG_PER_VMA_LOCK */ /* -@@ -5281,7 +5275,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, - trans->block_rsv = &fs_info->trans_block_rsv; - trans->bytes_reserved = delayed_refs_extra; - btrfs_block_rsv_migrate(rsv, trans->block_rsv, -- delayed_refs_extra, 1); -+ delayed_refs_extra, true); + * As per x86, we may deadlock here. However, since the kernel only + * validly references user space from well defined areas of the code, +@@ -628,6 +661,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, } - return trans; - } -@@ -5291,7 +5285,7 @@ void btrfs_evict_inode(struct inode *inode) - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_trans_handle *trans; - struct btrfs_root *root = BTRFS_I(inode)->root; -- struct btrfs_block_rsv *rsv; -+ struct btrfs_block_rsv *rsv = NULL; - int ret; + mmap_read_unlock(mm); + ++#ifdef CONFIG_PER_VMA_LOCK ++done: ++#endif + /* + * Handle the "normal" (no error) case first. + */ +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index af46aa88422b..531177a4ee08 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -474,6 +474,40 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, + if (is_exec) + flags |= FAULT_FLAG_INSTRUCTION; + ++#ifdef CONFIG_PER_VMA_LOCK ++ if (!(flags & FAULT_FLAG_USER)) ++ goto lock_mmap; ++ ++ vma = lock_vma_under_rcu(mm, address); ++ if (!vma) ++ goto lock_mmap; ++ ++ if (unlikely(access_pkey_error(is_write, is_exec, ++ (error_code & DSISR_KEYFAULT), vma))) { ++ vma_end_read(vma); ++ goto lock_mmap; ++ } ++ ++ if (unlikely(access_error(is_write, is_exec, vma))) { ++ vma_end_read(vma); ++ goto lock_mmap; ++ } ++ ++ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); ++ vma_end_read(vma); ++ ++ if (!(fault & VM_FAULT_RETRY)) { ++ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); ++ goto done; ++ } ++ count_vm_vma_lock_event(VMA_LOCK_RETRY); ++ ++ if (fault_signal_pending(fault, regs)) ++ return user_mode(regs) ? 0 : SIGBUS; ++ ++lock_mmap: ++#endif /* CONFIG_PER_VMA_LOCK */ ++ + /* When running in the kernel we expect faults to occur only to + * addresses in user space. All other faults represent errors in the + * kernel and should generate an OOPS. 
Unfortunately, in the case of an +@@ -550,6 +584,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, - trace_btrfs_inode_evict(inode); -@@ -5308,18 +5302,18 @@ void btrfs_evict_inode(struct inode *inode) - ((btrfs_root_refs(&root->root_item) != 0 && - root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || - btrfs_is_free_space_inode(BTRFS_I(inode)))) -- goto no_delete; -+ goto out; + mmap_read_unlock(current->mm); - if (is_bad_inode(inode)) -- goto no_delete; -+ goto out; ++#ifdef CONFIG_PER_VMA_LOCK ++done: ++#endif + if (unlikely(fault & VM_FAULT_ERROR)) + return mm_fault_error(regs, address, fault); - if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) -- goto no_delete; -+ goto out; +diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig +index ae248a161b43..70a46acc70d6 100644 +--- a/arch/powerpc/platforms/powernv/Kconfig ++++ b/arch/powerpc/platforms/powernv/Kconfig +@@ -16,6 +16,7 @@ config PPC_POWERNV + select PPC_DOORBELL + select MMU_NOTIFIER + select FORCE_SMP ++ select ARCH_SUPPORTS_PER_VMA_LOCK + default y - if (inode->i_nlink > 0) { - BUG_ON(btrfs_root_refs(&root->root_item) != 0 && - root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); -- goto no_delete; -+ goto out; - } + config OPAL_PRD +diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig +index 21b22bf16ce6..4ebf2ef2845d 100644 +--- a/arch/powerpc/platforms/pseries/Kconfig ++++ b/arch/powerpc/platforms/pseries/Kconfig +@@ -22,6 +22,7 @@ config PPC_PSERIES + select HOTPLUG_CPU + select FORCE_SMP + select SWIOTLB ++ select ARCH_SUPPORTS_PER_VMA_LOCK + default y - /* -@@ -5328,7 +5322,7 @@ void btrfs_evict_inode(struct inode *inode) - */ - ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); - if (ret) -- goto no_delete; + config PARAVIRT +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig +index 9809c74e1240..548b5b587003 100644 +--- a/arch/s390/Kconfig ++++ b/arch/s390/Kconfig +@@ -120,6 +120,7 @@ config S390 + select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS + select ARCH_SUPPORTS_NUMA_BALANCING ++ select ARCH_SUPPORTS_PER_VMA_LOCK + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_WANTS_DYNAMIC_TASK_STRUCT +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index a2632fd97d00..b65144c392b0 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -407,6 +407,30 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) + access = VM_WRITE; + if (access == VM_WRITE) + flags |= FAULT_FLAG_WRITE; ++#ifdef CONFIG_PER_VMA_LOCK ++ if (!(flags & FAULT_FLAG_USER)) ++ goto lock_mmap; ++ vma = lock_vma_under_rcu(mm, address); ++ if (!vma) ++ goto lock_mmap; ++ if (!(vma->vm_flags & access)) { ++ vma_end_read(vma); ++ goto lock_mmap; ++ } ++ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); ++ vma_end_read(vma); ++ if (!(fault & VM_FAULT_RETRY)) { ++ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto out; - - /* - * This drops any pending insert or delete operations we have for this -@@ -5340,7 +5334,7 @@ void btrfs_evict_inode(struct inode *inode) - - rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); - if (!rsv) -- goto no_delete; ++ } ++ count_vm_vma_lock_event(VMA_LOCK_RETRY); ++ /* Quick path to respond to signals */ ++ if (fault_signal_pending(fault, regs)) { ++ fault = VM_FAULT_SIGNAL; + goto out; - rsv->size = btrfs_calc_metadata_size(fs_info, 1); - rsv->failfast = true; - -@@ -5356,16 +5350,21 @@ 
void btrfs_evict_inode(struct inode *inode) - - trans = evict_refill_and_join(root, rsv); - if (IS_ERR(trans)) -- goto free_rsv; -+ goto out; ++ } ++lock_mmap: ++#endif /* CONFIG_PER_VMA_LOCK */ + mmap_read_lock(mm); - trans->block_rsv = rsv; + gmap = NULL; +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index a825bf031f49..df21fba77db1 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -27,6 +27,7 @@ config X86_64 + # Options that are inherently 64-bit kernel only: + select ARCH_HAS_GIGANTIC_PAGE + select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 ++ select ARCH_SUPPORTS_PER_VMA_LOCK + select ARCH_USE_CMPXCHG_LOCKREF + select HAVE_ARCH_SOFT_DIRTY + select MODULES_USE_ELF_RELA +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index a498ae1fbe66..e4399983c50c 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -19,6 +19,7 @@ + #include /* faulthandler_disabled() */ + #include /* efi_crash_gracefully_on_page_fault()*/ + #include ++#include /* find_and_lock_vma() */ - ret = btrfs_truncate_inode_items(trans, root, &control); - trans->block_rsv = &fs_info->trans_block_rsv; - btrfs_end_transaction(trans); -- btrfs_btree_balance_dirty(fs_info); -+ /* -+ * We have not added new delayed items for our inode after we -+ * have flushed its delayed items, so no need to throttle on -+ * delayed items. However we have modified extent buffers. -+ */ -+ btrfs_btree_balance_dirty_nodelay(fs_info); - if (ret && ret != -ENOSPC && ret != -EAGAIN) -- goto free_rsv; -+ goto out; - else if (!ret) - break; - } -@@ -5387,9 +5386,8 @@ void btrfs_evict_inode(struct inode *inode) - btrfs_end_transaction(trans); + #include /* boot_cpu_has, ... */ + #include /* dotraplinkage, ... */ +@@ -1333,6 +1334,38 @@ void do_user_addr_fault(struct pt_regs *regs, } + #endif --free_rsv: -+out: - btrfs_free_block_rsv(fs_info, rsv); --no_delete: ++#ifdef CONFIG_PER_VMA_LOCK ++ if (!(flags & FAULT_FLAG_USER)) ++ goto lock_mmap; ++ ++ vma = lock_vma_under_rcu(mm, address); ++ if (!vma) ++ goto lock_mmap; ++ ++ if (unlikely(access_error(error_code, vma))) { ++ vma_end_read(vma); ++ goto lock_mmap; ++ } ++ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); ++ vma_end_read(vma); ++ ++ if (!(fault & VM_FAULT_RETRY)) { ++ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); ++ goto done; ++ } ++ count_vm_vma_lock_event(VMA_LOCK_RETRY); ++ ++ /* Quick path to respond to signals */ ++ if (fault_signal_pending(fault, regs)) { ++ if (!user_mode(regs)) ++ kernelmode_fixup_or_oops(regs, error_code, address, ++ SIGBUS, BUS_ADRERR, ++ ARCH_DEFAULT_PKEY); ++ return; ++ } ++lock_mmap: ++#endif /* CONFIG_PER_VMA_LOCK */ ++ /* - * If we didn't successfully delete, the orphan item will still be in - * the tree and we'll retry on the next mount. 
Again, we might also want -@@ -6981,6 +6979,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, - } - - static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, -+ struct btrfs_dio_data *dio_data, - const u64 start, - const u64 len, - const u64 orig_start, -@@ -6991,7 +6990,7 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, - const int type) - { - struct extent_map *em = NULL; -- int ret; -+ struct btrfs_ordered_extent *ordered; - - if (type != BTRFS_ORDERED_NOCOW) { - em = create_io_em(inode, start, len, orig_start, block_start, -@@ -7001,18 +7000,21 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, - if (IS_ERR(em)) - goto out; - } -- ret = btrfs_add_ordered_extent(inode, start, len, len, block_start, -- block_len, 0, -- (1 << type) | -- (1 << BTRFS_ORDERED_DIRECT), -- BTRFS_COMPRESS_NONE); -- if (ret) { -+ ordered = btrfs_alloc_ordered_extent(inode, start, len, len, -+ block_start, block_len, 0, -+ (1 << type) | -+ (1 << BTRFS_ORDERED_DIRECT), -+ BTRFS_COMPRESS_NONE); -+ if (IS_ERR(ordered)) { - if (em) { - free_extent_map(em); - btrfs_drop_extent_map_range(inode, start, - start + len - 1, false); - } -- em = ERR_PTR(ret); -+ em = ERR_PTR(PTR_ERR(ordered)); -+ } else { -+ ASSERT(!dio_data->ordered); -+ dio_data->ordered = ordered; + * Kernel-mode access to the user address space should only occur + * on well-defined single instructions listed in the exception +@@ -1433,6 +1466,9 @@ void do_user_addr_fault(struct pt_regs *regs, } - out: -@@ -7020,6 +7022,7 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, + mmap_read_unlock(mm); ++#ifdef CONFIG_PER_VMA_LOCK ++done: ++#endif + if (likely(!(fault & VM_FAULT_ERROR))) + return; + +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index 44d1ee429eb0..881e9c82b9d1 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) + return ctx->features & UFFD_FEATURE_INITIALIZED; } - static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, -+ struct btrfs_dio_data *dio_data, - u64 start, u64 len) ++/* ++ * Whether WP_UNPOPULATED is enabled on the uffd context. It is only ++ * meaningful when userfaultfd_wp()==true on the vma and when it's ++ * anonymous. 
++ */ ++bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) ++{ ++ struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; ++ ++ if (!ctx) ++ return false; ++ ++ return ctx->features & UFFD_FEATURE_WP_UNPOPULATED; ++} ++ + static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, + vm_flags_t flags) { - struct btrfs_root *root = inode->root; -@@ -7035,7 +7038,8 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, - if (ret) - return ERR_PTR(ret); - -- em = btrfs_create_dio_extent(inode, start, ins.offset, start, -+ em = btrfs_create_dio_extent(inode, dio_data, -+ start, ins.offset, start, - ins.objectid, ins.offset, ins.offset, - ins.offset, BTRFS_ORDERED_REGULAR); - btrfs_dec_block_group_reservations(fs_info, ins.objectid); -@@ -7380,7 +7384,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, - } - space_reserved = true; +@@ -1971,6 +1986,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, + #endif + #ifndef CONFIG_PTE_MARKER_UFFD_WP + uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; ++ uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED; + #endif + uffdio_api.ioctls = UFFD_API_IOCTLS; + ret = -EFAULT; +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 1f79667824eb..c4c9de7d1916 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -256,6 +256,8 @@ void setup_initial_init_mm(void *start_code, void *end_code, + struct vm_area_struct *vm_area_alloc(struct mm_struct *); + struct vm_area_struct *vm_area_dup(struct vm_area_struct *); + void vm_area_free(struct vm_area_struct *); ++/* Use only if VMA has no other users */ ++void __vm_area_free(struct vm_area_struct *vma); -- em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, -+ em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len, - orig_start, block_start, - len, orig_block_len, - ram_bytes, type); -@@ -7422,7 +7426,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, - goto out; - space_reserved = true; + #ifndef CONFIG_MMU + extern struct rb_root nommu_region_tree; +@@ -478,7 +480,8 @@ static inline bool fault_flag_allow_retry_first(enum fault_flag flags) + { FAULT_FLAG_USER, "USER" }, \ + { FAULT_FLAG_REMOTE, "REMOTE" }, \ + { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ +- { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } ++ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \ ++ { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" } -- em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); -+ em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len); - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto out; -@@ -7526,6 +7530,17 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, - } - } + /* + * vm_fault is filled by the pagefault handler and passed to the vma's +@@ -623,6 +626,117 @@ struct vm_operations_struct { + unsigned long addr); + }; -+ if (dio_data->ordered) { -+ ASSERT(write); -+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, -+ dio_data->ordered->file_offset, -+ dio_data->ordered->bytes_left); -+ if (IS_ERR(em)) { -+ ret = PTR_ERR(em); -+ goto err; -+ } -+ goto map_iomap; ++#ifdef CONFIG_PER_VMA_LOCK ++/* ++ * Try to read-lock a vma. The function is allowed to occasionally yield false ++ * locked result to avoid performance overhead, in which case we fall back to ++ * using mmap_lock. The function should never yield false unlocked result. ++ */ ++static inline bool vma_start_read(struct vm_area_struct *vma) ++{ ++ /* Check before locking. A race might cause false locked result. 
*/ ++ if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq)) ++ return false; ++ ++ if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) ++ return false; ++ ++ /* ++ * Overflow might produce false locked result. ++ * False unlocked result is impossible because we modify and check ++ * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq ++ * modification invalidates all existing locks. ++ */ ++ if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) { ++ up_read(&vma->vm_lock->lock); ++ return false; + } - memset(dio_data, 0, sizeof(*dio_data)); - - /* -@@ -7667,6 +7682,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, - else - free_extent_state(cached_state); - -+map_iomap: - /* - * Translate extent map information to iomap. - * We trim the extents (and move the addr) even though iomap code does -@@ -7720,13 +7736,25 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, - if (submitted < length) { - pos += submitted; - length -= submitted; -- if (write) -- btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL, -- pos, length, false); -- else -+ if (write) { -+ if (submitted == 0) { -+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), -+ NULL, pos, -+ length, false); -+ btrfs_put_ordered_extent(dio_data->ordered); -+ dio_data->ordered = NULL; -+ } -+ } else { - unlock_extent(&BTRFS_I(inode)->io_tree, pos, - pos + length - 1, NULL); -+ } - ret = -ENOTBLK; -+ } else { -+ /* On the last bio, release our cached ordered_extent. */ -+ if (write) { -+ btrfs_put_ordered_extent(dio_data->ordered); -+ dio_data->ordered = NULL; -+ } - } - - if (write) -@@ -7789,19 +7817,24 @@ static const struct iomap_dio_ops btrfs_dio_ops = { ++ return true; ++} ++ ++static inline void vma_end_read(struct vm_area_struct *vma) ++{ ++ rcu_read_lock(); /* keeps vma alive till the end of up_read */ ++ up_read(&vma->vm_lock->lock); ++ rcu_read_unlock(); ++} ++ ++static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) ++{ ++ mmap_assert_write_locked(vma->vm_mm); ++ ++ /* ++ * current task is holding mmap_write_lock, both vma->vm_lock_seq and ++ * mm->mm_lock_seq can't be concurrently modified. 
++ */ ++ *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq); ++ return (vma->vm_lock_seq == *mm_lock_seq); ++} ++ ++static inline void vma_start_write(struct vm_area_struct *vma) ++{ ++ int mm_lock_seq; ++ ++ if (__is_vma_write_locked(vma, &mm_lock_seq)) ++ return; ++ ++ down_write(&vma->vm_lock->lock); ++ vma->vm_lock_seq = mm_lock_seq; ++ up_write(&vma->vm_lock->lock); ++} ++ ++static inline bool vma_try_start_write(struct vm_area_struct *vma) ++{ ++ int mm_lock_seq; ++ ++ if (__is_vma_write_locked(vma, &mm_lock_seq)) ++ return true; ++ ++ if (!down_write_trylock(&vma->vm_lock->lock)) ++ return false; ++ ++ vma->vm_lock_seq = mm_lock_seq; ++ up_write(&vma->vm_lock->lock); ++ return true; ++} ++ ++static inline void vma_assert_write_locked(struct vm_area_struct *vma) ++{ ++ int mm_lock_seq; ++ ++ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); ++} ++ ++static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) ++{ ++ /* When detaching vma should be write-locked */ ++ if (detached) ++ vma_assert_write_locked(vma); ++ vma->detached = detached; ++} ++ ++struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, ++ unsigned long address); ++ ++#else /* CONFIG_PER_VMA_LOCK */ ++ ++static inline void vma_init_lock(struct vm_area_struct *vma) {} ++static inline bool vma_start_read(struct vm_area_struct *vma) ++ { return false; } ++static inline void vma_end_read(struct vm_area_struct *vma) {} ++static inline void vma_start_write(struct vm_area_struct *vma) {} ++static inline bool vma_try_start_write(struct vm_area_struct *vma) ++ { return true; } ++static inline void vma_assert_write_locked(struct vm_area_struct *vma) {} ++static inline void vma_mark_detached(struct vm_area_struct *vma, ++ bool detached) {} ++ ++#endif /* CONFIG_PER_VMA_LOCK */ ++ ++/* ++ * WARNING: vma_init does not initialize vma->vm_lock. ++ * Use vm_area_alloc()/vm_area_free() if vma needs locking. 
++ */ + static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) + { + static const struct vm_operations_struct dummy_vm_ops = {}; +@@ -631,6 +745,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) + vma->vm_mm = mm; + vma->vm_ops = &dummy_vm_ops; + INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma_mark_detached(vma, false); + } - ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) + /* Use when VMA is not part of the VMA tree and needs no locking */ +@@ -644,28 +759,28 @@ static inline void vm_flags_init(struct vm_area_struct *vma, + static inline void vm_flags_reset(struct vm_area_struct *vma, + vm_flags_t flags) { -- struct btrfs_dio_data data; -+ struct btrfs_dio_data data = { 0 }; +- mmap_assert_write_locked(vma->vm_mm); ++ vma_start_write(vma); + vm_flags_init(vma, flags); + } - return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, - IOMAP_DIO_PARTIAL, &data, done_before); + static inline void vm_flags_reset_once(struct vm_area_struct *vma, + vm_flags_t flags) + { +- mmap_assert_write_locked(vma->vm_mm); ++ vma_start_write(vma); + WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); } - struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, -+ struct btrfs_ordered_extent **ordered_extent, - size_t done_before) + static inline void vm_flags_set(struct vm_area_struct *vma, + vm_flags_t flags) { -- struct btrfs_dio_data data; -+ struct btrfs_dio_data dio_data = { .ordered = *ordered_extent }; -+ struct iomap_dio *dio; +- mmap_assert_write_locked(vma->vm_mm); ++ vma_start_write(vma); + ACCESS_PRIVATE(vma, __vm_flags) |= flags; + } -- return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, -- IOMAP_DIO_PARTIAL, &data, done_before); -+ dio = __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, -+ IOMAP_DIO_PARTIAL, &dio_data, done_before); -+ if (!IS_ERR_OR_NULL(dio)) -+ *ordered_extent = dio_data.ordered; -+ return dio; + static inline void vm_flags_clear(struct vm_area_struct *vma, + vm_flags_t flags) + { +- mmap_assert_write_locked(vma->vm_mm); ++ vma_start_write(vma); + ACCESS_PRIVATE(vma, __vm_flags) &= ~flags; } - static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, -diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c -index 6c24b69e2d0a..1848d0d1a9c4 100644 ---- a/fs/btrfs/ordered-data.c -+++ b/fs/btrfs/ordered-data.c -@@ -160,14 +160,16 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, - * @compress_type: Compression algorithm used for data. - * - * Most of these parameters correspond to &struct btrfs_file_extent_item. The -- * tree is given a single reference on the ordered extent that was inserted. -+ * tree is given a single reference on the ordered extent that was inserted, and -+ * the returned pointer is given a second reference. - * -- * Return: 0 or -ENOMEM. -+ * Return: the new ordered extent or ERR_PTR(-ENOMEM). 
- */ --int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, -- u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, -- u64 disk_num_bytes, u64 offset, unsigned flags, -- int compress_type) -+struct btrfs_ordered_extent *btrfs_alloc_ordered_extent( -+ struct btrfs_inode *inode, u64 file_offset, -+ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, -+ u64 disk_num_bytes, u64 offset, unsigned long flags, -+ int compress_type) +@@ -686,7 +801,7 @@ static inline void __vm_flags_mod(struct vm_area_struct *vma, + static inline void vm_flags_mod(struct vm_area_struct *vma, + vm_flags_t set, vm_flags_t clear) { - struct btrfs_root *root = inode->root; - struct btrfs_fs_info *fs_info = root->fs_info; -@@ -181,7 +183,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, - /* For nocow write, we can release the qgroup rsv right now */ - ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes); - if (ret < 0) -- return ret; -+ return ERR_PTR(ret); - ret = 0; - } else { - /* -@@ -190,11 +192,11 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, - */ - ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes); - if (ret < 0) -- return ret; -+ return ERR_PTR(ret); - } - entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); - if (!entry) -- return -ENOMEM; -+ return ERR_PTR(-ENOMEM); +- mmap_assert_write_locked(vma->vm_mm); ++ vma_start_write(vma); + __vm_flags_mod(vma, set, clear); + } - entry->file_offset = file_offset; - entry->num_bytes = num_bytes; -@@ -256,6 +258,32 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, - btrfs_mod_outstanding_extents(inode, 1); - spin_unlock(&inode->lock); +diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h +index de1e622dd366..0e1d239a882c 100644 +--- a/include/linux/mm_inline.h ++++ b/include/linux/mm_inline.h +@@ -557,6 +557,12 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, + /* The current status of the pte should be "cleared" before calling */ + WARN_ON_ONCE(!pte_none(*pte)); -+ /* One ref for the returned entry to match semantics of lookup. */ -+ refcount_inc(&entry->refs); -+ -+ return entry; -+} -+ -+/* -+ * Add a new btrfs_ordered_extent for the range, but drop the reference instead -+ * of returning it to the caller. -+ */ -+int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, -+ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, -+ u64 disk_num_bytes, u64 offset, unsigned long flags, -+ int compress_type) -+{ -+ struct btrfs_ordered_extent *ordered; ++ /* ++ * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole ++ * thing, because when zapping either it means it's dropping the ++ * page, or in TTU where the present pte will be quickly replaced ++ * with a swap pte. There's no way of leaking the bit. ++ */ + if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) + return; + +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 22b2ac82bffd..ef74ea892c5b 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -471,6 +471,10 @@ struct anon_vma_name { + char name[]; + }; + ++struct vma_lock { ++ struct rw_semaphore lock; ++}; + -+ ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes, -+ ram_bytes, disk_bytenr, -+ disk_num_bytes, offset, flags, -+ compress_type); + /* + * This struct describes a virtual memory area. There is one of these + * per VM-area/task. 
A VM area is any part of the process virtual memory +@@ -480,9 +484,16 @@ struct anon_vma_name { + struct vm_area_struct { + /* The first cache line has the info for VMA tree walking. */ + +- unsigned long vm_start; /* Our start address within vm_mm. */ +- unsigned long vm_end; /* The first byte after our end address +- within vm_mm. */ ++ union { ++ struct { ++ /* VMA covers [vm_start; vm_end) addresses within mm */ ++ unsigned long vm_start; ++ unsigned long vm_end; ++ }; ++#ifdef CONFIG_PER_VMA_LOCK ++ struct rcu_head vm_rcu; /* Used for deferred freeing. */ ++#endif ++ }; + + struct mm_struct *vm_mm; /* The address space we belong to. */ + +@@ -501,6 +512,14 @@ struct vm_area_struct { + vm_flags_t __private __vm_flags; + }; + ++#ifdef CONFIG_PER_VMA_LOCK ++ int vm_lock_seq; ++ struct vma_lock *vm_lock; + -+ if (IS_ERR(ordered)) -+ return PTR_ERR(ordered); -+ btrfs_put_ordered_extent(ordered); ++ /* Flag to indicate areas detached from the mm->mm_mt tree */ ++ bool detached; ++#endif + - return 0; - } + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. +@@ -637,6 +656,9 @@ struct mm_struct { + * init_mm.mmlist, and are protected + * by mmlist_lock + */ ++#ifdef CONFIG_PER_VMA_LOCK ++ int mm_lock_seq; ++#endif -diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h -index eb40cb39f842..18007f9c00ad 100644 ---- a/fs/btrfs/ordered-data.h -+++ b/fs/btrfs/ordered-data.h -@@ -178,9 +178,14 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode, - bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode, - struct btrfs_ordered_extent **cached, - u64 file_offset, u64 io_size); -+struct btrfs_ordered_extent *btrfs_alloc_ordered_extent( -+ struct btrfs_inode *inode, u64 file_offset, -+ u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, -+ u64 disk_num_bytes, u64 offset, unsigned long flags, -+ int compress_type); - int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset, - u64 num_bytes, u64 ram_bytes, u64 disk_bytenr, -- u64 disk_num_bytes, u64 offset, unsigned flags, -+ u64 disk_num_bytes, u64 offset, unsigned long flags, - int compress_type); - void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry, - struct btrfs_ordered_sum *sum); -diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c -index 3eecce86f63f..75e7fa337e66 100644 ---- a/fs/btrfs/space-info.c -+++ b/fs/btrfs/space-info.c -@@ -537,7 +537,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, - up_read(&info->groups_sem); - } --static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, -+static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info, - u64 to_reclaim) - { - u64 bytes; -@@ -550,6 +550,18 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, - return nr; - } + unsigned long hiwater_rss; /* High-watermark of RSS usage */ +@@ -1042,6 +1064,7 @@ typedef struct { + * mapped after the fault. + * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached. + * We should only access orig_pte if this flag set. ++ * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock. 
+ * + * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify + * whether we would allow page faults to retry by specifying these two +@@ -1079,6 +1102,7 @@ enum fault_flag { + FAULT_FLAG_INTERRUPTIBLE = 1 << 9, + FAULT_FLAG_UNSHARE = 1 << 10, + FAULT_FLAG_ORIG_PTE_VALID = 1 << 11, ++ FAULT_FLAG_VMA_LOCK = 1 << 12, + }; + + typedef unsigned int __bitwise zap_flags_t; +diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h +index 96e113e23d04..aab8f1b28d26 100644 +--- a/include/linux/mmap_lock.h ++++ b/include/linux/mmap_lock.h +@@ -60,6 +60,29 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) + + #endif /* CONFIG_TRACING */ -+static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info, -+ u64 to_reclaim) ++static inline void mmap_assert_locked(struct mm_struct *mm) +{ -+ const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1); -+ u64 nr; ++ lockdep_assert_held(&mm->mmap_lock); ++ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); ++} + -+ nr = div64_u64(to_reclaim, bytes); -+ if (!nr) -+ nr = 1; -+ return nr; ++static inline void mmap_assert_write_locked(struct mm_struct *mm) ++{ ++ lockdep_assert_held_write(&mm->mmap_lock); ++ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + - #define EXTENT_SIZE_PER_ITEM SZ_256K - - /* -@@ -727,7 +739,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, - break; - } - if (state == FLUSH_DELAYED_REFS_NR) -- nr = calc_reclaim_items_nr(fs_info, num_bytes); -+ nr = calc_delayed_refs_nr(fs_info, num_bytes); - else - nr = 0; - btrfs_run_delayed_refs(trans, nr); -@@ -1599,11 +1611,22 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, - struct reserve_ticket ticket; - u64 start_ns = 0; - u64 used; -- int ret = 0; -+ int ret = -ENOSPC; - bool pending_tickets; - - ASSERT(orig_bytes); -- ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); -+ /* -+ * If have a transaction handle (current->journal_info != NULL), then -+ * the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor -+ * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those -+ * flushing methods can trigger transaction commits. -+ */ -+ if (current->journal_info) { -+ /* One assert per line for easier debugging. 
*/ -+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL); -+ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL); -+ ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT); -+ } - - if (flush == BTRFS_RESERVE_FLUSH_DATA) - async_work = &fs_info->async_data_reclaim_work; -@@ -1611,7 +1634,6 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, - async_work = &fs_info->async_reclaim_work; - - spin_lock(&space_info->lock); -- ret = -ENOSPC; - used = btrfs_space_info_used(space_info, true); - - /* -diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h -index 2033b71b18ce..0bb9d14e60a8 100644 ---- a/fs/btrfs/space-info.h -+++ b/fs/btrfs/space-info.h -@@ -27,6 +27,7 @@ enum btrfs_reserve_flush_enum { - * - Running delayed refs - * - Running delalloc and waiting for ordered extents - * - Allocating a new chunk -+ * - Committing transaction - */ - BTRFS_RESERVE_FLUSH_EVICT, ++#ifdef CONFIG_PER_VMA_LOCK ++static inline void vma_end_write_all(struct mm_struct *mm) ++{ ++ mmap_assert_write_locked(mm); ++ /* No races during update due to exclusive mmap_lock being held */ ++ WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1); ++} ++#else ++static inline void vma_end_write_all(struct mm_struct *mm) {} ++#endif ++ + static inline void mmap_init_lock(struct mm_struct *mm) + { + init_rwsem(&mm->mmap_lock); +@@ -102,12 +125,14 @@ static inline bool mmap_write_trylock(struct mm_struct *mm) + static inline void mmap_write_unlock(struct mm_struct *mm) + { + __mmap_lock_trace_released(mm, true); ++ vma_end_write_all(mm); + up_write(&mm->mmap_lock); + } -diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c -index b8d5b1fa9a03..8b6a99b8d7f6 100644 ---- a/fs/btrfs/transaction.c -+++ b/fs/btrfs/transaction.c -@@ -601,15 +601,16 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, - /* - * We want to reserve all the bytes we may need all at once, so - * we only do 1 enospc flushing cycle per transaction start. We -- * accomplish this by simply assuming we'll do 2 x num_items -- * worth of delayed refs updates in this trans handle, and -- * refill that amount for whatever is missing in the reserve. -+ * accomplish this by simply assuming we'll do num_items worth -+ * of delayed refs updates in this trans handle, and refill that -+ * amount for whatever is missing in the reserve. 
- */ - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - if (flush == BTRFS_RESERVE_FLUSH_ALL && -- btrfs_block_rsv_full(delayed_refs_rsv) == 0) { -- delayed_refs_bytes = num_bytes; -- num_bytes <<= 1; -+ !btrfs_block_rsv_full(delayed_refs_rsv)) { -+ delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, -+ num_items); -+ num_bytes += delayed_refs_bytes; - } + static inline void mmap_write_downgrade(struct mm_struct *mm) + { + __mmap_lock_trace_acquire_returned(mm, false, true); ++ vma_end_write_all(mm); + downgrade_write(&mm->mmap_lock); + } - /* -@@ -942,16 +943,6 @@ void btrfs_throttle(struct btrfs_fs_info *fs_info) - wait_current_trans(fs_info); +@@ -150,18 +175,6 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) + up_read_non_owner(&mm->mmap_lock); } --static bool should_end_transaction(struct btrfs_trans_handle *trans) +-static inline void mmap_assert_locked(struct mm_struct *mm) -{ -- struct btrfs_fs_info *fs_info = trans->fs_info; -- -- if (btrfs_check_space_for_delayed_refs(fs_info)) -- return true; +- lockdep_assert_held(&mm->mmap_lock); +- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +-} - -- return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 50); +-static inline void mmap_assert_write_locked(struct mm_struct *mm) +-{ +- lockdep_assert_held_write(&mm->mmap_lock); +- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); -} - - bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) + static inline int mmap_lock_is_contended(struct mm_struct *mm) { - struct btrfs_transaction *cur_trans = trans->transaction; -@@ -960,7 +951,10 @@ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) - test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) - return true; - -- return should_end_transaction(trans); -+ if (btrfs_check_space_for_delayed_refs(trans->fs_info)) -+ return true; -+ -+ return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50); - } - - static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans) -diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c -index 200cea6e49e5..df3d0753618f 100644 ---- a/fs/btrfs/tree-log.c -+++ b/fs/btrfs/tree-log.c -@@ -3648,6 +3648,9 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans, - ret = BTRFS_LOG_FORCE_COMMIT; - else - inode->last_dir_index_offset = last_index; -+ -+ if (btrfs_get_first_dir_index_to_log(inode) == 0) -+ btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset); - out: - kfree(ins_data); - -@@ -5406,6 +5409,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, - LIST_HEAD(dir_list); - struct btrfs_dir_list *dir_elem; - u64 ino = btrfs_ino(start_inode); -+ struct btrfs_inode *curr_inode = start_inode; - int ret = 0; - - /* -@@ -5420,43 +5424,38 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, - if (!path) - return -ENOMEM; - -+ ihold(&curr_inode->vfs_inode); -+ - while (true) { -- struct extent_buffer *leaf; -- struct btrfs_key min_key; -+ struct inode *vfs_inode; -+ struct btrfs_key key; -+ struct btrfs_key found_key; -+ u64 next_index; - bool continue_curr_inode = true; -- int nritems; -- int i; -+ int iter_ret; - -- min_key.objectid = ino; -- min_key.type = BTRFS_DIR_INDEX_KEY; -- min_key.offset = 0; -+ key.objectid = ino; -+ key.type = BTRFS_DIR_INDEX_KEY; -+ key.offset = btrfs_get_first_dir_index_to_log(curr_inode); -+ next_index = key.offset; - again: -- btrfs_release_path(path); -- ret = btrfs_search_forward(root, &min_key, path, 
trans->transid); -- if (ret < 0) { -- break; -- } else if (ret > 0) { -- ret = 0; -- goto next; -- } -- -- leaf = path->nodes[0]; -- nritems = btrfs_header_nritems(leaf); -- for (i = path->slots[0]; i < nritems; i++) { -+ btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) { -+ struct extent_buffer *leaf = path->nodes[0]; - struct btrfs_dir_item *di; - struct btrfs_key di_key; - struct inode *di_inode; - int log_mode = LOG_INODE_EXISTS; - int type; - -- btrfs_item_key_to_cpu(leaf, &min_key, i); -- if (min_key.objectid != ino || -- min_key.type != BTRFS_DIR_INDEX_KEY) { -+ if (found_key.objectid != ino || -+ found_key.type != BTRFS_DIR_INDEX_KEY) { - continue_curr_inode = false; - break; - } + return rwsem_is_contended(&mm->mmap_lock); +diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h +index 3767f18114ef..0cf8880219da 100644 +--- a/include/linux/userfaultfd_k.h ++++ b/include/linux/userfaultfd_k.h +@@ -179,6 +179,7 @@ extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, + unsigned long end, struct list_head *uf); + extern void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf); ++extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); -- di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); -+ next_index = found_key.offset + 1; -+ -+ di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); - type = btrfs_dir_ftype(leaf, di); - if (btrfs_dir_transid(leaf, di) < trans->transid) - continue; -@@ -5496,12 +5495,24 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, - break; - } + #else /* CONFIG_USERFAULTFD */ -- if (continue_curr_inode && min_key.offset < (u64)-1) { -- min_key.offset++; -+ btrfs_release_path(path); -+ -+ if (iter_ret < 0) { -+ ret = iter_ret; -+ goto out; -+ } else if (iter_ret > 0) { -+ continue_curr_inode = false; -+ } else { -+ key = found_key; -+ } -+ -+ if (continue_curr_inode && key.offset < (u64)-1) { -+ key.offset++; - goto again; - } +@@ -274,8 +275,30 @@ static inline bool uffd_disable_fault_around(struct vm_area_struct *vma) + return false; + } --next: -+ btrfs_set_first_dir_index_to_log(curr_inode, next_index); ++static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) ++{ ++ return false; ++} + - if (list_empty(&dir_list)) - break; + #endif /* CONFIG_USERFAULTFD */ -@@ -5509,9 +5520,22 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, - ino = dir_elem->ino; - list_del(&dir_elem->list); - kfree(dir_elem); ++static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) ++{ ++ /* Only wr-protect mode uses pte markers */ ++ if (!userfaultfd_wp(vma)) ++ return false; + -+ btrfs_add_delayed_iput(curr_inode); -+ curr_inode = NULL; ++ /* File-based uffd-wp always need markers */ ++ if (!vma_is_anonymous(vma)) ++ return true; + -+ vfs_inode = btrfs_iget(fs_info->sb, ino, root); -+ if (IS_ERR(vfs_inode)) { -+ ret = PTR_ERR(vfs_inode); -+ break; -+ } -+ curr_inode = BTRFS_I(vfs_inode); - } - out: - btrfs_free_path(path); -+ if (curr_inode) -+ btrfs_add_delayed_iput(curr_inode); ++ /* ++ * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED ++ * enabled (to apply markers on zero pages). 
++ */ ++ return userfaultfd_wp_unpopulated(vma); ++} + - if (ret) { - struct btrfs_dir_list *next; + static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry) + { + #ifdef CONFIG_PTE_MARKER_UFFD_WP +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h +index 7f5d1caf5890..8abfa1240040 100644 +--- a/include/linux/vm_event_item.h ++++ b/include/linux/vm_event_item.h +@@ -149,6 +149,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, + #ifdef CONFIG_X86 + DIRECT_MAP_LEVEL2_SPLIT, + DIRECT_MAP_LEVEL3_SPLIT, ++#endif ++#ifdef CONFIG_PER_VMA_LOCK_STATS ++ VMA_LOCK_SUCCESS, ++ VMA_LOCK_ABORT, ++ VMA_LOCK_RETRY, ++ VMA_LOCK_MISS, + #endif + NR_VM_EVENT_ITEMS + }; +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h +index 19cf5b6892ce..fed855bae6d8 100644 +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -125,6 +125,12 @@ static inline void vm_events_fold_cpu(int cpu) + #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) + #endif -diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c -index c6d592870400..236b6fdb9e92 100644 ---- a/fs/btrfs/volumes.c -+++ b/fs/btrfs/volumes.c -@@ -5438,7 +5438,7 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, - } - write_unlock(&em_tree->lock); ++#ifdef CONFIG_PER_VMA_LOCK_STATS ++#define count_vm_vma_lock_event(x) count_vm_event(x) ++#else ++#define count_vm_vma_lock_event(x) do {} while (0) ++#endif ++ + #define __count_zid_vm_events(item, zid, delta) \ + __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) -- block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size); -+ block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); - if (IS_ERR(block_group)) - goto error_del_extent; +diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h +index 005e5e306266..90c958952bfc 100644 +--- a/include/uapi/linux/userfaultfd.h ++++ b/include/uapi/linux/userfaultfd.h +@@ -38,7 +38,8 @@ + UFFD_FEATURE_MINOR_HUGETLBFS | \ + UFFD_FEATURE_MINOR_SHMEM | \ + UFFD_FEATURE_EXACT_ADDRESS | \ +- UFFD_FEATURE_WP_HUGETLBFS_SHMEM) ++ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ ++ UFFD_FEATURE_WP_UNPOPULATED) + #define UFFD_API_IOCTLS \ + ((__u64)1 << _UFFDIO_REGISTER | \ + (__u64)1 << _UFFDIO_UNREGISTER | \ +@@ -203,6 +204,12 @@ struct uffdio_api { + * + * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd + * write-protection mode is supported on both shmem and hugetlbfs. ++ * ++ * UFFD_FEATURE_WP_UNPOPULATED indicates that userfaultfd ++ * write-protection mode will always apply to unpopulated pages ++ * (i.e. empty ptes). This will be the default behavior for shmem ++ * & hugetlbfs, so this flag only affects anonymous memory behavior ++ * when userfault write-protection mode is registered. 
+ */ + #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) + #define UFFD_FEATURE_EVENT_FORK (1<<1) +@@ -217,6 +224,7 @@ struct uffdio_api { + #define UFFD_FEATURE_MINOR_SHMEM (1<<10) + #define UFFD_FEATURE_EXACT_ADDRESS (1<<11) + #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) ++#define UFFD_FEATURE_WP_UNPOPULATED (1<<13) + __u64 features; --- -2.40.0 - -From a22a87b1984afabbd1f3f9963647e5a4198efedc Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:32:06 +0200 -Subject: [PATCH 06/12] Implement amd-pstate guided driver - -Signed-off-by: Peter Jung ---- - .../admin-guide/kernel-parameters.txt | 40 ++-- - Documentation/admin-guide/pm/amd-pstate.rst | 31 ++- - drivers/acpi/cppc_acpi.c | 121 ++++++++++- - drivers/cpufreq/amd-pstate.c | 199 ++++++++++++------ - include/acpi/cppc_acpi.h | 11 + - include/linux/amd-pstate.h | 2 + - 6 files changed, 312 insertions(+), 92 deletions(-) - -diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 4f6761a93715..bf2a402af231 100644 ---- a/Documentation/admin-guide/kernel-parameters.txt -+++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -339,6 +339,29 @@ - This mode requires kvm-amd.avic=1. - (Default when IOMMU HW support is present.) + __u64 ioctls; +diff --git a/kernel/fork.c b/kernel/fork.c +index 349945168239..ebd353730887 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -455,13 +455,49 @@ static struct kmem_cache *vm_area_cachep; + /* SLAB cache for mm_struct structures (tsk->mm) */ + static struct kmem_cache *mm_cachep; -+ amd_pstate= [X86] -+ disable -+ Do not enable amd_pstate as the default -+ scaling driver for the supported processors -+ passive -+ Use amd_pstate with passive mode as a scaling driver. -+ In this mode autonomous selection is disabled. -+ Driver requests a desired performance level and platform -+ tries to match the same performance level if it is -+ satisfied by guaranteed performance level. -+ active -+ Use amd_pstate_epp driver instance as the scaling driver, -+ driver provides a hint to the hardware if software wants -+ to bias toward performance (0x0) or energy efficiency (0xff) -+ to the CPPC firmware. then CPPC power algorithm will -+ calculate the runtime workload and adjust the realtime cores -+ frequency. -+ guided -+ Activate guided autonomous mode. Driver requests minimum and -+ maximum performance level and the platform autonomously -+ selects a performance level in this range and appropriate -+ to the current workload. ++#ifdef CONFIG_PER_VMA_LOCK + - amijoy.map= [HW,JOY] Amiga joystick support - Map of devices attached to JOY0DAT and JOY1DAT - Format: , -@@ -7068,20 +7091,3 @@ - xmon commands. - off xmon is disabled. - -- amd_pstate= [X86] -- disable -- Do not enable amd_pstate as the default -- scaling driver for the supported processors -- passive -- Use amd_pstate as a scaling driver, driver requests a -- desired performance on this abstract scale and the power -- management firmware translates the requests into actual -- hardware states (core frequency, data fabric and memory -- clocks etc.) -- active -- Use amd_pstate_epp driver instance as the scaling driver, -- driver provides a hint to the hardware if software wants -- to bias toward performance (0x0) or energy efficiency (0xff) -- to the CPPC firmware. then CPPC power algorithm will -- calculate the runtime workload and adjust the realtime cores -- frequency. 
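
[Editorial usage note, not part of the patch.] The amd_pstate= parameter
documentation above selects the operation mode at boot; recent drivers
additionally expose a global sysfs status attribute for querying the mode,
and newer ones accept writes to it for runtime switching. The helper below
is a hypothetical sketch built on that assumption: the path
/sys/devices/system/cpu/amd_pstate/status and the accepted strings
(disable/passive/active/guided) follow the amd-pstate documentation
further down in this patch, writes require root, and kernels without
mode-switch support keep the attribute read-only.

/* amd_pstate_mode.c - editorial sketch, not from the patch.
 * Usage: ./amd_pstate_mode [disable|passive|active|guided]
 */
#include <stdio.h>

#define STATUS_PATH "/sys/devices/system/cpu/amd_pstate/status"

int main(int argc, char **argv)
{
	char mode[32];
	FILE *f = fopen(STATUS_PATH, "r");

	if (!f) {
		perror(STATUS_PATH);
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("current mode: %s", mode);
	fclose(f);

	if (argc > 1) {
		/* Writing one of the documented mode names asks the
		 * driver to re-register in that mode (root required,
		 * and only on kernels that support runtime switching). */
		f = fopen(STATUS_PATH, "w");
		if (!f || fputs(argv[1], f) == EOF) {
			perror("mode switch");
			return 1;
		}
		fclose(f);
	}
	return 0;
}
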
-diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst -index 6e5298b521b1..1cf40f69278c 100644 ---- a/Documentation/admin-guide/pm/amd-pstate.rst -+++ b/Documentation/admin-guide/pm/amd-pstate.rst -@@ -303,13 +303,18 @@ efficiency frequency management method on AMD processors. - AMD Pstate Driver Operation Modes - ================================= - --``amd_pstate`` CPPC has two operation modes: CPPC Autonomous(active) mode and --CPPC non-autonomous(passive) mode. --active mode and passive mode can be chosen by different kernel parameters. --When in Autonomous mode, CPPC ignores requests done in the Desired Performance --Target register and takes into account only the values set to the Minimum requested --performance, Maximum requested performance, and Energy Performance Preference --registers. When Autonomous is disabled, it only considers the Desired Performance Target. -+``amd_pstate`` CPPC has 3 operation modes: autonomous (active) mode, -+non-autonomous (passive) mode and guided autonomous (guided) mode. -+Active/passive/guided mode can be chosen by different kernel parameters. ++/* SLAB cache for vm_area_struct.lock */ ++static struct kmem_cache *vma_lock_cachep; ++ ++static bool vma_lock_alloc(struct vm_area_struct *vma) ++{ ++ vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); ++ if (!vma->vm_lock) ++ return false; ++ ++ init_rwsem(&vma->vm_lock->lock); ++ vma->vm_lock_seq = -1; ++ ++ return true; ++} ++ ++static inline void vma_lock_free(struct vm_area_struct *vma) ++{ ++ kmem_cache_free(vma_lock_cachep, vma->vm_lock); ++} + -+- In autonomous mode, platform ignores the desired performance level request -+ and takes into account only the values set to the minimum, maximum and energy -+ performance preference registers. -+- In non-autonomous mode, platform gets desired performance level -+ from OS directly through Desired Performance Register. -+- In guided-autonomous mode, platform sets operating performance level -+ autonomously according to the current workload and within the limits set by -+ OS through min and max performance registers. - - Active Mode - ------------ -@@ -338,6 +343,15 @@ to the Performance Reduction Tolerance register. Above the nominal performance l - processor must provide at least nominal performance requested and go higher if current - operating conditions allow. ++#else /* CONFIG_PER_VMA_LOCK */ ++ ++static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } ++static inline void vma_lock_free(struct vm_area_struct *vma) {} ++ ++#endif /* CONFIG_PER_VMA_LOCK */ ++ + struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) + { + struct vm_area_struct *vma; -+Guided Mode -+----------- + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +- if (vma) +- vma_init(vma, mm); ++ if (!vma) ++ return NULL; + -+``amd_pstate=guided`` ++ vma_init(vma, mm); ++ if (!vma_lock_alloc(vma)) { ++ kmem_cache_free(vm_area_cachep, vma); ++ return NULL; ++ } + -+If ``amd_pstate=guided`` is passed to kernel command line option then this mode -+is activated. In this mode, driver requests minimum and maximum performance -+level and the platform autonomously selects a performance level in this range -+and appropriate to the current workload. + return vma; + } - User Space Interface in ``sysfs`` - General - =========================================== -@@ -358,6 +372,9 @@ control its functionality at the system level. 
They are located in the - "passive" - The driver is functional and in the ``passive mode`` +@@ -469,26 +505,54 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) + { + struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); -+ "guided" -+ The driver is functional and in the ``guided mode`` +- if (new) { +- ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); +- ASSERT_EXCLUSIVE_WRITER(orig->vm_file); +- /* +- * orig->shared.rb may be modified concurrently, but the clone +- * will be reinitialized. +- */ +- data_race(memcpy(new, orig, sizeof(*new))); +- INIT_LIST_HEAD(&new->anon_vma_chain); +- dup_anon_vma_name(orig, new); ++ if (!new) ++ return NULL; + - "disable" - The driver is unregistered and not functional now. ++ ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ++ ASSERT_EXCLUSIVE_WRITER(orig->vm_file); ++ /* ++ * orig->shared.rb may be modified concurrently, but the clone ++ * will be reinitialized. ++ */ ++ data_race(memcpy(new, orig, sizeof(*new))); ++ if (!vma_lock_alloc(new)) { ++ kmem_cache_free(vm_area_cachep, new); ++ return NULL; + } ++ INIT_LIST_HEAD(&new->anon_vma_chain); ++ dup_anon_vma_name(orig, new); ++ + return new; + } -diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c -index c51d3ccb4cca..02a4bfb54967 100644 ---- a/drivers/acpi/cppc_acpi.c -+++ b/drivers/acpi/cppc_acpi.c -@@ -1433,6 +1433,103 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) +-void vm_area_free(struct vm_area_struct *vma) ++void __vm_area_free(struct vm_area_struct *vma) + { + free_anon_vma_name(vma); ++ vma_lock_free(vma); + kmem_cache_free(vm_area_cachep, vma); } - EXPORT_SYMBOL_GPL(cppc_set_epp_perf); -+/* -+ * cppc_get_auto_sel_caps - Read autonomous selection register. -+ * @cpunum : CPU from which to read register. -+ * @perf_caps : struct where autonomous selection register value is updated. -+ */ -+int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps) ++#ifdef CONFIG_PER_VMA_LOCK ++static void vm_area_free_rcu_cb(struct rcu_head *head) +{ -+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); -+ struct cpc_register_resource *auto_sel_reg; -+ u64 auto_sel; -+ -+ if (!cpc_desc) { -+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum); -+ return -ENODEV; -+ } ++ struct vm_area_struct *vma = container_of(head, struct vm_area_struct, ++ vm_rcu); + -+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; ++ /* The vma should not be locked while being destroyed. 
*/ ++ VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); ++ __vm_area_free(vma); ++} ++#endif + -+ if (!CPC_SUPPORTED(auto_sel_reg)) -+ pr_warn_once("Autonomous mode is not unsupported!\n"); ++void vm_area_free(struct vm_area_struct *vma) ++{ ++#ifdef CONFIG_PER_VMA_LOCK ++ call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); ++#else ++ __vm_area_free(vma); ++#endif ++} + -+ if (CPC_IN_PCC(auto_sel_reg)) { -+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); -+ struct cppc_pcc_data *pcc_ss_data = NULL; -+ int ret = 0; + static void account_kernel_stack(struct task_struct *tsk, int account) + { + if (IS_ENABLED(CONFIG_VMAP_STACK)) { +@@ -1132,6 +1196,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, + seqcount_init(&mm->write_protect_seq); + mmap_init_lock(mm); + INIT_LIST_HEAD(&mm->mmlist); ++#ifdef CONFIG_PER_VMA_LOCK ++ mm->mm_lock_seq = 0; ++#endif + mm_pgtables_bytes_init(mm); + mm->map_count = 0; + mm->locked_vm = 0; +@@ -3074,6 +3141,9 @@ void __init proc_caches_init(void) + NULL); + + vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); ++#ifdef CONFIG_PER_VMA_LOCK ++ vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); ++#endif + mmap_init(); + nsproxy_cache_init(); + } +diff --git a/mm/Kconfig b/mm/Kconfig +index cf2e47030fe8..459af2123189 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -1202,6 +1202,18 @@ config LRU_GEN_STATS + This option has a per-memcg and per-node memory overhead. + # } + ++config ARCH_SUPPORTS_PER_VMA_LOCK ++ def_bool n + -+ if (pcc_ss_id < 0) -+ return -ENODEV; ++config PER_VMA_LOCK ++ def_bool y ++ depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP ++ help ++ Allow per-vma locking during page fault handling. + -+ pcc_ss_data = pcc_data[pcc_ss_id]; ++ This feature allows locking each virtual memory area separately when ++ handling page faults instead of taking mmap_lock. + -+ down_write(&pcc_ss_data->pcc_lock); + source "mm/damon/Kconfig" + + endmenu +diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug +index c3547a373c9c..4965a7333a3f 100644 +--- a/mm/Kconfig.debug ++++ b/mm/Kconfig.debug +@@ -279,3 +279,9 @@ config DEBUG_KMEMLEAK_AUTO_SCAN + + If unsure, say Y. + ++config PER_VMA_LOCK_STATS ++ bool "Statistics for per-vma locks" ++ depends on PER_VMA_LOCK ++ default y ++ help ++ Statistics for per-vma locks. +diff --git a/mm/filemap.c b/mm/filemap.c +index 2723104cc06a..7d898f26755b 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -1706,6 +1706,8 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) + * mmap_lock has been released (mmap_read_unlock(), unless flags had both + * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in + * which case mmap_lock is still held. ++ * If flags had FAULT_FLAG_VMA_LOCK set, meaning the operation is performed ++ * with VMA lock only, the VMA lock is still held. + * + * If neither ALLOW_RETRY nor KILLABLE are set, will always return true + * with the folio locked and the mmap_lock unperturbed. +@@ -1713,6 +1715,10 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) + bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, + unsigned int flags) + { ++ /* Can't do this if not holding mmap_lock */ ++ if (flags & FAULT_FLAG_VMA_LOCK) ++ return false; + -+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) { -+ cpc_read(cpunum, auto_sel_reg, &auto_sel); -+ perf_caps->auto_sel = (bool)auto_sel; -+ } else { -+ ret = -EIO; -+ } + if (fault_flag_allow_retry_first(flags)) { + /* + * CAUTION! 
In this case, mmap_lock is not released +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 245038a9fe4e..4d860b53a14a 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -6004,6 +6004,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + int need_wait_lock = 0; + unsigned long haddr = address & huge_page_mask(h); + ++ /* TODO: Handle faults under the VMA lock */ ++ if (flags & FAULT_FLAG_VMA_LOCK) ++ return VM_FAULT_RETRY; + -+ up_write(&pcc_ss_data->pcc_lock); + /* + * Serialize hugepage allocation and instantiation, so that we don't + * get spurious allocation failures if two CPUs race to instantiate +diff --git a/mm/init-mm.c b/mm/init-mm.c +index c9327abb771c..33269314e060 100644 +--- a/mm/init-mm.c ++++ b/mm/init-mm.c +@@ -37,6 +37,9 @@ struct mm_struct init_mm = { + .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), + .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), + .mmlist = LIST_HEAD_INIT(init_mm.mmlist), ++#ifdef CONFIG_PER_VMA_LOCK ++ .mm_lock_seq = 0, ++#endif + .user_ns = &init_user_ns, + .cpu_bitmap = CPU_BITS_NONE, + #ifdef CONFIG_IOMMU_SVA +diff --git a/mm/internal.h b/mm/internal.h +index 7920a8b7982e..0c455d6e4e3e 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -105,7 +105,7 @@ void folio_activate(struct folio *folio); + + void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + struct vm_area_struct *start_vma, unsigned long floor, +- unsigned long ceiling); ++ unsigned long ceiling, bool mm_wr_locked); + void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte); + + struct zap_details; +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index 92e6f56a932d..042007f0bfa1 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -1049,6 +1049,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, + if (result != SCAN_SUCCEED) + goto out_up_write; + ++ vma_start_write(vma); + anon_vma_lock_write(vma->anon_vma); + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address, +@@ -1172,7 +1173,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, + * enabled swap entries. Please see + * comment below for pte_uffd_wp(). 
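
collapse_huge_page() above can take the VMA write lock in the normal order, but retract_page_tables() further down already holds other locks and therefore only trylocks it (vma_try_start_write()). The standard back-off shape for that kind of ordering problem, reduced to two pthread mutexes (illustrative sketch, not kernel code):

/* cc -pthread trylock_backoff.c && ./a.out */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

static void locked_work(void) { /* both locks held here */ }

/* Never sleep on b while holding a in the "wrong" order: trylock and
 * back off instead, so two threads taking the locks in opposite orders
 * cannot deadlock. */
static void take_both(void)
{
	for (;;) {
		pthread_mutex_lock(&a);
		if (pthread_mutex_trylock(&b) == 0)
			break;
		pthread_mutex_unlock(&a);  /* back off and retry */
		sched_yield();
	}
	locked_work();
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
}

int main(void) { take_both(); puts("done"); return 0; }
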
+ */ +- if (pte_swp_uffd_wp(pteval)) { ++ if (pte_swp_uffd_wp_any(pteval)) { + result = SCAN_PTE_UFFD_WP; + goto out_unmap; + } +@@ -1512,6 +1513,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, + goto drop_hpage; + } + ++ /* Lock the vma before taking i_mmap and page table locks */ ++ vma_start_write(vma); + -+ return ret; -+ } + /* + * We need to lock the mapping so that from here on, only GUP-fast and + * hardware page walks can access the parts of the page tables that +@@ -1689,6 +1693,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff, + result = SCAN_PTE_MAPPED_HUGEPAGE; + if ((cc->is_khugepaged || is_target) && + mmap_write_trylock(mm)) { ++ /* trylock for the same lock inversion as above */ ++ if (!vma_try_start_write(vma)) ++ goto unlock_next; + -+ return 0; -+} -+EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps); + /* + * Re-check whether we have an ->anon_vma, because + * collapse_and_free_pmd() requires that either no +diff --git a/mm/memory.c b/mm/memory.c +index 01a23ad48a04..9deb0d0f3f7f 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -104,6 +104,20 @@ EXPORT_SYMBOL(mem_map); + #endif + + static vm_fault_t do_fault(struct vm_fault *vmf); ++static vm_fault_t do_anonymous_page(struct vm_fault *vmf); ++static bool vmf_pte_changed(struct vm_fault *vmf); + +/* -+ * cppc_set_auto_sel - Write autonomous selection register. -+ * @cpu : CPU to which to write register. -+ * @enable : the desired value of autonomous selection resiter to be updated. ++ * Return true if the original pte was a uffd-wp pte marker (so the pte was ++ * wr-protected). + */ -+int cppc_set_auto_sel(int cpu, bool enable) ++static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) +{ -+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); -+ struct cpc_register_resource *auto_sel_reg; -+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); -+ struct cppc_pcc_data *pcc_ss_data = NULL; -+ int ret = -EINVAL; -+ -+ if (!cpc_desc) { -+ pr_debug("No CPC descriptor for CPU:%d\n", cpu); -+ return -ENODEV; -+ } -+ -+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE]; -+ -+ if (CPC_IN_PCC(auto_sel_reg)) { -+ if (pcc_ss_id < 0) { -+ pr_debug("Invalid pcc_ss_id\n"); -+ return -ENODEV; -+ } -+ -+ if (CPC_SUPPORTED(auto_sel_reg)) { -+ ret = cpc_write(cpu, auto_sel_reg, enable); -+ if (ret) -+ return ret; -+ } ++ if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) ++ return false; + -+ pcc_ss_data = pcc_data[pcc_ss_id]; ++ return pte_marker_uffd_wp(vmf->orig_pte); ++} + + /* + * A number of key systems in x86 including ioremap() rely on the assumption +@@ -348,7 +362,7 @@ void free_pgd_range(struct mmu_gather *tlb, + + void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + struct vm_area_struct *vma, unsigned long floor, +- unsigned long ceiling) ++ unsigned long ceiling, bool mm_wr_locked) + { + MA_STATE(mas, mt, vma->vm_end, vma->vm_end); + +@@ -366,6 +380,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + * Hide vma from rmap and truncate_pagecache before freeing + * pgtables + */ ++ if (mm_wr_locked) ++ vma_start_write(vma); + unlink_anon_vmas(vma); + unlink_file_vma(vma); + +@@ -380,6 +396,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, + && !is_vm_hugetlb_page(next)) { + vma = next; + next = mas_find(&mas, ceiling - 1); ++ if (mm_wr_locked) ++ vma_start_write(vma); + unlink_anon_vmas(vma); + unlink_file_vma(vma); + } +@@ -1345,6 +1363,10 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, + unsigned long addr, 
pte_t *pte, + struct zap_details *details, pte_t pteval) + { ++ /* Zap on anonymous always means dropping everything */ ++ if (vma_is_anonymous(vma)) ++ return; + -+ down_write(&pcc_ss_data->pcc_lock); -+ /* after writing CPC, transfer the ownership of PCC to platform */ -+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); -+ up_write(&pcc_ss_data->pcc_lock); -+ } else { -+ ret = -ENOTSUPP; -+ pr_debug("_CPC in PCC is not supported\n"); -+ } + if (zap_drop_file_uffd_wp(details)) + return; + +@@ -1451,8 +1473,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, + continue; + rss[mm_counter(page)]--; + } else if (pte_marker_entry_uffd_wp(entry)) { +- /* Only drop the uffd-wp marker if explicitly requested */ +- if (!zap_drop_file_uffd_wp(details)) ++ /* ++ * For anon: always drop the marker; for file: only ++ * drop the marker if explicitly requested. ++ */ ++ if (!vma_is_anonymous(vma) && ++ !zap_drop_file_uffd_wp(details)) + continue; + } else if (is_hwpoison_entry(entry) || + is_swapin_error_entry(entry)) { +@@ -3322,6 +3348,9 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) + struct vm_area_struct *vma = vmf->vma; + struct folio *folio = NULL; + ++ if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) ++ return VM_FAULT_RETRY; + -+ return ret; + if (likely(!unshare)) { + if (userfaultfd_pte_wp(vma, *vmf->pte)) { + pte_unmap_unlock(vmf->pte, vmf->ptl); +@@ -3633,6 +3662,14 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) + return 0; + } + ++static vm_fault_t do_pte_missing(struct vm_fault *vmf) ++{ ++ if (vma_is_anonymous(vmf->vma)) ++ return do_anonymous_page(vmf); ++ else ++ return do_fault(vmf); +} -+EXPORT_SYMBOL_GPL(cppc_set_auto_sel); -+ + - /** - * cppc_set_enable - Set to enable CPPC on the processor by writing the - * Continuous Performance Control package EnableRegister field. -@@ -1488,7 +1585,7 @@ EXPORT_SYMBOL_GPL(cppc_set_enable); - int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) + /* + * This is actually a page-missing access, but with uffd-wp special pte + * installed. It means this pte was wr-protected before being unmapped. +@@ -3643,11 +3680,10 @@ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) + * Just in case there're leftover special ptes even after the region + * got unregistered - we can simply clear them. 
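
The marker handling above means an anonymous range can now be write-protected through userfaultfd before any page exists, which is what the new UFFD_FEATURE_WP_UNPOPULATED advertises. A hedged user-space example (assumes a kernel carrying this series; the feature bit is redefined locally in case the installed uapi header predates it, and unprivileged use may require vm.unprivileged_userfaultfd=1):

/* cc uffd_wp_unpopulated.c && ./a.out */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef UFFD_FEATURE_WP_UNPOPULATED
#define UFFD_FEATURE_WP_UNPOPULATED (1 << 13)  /* assumption: value per this series */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int uffd = (int)syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	char *area;

	if (uffd < 0) { perror("userfaultfd"); return 1; }

	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_WP_UNPOPULATED,
	};
	if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

	area = mmap(NULL, page, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) { perror("mmap"); return 1; }

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

	/* No page exists yet: with WP_UNPOPULATED this installs a pte
	 * marker (see the fault/zap changes above) instead of the
	 * protection being silently lost on anonymous memory. */
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) { perror("UFFDIO_WRITEPROTECT"); return 1; }

	puts("anonymous range wr-protected while still unpopulated");
	return 0;
}
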
+ */ +- if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma))) ++ if (unlikely(!userfaultfd_wp(vmf->vma))) + return pte_marker_clear(vmf); + +- /* do_fault() can handle pte markers too like none pte */ +- return do_fault(vmf); ++ return do_pte_missing(vmf); + } + + static vm_fault_t handle_pte_marker(struct vm_fault *vmf) +@@ -4012,6 +4048,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) + */ + static vm_fault_t do_anonymous_page(struct vm_fault *vmf) { - struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); -- struct cpc_register_resource *desired_reg; -+ struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg; - int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); - struct cppc_pcc_data *pcc_ss_data = NULL; - int ret = 0; -@@ -1499,6 +1596,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) ++ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); + struct vm_area_struct *vma = vmf->vma; + struct folio *folio; + vm_fault_t ret = 0; +@@ -4045,7 +4082,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + vma->vm_page_prot)); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); +- if (!pte_none(*vmf->pte)) { ++ if (vmf_pte_changed(vmf)) { + update_mmu_tlb(vma, vmf->address, vmf->pte); + goto unlock; + } +@@ -4085,7 +4122,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); +- if (!pte_none(*vmf->pte)) { ++ if (vmf_pte_changed(vmf)) { + update_mmu_tlb(vma, vmf->address, vmf->pte); + goto release; + } +@@ -4105,6 +4142,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + folio_add_new_anon_rmap(folio, vma, vmf->address); + folio_add_lru_vma(folio, vma); + setpte: ++ if (uffd_wp) ++ entry = pte_mkuffd_wp(entry); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); + + /* No need to invalidate - it was non-present before */ +@@ -4272,7 +4311,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) + void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) + { + struct vm_area_struct *vma = vmf->vma; +- bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte); ++ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); + bool write = vmf->flags & FAULT_FLAG_WRITE; + bool prefault = vmf->address != addr; + pte_t entry; +@@ -4503,6 +4542,8 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) + return ret; } - desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; -+ min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF]; -+ max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF]; ++ if (vmf->flags & FAULT_FLAG_VMA_LOCK) ++ return VM_FAULT_RETRY; + ret = __do_fault(vmf); + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) + return ret; +@@ -4519,6 +4560,9 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret; - /* - * This is Phase-I where we want to write to CPC registers -@@ -1507,7 +1606,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) - * Since read_lock can be acquired by multiple CPUs simultaneously we - * achieve that goal here - */ -- if (CPC_IN_PCC(desired_reg)) { -+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { - if (pcc_ss_id < 0) { - pr_debug("Invalid pcc_ss_id\n"); - return -ENODEV; -@@ -1530,13 +1629,19 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) - cpc_desc->write_cmd_status = 0; - } ++ if (vmf->flags & FAULT_FLAG_VMA_LOCK) ++ return VM_FAULT_RETRY; 
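
do_pte_missing() above collapses the old open-coded branch in handle_pte_fault() into one helper, so both the none-pte case and the cleared-marker case funnel through a single anon-vs-file decision. The control flow as a standalone toy (all types and values invented for illustration):

#include <stdio.h>

enum pte_kind { PTE_NONE, PTE_MARKER, PTE_PRESENT };

struct fault { enum pte_kind kind; int anonymous; };

static const char *do_anonymous_page(void) { return "anonymous fault"; }
static const char *do_file_fault(void)     { return "file-backed fault"; }

/* Mirrors do_pte_missing(): one place decides anon vs file. */
static const char *do_pte_missing(const struct fault *f)
{
	return f->anonymous ? do_anonymous_page() : do_file_fault();
}

static const char *handle_pte_fault(const struct fault *f)
{
	switch (f->kind) {
	case PTE_NONE:
		return do_pte_missing(f);
	case PTE_MARKER:
		/* a uffd-wp marker is treated as missing once the marker
		 * ownership check has run, as in pte_marker_handle_uffd_wp() */
		return do_pte_missing(f);
	default:
		return "present pte: wp/numa/swap handling";
	}
}

int main(void)
{
	struct fault f = { PTE_NONE, 1 };

	puts(handle_pte_fault(&f));
	return 0;
}
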
++ + if (unlikely(anon_vma_prepare(vma))) + return VM_FAULT_OOM; -- /* -- * Skip writing MIN/MAX until Linux knows how to come up with -- * useful values. -- */ - cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); +@@ -4558,6 +4602,9 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf) + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret, tmp; -- if (CPC_IN_PCC(desired_reg)) -+ /** -+ * Only write if min_perf and max_perf not zero. Some drivers pass zero -+ * value to min and max perf, but they don't mean to set the zero value, -+ * they just don't want to write to those registers. -+ */ -+ if (perf_ctrls->min_perf) -+ cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf); -+ if (perf_ctrls->max_perf) -+ cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf); ++ if (vmf->flags & FAULT_FLAG_VMA_LOCK) ++ return VM_FAULT_RETRY; + -+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) - up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ - /* - * This is Phase-II where we transfer the ownership of PCC to Platform -@@ -1584,7 +1689,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) - * case during a CMD_READ and if there are pending writes it delivers - * the write command before servicing the read command - */ -- if (CPC_IN_PCC(desired_reg)) { -+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) { - if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ - /* Update only if there are pending write commands */ - if (pcc_ss_data->pending_pcc_write_cmd) -diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c -index 8dd46fad151e..5a3d4aa0f45a 100644 ---- a/drivers/cpufreq/amd-pstate.c -+++ b/drivers/cpufreq/amd-pstate.c -@@ -63,7 +63,6 @@ static struct cpufreq_driver *current_pstate_driver; - static struct cpufreq_driver amd_pstate_driver; - static struct cpufreq_driver amd_pstate_epp_driver; - static int cppc_state = AMD_PSTATE_DISABLE; --struct kobject *amd_pstate_kobj; + ret = __do_fault(vmf); + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) + return ret; +@@ -4916,12 +4963,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) + } + } - /* - * AMD Energy Preference Performance (EPP) -@@ -106,6 +105,8 @@ static unsigned int epp_values[] = { - [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE, - }; +- if (!vmf->pte) { +- if (vma_is_anonymous(vmf->vma)) +- return do_anonymous_page(vmf); +- else +- return do_fault(vmf); +- } ++ if (!vmf->pte) ++ return do_pte_missing(vmf); -+typedef int (*cppc_mode_transition_fn)(int); -+ - static inline int get_mode_idx_from_str(const char *str, size_t size) - { - int i; -@@ -308,7 +309,22 @@ static int cppc_init_perf(struct amd_cpudata *cpudata) - cppc_perf.lowest_nonlinear_perf); - WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); + if (!pte_present(vmf->orig_pte)) + return do_swap_page(vmf); +@@ -4929,6 +4972,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) + if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) + return do_numa_page(vmf); -- return 0; -+ if (cppc_state == AMD_PSTATE_ACTIVE) -+ return 0; -+ -+ ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf); -+ if (ret) { -+ pr_warn("failed to get auto_sel, ret: %d\n", ret); -+ return 0; -+ } -+ -+ ret = cppc_set_auto_sel(cpudata->cpu, -+ (cppc_state == AMD_PSTATE_PASSIVE) ? 
0 : 1); -+ -+ if (ret) -+ pr_warn("failed to set auto_sel, ret: %d\n", ret); ++ if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) ++ return VM_FAULT_RETRY; + -+ return ret; - } - - DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); -@@ -385,12 +401,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + entry = vmf->orig_pte; +@@ -4965,10 +5011,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) } - static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, -- u32 des_perf, u32 max_perf, bool fast_switch) -+ u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) + /* +- * By the time we get here, we already hold the mm semaphore +- * +- * The mmap_lock may have been released depending on flags and our +- * return value. See filemap_fault() and __folio_lock_or_retry(). ++ * On entry, we hold either the VMA lock or the mmap_lock ++ * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in ++ * the result, the mmap_lock is not held on exit. See filemap_fault() ++ * and __folio_lock_or_retry(). + */ + static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) +@@ -5080,24 +5126,31 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, + * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should + * still be in per-arch page fault handlers at the entry of page fault. + */ +-static inline void mm_account_fault(struct pt_regs *regs, ++static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, + unsigned long address, unsigned int flags, + vm_fault_t ret) { - u64 prev = READ_ONCE(cpudata->cppc_req_cached); - u64 value = prev; + bool major; - des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); ++ /* Incomplete faults will be accounted upon completion. */ ++ if (ret & VM_FAULT_RETRY) ++ return; + -+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { -+ min_perf = des_perf; -+ des_perf = 0; -+ } + /* +- * We don't do accounting for some specific faults: +- * +- * - Unsuccessful faults (e.g. when the address wasn't valid). That +- * includes arch_vma_access_permitted() failing before reaching here. +- * So this is not a "this many hardware page faults" counter. We +- * should use the hw profiling for that. +- * +- * - Incomplete faults (VM_FAULT_RETRY). They will only be counted +- * once they're completed. ++ * To preserve the behavior of older kernels, PGFAULT counters record ++ * both successful and failed faults, as opposed to perf counters, ++ * which ignore failed cases. ++ */ ++ count_vm_event(PGFAULT); ++ count_memcg_event_mm(mm, PGFAULT); + - value &= ~AMD_CPPC_MIN_PERF(~0L); - value |= AMD_CPPC_MIN_PERF(min_perf); - -@@ -445,7 +467,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy, - - cpufreq_freq_transition_begin(policy, &freqs); - amd_pstate_update(cpudata, min_perf, des_perf, -- max_perf, false); -+ max_perf, false, policy->governor->flags); - cpufreq_freq_transition_end(policy, &freqs, false); ++ /* ++ * Do not account for unsuccessful faults (e.g. when the address wasn't ++ * valid). That includes arch_vma_access_permitted() failing before ++ * reaching here. So this is not a "this many hardware page faults" ++ * counter. We should use the hw profiling for that. 
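
For guided mode, the amd_pstate_update() change in this hunk converts a dynamic governor's request into a floor: minimum performance is set to the desired level and the desired field is zeroed, letting the platform pick the operating point inside the window. A compact model of how the three fields are packed into one request value (field layout per the driver's AMD_CPPC_* masks; a user-space sketch, not the driver itself):

/* cc cppc_req.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

#define CPPC_MAX_PERF(x)  (((uint64_t)(x) & 0xff) << 0)
#define CPPC_MIN_PERF(x)  (((uint64_t)(x) & 0xff) << 8)
#define CPPC_DES_PERF(x)  (((uint64_t)(x) & 0xff) << 16)

static uint64_t pack_req(unsigned min, unsigned des, unsigned max, int guided)
{
	/* clamp desired into [min, max], as amd_pstate_update() does */
	if (des < min)
		des = min;
	if (des > max)
		des = max;

	if (guided) {        /* dynamic governor under guided mode: */
		min = des;   /* floor the window at the request ... */
		des = 0;     /* ... and let the platform choose */
	}
	return CPPC_MIN_PERF(min) | CPPC_DES_PERF(des) | CPPC_MAX_PERF(max);
}

int main(void)
{
	printf("passive: %#llx\n", (unsigned long long)pack_req(10, 80, 255, 0));
	printf("guided:  %#llx\n", (unsigned long long)pack_req(10, 80, 255, 1));
	return 0;
}
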
+ */ +- if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) ++ if (ret & VM_FAULT_ERROR) + return; - return 0; -@@ -479,7 +501,8 @@ static void amd_pstate_adjust_perf(unsigned int cpu, - if (max_perf < min_perf) - max_perf = min_perf; + /* +@@ -5180,21 +5233,22 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, + vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, struct pt_regs *regs) + { ++ /* If the fault handler drops the mmap_lock, vma may be freed */ ++ struct mm_struct *mm = vma->vm_mm; + vm_fault_t ret; -- amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); -+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, -+ policy->governor->flags); - cpufreq_cpu_put(policy); - } + __set_current_state(TASK_RUNNING); -@@ -816,6 +839,98 @@ static ssize_t show_energy_performance_preference( - return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]); - } +- count_vm_event(PGFAULT); +- count_memcg_event_mm(vma->vm_mm, PGFAULT); +- + ret = sanitize_fault_flags(vma, &flags); + if (ret) +- return ret; ++ goto out; -+static void amd_pstate_driver_cleanup(void) -+{ -+ amd_pstate_enable(false); -+ cppc_state = AMD_PSTATE_DISABLE; -+ current_pstate_driver = NULL; -+} -+ -+static int amd_pstate_register_driver(int mode) -+{ -+ int ret; -+ -+ if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED) -+ current_pstate_driver = &amd_pstate_driver; -+ else if (mode == AMD_PSTATE_ACTIVE) -+ current_pstate_driver = &amd_pstate_epp_driver; -+ else -+ return -EINVAL; -+ -+ cppc_state = mode; -+ ret = cpufreq_register_driver(current_pstate_driver); -+ if (ret) { -+ amd_pstate_driver_cleanup(); -+ return ret; + if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, + flags & FAULT_FLAG_INSTRUCTION, +- flags & FAULT_FLAG_REMOTE)) +- return VM_FAULT_SIGSEGV; ++ flags & FAULT_FLAG_REMOTE)) { ++ ret = VM_FAULT_SIGSEGV; ++ goto out; + } -+ return 0; -+} -+ -+static int amd_pstate_unregister_driver(int dummy) + + /* + * Enable the memcg OOM handling for faults triggered in user +@@ -5223,13 +5277,70 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) + mem_cgroup_oom_synchronize(false); + } +- +- mm_account_fault(regs, address, flags, ret); ++out: ++ mm_account_fault(mm, regs, address, flags, ret); + + return ret; + } + EXPORT_SYMBOL_GPL(handle_mm_fault); + ++#ifdef CONFIG_PER_VMA_LOCK ++/* ++ * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be ++ * stable and not isolated. If the VMA is not found or is being modified the ++ * function returns NULL. ++ */ ++struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, ++ unsigned long address) +{ -+ cpufreq_unregister_driver(current_pstate_driver); -+ amd_pstate_driver_cleanup(); -+ return 0; -+} ++ MA_STATE(mas, &mm->mm_mt, address, address); ++ struct vm_area_struct *vma; + -+static int amd_pstate_change_mode_without_dvr_change(int mode) -+{ -+ int cpu = 0; ++ rcu_read_lock(); ++retry: ++ vma = mas_walk(&mas); ++ if (!vma) ++ goto inval; + -+ cppc_state = mode; ++ /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ ++ if (vma_is_anonymous(vma) && !vma->anon_vma) ++ goto inval; + -+ if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) -+ return 0; ++ if (!vma_start_read(vma)) ++ goto inval; + -+ for_each_present_cpu(cpu) { -+ cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 
0 : 1); ++ /* ++ * Due to the possibility of userfault handler dropping mmap_lock, avoid ++ * it for now and fall back to page fault handling under mmap_lock. ++ */ ++ if (userfaultfd_armed(vma)) { ++ vma_end_read(vma); ++ goto inval; + } + -+ return 0; -+} -+ -+static int amd_pstate_change_driver_mode(int mode) -+{ -+ int ret; -+ -+ ret = amd_pstate_unregister_driver(0); -+ if (ret) -+ return ret; ++ /* Check since vm_start/vm_end might change before we lock the VMA */ ++ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { ++ vma_end_read(vma); ++ goto inval; ++ } + -+ ret = amd_pstate_register_driver(mode); -+ if (ret) -+ return ret; ++ /* Check if the VMA got isolated after we found it */ ++ if (vma->detached) { ++ vma_end_read(vma); ++ count_vm_vma_lock_event(VMA_LOCK_MISS); ++ /* The area was replaced with another one */ ++ goto retry; ++ } + -+ return 0; ++ rcu_read_unlock(); ++ return vma; ++inval: ++ rcu_read_unlock(); ++ count_vm_vma_lock_event(VMA_LOCK_ABORT); ++ return NULL; +} ++#endif /* CONFIG_PER_VMA_LOCK */ + -+static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = { -+ [AMD_PSTATE_DISABLE] = { -+ [AMD_PSTATE_DISABLE] = NULL, -+ [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver, -+ [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver, -+ [AMD_PSTATE_GUIDED] = amd_pstate_register_driver, -+ }, -+ [AMD_PSTATE_PASSIVE] = { -+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, -+ [AMD_PSTATE_PASSIVE] = NULL, -+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, -+ [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change, -+ }, -+ [AMD_PSTATE_ACTIVE] = { -+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, -+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode, -+ [AMD_PSTATE_ACTIVE] = NULL, -+ [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode, -+ }, -+ [AMD_PSTATE_GUIDED] = { -+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, -+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change, -+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, -+ [AMD_PSTATE_GUIDED] = NULL, -+ }, -+}; -+ - static ssize_t amd_pstate_show_status(char *buf) + #ifndef __PAGETABLE_P4D_FOLDED + /* + * Allocate p4d page table. +diff --git a/mm/mmap.c b/mm/mmap.c +index 6819eb2b77d7..bba9e996dfdc 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -133,7 +133,7 @@ void unlink_file_vma(struct vm_area_struct *vma) + /* + * Close a vm structure and free it. 
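
mode_state_machine above makes every (current mode, requested mode) transition a single table lookup, with NULL on the diagonal for same-mode no-ops. The same table-driven dispatch in miniature (standalone sketch; it collapses the driver's three distinct transition helpers into two for brevity):

#include <stdio.h>

enum mode { M_DISABLE, M_PASSIVE, M_ACTIVE, M_GUIDED, M_MAX };

typedef int (*transition_fn)(enum mode target);

static int do_register(enum mode t)   { printf("register driver, mode %d\n", t); return 0; }
static int do_unregister(enum mode t) { (void)t; puts("unregister driver"); return 0; }
static int do_switch(enum mode t)     { printf("re-register, mode %d\n", t); return 0; }

/* NULL on the diagonal: requesting the current mode is a no-op. */
static const transition_fn state_machine[M_MAX][M_MAX] = {
	[M_DISABLE] = { NULL, do_register, do_register, do_register },
	[M_PASSIVE] = { do_unregister, NULL, do_switch, do_switch },
	[M_ACTIVE]  = { do_unregister, do_switch, NULL, do_switch },
	[M_GUIDED]  = { do_unregister, do_switch, do_switch, NULL },
};

static int update_status(enum mode cur, int next)
{
	if (next < 0 || next >= M_MAX)
		return -1;
	return state_machine[cur][next] ? state_machine[cur][next](next) : 0;
}

int main(void)
{
	return update_status(M_PASSIVE, M_GUIDED);
}

Adding a mode then means adding one row and one column rather than touching an if/else ladder, which is exactly what the rewrite of amd_pstate_update_status() above buys.
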
+ */ +-static void remove_vma(struct vm_area_struct *vma) ++static void remove_vma(struct vm_area_struct *vma, bool unreachable) { - if (!current_pstate_driver) -@@ -824,55 +939,22 @@ static ssize_t amd_pstate_show_status(char *buf) - return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]); + might_sleep(); + if (vma->vm_ops && vma->vm_ops->close) +@@ -141,7 +141,10 @@ static void remove_vma(struct vm_area_struct *vma) + if (vma->vm_file) + fput(vma->vm_file); + mpol_put(vma_policy(vma)); +- vm_area_free(vma); ++ if (unreachable) ++ __vm_area_free(vma); ++ else ++ vm_area_free(vma); } --static void amd_pstate_driver_cleanup(void) --{ -- current_pstate_driver = NULL; --} -- - static int amd_pstate_update_status(const char *buf, size_t size) + static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, +@@ -502,6 +505,15 @@ static inline void init_vma_prep(struct vma_prepare *vp, + */ + static inline void vma_prepare(struct vma_prepare *vp) { -- int ret = 0; - int mode_idx; - -- if (size > 7 || size < 6) -+ if (size > strlen("passive") || size < strlen("active")) - return -EINVAL; ++ vma_start_write(vp->vma); ++ if (vp->adj_next) ++ vma_start_write(vp->adj_next); ++ /* vp->insert is always a newly created VMA, no need for locking */ ++ if (vp->remove) ++ vma_start_write(vp->remove); ++ if (vp->remove2) ++ vma_start_write(vp->remove2); + - mode_idx = get_mode_idx_from_str(buf, size); - -- switch(mode_idx) { -- case AMD_PSTATE_DISABLE: -- if (current_pstate_driver) { -- cpufreq_unregister_driver(current_pstate_driver); -- amd_pstate_driver_cleanup(); -- } -- break; -- case AMD_PSTATE_PASSIVE: -- if (current_pstate_driver) { -- if (current_pstate_driver == &amd_pstate_driver) -- return 0; -- cpufreq_unregister_driver(current_pstate_driver); -- } -+ if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX) -+ return -EINVAL; - -- current_pstate_driver = &amd_pstate_driver; -- cppc_state = AMD_PSTATE_PASSIVE; -- ret = cpufreq_register_driver(current_pstate_driver); -- break; -- case AMD_PSTATE_ACTIVE: -- if (current_pstate_driver) { -- if (current_pstate_driver == &amd_pstate_epp_driver) -- return 0; -- cpufreq_unregister_driver(current_pstate_driver); -- } -+ if (mode_state_machine[cppc_state][mode_idx]) -+ return mode_state_machine[cppc_state][mode_idx](mode_idx); - -- current_pstate_driver = &amd_pstate_epp_driver; -- cppc_state = AMD_PSTATE_ACTIVE; -- ret = cpufreq_register_driver(current_pstate_driver); -- break; -- default: -- ret = -EINVAL; -- break; -- } -- -- return ret; -+ return 0; - } - - static ssize_t show_status(struct kobject *kobj, -@@ -930,6 +1012,7 @@ static struct attribute *pstate_global_attributes[] = { - }; + if (vp->file) { + uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); - static const struct attribute_group amd_pstate_global_attr_group = { -+ .name = "amd_pstate", - .attrs = pstate_global_attributes, - }; +@@ -590,6 +602,7 @@ static inline void vma_complete(struct vma_prepare *vp, -@@ -1251,6 +1334,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = { + if (vp->remove) { + again: ++ vma_mark_detached(vp->remove, true); + if (vp->file) { + uprobe_munmap(vp->remove, vp->remove->vm_start, + vp->remove->vm_end); +@@ -683,12 +696,12 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, + if (vma_iter_prealloc(vmi)) + goto nomem; - static int __init amd_pstate_init(void) - { -+ struct device *dev_root; - int ret; ++ vma_prepare(&vp); + vma_adjust_trans_huge(vma, start, end, 0); + /* VMA iterator points to previous, so set 
to start if necessary */ + if (vma_iter_addr(vmi) != start) + vma_iter_set(vmi, start); - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) -@@ -1277,7 +1361,7 @@ static int __init amd_pstate_init(void) - /* capability check */ - if (boot_cpu_has(X86_FEATURE_CPPC)) { - pr_debug("AMD CPPC MSR based functionality is supported\n"); -- if (cppc_state == AMD_PSTATE_PASSIVE) -+ if (cppc_state != AMD_PSTATE_ACTIVE) - current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; - } else { - pr_debug("AMD CPPC shared memory based functionality is supported\n"); -@@ -1297,24 +1381,19 @@ static int __init amd_pstate_init(void) - if (ret) - pr_err("failed to register with return %d\n", ret); +- vma_prepare(&vp); + vma->vm_start = start; + vma->vm_end = end; + vma->vm_pgoff = pgoff; +@@ -723,8 +736,8 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, + return -ENOMEM; -- amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj); -- if (!amd_pstate_kobj) { -- ret = -EINVAL; -- pr_err("global sysfs registration failed.\n"); -- goto kobject_free; -- } -- -- ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group); -- if (ret) { -- pr_err("sysfs attribute export failed with error %d.\n", ret); -- goto global_attr_free; -+ dev_root = bus_get_dev_root(&cpu_subsys); -+ if (dev_root) { -+ ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group); -+ put_device(dev_root); -+ if (ret) { -+ pr_err("sysfs attribute export failed with error %d.\n", ret); -+ goto global_attr_free; -+ } - } + init_vma_prep(&vp, vma); +- vma_adjust_trans_huge(vma, start, end, 0); + vma_prepare(&vp); ++ vma_adjust_trans_huge(vma, start, end, 0); - return ret; + if (vma->vm_start < start) + vma_iter_clear(vmi, vma->vm_start, start); +@@ -994,12 +1007,12 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, + if (vma_iter_prealloc(vmi)) + return NULL; - global_attr_free: -- kobject_put(amd_pstate_kobj); --kobject_free: - cpufreq_unregister_driver(current_pstate_driver); - return ret; +- vma_adjust_trans_huge(vma, vma_start, vma_end, adj_next); + init_multi_vma_prep(&vp, vma, adjust, remove, remove2); + VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && + vp.anon_vma != adjust->anon_vma); + + vma_prepare(&vp); ++ vma_adjust_trans_huge(vma, vma_start, vma_end, adj_next); + if (vma_start < vma->vm_start || vma_end > vma->vm_end) + vma_expanded = true; + +@@ -2164,7 +2177,7 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) + if (vma->vm_flags & VM_ACCOUNT) + nr_accounted += nrpages; + vm_stat_account(mm, vma->vm_flags, -nrpages); +- remove_vma(vma); ++ remove_vma(vma, false); + } + vm_unacct_memory(nr_accounted); + validate_mm(mm); +@@ -2187,7 +2200,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, + update_hiwater_rss(mm); + unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked); + free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, +- next ? next->vm_start : USER_PGTABLES_CEILING); ++ next ? 
next->vm_start : USER_PGTABLES_CEILING, ++ mm_wr_locked); + tlb_finish_mmu(&tlb); } -@@ -1339,7 +1418,7 @@ static int __init amd_pstate_param(char *str) - if (cppc_state == AMD_PSTATE_ACTIVE) - current_pstate_driver = &amd_pstate_epp_driver; -- if (cppc_state == AMD_PSTATE_PASSIVE) -+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; +@@ -2243,10 +2257,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, + if (new->vm_ops && new->vm_ops->open) + new->vm_ops->open(new); - return 0; -diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h -index 6b487a5bd638..6126c977ece0 100644 ---- a/include/acpi/cppc_acpi.h -+++ b/include/acpi/cppc_acpi.h -@@ -109,6 +109,7 @@ struct cppc_perf_caps { - u32 lowest_freq; - u32 nominal_freq; - u32 energy_perf; -+ bool auto_sel; - }; +- vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); + init_vma_prep(&vp, vma); + vp.insert = new; + vma_prepare(&vp); ++ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); - struct cppc_perf_ctrls { -@@ -153,6 +154,8 @@ extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); - extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); - extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf); - extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable); -+extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps); -+extern int cppc_set_auto_sel(int cpu, bool enable); - #else /* !CONFIG_ACPI_CPPC_LIB */ - static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf) - { -@@ -214,6 +217,14 @@ static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf) + if (new_below) { + vma->vm_start = addr; +@@ -2290,10 +2304,12 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, + static inline int munmap_sidetree(struct vm_area_struct *vma, + struct ma_state *mas_detach) { - return -ENOTSUPP; - } -+static inline int cppc_set_auto_sel(int cpu, bool enable) -+{ -+ return -ENOTSUPP; -+} -+static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps) -+{ -+ return -ENOTSUPP; -+} - #endif /* !CONFIG_ACPI_CPPC_LIB */ ++ vma_start_write(vma); + mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1); + if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) + return -ENOMEM; - #endif /* _CPPC_ACPI_H*/ -diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h -index f5f22418e64b..c10ebf8c42e6 100644 ---- a/include/linux/amd-pstate.h -+++ b/include/linux/amd-pstate.h -@@ -97,6 +97,7 @@ enum amd_pstate_mode { - AMD_PSTATE_DISABLE = 0, - AMD_PSTATE_PASSIVE, - AMD_PSTATE_ACTIVE, -+ AMD_PSTATE_GUIDED, - AMD_PSTATE_MAX, - }; ++ vma_mark_detached(vma, true); + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm -= vma_pages(vma); -@@ -104,6 +105,7 @@ static const char * const amd_pstate_mode_string[] = { - [AMD_PSTATE_DISABLE] = "disable", - [AMD_PSTATE_PASSIVE] = "passive", - [AMD_PSTATE_ACTIVE] = "active", -+ [AMD_PSTATE_GUIDED] = "guided", - NULL, - }; - #endif /* _LINUX_AMD_PSTATE_H */ --- -2.40.0 - -From 0f6a1f135f27479417b7e57a2cdd75e1d736b83a Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:28:52 +0200 -Subject: [PATCH 07/12] ksm - -Signed-off-by: Peter Jung ---- - arch/alpha/kernel/syscalls/syscall.tbl | 1 + - arch/arm/tools/syscall.tbl | 1 + - arch/arm64/include/asm/unistd.h | 2 +- - arch/arm64/include/asm/unistd32.h | 2 + - arch/ia64/kernel/syscalls/syscall.tbl | 1 + - 
arch/m68k/kernel/syscalls/syscall.tbl | 1 + - arch/microblaze/kernel/syscalls/syscall.tbl | 1 + - arch/mips/kernel/syscalls/syscall_n32.tbl | 1 + - arch/mips/kernel/syscalls/syscall_n64.tbl | 1 + - arch/mips/kernel/syscalls/syscall_o32.tbl | 1 + - arch/parisc/kernel/syscalls/syscall.tbl | 1 + - arch/powerpc/kernel/syscalls/syscall.tbl | 1 + - arch/s390/kernel/syscalls/syscall.tbl | 1 + - arch/sh/kernel/syscalls/syscall.tbl | 1 + - arch/sparc/kernel/syscalls/syscall.tbl | 1 + - arch/x86/entry/syscalls/syscall_32.tbl | 1 + - arch/x86/entry/syscalls/syscall_64.tbl | 1 + - arch/xtensa/kernel/syscalls/syscall.tbl | 1 + - include/linux/ksm.h | 4 + - include/linux/syscalls.h | 1 + - include/uapi/asm-generic/unistd.h | 5 +- - kernel/sys_ni.c | 1 + - mm/ksm.c | 82 +++++++++----- - mm/madvise.c | 117 ++++++++++++++++++++ - 24 files changed, 199 insertions(+), 31 deletions(-) - -diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl -index 8ebacf37a8cf..c9d25f85d86d 100644 ---- a/arch/alpha/kernel/syscalls/syscall.tbl -+++ b/arch/alpha/kernel/syscalls/syscall.tbl -@@ -490,3 +490,4 @@ - 558 common process_mrelease sys_process_mrelease - 559 common futex_waitv sys_futex_waitv - 560 common set_mempolicy_home_node sys_ni_syscall -+561 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl -index ac964612d8b0..90933eabe115 100644 ---- a/arch/arm/tools/syscall.tbl -+++ b/arch/arm/tools/syscall.tbl -@@ -464,3 +464,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h -index 037feba03a51..64a514f90131 100644 ---- a/arch/arm64/include/asm/unistd.h -+++ b/arch/arm64/include/asm/unistd.h -@@ -39,7 +39,7 @@ - #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) - #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) +@@ -2949,9 +2965,9 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, + if (vma_iter_prealloc(vmi)) + goto unacct_fail; --#define __NR_compat_syscalls 451 -+#define __NR_compat_syscalls 452 - #endif +- vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); + init_vma_prep(&vp, vma); + vma_prepare(&vp); ++ vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); + vma->vm_end = addr + len; + vm_flags_set(vma, VM_SOFTDIRTY); + vma_iter_store(vmi, vma); +@@ -3084,7 +3100,7 @@ void exit_mmap(struct mm_struct *mm) + mmap_write_lock(mm); + mt_clear_in_rcu(&mm->mm_mt); + free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, +- USER_PGTABLES_CEILING); ++ USER_PGTABLES_CEILING, true); + tlb_finish_mmu(&tlb); + + /* +@@ -3095,7 +3111,7 @@ void exit_mmap(struct mm_struct *mm) + do { + if (vma->vm_flags & VM_ACCOUNT) + nr_accounted += vma_pages(vma); +- remove_vma(vma); ++ remove_vma(vma, true); + count++; + cond_resched(); + } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); +@@ -3218,6 +3234,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + get_file(new_vma->vm_file); + if (new_vma->vm_ops && new_vma->vm_ops->open) + new_vma->vm_ops->open(new_vma); ++ vma_start_write(new_vma); + if (vma_link(mm, new_vma)) + goto out_vma_link; + *need_rmap_locks = false; +@@ -3512,6 +3529,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) + * of mm/rmap.c: + * - all hugetlbfs_i_mmap_rwsem_key locks (aka 
mapping->i_mmap_rwsem for + * hugetlb mapping); ++ * - all vmas marked locked + * - all i_mmap_rwsem locks; + * - all anon_vma->rwseml + * +@@ -3534,6 +3552,13 @@ int mm_take_all_locks(struct mm_struct *mm) + + mutex_lock(&mm_all_locks_mutex); + ++ mas_for_each(&mas, vma, ULONG_MAX) { ++ if (signal_pending(current)) ++ goto out_unlock; ++ vma_start_write(vma); ++ } ++ ++ mas_set(&mas, 0); + mas_for_each(&mas, vma, ULONG_MAX) { + if (signal_pending(current)) + goto out_unlock; +@@ -3623,6 +3648,7 @@ void mm_drop_all_locks(struct mm_struct *mm) + if (vma->vm_file && vma->vm_file->f_mapping) + vm_unlock_mapping(vma->vm_file->f_mapping); + } ++ vma_end_write_all(mm); - #define __ARCH_WANT_SYS_CLONE -diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h -index 604a2053d006..91f2bb7199af 100644 ---- a/arch/arm64/include/asm/unistd32.h -+++ b/arch/arm64/include/asm/unistd32.h -@@ -907,6 +907,8 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) - __SYSCALL(__NR_futex_waitv, sys_futex_waitv) - #define __NR_set_mempolicy_home_node 450 - __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) -+#define __NR_pmadv_ksm 451 -+__SYSCALL(__NR_pmadv_ksm, sys_pmadv_ksm) + mutex_unlock(&mm_all_locks_mutex); + } +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 36351a00c0e8..204194155863 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -276,7 +276,15 @@ static long change_pte_range(struct mmu_gather *tlb, + } else { + /* It must be an none page, or what else?.. */ + WARN_ON_ONCE(!pte_none(oldpte)); +- if (unlikely(uffd_wp && !vma_is_anonymous(vma))) { ++ ++ /* ++ * Nobody plays with any none ptes besides ++ * userfaultfd when applying the protections. ++ */ ++ if (likely(!uffd_wp)) ++ continue; ++ ++ if (userfaultfd_wp_use_markers(vma)) { + /* + * For file-backed mem, we need to be able to + * wr-protect a none pte, because even if the +@@ -320,23 +328,46 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) + return 0; + } + +-/* Return true if we're uffd wr-protecting file-backed memory, or false */ ++/* ++ * Return true if we want to split THPs into PTE mappings in change ++ * protection procedure, false otherwise. ++ */ + static inline bool +-uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags) ++pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) + { ++ /* ++ * pte markers only resides in pte level, if we need pte markers, ++ * we need to split. We cannot wr-protect shmem thp because file ++ * thp is handled differently when split by erasing the pmd so far. 
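
mm_take_all_locks() above gains a first pass that write-locks every VMA, and like the existing passes it aborts the walk when a signal is pending. The essential loop, modeled with pthread mutexes and an atomic flag standing in for signal_pending() (in the kernel the unwind is done by mm_drop_all_locks(); here it is inlined):

/* cc -pthread take_all_locks.c && ./a.out */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NVMAS 4

static pthread_mutex_t vma_lock[NVMAS];
static atomic_bool signal_pending;

/* Lock everything in one fixed order; undo on interruption so the
 * caller never leaks a partially locked set. */
static bool take_all_locks(void)
{
	int i;

	for (i = 0; i < NVMAS; i++) {
		if (atomic_load(&signal_pending))
			goto out_unlock;
		pthread_mutex_lock(&vma_lock[i]);
	}
	return true;

out_unlock:
	while (i-- > 0)
		pthread_mutex_unlock(&vma_lock[i]);
	return false;
}

int main(void)
{
	for (int i = 0; i < NVMAS; i++)
		pthread_mutex_init(&vma_lock[i], NULL);
	printf("locked all: %s\n", take_all_locks() ? "yes" : "no");
	return 0;
}
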
++ */ + return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); + } /* - * Please add new compat syscalls above this comment and update -diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl -index 72c929d9902b..0d5b1d14b2b5 100644 ---- a/arch/ia64/kernel/syscalls/syscall.tbl -+++ b/arch/ia64/kernel/syscalls/syscall.tbl -@@ -371,3 +371,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl -index b1f3940bc298..5ccf925567da 100644 ---- a/arch/m68k/kernel/syscalls/syscall.tbl -+++ b/arch/m68k/kernel/syscalls/syscall.tbl -@@ -450,3 +450,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl -index 820145e47350..6b76208597f3 100644 ---- a/arch/microblaze/kernel/syscalls/syscall.tbl -+++ b/arch/microblaze/kernel/syscalls/syscall.tbl -@@ -456,3 +456,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl -index 253ff994ed2e..e4aeedb17c38 100644 ---- a/arch/mips/kernel/syscalls/syscall_n32.tbl -+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl -@@ -389,3 +389,4 @@ - 448 n32 process_mrelease sys_process_mrelease - 449 n32 futex_waitv sys_futex_waitv - 450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node -+451 n32 pmadv_ksm sys_pmadv_ksm -diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl -index 3f1886ad9d80..fe88db51efa0 100644 ---- a/arch/mips/kernel/syscalls/syscall_n64.tbl -+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl -@@ -365,3 +365,4 @@ - 448 n64 process_mrelease sys_process_mrelease - 449 n64 futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 n64 pmadv_ksm sys_pmadv_ksm -diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl -index 8f243e35a7b2..674cb940bd15 100644 ---- a/arch/mips/kernel/syscalls/syscall_o32.tbl -+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl -@@ -438,3 +438,4 @@ - 448 o32 process_mrelease sys_process_mrelease - 449 o32 futex_waitv sys_futex_waitv - 450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node -+451 o32 pmadv_ksm sys_pmadv_ksm -diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl -index 0e42fceb2d5e..5914aa460255 100644 ---- a/arch/parisc/kernel/syscalls/syscall.tbl -+++ b/arch/parisc/kernel/syscalls/syscall.tbl -@@ -448,3 +448,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl -index a0be127475b1..347894da4eb6 100644 ---- a/arch/powerpc/kernel/syscalls/syscall.tbl -+++ b/arch/powerpc/kernel/syscalls/syscall.tbl -@@ -537,3 +537,4 @@ - 448 common process_mrelease 
sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl -index 799147658dee..1cd523748bd2 100644 ---- a/arch/s390/kernel/syscalls/syscall.tbl -+++ b/arch/s390/kernel/syscalls/syscall.tbl -@@ -453,3 +453,4 @@ - 448 common process_mrelease sys_process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm sys_pmadv_ksm -diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl -index 2de85c977f54..cfc75fa43eae 100644 ---- a/arch/sh/kernel/syscalls/syscall.tbl -+++ b/arch/sh/kernel/syscalls/syscall.tbl -@@ -453,3 +453,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl -index 4398cc6fb68d..d2c0a6426f6b 100644 ---- a/arch/sparc/kernel/syscalls/syscall.tbl -+++ b/arch/sparc/kernel/syscalls/syscall.tbl -@@ -496,3 +496,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl -index 320480a8db4f..331aaf1a782f 100644 ---- a/arch/x86/entry/syscalls/syscall_32.tbl -+++ b/arch/x86/entry/syscalls/syscall_32.tbl -@@ -455,3 +455,4 @@ - 448 i386 process_mrelease sys_process_mrelease - 449 i386 futex_waitv sys_futex_waitv - 450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node -+451 i386 pmadv_ksm sys_pmadv_ksm -diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl -index c84d12608cd2..14902db4c01f 100644 ---- a/arch/x86/entry/syscalls/syscall_64.tbl -+++ b/arch/x86/entry/syscalls/syscall_64.tbl -@@ -372,6 +372,7 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm +- * If wr-protecting the range for file-backed, populate pgtable for the case +- * when pgtable is empty but page cache exists. When {pte|pmd|...}_alloc() +- * failed we treat it the same way as pgtable allocation failures during +- * page faults by kicking OOM and returning error. ++ * Return true if we want to populate pgtables in change protection ++ * procedure, false otherwise ++ */ ++static inline bool ++pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) ++{ ++ /* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */ ++ if (!(cp_flags & MM_CP_UFFD_WP)) ++ return false; ++ ++ /* Populate if the userfaultfd mode requires pte markers */ ++ return userfaultfd_wp_use_markers(vma); ++} ++ ++/* ++ * Populate the pgtable underneath for whatever reason if requested. ++ * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable ++ * allocation failures during page faults by kicking OOM and returning ++ * error. 
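
The change_pmd_prepare()/change_prepare() helpers that follow are GNU statement expressions: the ({ ... }) block runs multi-statement setup and evaluates to an error code in place. The construct in isolation (GCC/Clang extension; prepare_table() and its malloc() stand-in are invented for the demo):

/* cc stmt_expr.c && ./a.out */
#include <stdio.h>
#include <stdlib.h>

/* A statement expression evaluates to its last statement, so the macro
 * can allocate, fail, and report -ENOMEM inline at the call site. */
#define prepare_table(pp)                          \
({                                                 \
	long __err = 0;                            \
	if (*(pp) == NULL) {                       \
		*(pp) = malloc(4096);              \
		if (*(pp) == NULL)                 \
			__err = -12; /* -ENOMEM */ \
	}                                          \
	__err;                                     \
})

int main(void)
{
	void *table = NULL;
	long err = prepare_table(&table);

	printf("err=%ld table=%p\n", err, table);
	free(table);
	return err ? 1 : 0;
}
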
+ */ + #define change_pmd_prepare(vma, pmd, cp_flags) \ + ({ \ + long err = 0; \ +- if (unlikely(uffd_wp_protect_file(vma, cp_flags))) { \ ++ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ + if (pte_alloc(vma->vm_mm, pmd)) \ + err = -ENOMEM; \ + } \ +@@ -351,7 +382,7 @@ uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags) + #define change_prepare(vma, high, low, addr, cp_flags) \ + ({ \ + long err = 0; \ +- if (unlikely(uffd_wp_protect_file(vma, cp_flags))) { \ ++ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ + low##_t *p = low##_alloc(vma->vm_mm, high, addr); \ + if (p == NULL) \ + err = -ENOMEM; \ +@@ -404,7 +435,7 @@ static inline long change_pmd_range(struct mmu_gather *tlb, + + if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { + if ((next - addr != HPAGE_PMD_SIZE) || +- uffd_wp_protect_file(vma, cp_flags)) { ++ pgtable_split_needed(vma, cp_flags)) { + __split_huge_pmd(vma, pmd, addr, false, NULL); + /* + * For file-backed, the pmd could have been +diff --git a/mm/mremap.c b/mm/mremap.c +index 411a85682b58..dd541e59edda 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -623,6 +623,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, + return -ENOMEM; + } + ++ vma_start_write(vma); + new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); + new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, + &need_rmap_locks); +diff --git a/mm/rmap.c b/mm/rmap.c +index 8632e02661ac..cfdaa56cad3e 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -25,21 +25,22 @@ + * mapping->invalidate_lock (in filemap_fault) + * page->flags PG_locked (lock_page) + * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) +- * mapping->i_mmap_rwsem +- * anon_vma->rwsem +- * mm->page_table_lock or pte_lock +- * swap_lock (in swap_duplicate, swap_info_get) +- * mmlist_lock (in mmput, drain_mmlist and others) +- * mapping->private_lock (in block_dirty_folio) +- * folio_lock_memcg move_lock (in block_dirty_folio) +- * i_pages lock (widely used) +- * lruvec->lru_lock (in folio_lruvec_lock_irq) +- * inode->i_lock (in set_page_dirty's __mark_inode_dirty) +- * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) +- * sb_lock (within inode_lock in fs/fs-writeback.c) +- * i_pages lock (widely used, in set_page_dirty, +- * in arch-dependent flush_dcache_mmap_lock, +- * within bdi.wb->list_lock in __sync_single_inode) ++ * vma_start_write ++ * mapping->i_mmap_rwsem ++ * anon_vma->rwsem ++ * mm->page_table_lock or pte_lock ++ * swap_lock (in swap_duplicate, swap_info_get) ++ * mmlist_lock (in mmput, drain_mmlist and others) ++ * mapping->private_lock (in block_dirty_folio) ++ * folio_lock_memcg move_lock (in block_dirty_folio) ++ * i_pages lock (widely used) ++ * lruvec->lru_lock (in folio_lruvec_lock_irq) ++ * inode->i_lock (in set_page_dirty's __mark_inode_dirty) ++ * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) ++ * sb_lock (within inode_lock in fs/fs-writeback.c) ++ * i_pages lock (widely used, in set_page_dirty, ++ * in arch-dependent flush_dcache_mmap_lock, ++ * within bdi.wb->list_lock in __sync_single_inode) + * + * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) + * ->tasklist_lock +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 1ea6a5ce1c41..4f1089a1860e 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1399,6 +1399,12 @@ const char * const vmstat_text[] = { + "direct_map_level2_splits", + "direct_map_level3_splits", + #endif ++#ifdef CONFIG_PER_VMA_LOCK_STATS ++ 
"vma_lock_success", ++ "vma_lock_abort", ++ "vma_lock_retry", ++ "vma_lock_miss", ++#endif + #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ + }; + #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ +diff --git a/tools/testing/selftests/mm/userfaultfd.c b/tools/testing/selftests/mm/userfaultfd.c +index 7f22844ed704..e030d63c031a 100644 +--- a/tools/testing/selftests/mm/userfaultfd.c ++++ b/tools/testing/selftests/mm/userfaultfd.c +@@ -1444,6 +1444,43 @@ static int pagemap_test_fork(bool present) + return result; + } - # - # Due to a historical design error, certain syscalls are numbered differently -diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl -index 52c94ab5c205..1518e261d882 100644 ---- a/arch/xtensa/kernel/syscalls/syscall.tbl -+++ b/arch/xtensa/kernel/syscalls/syscall.tbl -@@ -421,3 +421,4 @@ - 448 common process_mrelease sys_process_mrelease - 449 common futex_waitv sys_futex_waitv - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node -+451 common pmadv_ksm sys_pmadv_ksm -diff --git a/include/linux/ksm.h b/include/linux/ksm.h -index 7e232ba59b86..57ed92987717 100644 ---- a/include/linux/ksm.h -+++ b/include/linux/ksm.h -@@ -16,6 +16,10 @@ - #include ++static void userfaultfd_wp_unpopulated_test(int pagemap_fd) ++{ ++ uint64_t value; ++ ++ /* Test applying pte marker to anon unpopulated */ ++ wp_range(uffd, (uint64_t)area_dst, page_size, true); ++ value = pagemap_read_vaddr(pagemap_fd, area_dst); ++ pagemap_check_wp(value, true); ++ ++ /* Test unprotect on anon pte marker */ ++ wp_range(uffd, (uint64_t)area_dst, page_size, false); ++ value = pagemap_read_vaddr(pagemap_fd, area_dst); ++ pagemap_check_wp(value, false); ++ ++ /* Test zap on anon marker */ ++ wp_range(uffd, (uint64_t)area_dst, page_size, true); ++ if (madvise(area_dst, page_size, MADV_DONTNEED)) ++ err("madvise(MADV_DONTNEED) failed"); ++ value = pagemap_read_vaddr(pagemap_fd, area_dst); ++ pagemap_check_wp(value, false); ++ ++ /* Test fault in after marker removed */ ++ *area_dst = 1; ++ value = pagemap_read_vaddr(pagemap_fd, area_dst); ++ pagemap_check_wp(value, false); ++ /* Drop it to make pte none again */ ++ if (madvise(area_dst, page_size, MADV_DONTNEED)) ++ err("madvise(MADV_DONTNEED) failed"); ++ ++ /* Test read-zero-page upon pte marker */ ++ wp_range(uffd, (uint64_t)area_dst, page_size, true); ++ *(volatile char *)area_dst; ++ /* Drop it to make pte none again */ ++ if (madvise(area_dst, page_size, MADV_DONTNEED)) ++ err("madvise(MADV_DONTNEED) failed"); ++} ++ + static void userfaultfd_pagemap_test(unsigned int test_pgsize) + { + struct uffdio_register uffdio_register; +@@ -1462,7 +1499,7 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize) + /* Flush so it doesn't flush twice in parent/child later */ + fflush(stdout); - #ifdef CONFIG_KSM -+int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, -+ const vm_flags_t *vm_flags); -+int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, -+ unsigned long end, const vm_flags_t *vm_flags); - int ksm_madvise(struct vm_area_struct *vma, unsigned long start, - unsigned long end, int advice, unsigned long *vm_flags); - int __ksm_enter(struct mm_struct *mm); -diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h -index 33a0ee3bcb2e..62f14e800839 100644 ---- a/include/linux/syscalls.h -+++ b/include/linux/syscalls.h -@@ -919,6 +919,7 @@ asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); - asmlinkage 
long sys_process_madvise(int pidfd, const struct iovec __user *vec, - size_t vlen, int behavior, unsigned int flags); - asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); -+asmlinkage long sys_pmadv_ksm(int pidfd, int behavior, unsigned int flags); - asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, - unsigned long prot, unsigned long pgoff, - unsigned long flags); -diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h -index 45fa180cc56a..40f7e6d04af0 100644 ---- a/include/uapi/asm-generic/unistd.h -+++ b/include/uapi/asm-generic/unistd.h -@@ -886,8 +886,11 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv) - #define __NR_set_mempolicy_home_node 450 - __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) +- uffd_test_ctx_init(0); ++ uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED); -+#define __NR_pmadv_ksm 451 -+__SYSCALL(__NR_pmadv_ksm, sys_pmadv_ksm) + if (test_pgsize > page_size) { + /* This is a thp test */ +@@ -1482,6 +1519,10 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize) + + pagemap_fd = pagemap_open(); + ++ /* Smoke test WP_UNPOPULATED first when it's still empty */ ++ if (test_pgsize == page_size) ++ userfaultfd_wp_unpopulated_test(pagemap_fd); + - #undef __NR_syscalls --#define __NR_syscalls 451 -+#define __NR_syscalls 452 + /* Touch the page */ + *area_dst = 1; + wp_range(uffd, (uint64_t)area_dst, test_pgsize, true); +@@ -1526,7 +1567,7 @@ static int userfaultfd_stress(void) + struct uffdio_register uffdio_register; + struct uffd_stats uffd_stats[nr_cpus]; - /* - * 32 bit systems traditionally used different -diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c -index 860b2dcf3ac4..810e1fcaff94 100644 ---- a/kernel/sys_ni.c -+++ b/kernel/sys_ni.c -@@ -292,6 +292,7 @@ COND_SYSCALL(mincore); - COND_SYSCALL(madvise); - COND_SYSCALL(process_madvise); - COND_SYSCALL(process_mrelease); -+COND_SYSCALL(pmadv_ksm); - COND_SYSCALL(remap_file_pages); - COND_SYSCALL(mbind); - COND_SYSCALL(get_mempolicy); -diff --git a/mm/ksm.c b/mm/ksm.c -index 82029f1d454b..0c206bd8007d 100644 ---- a/mm/ksm.c -+++ b/mm/ksm.c -@@ -2576,52 +2576,76 @@ static int ksm_scan_thread(void *nothing) - return 0; +- uffd_test_ctx_init(0); ++ uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED); + + if (posix_memalign(&area, page_size, page_size)) + err("out of memory"); +-- +2.40.0 + +From ecf6bb05c11fa452ce4e97b36fc5933186812273 Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:46:01 +0200 +Subject: [PATCH 10/13] sched + +Signed-off-by: Peter Jung +--- + arch/x86/kernel/itmt.c | 23 +-- + arch/x86/kernel/smpboot.c | 4 +- + include/linux/sched.h | 3 + + include/linux/sched/sd_flags.h | 5 +- + kernel/sched/clock.c | 3 + + kernel/sched/core.c | 4 +- + kernel/sched/deadline.c | 1 + + kernel/sched/debug.c | 1 + + kernel/sched/fair.c | 265 ++++++++++++++++++++------------- + kernel/sched/features.h | 1 + + kernel/sched/pelt.c | 60 ++++++++ + kernel/sched/pelt.h | 42 +++++- + kernel/sched/rt.c | 4 + + kernel/sched/sched.h | 23 ++- + 14 files changed, 302 insertions(+), 137 deletions(-) + +diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c +index 9ff480e94511..6510883c5e81 100644 +--- a/arch/x86/kernel/itmt.c ++++ b/arch/x86/kernel/itmt.c +@@ -174,32 +174,19 @@ int arch_asym_cpu_priority(int cpu) + + /** + * sched_set_itmt_core_prio() - Set CPU priority based on ITMT +- * @prio: Priority of cpu core +- * @core_cpu: The cpu number associated with the core ++ * @prio: Priority of @cpu ++ * @cpu: The CPU 
number + * + * The pstate driver will find out the max boost frequency + * and call this function to set a priority proportional +- * to the max boost frequency. CPU with higher boost ++ * to the max boost frequency. CPUs with higher boost + * frequency will receive higher priority. + * + * No need to rebuild sched domain after updating + * the CPU priorities. The sched domains have no + * dependency on CPU priorities. + */ +-void sched_set_itmt_core_prio(int prio, int core_cpu) ++void sched_set_itmt_core_prio(int prio, int cpu) + { +- int cpu, i = 1; +- +- for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) { +- int smt_prio; +- +- /* +- * Ensure that the siblings are moved to the end +- * of the priority chain and only used when +- * all other high priority cpus are out of capacity. +- */ +- smt_prio = prio * smp_num_siblings / (i * i); +- per_cpu(sched_core_priority, cpu) = smt_prio; +- i++; +- } ++ per_cpu(sched_core_priority, cpu) = prio; + } +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 9013bb28255a..cea297d97034 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -547,7 +547,7 @@ static int x86_core_flags(void) + #ifdef CONFIG_SCHED_SMT + static int x86_smt_flags(void) + { +- return cpu_smt_flags() | x86_sched_itmt_flags(); ++ return cpu_smt_flags(); } + #endif + #ifdef CONFIG_SCHED_CLUSTER +@@ -578,7 +578,7 @@ static struct sched_domain_topology_level x86_hybrid_topology[] = { + #ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) }, + #endif +- { cpu_cpu_mask, SD_INIT_NAME(DIE) }, ++ { cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(DIE) }, + { NULL, }, + }; --int ksm_madvise(struct vm_area_struct *vma, unsigned long start, -- unsigned long end, int advice, unsigned long *vm_flags) -+int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, -+ const vm_flags_t *vm_flags) - { -- struct mm_struct *mm = vma->vm_mm; - int err; +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 63d242164b1a..6d398b337b0d 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -557,6 +557,9 @@ struct sched_entity { + u64 prev_sum_exec_runtime; -- switch (advice) { -- case MADV_MERGEABLE: -- /* -- * Be somewhat over-protective for now! -- */ -- if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | -- VM_PFNMAP | VM_IO | VM_DONTEXPAND | -- VM_HUGETLB | VM_MIXEDMAP)) -- return 0; /* just ignore the advice */ -+ /* -+ * Be somewhat over-protective for now! -+ */ -+ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | -+ VM_PFNMAP | VM_IO | VM_DONTEXPAND | -+ VM_HUGETLB | VM_MIXEDMAP)) -+ return 0; /* just ignore the advice */ + u64 nr_migrations; ++ u64 prev_sleep_sum_runtime; ++ /* average duration of a task */ ++ u64 dur_avg; -- if (vma_is_dax(vma)) -- return 0; -+ if (vma_is_dax(vma)) -+ return 0; + #ifdef CONFIG_FAIR_GROUP_SCHED + int depth; +diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h +index 57bde66d95f7..fad77b5172e2 100644 +--- a/include/linux/sched/sd_flags.h ++++ b/include/linux/sched/sd_flags.h +@@ -132,12 +132,9 @@ SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) + /* + * Place busy tasks earlier in the domain + * +- * SHARED_CHILD: Usually set on the SMT level. Technically could be set further +- * up, but currently assumed to be set from the base domain +- * upwards (see update_top_cache_domain()). + * NEEDS_GROUPS: Load balancing flag. 
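For reference, the sibling scaling that the itmt.c hunk above deletes, reduced to a standalone sketch with illustrative values: with prio = 100 and two SMT siblings, the first thread of a core got 200 and the second 50, which parked SMT threads at the end of the priority chain. The new code stores the pstate driver's priority unmodified and moves SMT awareness into the asym-packing logic reworked later in this patch:

/* old behaviour, as removed above; i is the 1-based sibling index */
static int old_smt_prio(int prio, int smp_num_siblings, int i)
{
	return prio * smp_num_siblings / (i * i);
}
/* e.g. old_smt_prio(100, 2, 1) == 200, old_smt_prio(100, 2, 2) == 50 */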
+ */ +-SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) ++SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS) - #ifdef VM_SAO - if (*vm_flags & VM_SAO) - return 0; - #endif - #ifdef VM_SPARC_ADI -- if (*vm_flags & VM_SPARC_ADI) -- return 0; -+ if (*vm_flags & VM_SPARC_ADI) -+ return 0; + /* + * Prefer to place tasks in a sibling domain +diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c +index 5732fa75ebab..b5cc2b53464d 100644 +--- a/kernel/sched/clock.c ++++ b/kernel/sched/clock.c +@@ -300,6 +300,9 @@ noinstr u64 local_clock(void) + if (static_branch_likely(&__sched_clock_stable)) + return sched_clock() + __sched_clock_offset; + ++ if (!static_branch_likely(&sched_clock_running)) ++ return sched_clock(); ++ + preempt_disable_notrace(); + clock = sched_clock_local(this_scd()); + preempt_enable_notrace(); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 0d18c3969f90..17bb9637f314 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -724,7 +724,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) + if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) + update_irq_load_avg(rq, irq_delta + steal); #endif +- update_rq_clock_pelt(rq, delta); ++ update_rq_clock_task_mult(rq, delta); + } -- if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { -- err = __ksm_enter(mm); -- if (err) -- return err; -- } -+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { -+ err = __ksm_enter(mm); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, -+ unsigned long end, const vm_flags_t *vm_flags) -+{ -+ int err; -+ -+ if (!(*vm_flags & VM_MERGEABLE)) -+ return 0; /* just ignore the advice */ -+ -+ if (vma->anon_vma) { -+ err = unmerge_ksm_pages(vma, start, end); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+int ksm_madvise(struct vm_area_struct *vma, unsigned long start, -+ unsigned long end, int advice, unsigned long *vm_flags) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ int err; -+ -+ switch (advice) { -+ case MADV_MERGEABLE: -+ err = ksm_madvise_merge(mm, vma, vm_flags); -+ if (err) -+ return err; - - *vm_flags |= VM_MERGEABLE; - break; + void update_rq_clock(struct rq *rq) +@@ -4434,6 +4434,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; + p->se.vruntime = 0; ++ p->se.dur_avg = 0; ++ p->se.prev_sleep_sum_runtime = 0; + INIT_LIST_HEAD(&p->se.group_node); - case MADV_UNMERGEABLE: -- if (!(*vm_flags & VM_MERGEABLE)) -- return 0; /* just ignore the advice */ -- -- if (vma->anon_vma) { -- err = unmerge_ksm_pages(vma, start, end); -- if (err) -- return err; -- } -+ err = ksm_madvise_unmerge(vma, start, end, vm_flags); -+ if (err) -+ return err; + #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 71b24371a6f7..ac8010f6f3a2 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) + !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || + task_on_cpu(rq, task) || + !dl_task(task) || ++ is_migration_disabled(task) || + !task_on_rq_queued(task))) { + double_unlock_balance(rq, later_rq); + later_rq = NULL; +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 1637b65ba07a..8d64fba16cfe 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -1024,6 +1024,7 @@ void proc_sched_show_task(struct task_struct *p, struct 
pid_namespace *ns, + __PS("nr_involuntary_switches", p->nivcsw); - *vm_flags &= ~VM_MERGEABLE; - break; -diff --git a/mm/madvise.c b/mm/madvise.c -index 340125d08c03..36e756355f04 100644 ---- a/mm/madvise.c -+++ b/mm/madvise.c -@@ -1522,3 +1522,120 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, - out: - return ret; - } -+ -+SYSCALL_DEFINE3(pmadv_ksm, int, pidfd, int, behaviour, unsigned int, flags) -+{ -+#ifdef CONFIG_KSM -+ ssize_t ret; -+ struct pid *pid; -+ struct task_struct *task; -+ struct mm_struct *mm; -+ unsigned int f_flags; -+ struct vm_area_struct *vma; -+ struct vma_iterator vmi; -+ -+ if (flags != 0) { -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ switch (behaviour) { -+ case MADV_MERGEABLE: -+ case MADV_UNMERGEABLE: -+ break; -+ default: -+ ret = -EINVAL; -+ goto out; -+ break; -+ } -+ -+ pid = pidfd_get_pid(pidfd, &f_flags); -+ if (IS_ERR(pid)) { -+ ret = PTR_ERR(pid); -+ goto out; -+ } -+ -+ task = get_pid_task(pid, PIDTYPE_PID); -+ if (!task) { -+ ret = -ESRCH; -+ goto put_pid; -+ } -+ -+ /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ -+ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); -+ if (IS_ERR_OR_NULL(mm)) { -+ ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; -+ goto release_task; -+ } -+ -+ /* Require CAP_SYS_NICE for influencing process performance. */ -+ if (!capable(CAP_SYS_NICE)) { -+ ret = -EPERM; -+ goto release_mm; -+ } -+ -+ if (mmap_write_lock_killable(mm)) { -+ ret = -EINTR; -+ goto release_mm; -+ } -+ -+ vma_iter_init(&vmi, mm, 0); -+ for_each_vma(vmi, vma) { -+ switch (behaviour) { -+ case MADV_MERGEABLE: -+ ret = ksm_madvise_merge(vma->vm_mm, vma, &vma->vm_flags); -+ if (!ret) -+ vm_flags_set(vma, VM_MERGEABLE); -+ break; -+ case MADV_UNMERGEABLE: -+ ret = ksm_madvise_unmerge(vma, vma->vm_start, vma->vm_end, &vma->vm_flags); -+ if (!ret) -+ vm_flags_clear(vma, VM_MERGEABLE); -+ break; -+ default: -+ /* look, ma, no brain */ -+ break; -+ } -+ if (ret) -+ break; -+ } -+ -+ mmap_write_unlock(mm); -+ -+release_mm: -+ mmput(mm); -+release_task: -+ put_task_struct(task); -+put_pid: -+ put_pid(pid); -+out: -+ return ret; -+#else /* CONFIG_KSM */ -+ return -ENOSYS; -+#endif /* CONFIG_KSM */ -+} -+ -+#ifdef CONFIG_KSM -+static ssize_t ksm_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) + P(se.load.weight); ++ P(se.dur_avg); + #ifdef CONFIG_SMP + P(se.avg.load_sum); + P(se.avg.runnable_sum); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 96c66b50ee48..0f92281fbed9 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -1082,6 +1082,23 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) + * Scheduling class queueing methods: + */ + ++static inline bool is_core_idle(int cpu) +{ -+ return sprintf(buf, "%u\n", __NR_pmadv_ksm); -+} -+static struct kobj_attribute pmadv_ksm_attr = __ATTR_RO(ksm); ++#ifdef CONFIG_SCHED_SMT ++ int sibling; + -+static struct attribute *pmadv_sysfs_attrs[] = { -+ &pmadv_ksm_attr.attr, -+ NULL, -+}; ++ for_each_cpu(sibling, cpu_smt_mask(cpu)) { ++ if (cpu == sibling) ++ continue; + -+static const struct attribute_group pmadv_sysfs_attr_group = { -+ .attrs = pmadv_sysfs_attrs, -+ .name = "pmadv", -+}; ++ if (!idle_cpu(sibling)) ++ return false; ++ } ++#endif + -+static int __init pmadv_sysfs_init(void) -+{ -+ return sysfs_create_group(kernel_kobj, &pmadv_sysfs_attr_group); ++ return true; +} -+subsys_initcall(pmadv_sysfs_init); -+#endif /* CONFIG_KSM */ --- -2.40.0 - -From 1300046c6d7d6110fa0da7db293f028382ee4c16 Mon Sep 17 00:00:00 
2001 -From: Peter Jung -Date: Sun, 9 Apr 2023 21:24:33 +0200 -Subject: [PATCH 08/12] maple-lru - -Signed-off-by: Peter Jung ---- - Documentation/mm/multigen_lru.rst | 44 ++++++++-- - include/linux/mmzone.h | 10 +-- - lib/maple_tree.c | 103 +++++++++------------- - mm/mmap.c | 48 +++++++++-- - mm/vmscan.c | 136 +++++++++++------------------- - tools/testing/radix-tree/maple.c | 24 ++++++ - 6 files changed, 197 insertions(+), 168 deletions(-) - -diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst -index 5f1f6ecbb79b..52ed5092022f 100644 ---- a/Documentation/mm/multigen_lru.rst -+++ b/Documentation/mm/multigen_lru.rst -@@ -103,7 +103,8 @@ moving across tiers only involves atomic operations on - ``folio->flags`` and therefore has a negligible cost. A feedback loop - modeled after the PID controller monitors refaults over all the tiers - from anon and file types and decides which tiers from which types to --evict or protect. -+evict or protect. The desired effect is to balance refault percentages -+between anon and file types proportional to the swappiness level. ++ + #ifdef CONFIG_NUMA + #define NUMA_IMBALANCE_MIN 2 - There are two conceptually independent procedures: the aging and the - eviction. They form a closed-loop system, i.e., the page reclaim. -@@ -156,6 +157,27 @@ This time-based approach has the following advantages: - and memory sizes. - 2. It is more reliable because it is directly wired to the OOM killer. +@@ -1718,23 +1735,6 @@ struct numa_stats { + int idle_cpu; + }; -+``mm_struct`` list -+------------------ -+An ``mm_struct`` list is maintained for each memcg, and an -+``mm_struct`` follows its owner task to the new memcg when this task -+is migrated. -+ -+A page table walker iterates ``lruvec_memcg()->mm_list`` and calls -+``walk_page_range()`` with each ``mm_struct`` on this list to scan -+PTEs. When multiple page table walkers iterate the same list, each of -+them gets a unique ``mm_struct``, and therefore they can run in -+parallel. -+ -+Page table walkers ignore any misplaced pages, e.g., if an -+``mm_struct`` was migrated, pages left in the previous memcg will be -+ignored when the current memcg is under reclaim. Similarly, page table -+walkers will ignore pages from nodes other than the one under reclaim. -+ -+This infrastructure also tracks the usage of ``mm_struct`` between -+context switches so that page table walkers can skip processes that -+have been sleeping since the last iteration. -+ - Rmap/PT walk feedback - --------------------- - Searching the rmap for PTEs mapping each page on an LRU list (to test -@@ -170,7 +192,7 @@ promotes hot pages. If the scan was done cacheline efficiently, it - adds the PMD entry pointing to the PTE table to the Bloom filter. This - forms a feedback loop between the eviction and the aging. +-static inline bool is_core_idle(int cpu) +-{ +-#ifdef CONFIG_SCHED_SMT +- int sibling; +- +- for_each_cpu(sibling, cpu_smt_mask(cpu)) { +- if (cpu == sibling) +- continue; +- +- if (!idle_cpu(sibling)) +- return false; +- } +-#endif +- +- return true; +-} +- + struct task_numa_env { + struct task_struct *p; --Bloom Filters -+Bloom filters - ------------- - Bloom filters are a space and memory efficient data structure for set - membership test, i.e., test if an element is not in the set or may be -@@ -186,6 +208,18 @@ is false positive, the cost is an additional scan of a range of PTEs, - which may yield hot pages anyway. Parameters of the filter itself can - control the false positive rate in the limit. 
+@@ -6333,6 +6333,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+ static void set_next_buddy(struct sched_entity *se);
+
++static inline void dur_avg_update(struct task_struct *p, bool task_sleep)
++{
++ u64 dur;
++
++ if (!task_sleep)
++ return;
++
++ dur = p->se.sum_exec_runtime - p->se.prev_sleep_sum_runtime;
++ p->se.prev_sleep_sum_runtime = p->se.sum_exec_runtime;
++ update_avg(&p->se.dur_avg, dur);
++}
++
+ /*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+@@ -6405,6 +6417,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

+ dequeue_throttle:
+ util_est_update(&rq->cfs, p, task_sleep);
++ dur_avg_update(p, task_sleep);
+ hrtick_update(rq);
+ }

+@@ -6538,6 +6551,23 @@ static int wake_wide(struct task_struct *p)
+ return 1;
+ }

++/*
++ * If a task switches in and then voluntarily relinquishes the
++ * CPU quickly, it is regarded as a short duration task.
++ *
++ * SIS_SHORT tries to wake up the short wakee on current CPU. This
++ * aims to avoid race condition among CPUs due to frequent context
++ * switch. Besides, the candidate short task should not be the one
++ * that wakes up more than one task, otherwise SIS_SHORT might
++ * stack too many tasks on current CPU.
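dur_avg_update() above folds the runtime since the previous voluntary sleep into p->se.dur_avg through fair.c's update_avg() helper. The helper is not shown in this hunk; the sketch below assumes its usual 1/8 EWMA weight:

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff / 8;
}
/* starting from dur_avg == 0, three 80us bursts between sleeps give
 * 0 -> 10000ns -> 18750ns -> ~26406ns, i.e. dur_avg drifts toward the
 * task's typical runtime between sleeps rather than jumping to it */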
++ */ ++static inline int is_short_task(struct task_struct *p) ++{ ++ return sched_feat(SIS_SHORT) && !p->wakee_flips && ++ p->se.dur_avg && ++ ((p->se.dur_avg * 8) < sysctl_sched_min_granularity); ++} ++ + /* + * The purpose of wake_affine() is to quickly determine on which CPU we can run + * soonest. For the purpose of speed we only consider the waking and previous +@@ -6574,6 +6604,11 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) + if (available_idle_cpu(prev_cpu)) + return prev_cpu; - #ifdef CONFIG_LRU_GEN - /* kswap mm walk data */ -- struct lru_gen_mm_walk mm_walk; -+ struct lru_gen_mm_walk mm_walk; - /* lru_gen_folio list */ - struct lru_gen_memcg memcg_lru; - #endif -diff --git a/lib/maple_tree.c b/lib/maple_tree.c -index db60edb55f2f..9172bcee94b4 100644 ---- a/lib/maple_tree.c -+++ b/lib/maple_tree.c -@@ -1303,26 +1303,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) - node = mas->alloc; - node->request_count = 0; - while (requested) { -- max_req = MAPLE_ALLOC_SLOTS; -- if (node->node_count) { -- unsigned int offset = node->node_count; -- -- slots = (void **)&node->slot[offset]; -- max_req -= offset; -- } else { -- slots = (void **)&node->slot; -- } -- -+ max_req = MAPLE_ALLOC_SLOTS - node->node_count; -+ slots = (void **)&node->slot[node->node_count]; - max_req = min(requested, max_req); - count = mt_alloc_bulk(gfp, max_req, slots); - if (!count) - goto nomem_bulk; ++ /* The only running task is a short duration one. */ ++ if (cpu_rq(this_cpu)->nr_running == 1 && ++ is_short_task(rcu_dereference(cpu_curr(this_cpu)))) ++ return this_cpu; ++ + return nr_cpumask_bits; + } -+ if (node->node_count == 0) { -+ node->slot[0]->node_count = 0; -+ node->slot[0]->request_count = 0; -+ } +@@ -6948,6 +6983,20 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool + /* overloaded LLC is unlikely to have idle cpu/core */ + if (nr == 1) + return -1; + - node->node_count += count; - allocated += count; - node = node->slot[0]; -- node->node_count = 0; -- node->request_count = 0; - requested -= count; ++ /* ++ * If the scan number suggested by SIS_UTIL is smaller ++ * than 60% of llc_weight, it indicates a util_avg% higher ++ * than 50%. System busier than this could lower its bar to ++ * choose a compromised "idle" CPU. This co-exists with ++ * !has_idle_core to not stack too many tasks on one CPU. 
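Plugging in numbers makes the is_short_task() cut-off above concrete. Assuming the unscaled sysctl_sched_min_granularity default of 750000ns (the kernel scales this by CPU count, so the effective value varies):

static inline int short_task_example(u64 dur_avg, unsigned int wakee_flips)
{
	/* dur_avg * 8 < 750000ns  <=>  dur_avg < ~93.7us */
	return !wakee_flips && dur_avg && (dur_avg * 8) < 750000;
}
/* only tasks that run well under 0.1ms between sleeps and fan out no
 * wakeups qualify for the SIS_SHORT shortcuts in wake_affine_idle()
 * and select_idle_cpu() */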
++ */ ++ if (!has_idle_core && this == target && ++ (5 * nr < 3 * sd->span_weight) && ++ cpu_rq(target)->nr_running <= 1 && ++ is_short_task(p) && ++ is_short_task(rcu_dereference(cpu_curr(target)))) ++ return target; + } } - mas->alloc->total = allocated; -@@ -2317,9 +2312,7 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) - static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) - { - struct ma_state *mas = wr_mas->mas; -- unsigned char count; -- unsigned char offset; -- unsigned long index, min, max; -+ unsigned char count, offset; - if (unlikely(ma_is_dense(wr_mas->type))) { - wr_mas->r_max = wr_mas->r_min = mas->index; -@@ -2332,34 +2325,12 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) - count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, - wr_mas->pivots, mas->max); - offset = mas->offset; -- min = mas_safe_min(mas, wr_mas->pivots, offset); -- if (unlikely(offset == count)) -- goto max; +@@ -9288,96 +9337,65 @@ group_type group_classify(unsigned int imbalance_pct, + } + + /** +- * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks +- * @dst_cpu: Destination CPU of the load balancing +- * @sds: Load-balancing data with statistics of the local group +- * @sgs: Load-balancing statistics of the candidate busiest group +- * @sg: The candidate busiest group +- * +- * Check the state of the SMT siblings of both @sds::local and @sg and decide +- * if @dst_cpu can pull tasks. ++ * sched_use_asym_prio - Check whether asym_packing priority must be used ++ * @sd: The scheduling domain of the load balancing ++ * @cpu: A CPU + * +- * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of +- * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks +- * only if @dst_cpu has higher priority. ++ * Always use CPU priority when balancing load between SMT siblings. When ++ * balancing load between cores, it is not sufficient that @cpu is idle. Only ++ * use CPU priority if the whole core is idle. + * +- * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more +- * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority. +- * Bigger imbalances in the number of busy CPUs will be dealt with in +- * update_sd_pick_busiest(). +- * +- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings +- * of @dst_cpu are idle and @sg has lower priority. +- * +- * Return: true if @dst_cpu can pull tasks, false otherwise. ++ * Returns: True if the priority of @cpu must be followed. False otherwise. + */ +-static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, +- struct sg_lb_stats *sgs, +- struct sched_group *sg) ++static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) + { + #ifdef CONFIG_SCHED_SMT +- bool local_is_smt, sg_is_smt; +- int sg_busy_cpus; - -- max = wr_mas->pivots[offset]; -- index = mas->index; -- if (unlikely(index <= max)) -- goto done; +- local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; +- sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY; - -- if (unlikely(!max && offset)) -- goto max; - -- min = max + 1; -- while (++offset < count) { -- max = wr_mas->pivots[offset]; -- if (index <= max) -- goto done; -- else if (unlikely(!max)) -- break; +- sg_busy_cpus = sgs->group_weight - sgs->idle_cpus; - -- min = max + 1; +- if (!local_is_smt) { +- /* +- * If we are here, @dst_cpu is idle and does not have SMT +- * siblings. 
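The 5 * nr < 3 * sd->span_weight test at the top of this hunk is the 60% cut-off from the comment: for an LLC spanning 16 CPUs it holds for nr <= 9, which per the comment corresponds to more than ~50% average utilization, and only then may a short waker keep a short wakee on @target instead of scanning further. A one-line restatement:

static inline int llc_busy_enough(int nr, int span_weight)
{
	return 5 * nr < 3 * span_weight;	/* nr below 60% of llc_weight */
}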
Pull tasks if candidate group has two or more
+- * busy CPUs.
+- */
+- if (sg_busy_cpus >= 2) /* implies sg_is_smt */
+- return true;
+-
+- /*
+- * @dst_cpu does not have SMT siblings. @sg may have SMT
+- * siblings and only one is busy. In such case, @dst_cpu
+- * can help if it has higher priority and is idle (i.e.,
+- * it has no running tasks).
+- */
+- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
+- }
+-
+- /* @dst_cpu has SMT siblings. */
+-
+- if (sg_is_smt) {
+- int local_busy_cpus = sds->local->group_weight -
+- sds->local_stat.idle_cpus;
+- int busy_cpus_delta = sg_busy_cpus - local_busy_cpus;
+-
+- if (busy_cpus_delta == 1)
+- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
+-
+- return false;
+- }
+-
+- /*
+- * @sg does not have SMT siblings. Ensure that @sds::local does not end
+- * up with more than one busy SMT sibling and only pull tasks if there
+- * are not busy CPUs (i.e., no CPU has running tasks).
+- */
+- if (!sds->local_stat.sum_nr_running)
+- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
++ if (!sched_smt_active())
++ return true;

+- return false;
++ return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
+ #else
+- /* Always return false so that callers deal with non-SMT cases. */
+- return false;
++ return true;
+ #endif
 }

++/**
++ * sched_asym - Check if the destination CPU can do asym_packing load balance
++ * @env: The load balancing environment
++ * @sds: Load-balancing data with statistics of the local group
++ * @sgs: Load-balancing statistics of the candidate busiest group
++ * @group: The candidate busiest group
++ *
++ * @env::dst_cpu can do asym_packing if it has higher priority than the
++ * preferred CPU of @group.
++ *
++ * SMT is a special case. If we are balancing load between cores, @env::dst_cpu
++ * can do asym_packing balance only if all its SMT siblings are idle. Also, it
++ * can only do it if @group is an SMT group and has exactly one busy CPU. Larger
++ * imbalances in the number of CPUs are dealt with in find_busiest_group().
++ *
++ * If we are balancing load within an SMT core, or at DIE domain level, always
++ * proceed.
++ *
++ * Return: true if @env::dst_cpu can do asym_packing load balance. False
++ * otherwise.
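Restating the decision that the new sched_use_asym_prio() above encodes, as a sketch rather than kernel code:

/*
 * - SMT not active at runtime: always follow CPU priority.
 * - @sd is the SMT level (SD_SHARE_CPUCAPACITY): siblings always
 *   compare priorities.
 * - core level and above: follow priority only when @cpu's whole
 *   core is idle (is_core_idle()).
 */
static bool use_asym_prio_sketch(bool smt_active, bool sd_is_smt_level,
				 bool whole_core_idle)
{
	if (!smt_active)
		return true;
	return sd_is_smt_level || whole_core_idle;
}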
++ */ + static inline bool + sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, + struct sched_group *group) { - enum maple_type type = mte_node_type(mas->node); - struct maple_node *node = mas_mn(mas); -@@ -5035,8 +5007,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) +- /* Only do SMT checks if either local or candidate have SMT siblings */ +- if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || +- (group->flags & SD_SHARE_CPUCAPACITY)) +- return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); ++ /* Ensure that the whole local core is idle, if applicable. */ ++ if (!sched_use_asym_prio(env->sd, env->dst_cpu)) ++ return false; ++ ++ /* ++ * CPU priorities does not make sense for SMT cores with more than one ++ * busy sibling. ++ */ ++ if (group->flags & SD_SHARE_CPUCAPACITY) { ++ if (sgs->group_weight - sgs->idle_cpus != 1) ++ return false; ++ } - if (unlikely(ma_is_leaf(type))) { - mas->offset = offset; -- mas->min = min; -- mas->max = min + gap - 1; -+ *gap_min = min; -+ *gap_max = min + gap - 1; - return true; - } + return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); + } +@@ -9567,10 +9585,22 @@ static bool update_sd_pick_busiest(struct lb_env *env, + * contention when accessing shared HW resources. + * + * XXX for now avg_load is not computed and always 0 so we +- * select the 1st one. ++ * select the 1st one, except if @sg is composed of SMT ++ * siblings. + */ +- if (sgs->avg_load <= busiest->avg_load) ++ ++ if (sgs->avg_load < busiest->avg_load) + return false; ++ ++ if (sgs->avg_load == busiest->avg_load) { ++ /* ++ * SMT sched groups need more help than non-SMT groups. ++ * If @sg happens to also be SMT, either choice is good. ++ */ ++ if (sds->busiest->flags & SD_SHARE_CPUCAPACITY) ++ return false; ++ } ++ + break; -@@ -5060,10 +5032,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) + case group_has_spare: +@@ -10045,7 +10075,6 @@ static void update_idle_cpu_scan(struct lb_env *env, + + static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) { - enum maple_type type = mte_node_type(mas->node); - unsigned long pivot, min, gap = 0; -- unsigned char offset; -- unsigned long *gaps; -- unsigned long *pivots = ma_pivots(mas_mn(mas), type); -- void __rcu **slots = ma_slots(mas_mn(mas), type); -+ unsigned char offset, data_end; -+ unsigned long *gaps, *pivots; -+ void __rcu **slots; -+ struct maple_node *node; - bool found = false; +- struct sched_domain *child = env->sd->child; + struct sched_group *sg = env->sd->groups; + struct sg_lb_stats *local = &sds->local_stat; + struct sg_lb_stats tmp_sgs; +@@ -10086,8 +10115,13 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd + sg = sg->next; + } while (sg != env->sd->groups); - if (ma_is_dense(type)) { -@@ -5071,13 +5043,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) - return true; - } +- /* Tag domain that child domain prefers tasks go to siblings first */ +- sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; ++ /* ++ * Indicate that the child domain of the busiest group prefers tasks ++ * go to a child's sibling domains first. NB the flags of a sched group ++ * are those of the child domain. 
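For an SMT-2 candidate group, the group_weight - sgs->idle_cpus guard in sched_asym() above works out as follows: two idle siblings mean nothing to pull, one idle sibling (exactly one busy CPU) means priorities are compared, and zero idle siblings mean the check bails out and the larger imbalance is left to find_busiest_group() and the avg_load tie-break added above. A one-line restatement:

static bool smt_group_uses_prio(unsigned int group_weight, unsigned int idle_cpus)
{
	return group_weight - idle_cpus == 1;	/* exactly one busy sibling */
}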
++ */
++ if (sds->busiest)
++ sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);

 if (env->sd->flags & SD_NUMA)
+@@ -10397,7 +10431,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ goto out_balanced;
+ }

+- /* Try to move all excess tasks to child's sibling domain */
++ /*
++ * Try to move all excess tasks to a sibling domain of the busiest
++ * group's child domain.
++ */
+ if (sds.prefer_sibling && local->group_type == group_has_spare &&
+ busiest->sum_nr_running > local->sum_nr_running + 1)
+ goto force_balance;
+@@ -10499,8 +10536,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+ nr_running == 1)
+ continue;

+- /* Make sure we only pull tasks from a CPU of lower priority */
++ /*
++ * Make sure we only pull tasks from a CPU of lower priority
++ * when balancing between SMT siblings.
++ *
++ * If balancing between cores, let lower priority CPUs help
++ * SMT cores with more than one busy sibling.
++ */
+ if ((env->sd->flags & SD_ASYM_PACKING) &&
++ sched_use_asym_prio(env->sd, i) &&
+ sched_asym_prefer(i, env->dst_cpu) &&
+ nr_running == 1)
+ continue;
+@@ -10589,12 +10633,19 @@ static inline bool
+ asym_active_balance(struct lb_env *env)
 {
+ /*
+- * ASYM_PACKING needs to force migrate tasks from busy but
+- * lower priority CPUs in order to pack all tasks in the
+- * highest priority CPUs.
++ * ASYM_PACKING needs to force migrate tasks from busy but lower
++ * priority CPUs in order to pack all tasks in the highest priority
++ * CPUs. When done between cores, do it only if the
++ * whole core is idle.
++ *
++ * If @env::src_cpu is an SMT core with busy siblings, let
++ * the lower priority @env::dst_cpu help it. Do not follow
++ * CPU priority.
+ */
+ return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+- sched_asym_prefer(env->dst_cpu, env->src_cpu);
++ sched_use_asym_prio(env->sd, env->dst_cpu) &&
++ (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
++ !sched_use_asym_prio(env->sd, env->src_cpu));
+ }

+ static inline bool
+@@ -11328,9 +11379,13 @@ static void nohz_balancer_kick(struct rq *rq)
+ * When ASYM_PACKING; see if there's a more preferred CPU
+ * currently idle; in which case, kick the ILB to move tasks
+ * around.
++ *
++ * When balancing between cores, all the SMT siblings of the
++ * preferred CPU must be idle.
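The reworked asym_active_balance() above reduces to the following shape; the parameter names are descriptive stand-ins for the sched_asym_prefer()/sched_use_asym_prio() calls in the hunk:

static bool asym_active_balance_sketch(bool dst_idle, bool asym_packing,
				       bool dst_may_use_prio,
				       bool dst_outranks_src,
				       bool src_core_has_busy_siblings)
{
	/* !sched_use_asym_prio(sd, src) at core level means src's SMT
	 * siblings are busy, so even a lower priority dst may help */
	return dst_idle && asym_packing && dst_may_use_prio &&
	       (dst_outranks_src || src_core_has_busy_siblings);
}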
+ */ + for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { +- if (sched_asym_prefer(i, cpu)) { ++ if (sched_use_asym_prio(sd, i) && ++ sched_asym_prefer(i, cpu)) { + flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; + goto unlock; + } +diff --git a/kernel/sched/features.h b/kernel/sched/features.h +index ee7f23c76bd3..efdc29c42161 100644 +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -62,6 +62,7 @@ SCHED_FEAT(TTWU_QUEUE, true) + */ + SCHED_FEAT(SIS_PROP, false) + SCHED_FEAT(SIS_UTIL, true) ++SCHED_FEAT(SIS_SHORT, true) + + /* + * Issue a WARN when we do multiple update_rq_clock() calls +diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c +index 0f310768260c..036b0e2cd2b4 100644 +--- a/kernel/sched/pelt.c ++++ b/kernel/sched/pelt.c +@@ -467,3 +467,63 @@ int update_irq_load_avg(struct rq *rq, u64 running) + return ret; + } + #endif + - if (mas_is_start(mas)) { - mas_start(mas); - mas->offset = mas_data_end(mas); -@@ -5385,7 +5365,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, - mas->index = min; - mas->last = max; ++__read_mostly unsigned int sched_pelt_lshift; ++ ++#ifdef CONFIG_SYSCTL ++static unsigned int sysctl_sched_pelt_multiplier = 1; ++ ++int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer, ++ size_t *lenp, loff_t *ppos) ++{ ++ static DEFINE_MUTEX(mutex); ++ unsigned int old; ++ int ret; ++ ++ mutex_lock(&mutex); ++ old = sysctl_sched_pelt_multiplier; ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ if (ret) ++ goto undo; ++ if (!write) ++ goto done; ++ ++ switch (sysctl_sched_pelt_multiplier) { ++ case 1: ++ fallthrough; ++ case 2: ++ fallthrough; ++ case 4: ++ WRITE_ONCE(sched_pelt_lshift, ++ sysctl_sched_pelt_multiplier >> 1); ++ goto done; ++ default: ++ ret = -EINVAL; ++ } ++ ++undo: ++ sysctl_sched_pelt_multiplier = old; ++done: ++ mutex_unlock(&mutex); ++ ++ return ret; ++} ++ ++static struct ctl_table sched_pelt_sysctls[] = { ++ { ++ .procname = "sched_pelt_multiplier", ++ .data = &sysctl_sched_pelt_multiplier, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = sched_pelt_multiplier, ++ }, ++ {} ++}; ++ ++static int __init sched_pelt_sysctl_init(void) ++{ ++ register_sysctl_init("kernel", sched_pelt_sysctls); ++ return 0; ++} ++late_initcall(sched_pelt_sysctl_init); ++#endif +diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h +index 3a0e0dc28721..9b35b5072bae 100644 +--- a/kernel/sched/pelt.h ++++ b/kernel/sched/pelt.h +@@ -61,6 +61,14 @@ static inline void cfs_se_util_change(struct sched_avg *avg) + WRITE_ONCE(avg->util_est.enqueued, enqueued); + } -- while (!mas_rev_awalk(mas, size)) { -+ while (!mas_rev_awalk(mas, size, &min, &max)) { - if (last == mas->node) { - if (!mas_rewind_node(mas)) - return -EBUSY; -@@ -5400,17 +5380,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, - if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) - return -EBUSY; ++static inline u64 rq_clock_task_mult(struct rq *rq) ++{ ++ lockdep_assert_rq_held(rq); ++ assert_clock_updated(rq); ++ ++ return rq->clock_task_mult; ++} ++ + static inline u64 rq_clock_pelt(struct rq *rq) + { + lockdep_assert_rq_held(rq); +@@ -72,7 +80,7 @@ static inline u64 rq_clock_pelt(struct rq *rq) + /* The rq is idle, we can sync to clock_task */ + static inline void _update_idle_rq_clock_pelt(struct rq *rq) + { +- rq->clock_pelt = rq_clock_task(rq); ++ rq->clock_pelt = rq_clock_task_mult(rq); -- /* -- * mas_rev_awalk() has set mas->min and mas->max to the gap values. 
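The sched_pelt_multiplier sysctl added by the pelt.c hunk in this patch is registered under "kernel", so it should surface as /proc/sys/kernel/sched_pelt_multiplier (path assumed from the register_sysctl_init() call above). A userspace usage sketch; only 1, 2 and 4 are accepted, mapping to a PELT clock left shift of 0, 1 or 2:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_pelt_multiplier", "w");

	if (!f) {
		perror("sched_pelt_multiplier");
		return 1;
	}
	fputs("2", f);	/* doubles the PELT clock; "3" would fail with EINVAL */
	fclose(f);
	return 0;
}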
If -- * the maximum is outside the window we are searching, then use the last -- * location in the search. -- * mas->max and mas->min is the range of the gap. -- * mas->index and mas->last are currently set to the search range. -- */ -- - /* Trim the upper limit to the max. */ -- if (mas->max <= mas->last) -- mas->last = mas->max; -+ if (max <= mas->last) -+ mas->last = max; + u64_u32_store(rq->clock_idle, rq_clock(rq)); + /* Paired with smp_rmb in migrate_se_pelt_lag() */ +@@ -121,6 +129,27 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) + rq->clock_pelt += delta; + } - mas->index = mas->last - size + 1; ++extern unsigned int sched_pelt_lshift; ++ ++/* ++ * absolute time |1 |2 |3 |4 |5 |6 | ++ * @ mult = 1 --------****************--------****************- ++ * @ mult = 2 --------********----------------********--------- ++ * @ mult = 4 --------****--------------------****------------- ++ * clock task mult ++ * @ mult = 2 | | |2 |3 | | | | |5 |6 | | | ++ * @ mult = 4 | | | | |2|3| | | | | | | | | | |5|6| | | | | | | ++ * ++ */ ++static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta) ++{ ++ delta <<= READ_ONCE(sched_pelt_lshift); ++ ++ rq->clock_task_mult += delta; ++ ++ update_rq_clock_pelt(rq, delta); ++} ++ + /* + * When rq becomes idle, we have to check if it has lost idle time + * because it was fully busy. A rq is fully used when the /Sum util_sum +@@ -147,7 +176,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) + * rq's clock_task. + */ + if (util_sum >= divider) +- rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt; ++ rq->lost_idle_time += rq_clock_task_mult(rq) - rq->clock_pelt; + + _update_idle_rq_clock_pelt(rq); + } +@@ -218,13 +247,18 @@ update_irq_load_avg(struct rq *rq, u64 running) return 0; -@@ -5819,6 +5791,7 @@ int mas_preallocate(struct ma_state *mas, gfp_t gfp) - mas_reset(mas); - return ret; } -+EXPORT_SYMBOL_GPL(mas_preallocate); - /* - * mas_destroy() - destroy a maple state. -diff --git a/mm/mmap.c b/mm/mmap.c -index ff68a67a2a7c..d5475fbf5729 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -1518,7 +1518,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) - */ - static unsigned long unmapped_area(struct vm_unmapped_area_info *info) +-static inline u64 rq_clock_pelt(struct rq *rq) ++static inline u64 rq_clock_task_mult(struct rq *rq) { -- unsigned long length, gap; -+ unsigned long length, gap, low_limit; -+ struct vm_area_struct *tmp; - - MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); + return rq_clock_task(rq); + } -@@ -1527,12 +1528,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) - if (length < info->length) - return -ENOMEM; ++static inline u64 rq_clock_pelt(struct rq *rq) ++{ ++ return rq_clock_task_mult(rq); ++} ++ + static inline void +-update_rq_clock_pelt(struct rq *rq, s64 delta) { } ++update_rq_clock_task_mult(struct rq *rq, s64 delta) { } -- if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1, -- length)) -+ low_limit = info->low_limit; -+retry: -+ if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length)) - return -ENOMEM; + static inline void + update_idle_rq_clock_pelt(struct rq *rq) { } +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 0a11f44adee5..4f5796dd26a5 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -2000,11 +2000,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) + * the mean time, task could have + * migrated already or had its affinity changed. 
+ * Also make sure that it wasn't scheduled on its rq. ++ * It is possible the task was scheduled, set ++ * "migrate_disabled" and then got preempted, so we must ++ * check the task migration disable flag here too. + */ + if (unlikely(task_rq(task) != rq || + !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || + task_on_cpu(rq, task) || + !rt_task(task) || ++ is_migration_disabled(task) || + !task_on_rq_queued(task))) { + + double_unlock_balance(rq, lowest_rq); +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 3e8df6d31c1e..7331d436ebc4 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1018,6 +1018,7 @@ struct rq { + u64 clock; + /* Ensure that all clocks are in the same cache line */ + u64 clock_task ____cacheline_aligned; ++ u64 clock_task_mult; + u64 clock_pelt; + unsigned long lost_idle_time; + u64 clock_pelt_idle; +@@ -1772,6 +1773,13 @@ queue_balance_callback(struct rq *rq, + for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ + __sd; __sd = __sd->parent) - gap = mas.index; - gap += (info->align_offset - gap) & info->align_mask; -+ tmp = mas_next(&mas, ULONG_MAX); -+ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */ -+ if (vm_start_gap(tmp) < gap + length - 1) { -+ low_limit = tmp->vm_end; -+ mas_reset(&mas); -+ goto retry; -+ } -+ } else { -+ tmp = mas_prev(&mas, 0); -+ if (tmp && vm_end_gap(tmp) > gap) { -+ low_limit = vm_end_gap(tmp); -+ mas_reset(&mas); -+ goto retry; -+ } -+ } ++/* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ ++#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | ++static const unsigned int SD_SHARED_CHILD_MASK = ++#include ++0; ++#undef SD_FLAG + - return gap; - } - -@@ -1548,7 +1566,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) + /** + * highest_flag_domain - Return highest sched_domain containing flag. + * @cpu: The CPU whose highest level of sched domain is to +@@ -1779,16 +1787,25 @@ queue_balance_callback(struct rq *rq, + * @flag: The flag to check for the highest sched_domain + * for the given CPU. + * +- * Returns the highest sched_domain of a CPU which contains the given flag. ++ * Returns the highest sched_domain of a CPU which contains @flag. If @flag has ++ * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. */ - static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + static inline struct sched_domain *highest_flag_domain(int cpu, int flag) { -- unsigned long length, gap; -+ unsigned long length, gap, high_limit, gap_end; -+ struct vm_area_struct *tmp; + struct sched_domain *sd, *hsd = NULL; - MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); - /* Adjust search length to account for worst case alignment overhead */ -@@ -1556,12 +1575,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) - if (length < info->length) - return -ENOMEM; + for_each_domain(cpu, sd) { +- if (!(sd->flags & flag)) ++ if (sd->flags & flag) { ++ hsd = sd; ++ continue; ++ } ++ ++ /* ++ * Stop the search if @flag is known to be shared at lower ++ * levels. It will not be found further up. 
++ */ ++ if (flag & SD_SHARED_CHILD_MASK) + break; +- hsd = sd; + } -- if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, -+ high_limit = info->high_limit; -+retry: -+ if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1, - length)) - return -ENOMEM; + return hsd; +-- +2.40.0 + +From f7077e2986dd0ae576dcffaa91900578923d270c Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Sat, 22 Apr 2023 11:46:19 +0200 +Subject: [PATCH 11/13] Surface + +Signed-off-by: Peter Jung +--- + arch/x86/kernel/acpi/boot.c | 24 + + drivers/acpi/acpi_tad.c | 35 +- + drivers/bluetooth/btusb.c | 15 + + drivers/hid/Kconfig | 4 + + drivers/hid/Makefile | 3 + + drivers/hid/hid-multitouch.c | 196 ++++++- + drivers/hid/ipts/Kconfig | 14 + + drivers/hid/ipts/Makefile | 14 + + drivers/hid/ipts/cmd.c | 62 ++ + drivers/hid/ipts/cmd.h | 61 ++ + drivers/hid/ipts/context.h | 51 ++ + drivers/hid/ipts/control.c | 495 ++++++++++++++++ + drivers/hid/ipts/control.h | 127 +++++ + drivers/hid/ipts/desc.h | 81 +++ + drivers/hid/ipts/hid.c | 348 ++++++++++++ + drivers/hid/ipts/hid.h | 22 + + drivers/hid/ipts/main.c | 127 +++++ + drivers/hid/ipts/mei.c | 189 +++++++ + drivers/hid/ipts/mei.h | 67 +++ + drivers/hid/ipts/receiver.c | 249 ++++++++ + drivers/hid/ipts/receiver.h | 17 + + drivers/hid/ipts/resources.c | 108 ++++ + drivers/hid/ipts/resources.h | 39 ++ + drivers/hid/ipts/spec-data.h | 100 ++++ + drivers/hid/ipts/spec-device.h | 285 ++++++++++ + drivers/hid/ipts/spec-hid.h | 35 ++ + drivers/hid/ipts/thread.c | 85 +++ + drivers/hid/ipts/thread.h | 60 ++ + drivers/hid/ithc/Kbuild | 6 + + drivers/hid/ithc/Kconfig | 12 + + drivers/hid/ithc/ithc-debug.c | 96 ++++ + drivers/hid/ithc/ithc-dma.c | 258 +++++++++ + drivers/hid/ithc/ithc-dma.h | 67 +++ + drivers/hid/ithc/ithc-main.c | 534 ++++++++++++++++++ + drivers/hid/ithc/ithc-regs.c | 64 +++ + drivers/hid/ithc/ithc-regs.h | 186 ++++++ + drivers/hid/ithc/ithc.h | 60 ++ + drivers/i2c/i2c-core-acpi.c | 35 ++ + drivers/input/misc/soc_button_array.c | 33 +- + drivers/iommu/intel/iommu.c | 24 + + drivers/iommu/intel/irq_remapping.c | 16 + + drivers/misc/mei/hw-me-regs.h | 1 + + drivers/misc/mei/pci-me.c | 1 + + drivers/net/wireless/ath/ath10k/core.c | 58 ++ + drivers/net/wireless/marvell/mwifiex/pcie.c | 19 + + .../wireless/marvell/mwifiex/pcie_quirks.c | 37 +- + .../wireless/marvell/mwifiex/pcie_quirks.h | 2 + + drivers/pci/pci-driver.c | 3 + + drivers/pci/quirks.c | 36 ++ + drivers/platform/surface/Kconfig | 7 + + drivers/platform/surface/Makefile | 1 + + drivers/platform/surface/surface3-wmi.c | 7 + + drivers/platform/surface/surface_gpe.c | 17 + + .../surface/surfacebook1_dgpu_switch.c | 162 ++++++ + drivers/platform/surface/surfacepro3_button.c | 30 +- + drivers/usb/core/quirks.c | 3 + + include/linux/pci.h | 1 + + sound/soc/codecs/rt5645.c | 9 + + .../intel/common/soc-acpi-intel-cht-match.c | 8 + + 59 files changed, 4636 insertions(+), 70 deletions(-) + create mode 100644 drivers/hid/ipts/Kconfig + create mode 100644 drivers/hid/ipts/Makefile + create mode 100644 drivers/hid/ipts/cmd.c + create mode 100644 drivers/hid/ipts/cmd.h + create mode 100644 drivers/hid/ipts/context.h + create mode 100644 drivers/hid/ipts/control.c + create mode 100644 drivers/hid/ipts/control.h + create mode 100644 drivers/hid/ipts/desc.h + create mode 100644 drivers/hid/ipts/hid.c + create mode 100644 drivers/hid/ipts/hid.h + create mode 100644 drivers/hid/ipts/main.c + create mode 100644 drivers/hid/ipts/mei.c + create mode 100644 drivers/hid/ipts/mei.h + create mode 100644 
drivers/hid/ipts/receiver.c + create mode 100644 drivers/hid/ipts/receiver.h + create mode 100644 drivers/hid/ipts/resources.c + create mode 100644 drivers/hid/ipts/resources.h + create mode 100644 drivers/hid/ipts/spec-data.h + create mode 100644 drivers/hid/ipts/spec-device.h + create mode 100644 drivers/hid/ipts/spec-hid.h + create mode 100644 drivers/hid/ipts/thread.c + create mode 100644 drivers/hid/ipts/thread.h + create mode 100644 drivers/hid/ithc/Kbuild + create mode 100644 drivers/hid/ithc/Kconfig + create mode 100644 drivers/hid/ithc/ithc-debug.c + create mode 100644 drivers/hid/ithc/ithc-dma.c + create mode 100644 drivers/hid/ithc/ithc-dma.h + create mode 100644 drivers/hid/ithc/ithc-main.c + create mode 100644 drivers/hid/ithc/ithc-regs.c + create mode 100644 drivers/hid/ithc/ithc-regs.h + create mode 100644 drivers/hid/ithc/ithc.h + create mode 100644 drivers/platform/surface/surfacebook1_dgpu_switch.c + +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 0dac4ab5b55b..623d94a9cb86 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include - gap = mas.last + 1 - info->length; - gap -= (gap - info->align_offset) & info->align_mask; -+ gap_end = mas.last; -+ tmp = mas_next(&mas, ULONG_MAX); -+ if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */ -+ if (vm_start_gap(tmp) <= gap_end) { -+ high_limit = vm_start_gap(tmp); -+ mas_reset(&mas); -+ goto retry; -+ } -+ } else { -+ tmp = mas_prev(&mas, 0); -+ if (tmp && vm_end_gap(tmp) > gap) { -+ high_limit = tmp->vm_start; -+ mas_reset(&mas); -+ goto retry; -+ } + #include + #include +@@ -1252,6 +1253,24 @@ static void __init mp_config_acpi_legacy_irqs(void) + } + } + ++static const struct dmi_system_id surface_quirk[] __initconst = { ++ { ++ .ident = "Microsoft Surface Laptop 4 (AMD 15\")", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1952:1953") ++ }, ++ }, ++ { ++ .ident = "Microsoft Surface Laptop 4 (AMD 13\")", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1958:1959") ++ }, ++ }, ++ {} ++}; ++ + /* + * Parse IOAPIC related entries in MADT + * returns 0 on success, < 0 on error +@@ -1307,6 +1326,11 @@ static int __init acpi_parse_madt_ioapic_entries(void) + acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, + acpi_gbl_FADT.sci_interrupt); + ++ if (dmi_check_system(surface_quirk)) { ++ pr_warn("Surface hack: Override irq 7\n"); ++ mp_override_legacy_irq(7, 3, 3, 7); + } + - return gap; - } + /* Fill in identity legacy mappings where no override */ + mp_config_acpi_legacy_irqs(); -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 71a7f4517e5a..ae60ddff831a 100644 ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -3398,18 +3398,13 @@ void lru_gen_del_mm(struct mm_struct *mm) - for_each_node(nid) { - struct lruvec *lruvec = get_lruvec(memcg, nid); +diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c +index e9b8e8305e23..944276934e7e 100644 +--- a/drivers/acpi/acpi_tad.c ++++ b/drivers/acpi/acpi_tad.c +@@ -432,6 +432,14 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, -- /* where the last iteration ended (exclusive) */ -+ /* where the current iteration continues after */ -+ if (lruvec->mm_state.head == &mm->lru_gen.list) -+ lruvec->mm_state.head = lruvec->mm_state.head->prev; -+ -+ /* where the last iteration ended before */ 
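Spelling out the Surface IRQ override added above; the argument annotation assumes the MADT interrupt-override encoding used by mp_override_legacy_irq():

/* illustrative restatement of the quirk flow, not part of the patch */
static void __init surface_irq_quirk_sketch(void)
{
	if (!dmi_check_system(surface_quirk))
		return;
	/* bus IRQ 7 -> GSI 7, polarity 3 (active low), trigger 3 (level) */
	mp_override_legacy_irq(7, 3, 3, 7);
}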
- if (lruvec->mm_state.tail == &mm->lru_gen.list) - lruvec->mm_state.tail = lruvec->mm_state.tail->next; -- -- /* where the current iteration continues (inclusive) */ -- if (lruvec->mm_state.head != &mm->lru_gen.list) -- continue; -- -- lruvec->mm_state.head = lruvec->mm_state.head->next; -- /* the deletion ends the current iteration */ -- if (lruvec->mm_state.head == &mm_list->fifo) -- WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1); - } + static DEVICE_ATTR_RO(caps); - list_del_init(&mm->lru_gen.list); -@@ -3505,68 +3500,54 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, - struct mm_struct **iter) ++static struct attribute *acpi_tad_attrs[] = { ++ &dev_attr_caps.attr, ++ NULL, ++}; ++static const struct attribute_group acpi_tad_attr_group = { ++ .attrs = acpi_tad_attrs, ++}; ++ + static ssize_t ac_alarm_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { - bool first = false; -- bool last = true; -+ bool last = false; - struct mm_struct *mm = NULL; - struct mem_cgroup *memcg = lruvec_memcg(lruvec); - struct lru_gen_mm_list *mm_list = get_mm_list(memcg); - struct lru_gen_mm_state *mm_state = &lruvec->mm_state; +@@ -480,15 +488,14 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr, - /* -- * There are four interesting cases for this page table walker: -- * 1. It tries to start a new iteration of mm_list with a stale max_seq; -- * there is nothing left to do. -- * 2. It's the first of the current generation, and it needs to reset -- * the Bloom filter for the next generation. -- * 3. It reaches the end of mm_list, and it needs to increment -- * mm_state->seq; the iteration is done. -- * 4. It's the last of the current generation, and it needs to reset the -- * mm stats counters for the next generation. -+ * mm_state->seq is incremented after each iteration of mm_list. There -+ * are three interesting cases for this page table walker: -+ * 1. It tries to start a new iteration with a stale max_seq: there is -+ * nothing left to do. -+ * 2. It started the next iteration: it needs to reset the Bloom filter -+ * so that a fresh set of PTE tables can be recorded. -+ * 3. It ended the current iteration: it needs to reset the mm stats -+ * counters and tell its caller to increment max_seq. 
- */ - spin_lock(&mm_list->lock); + static DEVICE_ATTR_RW(ac_status); - VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); -- VM_WARN_ON_ONCE(*iter && mm_state->seq > walk->max_seq); -- VM_WARN_ON_ONCE(*iter && !mm_state->nr_walkers); +-static struct attribute *acpi_tad_attrs[] = { +- &dev_attr_caps.attr, ++static struct attribute *acpi_tad_ac_attrs[] = { + &dev_attr_ac_alarm.attr, + &dev_attr_ac_policy.attr, + &dev_attr_ac_status.attr, + NULL, + }; +-static const struct attribute_group acpi_tad_attr_group = { +- .attrs = acpi_tad_attrs, ++static const struct attribute_group acpi_tad_ac_attr_group = { ++ .attrs = acpi_tad_ac_attrs, + }; -- if (walk->max_seq <= mm_state->seq) { -- if (!*iter) -- last = false; -+ if (walk->max_seq <= mm_state->seq) - goto done; -- } + static ssize_t dc_alarm_store(struct device *dev, struct device_attribute *attr, +@@ -563,13 +570,18 @@ static int acpi_tad_remove(struct platform_device *pdev) -- if (!mm_state->nr_walkers) { -- VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); -+ if (!mm_state->head) -+ mm_state->head = &mm_list->fifo; + pm_runtime_get_sync(dev); -- mm_state->head = mm_list->fifo.next; -+ if (mm_state->head == &mm_list->fifo) - first = true; -- } -- -- while (!mm && mm_state->head != &mm_list->fifo) { -- mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); ++ if (dd->capabilities & ACPI_TAD_AC_WAKE) ++ sysfs_remove_group(&dev->kobj, &acpi_tad_ac_attr_group); ++ + if (dd->capabilities & ACPI_TAD_DC_WAKE) + sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); -+ do { - mm_state->head = mm_state->head->next; -+ if (mm_state->head == &mm_list->fifo) { -+ WRITE_ONCE(mm_state->seq, mm_state->seq + 1); -+ last = true; -+ break; -+ } + sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group); - /* force scan for those added after the last iteration */ -- if (!mm_state->tail || mm_state->tail == &mm->lru_gen.list) { -- mm_state->tail = mm_state->head; -+ if (!mm_state->tail || mm_state->tail == mm_state->head) { -+ mm_state->tail = mm_state->head->next; - walk->force_scan = true; - } +- acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); +- acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); ++ if (dd->capabilities & ACPI_TAD_AC_WAKE) { ++ acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); ++ acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); ++ } + if (dd->capabilities & ACPI_TAD_DC_WAKE) { + acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER); + acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER); +@@ -604,11 +616,6 @@ static int acpi_tad_probe(struct platform_device *pdev) + return -ENODEV; + } -+ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); - if (should_skip_mm(mm, walk)) - mm = NULL; +- if (!acpi_has_method(handle, "_PRW")) { +- dev_info(dev, "Missing _PRW\n"); +- return -ENODEV; - } - -- if (mm_state->head == &mm_list->fifo) -- WRITE_ONCE(mm_state->seq, mm_state->seq + 1); -+ } while (!mm); - done: -- if (*iter && !mm) -- mm_state->nr_walkers--; -- if (!*iter && mm) -- mm_state->nr_walkers++; -- -- if (mm_state->nr_walkers) -- last = false; -- - if (*iter || last) - reset_mm_stats(lruvec, walk, last); + dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; +@@ -637,6 +644,12 @@ static int acpi_tad_probe(struct platform_device *pdev) + if (ret) + goto fail; -@@ -3594,9 +3575,9 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) ++ if (caps & ACPI_TAD_AC_WAKE) { ++ ret = sysfs_create_group(&dev->kobj, &acpi_tad_ac_attr_group); ++ if (ret) ++ goto fail; ++ } ++ + if 
(caps & ACPI_TAD_DC_WAKE) { + ret = sysfs_create_group(&dev->kobj, &acpi_tad_dc_attr_group); + if (ret) +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 5a80379253a7..5651b4bfe72c 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -65,6 +65,7 @@ static struct usb_driver btusb_driver; + #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) + #define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) + #define BTUSB_ACTIONS_SEMI BIT(27) ++#define BTUSB_LOWER_LESCAN_INTERVAL BIT(28) + + static const struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -468,6 +469,7 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL }, ++ { USB_DEVICE(0x1286, 0x204c), .driver_info = BTUSB_LOWER_LESCAN_INTERVAL }, + + /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_COMBINED }, +@@ -4033,6 +4035,19 @@ static int btusb_probe(struct usb_interface *intf, + if (id->driver_info & BTUSB_MARVELL) + hdev->set_bdaddr = btusb_set_bdaddr_marvell; + ++ /* The Marvell 88W8897 combined wifi and bluetooth card is known for ++ * very bad bt+wifi coexisting performance. ++ * ++ * Decrease the passive BT Low Energy scan interval a bit ++ * (0x0190 * 0.625 msec = 250 msec) and make the scan window shorter ++ * (0x000a * 0,625 msec = 6.25 msec). This allows for significantly ++ * higher wifi throughput while passively scanning for BT LE devices. ++ */ ++ if (id->driver_info & BTUSB_LOWER_LESCAN_INTERVAL) { ++ hdev->le_scan_interval = 0x0190; ++ hdev->le_scan_window = 0x000a; ++ } ++ + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) && + (id->driver_info & BTUSB_MEDIATEK)) { + hdev->setup = btusb_mtk_setup; +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 4ce012f83253..aebb62488cf1 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -1300,6 +1300,10 @@ config HID_KUNIT_TEST - VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); + If in doubt, say "N". 
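Converting the Marvell LE scan values above out of Bluetooth's 0.625ms slot units:

static unsigned int scan_slots_to_us(unsigned int slots)
{
	return slots * 625;
}
/* le_scan_interval 0x0190 = 400 slots -> 250000us = 250ms
 * le_scan_window   0x000a =  10 slots ->   6250us = 6.25ms
 * i.e. the radio listens for LE advertisements 2.5% of the time,
 * leaving the shared antenna to Wi-Fi for the rest */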
-- if (max_seq > mm_state->seq && !mm_state->nr_walkers) { -- VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); -- -+ if (max_seq > mm_state->seq) { -+ mm_state->head = NULL; -+ mm_state->tail = NULL; - WRITE_ONCE(mm_state->seq, mm_state->seq + 1); - reset_mm_stats(lruvec, NULL, true); - success = true; -@@ -3608,7 +3589,7 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) - } ++source "drivers/hid/ipts/Kconfig" ++ ++source "drivers/hid/ithc/Kconfig" ++ + endmenu - /****************************************************************************** -- * refault feedback loop -+ * PID controller - ******************************************************************************/ + source "drivers/hid/bpf/Kconfig" +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile +index 5d37cacbde33..a3ff62e922f1 100644 +--- a/drivers/hid/Makefile ++++ b/drivers/hid/Makefile +@@ -167,3 +167,6 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/ + obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ + + obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ ++ ++obj-$(CONFIG_HID_IPTS) += ipts/ ++obj-$(CONFIG_HID_ITHC) += ithc/ +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index e31be0cb8b85..508a250ff4bf 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -34,7 +34,10 @@ + #include + #include + #include ++#include + #include ++#include ++#include + #include + #include + #include +@@ -47,6 +50,7 @@ MODULE_DESCRIPTION("HID multitouch panels"); + MODULE_LICENSE("GPL"); - /* -@@ -4196,10 +4177,6 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, + #include "hid-ids.h" ++#include "usbhid/usbhid.h" - walk_pmd_range(&val, addr, next, args); + /* quirks to control the device */ + #define MT_QUIRK_NOT_SEEN_MEANS_UP BIT(0) +@@ -72,12 +76,18 @@ MODULE_LICENSE("GPL"); + #define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) + #define MT_QUIRK_DISABLE_WAKEUP BIT(21) + #define MT_QUIRK_ORIENTATION_INVERT BIT(22) ++#define MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT BIT(23) ++#define MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH BIT(24) -- /* a racy check to curtail the waiting time */ -- if (wq_has_sleeper(&walk->lruvec->mm_state.wait)) -- return 1; -- - if (need_resched() || walk->batched >= MAX_LRU_BATCH) { - end = (addr | ~PUD_MASK) + 1; - goto done; -@@ -4232,8 +4209,14 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_ - walk->next_addr = FIRST_USER_ADDRESS; + #define MT_INPUTMODE_TOUCHSCREEN 0x02 + #define MT_INPUTMODE_TOUCHPAD 0x03 - do { -+ DEFINE_MAX_SEQ(lruvec); + #define MT_BUTTONTYPE_CLICKPAD 0 + ++#define MS_TYPE_COVER_FEATURE_REPORT_USAGE 0xff050086 ++#define MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE 0xff050072 ++#define MS_TYPE_COVER_APPLICATION 0xff050050 + - err = -EBUSY; + enum latency_mode { + HID_LATENCY_NORMAL = 0, + HID_LATENCY_HIGH = 1, +@@ -169,6 +179,8 @@ struct mt_device { -+ /* another thread might have called inc_max_seq() */ -+ if (walk->max_seq != max_seq) -+ break; + struct list_head applications; + struct list_head reports; + - /* folio_update_gen() requires stable folio_memcg() */ - if (!mem_cgroup_trylock_pages(memcg)) - break; -@@ -4466,25 +4449,12 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, - success = iterate_mm_list(lruvec, walk, &mm); - if (mm) - walk_mm(lruvec, mm, walk); -- -- cond_resched(); - } while (mm); - done: -- if (!success) { -- if (sc->priority <= DEF_PRIORITY - 2) -- 
wait_event_killable(lruvec->mm_state.wait, -- max_seq < READ_ONCE(lrugen->max_seq)); -- return false; -- } -- -- VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); -+ if (success) -+ inc_max_seq(lruvec, can_swap, force_scan); ++ struct notifier_block pm_notifier; + }; -- inc_max_seq(lruvec, can_swap, force_scan); -- /* either this sees any waiters or they will see updated max_seq */ -- if (wq_has_sleeper(&lruvec->mm_state.wait)) -- wake_up_all(&lruvec->mm_state.wait); -- -- return true; -+ return success; - } + static void mt_post_parse_default_settings(struct mt_device *td, +@@ -213,6 +225,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); + #define MT_CLS_GOOGLE 0x0111 + #define MT_CLS_RAZER_BLADE_STEALTH 0x0112 + #define MT_CLS_SMART_TECH 0x0113 ++#define MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER 0x0114 + + #define MT_DEFAULT_MAXCONTACT 10 + #define MT_MAX_MAXCONTACT 250 +@@ -397,6 +410,17 @@ static const struct mt_class mt_classes[] = { + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_SEPARATE_APP_REPORT, + }, ++ { .name = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER, ++ .quirks = MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT | ++ MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH | ++ MT_QUIRK_ALWAYS_VALID | ++ MT_QUIRK_IGNORE_DUPLICATES | ++ MT_QUIRK_HOVERING | ++ MT_QUIRK_CONTACT_CNT_ACCURATE | ++ MT_QUIRK_STICKY_FINGERS | ++ MT_QUIRK_WIN8_PTP_BUTTONS, ++ .export_all_inputs = true ++ }, + { } + }; - /****************************************************************************** -@@ -5671,14 +5641,14 @@ static void lru_gen_change_state(bool enabled) - * sysfs interface - ******************************************************************************/ +@@ -1370,6 +1394,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, + field->application != HID_CP_CONSUMER_CONTROL && + field->application != HID_GD_WIRELESS_RADIO_CTLS && + field->application != HID_GD_SYSTEM_MULTIAXIS && ++ !(field->application == MS_TYPE_COVER_APPLICATION && ++ application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) && + !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && + application->quirks & MT_QUIRK_ASUS_CUSTOM_UP)) + return -1; +@@ -1397,6 +1424,21 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, + return 1; + } --static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -+static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) ++ /* ++ * The Microsoft Surface Pro Typecover has a non-standard HID ++ * tablet mode switch on a vendor specific usage page with vendor ++ * specific usage. 
++ */ ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ usage->type = EV_SW; ++ usage->code = SW_TABLET_MODE; ++ *max = SW_MAX; ++ *bit = hi->input->swbit; ++ return 1; ++ } ++ + if (rdata->is_mt_collection) + return mt_touch_input_mapping(hdev, hi, field, usage, bit, max, + application); +@@ -1418,6 +1460,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, { -- return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); -+ return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); - } + struct mt_device *td = hid_get_drvdata(hdev); + struct mt_report_data *rdata; ++ struct input_dev *input; - /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ --static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, -- const char *buf, size_t len) -+static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, size_t len) - { - unsigned int msecs; + rdata = mt_find_report_data(td, field->report); + if (rdata && rdata->is_mt_collection) { +@@ -1425,6 +1468,19 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, + return -1; + } -@@ -5690,11 +5660,9 @@ static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, - return len; ++ /* ++ * We own an input device which acts as a tablet mode switch for ++ * the Surface Pro Typecover. ++ */ ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ rdata->application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ input = hi->input; ++ input_set_capability(input, EV_SW, SW_TABLET_MODE); ++ input_report_switch(input, SW_TABLET_MODE, 0); ++ return -1; ++ } ++ + /* let hid-core decide for the others */ + return 0; } +@@ -1434,11 +1490,21 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, + { + struct mt_device *td = hid_get_drvdata(hid); + struct mt_report_data *rdata; ++ struct input_dev *input; --static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR( -- min_ttl_ms, 0644, show_min_ttl, store_min_ttl --); -+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); + rdata = mt_find_report_data(td, field->report); + if (rdata && rdata->is_mt_collection) + return mt_touch_event(hid, field, usage, value); --static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -+static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) - { - unsigned int caps = 0; ++ if (field->application == MS_TYPE_COVER_APPLICATION && ++ rdata->application->quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH && ++ usage->hid == MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE) { ++ input = field->hidinput->input; ++ input_report_switch(input, SW_TABLET_MODE, (value & 0xFF) != 0x22); ++ input_sync(input); ++ return 1; ++ } ++ + return 0; + } -@@ -5711,7 +5679,7 @@ static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, c +@@ -1591,6 +1657,42 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app) + app->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; } - /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ --static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr, -+static ssize_t enabled_store(struct kobject *kobj, struct 
kobj_attribute *attr, - const char *buf, size_t len) ++static int get_type_cover_field(struct hid_report_enum *rep_enum, ++ struct hid_field **field, int usage) ++{ ++ struct hid_report *rep; ++ struct hid_field *cur_field; ++ int i, j; ++ ++ list_for_each_entry(rep, &rep_enum->report_list, list) { ++ for (i = 0; i < rep->maxfield; i++) { ++ cur_field = rep->field[i]; ++ if (cur_field->application != MS_TYPE_COVER_APPLICATION) ++ continue; ++ for (j = 0; j < cur_field->maxusage; j++) { ++ if (cur_field->usage[j].hid == usage) { ++ *field = cur_field; ++ return true; ++ } ++ } ++ } ++ } ++ return false; ++} ++ ++static void request_type_cover_tablet_mode_switch(struct hid_device *hdev) ++{ ++ struct hid_field *field; ++ ++ if (get_type_cover_field(&hdev->report_enum[HID_INPUT_REPORT], ++ &field, ++ MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE)) { ++ hid_hw_request(hdev, field->report, HID_REQ_GET_REPORT); ++ } else { ++ hid_err(hdev, "couldn't find tablet mode field\n"); ++ } ++} ++ + static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) { - int i; -@@ -5738,9 +5706,7 @@ static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr, - return len; + struct mt_device *td = hid_get_drvdata(hdev); +@@ -1640,6 +1742,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) + /* force BTN_STYLUS to allow tablet matching in udev */ + __set_bit(BTN_STYLUS, hi->input->keybit); + break; ++ case MS_TYPE_COVER_APPLICATION: ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) { ++ suffix = "Tablet Mode Switch"; ++ request_type_cover_tablet_mode_switch(hdev); ++ break; ++ } ++ fallthrough; + default: + suffix = "UNKNOWN"; + break; +@@ -1728,6 +1837,46 @@ static void mt_expired_timeout(struct timer_list *t) + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } --static struct kobj_attribute lru_gen_enabled_attr = __ATTR( -- enabled, 0644, show_enabled, store_enabled --); -+static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); ++static void update_keyboard_backlight(struct hid_device *hdev, bool enabled) ++{ ++ struct usb_device *udev = hid_to_usb_dev(hdev); ++ struct hid_field *field = NULL; ++ ++ /* Wake up the device in case it's already suspended */ ++ pm_runtime_get_sync(&udev->dev); ++ ++ if (!get_type_cover_field(&hdev->report_enum[HID_FEATURE_REPORT], ++ &field, ++ MS_TYPE_COVER_FEATURE_REPORT_USAGE)) { ++ hid_err(hdev, "couldn't find backlight field\n"); ++ goto out; ++ } ++ ++ field->value[field->index] = enabled ? 
0x01ff00ff : 0x00ff00ff; ++ hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT); ++ ++out: ++ pm_runtime_put_sync(&udev->dev); ++} ++ ++static int mt_pm_notifier(struct notifier_block *notifier, ++ unsigned long pm_event, ++ void *unused) ++{ ++ struct mt_device *td = ++ container_of(notifier, struct mt_device, pm_notifier); ++ struct hid_device *hdev = td->hdev; ++ ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_BACKLIGHT) { ++ if (pm_event == PM_SUSPEND_PREPARE) ++ update_keyboard_backlight(hdev, 0); ++ else if (pm_event == PM_POST_SUSPEND) ++ update_keyboard_backlight(hdev, 1); ++ } ++ ++ return NOTIFY_DONE; ++} ++ + static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + { + int ret, i; +@@ -1751,6 +1900,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN; + hid_set_drvdata(hdev, td); - static struct attribute *lru_gen_attrs[] = { - &lru_gen_min_ttl_attr.attr, -@@ -5748,7 +5714,7 @@ static struct attribute *lru_gen_attrs[] = { - NULL - }; ++ td->pm_notifier.notifier_call = mt_pm_notifier; ++ register_pm_notifier(&td->pm_notifier); ++ + INIT_LIST_HEAD(&td->applications); + INIT_LIST_HEAD(&td->reports); --static struct attribute_group lru_gen_attr_group = { -+static const struct attribute_group lru_gen_attr_group = { - .name = "lru_gen", - .attrs = lru_gen_attrs, - }; -@@ -6130,7 +6096,6 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) - INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); +@@ -1789,15 +1941,19 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) + timer_setup(&td->release_timer, mt_expired_timeout, 0); + + ret = hid_parse(hdev); +- if (ret != 0) ++ if (ret != 0) { ++ unregister_pm_notifier(&td->pm_notifier); + return ret; ++ } + + if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID) + mt_fix_const_fields(hdev, HID_DG_CONTACTID); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); +- if (ret) ++ if (ret) { ++ unregister_pm_notifier(&td->pm_notifier); + return ret; ++ } + + ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group); + if (ret) +@@ -1826,13 +1982,24 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state) - lruvec->mm_state.seq = MIN_NR_GENS; -- init_waitqueue_head(&lruvec->mm_state.wait); + static int mt_reset_resume(struct hid_device *hdev) + { ++ struct mt_device *td = hid_get_drvdata(hdev); ++ + mt_release_contacts(hdev); + mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true); ++ ++ /* Request an update on the typecover folding state on resume ++ * after reset. ++ */ ++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) ++ request_type_cover_tablet_mode_switch(hdev); ++ + return 0; } - #ifdef CONFIG_MEMCG -@@ -6163,7 +6128,6 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg) - for_each_node(nid) { - struct lruvec *lruvec = get_lruvec(memcg, nid); + static int mt_resume(struct hid_device *hdev) + { ++ struct mt_device *td = hid_get_drvdata(hdev); ++ + /* Some Elan legacy devices require SET_IDLE to be set on resume. + * It should be safe to send it to other devices too. + * Tested on 3M, Stantum, Cypress, Zytronic, eGalax, and Elan panels. 
*/
+@@ -1841,6 +2008,10 @@ static int mt_resume(struct hid_device *hdev)

+ mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);

++ /* Request an update on the typecover folding state on resume. */
++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH)
++ request_type_cover_tablet_mode_switch(hdev);
++
+ return 0;
+ }
+ #endif
+@@ -1848,7 +2019,23 @@ static int mt_resume(struct hid_device *hdev)
+ static void mt_remove(struct hid_device *hdev)
+ {
+ struct mt_device *td = hid_get_drvdata(hdev);
++ struct hid_field *field;
++ struct input_dev *input;
+
++ /* Reset tablet mode switch on disconnect. */
++ if (td->mtclass.quirks & MT_QUIRK_HAS_TYPE_COVER_TABLET_MODE_SWITCH) {
++ if (get_type_cover_field(&hdev->report_enum[HID_INPUT_REPORT],
++ &field,
++ MS_TYPE_COVER_TABLET_MODE_SWITCH_USAGE)) {
++ input = field->hidinput->input;
++ input_report_switch(input, SW_TABLET_MODE, 0);
++ input_sync(input);
++ } else {
++ hid_err(hdev, "couldn't find tablet mode field\n");
++ }
++ }
++
++ unregister_pm_notifier(&td->pm_notifier);
+ del_timer_sync(&td->release_timer);

+ sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
+@@ -2226,6 +2413,11 @@ static const struct hid_device_id mt_devices[] = {
+ MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
++ /* Microsoft Surface type cover */
++ { .driver_data = MT_CLS_WIN_8_MS_SURFACE_TYPE_COVER,
++ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
++ USB_VENDOR_ID_MICROSOFT, 0x09c0) },
++
+ /* Google MT devices */
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+diff --git a/drivers/hid/ipts/Kconfig b/drivers/hid/ipts/Kconfig
+new file mode 100644
+index 000000000000..297401bd388d
+--- /dev/null
++++ b/drivers/hid/ipts/Kconfig
+@@ -0,0 +1,14 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++
++config HID_IPTS
++ tristate "Intel Precise Touch & Stylus"
++ depends on INTEL_MEI
++ depends on HID
++ help
++ Say Y here if your system has a touchscreen using Intel's
++ Precise Touch & Stylus (IPTS) technology.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ipts.
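As a usage sketch for the Kconfig entry above: the driver can only be selected once its two dependencies are enabled, and building it as a module produces ipts.ko. An illustrative .config fragment, assuming no other constraints in the tree:

	# Illustrative fragment; symbols as declared in the Kconfig above.
	CONFIG_INTEL_MEI=y
	CONFIG_HID=y
	CONFIG_HID_IPTS=m

The module should then be loadable with modprobe ipts once the MEI bus has probed the touch hardware.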
+diff --git a/drivers/hid/ipts/Makefile b/drivers/hid/ipts/Makefile +new file mode 100644 +index 000000000000..0fe655bccdc0 +--- /dev/null ++++ b/drivers/hid/ipts/Makefile +@@ -0,0 +1,14 @@ ++# SPDX-License-Identifier: GPL-2.0-or-later ++# ++# Makefile for the IPTS touchscreen driver ++# ++ ++obj-$(CONFIG_HID_IPTS) += ipts.o ++ipts-objs := cmd.o ++ipts-objs += control.o ++ipts-objs += hid.o ++ipts-objs += main.o ++ipts-objs += mei.o ++ipts-objs += receiver.o ++ipts-objs += resources.o ++ipts-objs += thread.o +diff --git a/drivers/hid/ipts/cmd.c b/drivers/hid/ipts/cmd.c +new file mode 100644 +index 000000000000..7fd69271ccd5 +--- /dev/null ++++ b/drivers/hid/ipts/cmd.c +@@ -0,0 +1,62 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++ ++#include "cmd.h" ++#include "context.h" ++#include "mei.h" ++#include "spec-device.h" ++ ++int ipts_cmd_recv_timeout(struct ipts_context *ipts, enum ipts_command_code code, ++ struct ipts_response *rsp, u64 timeout) +{ -+ int count = 1; -+ struct maple_alloc *node = mas->alloc; ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!rsp) ++ return -EFAULT; ++ ++ /* ++ * In a response, the command code will have the most significant bit flipped to 1. ++ * If code is passed to ipts_mei_recv as is, no messages will be received. ++ */ ++ ret = ipts_mei_recv(&ipts->mei, code | IPTS_RSP_BIT, rsp, timeout); ++ if (ret < 0) ++ return ret; ++ ++ dev_dbg(ipts->dev, "Received 0x%02X with status 0x%02X\n", code, rsp->status); ++ ++ /* ++ * Some devices will always return this error. ++ * It is allowed to ignore it and to try continuing. ++ */ ++ if (rsp->status == IPTS_STATUS_COMPAT_CHECK_FAIL) ++ rsp->status = IPTS_STATUS_SUCCESS; ++ ++ return 0; ++} ++ ++int ipts_cmd_send(struct ipts_context *ipts, enum ipts_command_code code, void *data, size_t size) ++{ ++ struct ipts_command cmd = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.cmd = code; ++ ++ if (data && size > 0) ++ memcpy(cmd.payload, data, size); ++ ++ dev_dbg(ipts->dev, "Sending 0x%02X with %ld bytes payload\n", code, size); ++ return ipts_mei_send(&ipts->mei, &cmd, sizeof(cmd.cmd) + size); ++} +diff --git a/drivers/hid/ipts/cmd.h b/drivers/hid/ipts/cmd.h +new file mode 100644 +index 000000000000..924758ffee67 +--- /dev/null ++++ b/drivers/hid/ipts/cmd.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_CMD_H ++#define IPTS_CMD_H ++ ++#include ++ ++#include "context.h" ++#include "spec-device.h" ++ ++/* ++ * The default timeout for receiving responses ++ */ ++#define IPTS_CMD_DEFAULT_TIMEOUT 1000 ++ ++/* ++ * ipts_cmd_recv_timeout() - Receives a response to a command. ++ * @ipts: The IPTS driver context. ++ * @code: The type of the command / response. ++ * @rsp: The address that the received response will be copied to. ++ * @timeout: How many milliseconds the function will wait at most. ++ * ++ * A negative timeout means to wait forever. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++int ipts_cmd_recv_timeout(struct ipts_context *ipts, enum ipts_command_code code, ++ struct ipts_response *rsp, u64 timeout); ++ ++/* ++ * ipts_cmd_recv() - Receives a response to a command. 
++ * @ipts: The IPTS driver context. ++ * @code: The type of the command / response. ++ * @rsp: The address that the received response will be copied to. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++static inline int ipts_cmd_recv(struct ipts_context *ipts, enum ipts_command_code code, ++ struct ipts_response *rsp) ++{ ++ return ipts_cmd_recv_timeout(ipts, code, rsp, IPTS_CMD_DEFAULT_TIMEOUT); ++} ++ ++/* ++ * ipts_cmd_send() - Executes a command on the device. ++ * @ipts: The IPTS driver context. ++ * @code: The type of the command to execute. ++ * @data: The payload containing parameters for the command. ++ * @size: The size of the payload. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_cmd_send(struct ipts_context *ipts, enum ipts_command_code code, void *data, size_t size); ++ ++#endif /* IPTS_CMD_H */ +diff --git a/drivers/hid/ipts/context.h b/drivers/hid/ipts/context.h +new file mode 100644 +index 000000000000..3450a95e66ee +--- /dev/null ++++ b/drivers/hid/ipts/context.h +@@ -0,0 +1,51 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_CONTEXT_H ++#define IPTS_CONTEXT_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mei.h" ++#include "resources.h" ++#include "spec-device.h" ++#include "thread.h" ++ ++struct ipts_context { ++ struct device *dev; ++ struct ipts_mei mei; ++ ++ enum ipts_mode mode; ++ ++ /* ++ * Prevents concurrent GET_FEATURE reports. ++ */ ++ struct mutex feature_lock; ++ struct completion feature_event; ++ ++ /* ++ * These are not inside of struct ipts_resources ++ * because they don't own the memory they point to. 
++ */ ++ struct ipts_buffer feature_report; ++ struct ipts_buffer descriptor; ++ ++ struct hid_device *hid; ++ struct ipts_device_info info; ++ struct ipts_resources resources; ++ ++ struct ipts_thread receiver_loop; ++}; ++ ++#endif /* IPTS_CONTEXT_H */ +diff --git a/drivers/hid/ipts/control.c b/drivers/hid/ipts/control.c +new file mode 100644 +index 000000000000..2f61500b5119 +--- /dev/null ++++ b/drivers/hid/ipts/control.c +@@ -0,0 +1,495 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "context.h" ++#include "control.h" ++#include "desc.h" ++#include "hid.h" ++#include "receiver.h" ++#include "resources.h" ++#include "spec-data.h" ++#include "spec-device.h" ++ ++static int ipts_control_get_device_info(struct ipts_context *ipts, struct ipts_device_info *info) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!info) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_GET_DEVICE_INFO, NULL, 0); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_GET_DEVICE_INFO, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "GET_DEVICE_INFO: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ memcpy(info, rsp.payload, sizeof(*info)); ++ return 0; ++} ++ ++static int ipts_control_set_mode(struct ipts_context *ipts, enum ipts_mode mode) ++{ ++ int ret = 0; ++ struct ipts_set_mode cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.mode = mode; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_SET_MODE, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MODE: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_SET_MODE, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MODE: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "SET_MODE: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++static int ipts_control_set_mem_window(struct ipts_context *ipts, struct ipts_resources *res) ++{ ++ int ret = 0; ++ struct ipts_mem_window cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ cmd.data_addr_lower[i] = lower_32_bits(res->data[i].dma_address); ++ cmd.data_addr_upper[i] = upper_32_bits(res->data[i].dma_address); ++ cmd.feedback_addr_lower[i] = lower_32_bits(res->feedback[i].dma_address); ++ cmd.feedback_addr_upper[i] = upper_32_bits(res->feedback[i].dma_address); ++ } ++ ++ cmd.workqueue_addr_lower = lower_32_bits(res->workqueue.dma_address); ++ cmd.workqueue_addr_upper = upper_32_bits(res->workqueue.dma_address); ++ ++ cmd.doorbell_addr_lower = lower_32_bits(res->doorbell.dma_address); ++ cmd.doorbell_addr_upper = upper_32_bits(res->doorbell.dma_address); ++ ++ cmd.hid2me_addr_lower = lower_32_bits(res->hid2me.dma_address); ++ cmd.hid2me_addr_upper = upper_32_bits(res->hid2me.dma_address); ++ ++ cmd.workqueue_size = IPTS_WORKQUEUE_SIZE; ++ cmd.workqueue_item_size = IPTS_WORKQUEUE_ITEM_SIZE; ++ ++ 
ret = ipts_cmd_send(ipts, IPTS_CMD_SET_MEM_WINDOW, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_SET_MEM_WINDOW, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "SET_MEM_WINDOW: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++static int ipts_control_get_descriptor(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_data_header *header = NULL; ++ struct ipts_get_descriptor cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->resources.descriptor.address) ++ return -EFAULT; ++ ++ memset(ipts->resources.descriptor.address, 0, ipts->resources.descriptor.size); ++ ++ cmd.addr_lower = lower_32_bits(ipts->resources.descriptor.dma_address); ++ cmd.addr_upper = upper_32_bits(ipts->resources.descriptor.dma_address); ++ cmd.magic = 8; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_GET_DESCRIPTOR, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_GET_DESCRIPTOR, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "GET_DESCRIPTOR: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ header = (struct ipts_data_header *)ipts->resources.descriptor.address; ++ ++ if (header->type == IPTS_DATA_TYPE_DESCRIPTOR) { ++ ipts->descriptor.address = &header->data[8]; ++ ipts->descriptor.size = header->size - 8; + -+ if (!node || ((unsigned long)node & 0x1)) + return 0; -+ while (node->node_count) { -+ count += node->node_count; -+ node = node->slot[0]; + } -+ return count; ++ ++ return -ENODATA; +} + -+static void check_mas_alloc_node_count(struct ma_state *mas) ++int ipts_control_request_flush(struct ipts_context *ipts) +{ -+ mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL); -+ mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL); -+ MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total); -+ mas_destroy(mas); ++ int ret = 0; ++ struct ipts_quiesce_io cmd = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_QUIESCE_IO, &cmd, sizeof(cmd)); ++ if (ret) ++ dev_err(ipts->dev, "QUIESCE_IO: send failed: %d\n", ret); ++ ++ return ret; ++} ++ ++int ipts_control_wait_flush(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_QUIESCE_IO, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "QUIESCE_IO: recv failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (rsp.status == IPTS_STATUS_TIMEOUT) ++ return -EAGAIN; ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "QUIESCE_IO: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_request_data(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_READY_FOR_DATA, NULL, 0); ++ if (ret) ++ dev_err(ipts->dev, "READY_FOR_DATA: send failed: %d\n", ret); ++ ++ return ret; ++} ++ ++int ipts_control_wait_data(struct ipts_context *ipts, bool shutdown) ++{ ++ int ret = 0; ++ struct ipts_response rsp = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ 
++ if (!shutdown) ++ ret = ipts_cmd_recv_timeout(ipts, IPTS_CMD_READY_FOR_DATA, &rsp, 0); ++ else ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_READY_FOR_DATA, &rsp); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ dev_err(ipts->dev, "READY_FOR_DATA: recv failed: %d\n", ret); ++ ++ return ret; ++ } ++ ++ /* ++ * During shutdown, it is possible that the sensor has already been disabled. ++ */ ++ if (rsp.status == IPTS_STATUS_SENSOR_DISABLED) ++ return 0; ++ ++ if (rsp.status == IPTS_STATUS_TIMEOUT) ++ return -EAGAIN; ++ ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "READY_FOR_DATA: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } ++ ++ return 0; +} + - /* - * check_new_node() - Check the creation of new nodes and error path - * verification. -@@ -69,6 +91,8 @@ static noinline void check_new_node(struct maple_tree *mt) - - MA_STATE(mas, mt, 0, 0); - -+ check_mas_alloc_node_count(&mas); ++int ipts_control_send_feedback(struct ipts_context *ipts, u32 buffer) ++{ ++ int ret = 0; ++ struct ipts_feedback cmd = { 0 }; ++ struct ipts_response rsp = { 0 }; + - /* Try allocating 3 nodes */ - mtree_lock(mt); - mt_set_non_kernel(0); --- -2.40.0 - -From bd89875ebbc1edad43e0af8a2bb9824ff0483cf1 Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:33:32 +0200 -Subject: [PATCH 09/12] Per-VMA locks -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Previous versions: -v3: https://lore.kernel.org/all/20230216051750.3125598-1-surenb@google.com/ -v2: https://lore.kernel.org/lkml/20230127194110.533103-1-surenb@google.com/ -v1: https://lore.kernel.org/all/20230109205336.3665937-1-surenb@google.com/ -RFC: https://lore.kernel.org/all/20220901173516.702122-1-surenb@google.com/ - -LWN article describing the feature: -https://lwn.net/Articles/906852/ - -Per-vma locks idea that was discussed during SPF [1] discussion at LSF/MM -last year [2], which concluded with suggestion that “a reader/writer -semaphore could be put into the VMA itself; that would have the effect of -using the VMA as a sort of range lock. There would still be contention at -the VMA level, but it would be an improvement.” This patchset implements -this suggested approach. - -When handling page faults we lookup the VMA that contains the faulting -page under RCU protection and try to acquire its lock. If that fails we -fall back to using mmap_lock, similar to how SPF handled this situation. - -One notable way the implementation deviates from the proposal is the way -VMAs are read-locked. During some of mm updates, multiple VMAs need to be -locked until the end of the update (e.g. vma_merge, split_vma, etc). -Tracking all the locked VMAs, avoiding recursive locks, figuring out when -it's safe to unlock previously locked VMAs would make the code more -complex. So, instead of the usual lock/unlock pattern, the proposed -solution marks a VMA as locked and provides an efficient way to: -1. Identify locked VMAs. -2. Unlock all locked VMAs in bulk. -We also postpone unlocking the locked VMAs until the end of the update, -when we do mmap_write_unlock. Potentially this keeps a VMA locked for -longer than is absolutely necessary but it results in a big reduction of -code complexity. -Read-locking a VMA is done using two sequence numbers - one in the -vm_area_struct and one in the mm_struct. VMA is considered read-locked -when these sequence numbers are equal. To read-lock a VMA we set the -sequence number in vm_area_struct to be equal to the sequence number in -mm_struct. 
To unlock all VMAs we increment mm_struct's seq number. This -allows for an efficient way to track locked VMAs and to drop the locks on -all VMAs at the end of the update. - -The patchset implements per-VMA locking only for anonymous pages which -are not in swap and avoids userfaultfs as their implementation is more -complex. Additional support for file-back page faults, swapped and user -pages can be added incrementally. - -Performance benchmarks show similar although slightly smaller benefits as -with SPF patchset (~75% of SPF benefits). Still, with lower complexity -this approach might be more desirable. - -Since RFC was posted in September 2022, two separate Google teams outside -of Android evaluated the patchset and confirmed positive results. Here are -the known usecases when per-VMA locks show benefits: - -Android: -Apps with high number of threads (~100) launch times improve by up to 20%. -Each thread mmaps several areas upon startup (Stack and Thread-local -storage (TLS), thread signal stack, indirect ref table), which requires -taking mmap_lock in write mode. Page faults take mmap_lock in read mode. -During app launch, both thread creation and page faults establishing the -active workinget are happening in parallel and that causes lock contention -between mm writers and readers even if updates and page faults are -happening in different VMAs. Per-vma locks prevent this contention by -providing more granular lock. - -Google Fibers: -We have several dynamically sized thread pools that spawn new threads -under increased load and reduce their number when idling. For example, -Google's in-process scheduling/threading framework, UMCG/Fibers, is backed -by such a thread pool. When idling, only a small number of idle worker -threads are available; when a spike of incoming requests arrive, each -request is handled in its own "fiber", which is a work item posted onto a -UMCG worker thread; quite often these spikes lead to a number of new -threads spawning. Each new thread needs to allocate and register an RSEQ -section on its TLS, then register itself with the kernel as a UMCG worker -thread, and only after that it can be considered by the in-process -UMCG/Fiber scheduler as available to do useful work. In short, during an -incoming workload spike new threads have to be spawned, and they perform -several syscalls (RSEQ registration, UMCG worker registration, memory -allocations) before they can actually start doing useful work. Removing -any bottlenecks on this thread startup path will greatly improve our -services' latencies when faced with request/workload spikes. -At high scale, mmap_lock contention during thread creation and stack page -faults leads to user-visible multi-second serving latencies in a similar -pattern to Android app startup. Per-VMA locking patchset has been run -successfully in limited experiments with user-facing production workloads. -In these experiments, we observed that the peak thread creation rate was -high enough that thread creation is no longer a bottleneck. - -TCP zerocopy receive: -From the point of view of TCP zerocopy receive, the per-vma lock patch is -massively beneficial. -In today's implementation, a process with N threads where N - 1 are -performing zerocopy receive and 1 thread is performing madvise() with the -write lock taken (e.g. needs to change vm_flags) will result in all N -1 -receive threads blocking until the madvise is done. 
Conversely, on a busy -process receiving a lot of data, an madvise operation that does need to -take the mmap lock in write mode will need to wait for all of the receives -to be done - a lose:lose proposition. Per-VMA locking _removes_ by -definition this source of contention entirely. -There are other benefits for receive as well, chiefly a reduction in -cacheline bouncing across receiving threads for locking/unlocking the -single mmap lock. On an RPC style synthetic workload with 4KB RPCs: -1a) The find+lock+unlock VMA path in the base case, without the per-vma -lock patchset, is about 0.7% of cycles as measured by perf. -1b) mmap_read_lock + mmap_read_unlock in the base case is about 0.5% -cycles overall - most of this is within the TCP read hotpath (a small -fraction is 'other' usage in the system). -2a) The find+lock+unlock VMA path, with the per-vma patchset and a trivial -patch written to take advantage of it in TCP, is about 0.4% of cycles -(down from 0.7% above) -2b) mmap_read_lock + mmap_read_unlock in the per-vma patchset is < 0.1% -cycles and is out of the TCP read hotpath entirely (down from 0.5% before, -the remaining usage is the 'other' usage in the system). -So, in addition to entirely removing an onerous source of contention, it -also reduces the CPU cycles of TCP receive zerocopy by about 0.5%+ -(compared to overall cycles in perf) for the 'small' RPC scenario. - -The patchset structure is: -0001-0008: Enable maple-tree RCU mode -0009-0031: Main per-vma locks patchset -0032-0033: Performance optimizations - -Changes since v3: -- Changed patch [3] to move vma_prepare before vma_adjust_trans_huge -- Dropped patch [4] from the set as unnecessary, per Hyeonggon Yoo -- Changed patch [5] to do VMA locking inside vma_prepare, per Liam Howlett -- Dropped patch [6] from the set as unnecessary, per Liam Howlett - -[1] https://lore.kernel.org/all/20220128131006.67712-1-michel@lespinasse.org/ -[2] https://lwn.net/Articles/893906/ -[3] https://lore.kernel.org/all/20230216051750.3125598-15-surenb@google.com/ -[4] https://lore.kernel.org/all/20230216051750.3125598-17-surenb@google.com/ -[5] https://lore.kernel.org/all/20230216051750.3125598-18-surenb@google.com/ -[6] https://lore.kernel.org/all/20230216051750.3125598-22-surenb@google.com/ - -The patchset applies cleanly over mm-unstable branch. - -Laurent Dufour (1): - powerc/mm: try VMA lock-based page fault handling first - -Liam Howlett (4): - maple_tree: Be more cautious about dead nodes - maple_tree: Detect dead nodes in mas_start() - maple_tree: Fix freeing of nodes in rcu mode - maple_tree: remove extra smp_wmb() from mas_dead_leaves() - -Liam R. Howlett (4): - maple_tree: Fix write memory barrier of nodes once dead for RCU mode - maple_tree: Add smp_rmb() to dead node detection - maple_tree: Add RCU lock checking to rcu callback functions - mm: Enable maple tree RCU mode by default. 
- -Michel Lespinasse (1): - mm: rcu safe VMA freeing - -Suren Baghdasaryan (23): - mm: introduce CONFIG_PER_VMA_LOCK - mm: move mmap_lock assert function definitions - mm: add per-VMA lock and helper functions to control it - mm: mark VMA as being written when changing vm_flags - mm/mmap: move vma_prepare before vma_adjust_trans_huge - mm/khugepaged: write-lock VMA while collapsing a huge page - mm/mmap: write-lock VMAs in vma_prepare before modifying them - mm/mremap: write-lock VMA while remapping it to a new address range - mm: write-lock VMAs before removing them from VMA tree - mm: conditionally write-lock VMA in free_pgtables - kernel/fork: assert no VMA readers during its destruction - mm/mmap: prevent pagefault handler from racing with mmu_notifier - registration - mm: introduce vma detached flag - mm: introduce lock_vma_under_rcu to be used from arch-specific code - mm: fall back to mmap_lock if vma->anon_vma is not yet set - mm: add FAULT_FLAG_VMA_LOCK flag - mm: prevent do_swap_page from handling page faults under VMA lock - mm: prevent userfaults to be handled under per-vma lock - mm: introduce per-VMA lock statistics - x86/mm: try VMA lock-based page fault handling first - arm64/mm: try VMA lock-based page fault handling first - mm/mmap: free vm_area_struct without call_rcu in exit_mmap - mm: separate vma->lock from vm_area_struct - -Signed-off-by: Peter Jung ---- - Documentation/admin-guide/mm/userfaultfd.rst | 17 ++ - arch/arm64/Kconfig | 1 + - arch/arm64/mm/fault.c | 36 ++++ - arch/powerpc/mm/fault.c | 37 ++++ - arch/powerpc/platforms/powernv/Kconfig | 1 + - arch/powerpc/platforms/pseries/Kconfig | 1 + - arch/s390/Kconfig | 1 + - arch/s390/mm/fault.c | 24 +++ - arch/x86/Kconfig | 1 + - arch/x86/mm/fault.c | 36 ++++ - fs/userfaultfd.c | 16 ++ - include/linux/mm.h | 127 ++++++++++++- - include/linux/mm_inline.h | 6 + - include/linux/mm_types.h | 30 ++- - include/linux/mmap_lock.h | 37 ++-- - include/linux/userfaultfd_k.h | 23 +++ - include/linux/vm_event_item.h | 6 + - include/linux/vmstat.h | 6 + - include/uapi/linux/userfaultfd.h | 10 +- - kernel/fork.c | 96 ++++++++-- - mm/Kconfig | 12 ++ - mm/Kconfig.debug | 6 + - mm/filemap.c | 6 + - mm/hugetlb.c | 4 + - mm/init-mm.c | 3 + - mm/internal.h | 2 +- - mm/khugepaged.c | 10 +- - mm/memory.c | 186 +++++++++++++++---- - mm/mmap.c | 48 +++-- - mm/mprotect.c | 51 ++++- - mm/mremap.c | 1 + - mm/rmap.c | 31 ++-- - mm/vmstat.c | 6 + - tools/testing/selftests/mm/userfaultfd.c | 45 ++++- - 34 files changed, 810 insertions(+), 113 deletions(-) - -diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst -index 7dc823b56ca4..bd2226299583 100644 ---- a/Documentation/admin-guide/mm/userfaultfd.rst -+++ b/Documentation/admin-guide/mm/userfaultfd.rst -@@ -219,6 +219,23 @@ former will have ``UFFD_PAGEFAULT_FLAG_WP`` set, the latter - you still need to supply a page when ``UFFDIO_REGISTER_MODE_MISSING`` was - used. - -+Userfaultfd write-protect mode currently behave differently on none ptes -+(when e.g. page is missing) over different types of memories. 
++ if (!ipts) ++ return -EFAULT; ++ ++ cmd.buffer = buffer; ++ ++ ret = ipts_cmd_send(ipts, IPTS_CMD_FEEDBACK, &cmd, sizeof(cmd)); ++ if (ret) { ++ dev_err(ipts->dev, "FEEDBACK: send failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_cmd_recv(ipts, IPTS_CMD_FEEDBACK, &rsp); ++ if (ret) { ++ dev_err(ipts->dev, "FEEDBACK: recv failed: %d\n", ret); ++ return ret; ++ } + -+For anonymous memory, ``ioctl(UFFDIO_WRITEPROTECT)`` will ignore none ptes -+(e.g. when pages are missing and not populated). For file-backed memories -+like shmem and hugetlbfs, none ptes will be write protected just like a -+present pte. In other words, there will be a userfaultfd write fault -+message generated when writing to a missing page on file typed memories, -+as long as the page range was write-protected before. Such a message will -+not be generated on anonymous memories by default. ++ /* ++ * We don't know what feedback data looks like so we are sending zeros. ++ * See also ipts_control_refill_buffer. ++ */ ++ if (rsp.status == IPTS_STATUS_INVALID_PARAMS) ++ return 0; + -+If the application wants to be able to write protect none ptes on anonymous -+memory, one can pre-populate the memory with e.g. MADV_POPULATE_READ. On -+newer kernels, one can also detect the feature UFFD_FEATURE_WP_UNPOPULATED -+and set the feature bit in advance to make sure none ptes will also be -+write protected even upon anonymous memory. ++ if (rsp.status != IPTS_STATUS_SUCCESS) { ++ dev_err(ipts->dev, "FEEDBACK: cmd failed: %d\n", rsp.status); ++ return -EBADR; ++ } + - QEMU/KVM - ======== - -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 1023e896d46b..6f104c829731 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -95,6 +95,7 @@ config ARM64 - select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 - select ARCH_SUPPORTS_NUMA_BALANCING - select ARCH_SUPPORTS_PAGE_TABLE_CHECK -+ select ARCH_SUPPORTS_PER_VMA_LOCK - select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT - select ARCH_WANT_DEFAULT_BPF_JIT - select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT -diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c -index f4cb0f85ccf4..9e0db5c387e3 100644 ---- a/arch/arm64/mm/fault.c -+++ b/arch/arm64/mm/fault.c -@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, - unsigned long vm_flags; - unsigned int mm_flags = FAULT_FLAG_DEFAULT; - unsigned long addr = untagged_addr(far); -+#ifdef CONFIG_PER_VMA_LOCK -+ struct vm_area_struct *vma; -+#endif - - if (kprobe_page_fault(regs, esr)) - return 0; -@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, - - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - -+#ifdef CONFIG_PER_VMA_LOCK -+ if (!(mm_flags & FAULT_FLAG_USER)) -+ goto lock_mmap; ++ return 0; ++} ++ ++int ipts_control_hid2me_feedback(struct ipts_context *ipts, enum ipts_feedback_cmd_type cmd, ++ enum ipts_feedback_data_type type, void *data, size_t size) ++{ ++ struct ipts_feedback_header *header = NULL; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->resources.hid2me.address) ++ return -EFAULT; ++ ++ memset(ipts->resources.hid2me.address, 0, ipts->resources.hid2me.size); ++ header = (struct ipts_feedback_header *)ipts->resources.hid2me.address; ++ ++ header->cmd_type = cmd; ++ header->data_type = type; ++ header->size = size; ++ header->buffer = IPTS_HID2ME_BUFFER; ++ ++ if (size + sizeof(*header) > ipts->resources.hid2me.size) ++ return -EINVAL; ++ ++ if (data && size > 0) ++ memcpy(header->payload, data, size); ++ ++ return 
ipts_control_send_feedback(ipts, IPTS_HID2ME_BUFFER); ++} ++ ++static inline int ipts_control_reset_sensor(struct ipts_context *ipts) ++{ ++ return ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_SOFT_RESET, ++ IPTS_FEEDBACK_DATA_TYPE_VENDOR, NULL, 0); ++} ++ ++int ipts_control_start(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ struct ipts_device_info info = { 0 }; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "Starting IPTS\n"); ++ ++ ret = ipts_control_get_device_info(ipts, &info); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to get device info: %d\n", ret); ++ return ret; ++ } ++ ++ ipts->info = info; ++ ++ ret = ipts_resources_init(&ipts->resources, ipts->dev, info.data_size, info.feedback_size); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to allocate buffers: %d", ret); ++ return ret; ++ } ++ ++ dev_info(ipts->dev, "IPTS EDS Version: %d\n", info.intf_eds); ++ ++ /* ++ * Handle newer devices ++ */ ++ if (info.intf_eds > 1) { ++ /* ++ * Fetching the descriptor will only work on newer devices. ++ * For older devices, a fallback descriptor will be used. ++ */ ++ ret = ipts_control_get_descriptor(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to fetch HID descriptor: %d\n", ret); ++ return ret; ++ } ++ ++ /* ++ * Newer devices can be directly initialized in doorbell mode. ++ */ ++ ipts->mode = IPTS_MODE_DOORBELL; ++ } ++ ++ ret = ipts_control_set_mode(ipts, ipts->mode); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to set mode: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_control_set_mem_window(ipts, &ipts->resources); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to set memory window: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_receiver_start(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to start receiver: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_control_request_data(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to request data: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_hid_init(ipts, info); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to initialize HID device: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int _ipts_control_stop(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "Stopping IPTS\n"); ++ ++ ret = ipts_receiver_stop(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to stop receiver: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_control_reset_sensor(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to reset sensor: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_resources_free(&ipts->resources); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to free resources: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_stop(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ ret = _ipts_control_stop(ipts); ++ if (ret) ++ return ret; ++ ++ ret = ipts_hid_free(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to free HID device: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_restart(struct ipts_context *ipts) ++{ ++ int ret = 0; ++ ++ ret = _ipts_control_stop(ipts); ++ if (ret) ++ return ret; ++ ++ /* ++ * Give the sensor some time to come back from resetting ++ */ ++ msleep(1000); ++ ++ ret = ipts_control_start(ipts); ++ if (ret) ++ return ret; ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/control.h b/drivers/hid/ipts/control.h +new file mode 100644 +index 000000000000..744bb92d682a +--- /dev/null ++++ b/drivers/hid/ipts/control.h +@@ -0,0 +1,127 @@ ++/* SPDX-License-Identifier: 
GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_CONTROL_H ++#define IPTS_CONTROL_H ++ ++#include ++ ++#include "context.h" ++#include "spec-data.h" ++#include "spec-device.h" ++ ++/* ++ * ipts_control_request_flush() - Stop the data flow. ++ * @ipts: The IPTS driver context. ++ * ++ * Runs the command to stop the data flow on the device. ++ * All outstanding data needs to be acknowledged using feedback before the command will return. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_request_flush(struct ipts_context *ipts); ++ ++/* ++ * ipts_control_wait_flush() - Wait until data flow has been stopped. ++ * @ipts: The IPTS driver context. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_wait_flush(struct ipts_context *ipts); ++ ++/* ++ * ipts_control_wait_flush() - Notify the device that the driver can receive new data. ++ * @ipts: The IPTS driver context. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_request_data(struct ipts_context *ipts); ++ ++/* ++ * ipts_control_wait_data() - Wait until new data is available. ++ * @ipts: The IPTS driver context. ++ * @block: Whether to block execution until data is available. ++ * ++ * In doorbell mode, this function will never return while the data flow is active. Instead, ++ * the doorbell will be incremented when new data is available. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no data is available. ++ */ ++int ipts_control_wait_data(struct ipts_context *ipts, bool block); ++ ++/* ++ * ipts_control_send_feedback() - Submits a feedback buffer to the device. ++ * @ipts: The IPTS driver context. ++ * @buffer: The ID of the buffer containing feedback data. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_send_feedback(struct ipts_context *ipts, u32 buffer); ++ ++/* ++ * ipts_control_hid2me_feedback() - Sends HID2ME feedback, a special type of feedback. ++ * @ipts: The IPTS driver context. ++ * @cmd: The command that will be run on the device. ++ * @type: The type of the payload that is sent to the device. ++ * @data: The payload of the feedback command. ++ * @size: The size of the payload. ++ * ++ * HID2ME feedback is a special type of feedback, because it allows interfacing with ++ * the HID API of the device at any moment, without requiring a buffer that has to ++ * be acknowledged. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_hid2me_feedback(struct ipts_context *ipts, enum ipts_feedback_cmd_type cmd, ++ enum ipts_feedback_data_type type, void *data, size_t size); ++ ++/* ++ * ipts_control_refill_buffer() - Acknowledges that data in a buffer has been processed. ++ * @ipts: The IPTS driver context. ++ * @buffer: The buffer that has been processed and can be refilled. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++static inline int ipts_control_refill_buffer(struct ipts_context *ipts, u32 buffer) ++{ ++ /* ++ * IPTS expects structured data in the feedback buffer matching the buffer that will be ++ * refilled. We don't know what that data looks like, so we just keep the buffer empty. ++ * This results in an INVALID_PARAMS error, but the buffer gets refilled without an issue. ++ * Sending a minimal structure with the buffer ID fixes the error, but breaks refilling ++ * the buffers on some devices. 
++ */ ++ ++ return ipts_control_send_feedback(ipts, buffer); ++} ++ ++/* ++ * ipts_control_start() - Initialized the device and starts the data flow. ++ * @ipts: The IPTS driver context. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_start(struct ipts_context *ipts); ++ ++/* ++ * ipts_control_stop() - Stops the data flow and resets the device. ++ * @ipts: The IPTS driver context. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_stop(struct ipts_context *ipts); ++ ++/* ++ * ipts_control_restart() - Stops the device and starts it again. ++ * @ipts: The IPTS driver context. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_control_restart(struct ipts_context *ipts); ++ ++#endif /* IPTS_CONTROL_H */ +diff --git a/drivers/hid/ipts/desc.h b/drivers/hid/ipts/desc.h +new file mode 100644 +index 000000000000..c058974a03a1 +--- /dev/null ++++ b/drivers/hid/ipts/desc.h +@@ -0,0 +1,81 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_DESC_H ++#define IPTS_DESC_H ++ ++#include ++ ++#define IPTS_HID_REPORT_SINGLETOUCH 64 ++#define IPTS_HID_REPORT_DATA 65 ++#define IPTS_HID_REPORT_SET_MODE 66 ++ ++#define IPTS_HID_REPORT_DATA_SIZE 7485 ++ ++/* ++ * HID descriptor for singletouch data. ++ * This descriptor should be present on all IPTS devices. ++ */ ++static const u8 ipts_singletouch_descriptor[] = { ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x04, /* Usage (Touchscreen), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x40, /* Report ID (64), */ ++ 0x09, 0x42, /* Usage (Tip Switch), */ ++ 0x15, 0x00, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x95, 0x07, /* Report Count (7), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x09, 0x30, /* Usage (X), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0xA4, /* Push, */ ++ 0x55, 0x0E, /* Unit Exponent (14), */ ++ 0x65, 0x11, /* Unit (Centimeter), */ ++ 0x46, 0x76, 0x0B, /* Physical Maximum (2934), */ ++ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x31, /* Usage (Y), */ ++ 0x46, 0x74, 0x06, /* Physical Maximum (1652), */ ++ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0xB4, /* Pop, */ ++ 0xC0, /* End Collection */ ++}; ++ ++/* ++ * Fallback HID descriptor for older devices that do not have ++ * the ability to query their HID descriptor. 
++ */ ++static const u8 ipts_fallback_descriptor[] = { ++ 0x05, 0x0D, /* Usage Page (Digitizer), */ ++ 0x09, 0x0F, /* Usage (Capacitive Hm Digitizer), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x41, /* Report ID (65), */ ++ 0x09, 0x56, /* Usage (Scan Time), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x09, 0x61, /* Usage (Gesture Char Quality), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x96, 0x3D, 0x1D, /* Report Count (7485), */ ++ 0x81, 0x03, /* Input (Constant, Variable), */ ++ 0x85, 0x42, /* Report ID (66), */ ++ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ ++ 0x09, 0xC8, /* Usage (C8h), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x95, 0x01, /* Report Count (1), */ ++ 0xB1, 0x02, /* Feature (Variable), */ ++ 0xC0, /* End Collection, */ ++}; ++ ++#endif /* IPTS_DESC_H */ +diff --git a/drivers/hid/ipts/hid.c b/drivers/hid/ipts/hid.c +new file mode 100644 +index 000000000000..6782394e8dde +--- /dev/null ++++ b/drivers/hid/ipts/hid.c +@@ -0,0 +1,348 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "control.h" ++#include "desc.h" ++#include "hid.h" ++#include "spec-data.h" ++#include "spec-device.h" ++#include "spec-hid.h" ++ ++static int ipts_hid_start(struct hid_device *hid) ++{ ++ return 0; ++} ++ ++static void ipts_hid_stop(struct hid_device *hid) ++{ ++} ++ ++static int ipts_hid_switch_mode(struct ipts_context *ipts, enum ipts_mode mode) ++{ ++ if (!ipts) ++ return -EFAULT; + -+ vma = lock_vma_under_rcu(mm, addr); -+ if (!vma) -+ goto lock_mmap; ++ if (ipts->mode == mode) ++ return 0; + -+ if (!(vma->vm_flags & vm_flags)) { -+ vma_end_read(vma); -+ goto lock_mmap; ++ /* ++ * This is only allowed on older devices. 
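++ * Newer devices (intf_eds > 1) expose mode switching through their native
++ * HID descriptor instead, so here the request is accepted without
++ * restarting the device.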
++ */ ++ if (ipts->info.intf_eds > 1) ++ return 0; ++ ++ ipts->mode = mode; ++ return ipts_control_restart(ipts); ++} ++ ++static int ipts_hid_parse(struct hid_device *hid) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ bool has_native_descriptor = false; ++ ++ u8 *buffer = NULL; ++ size_t size = 0; ++ ++ if (!hid) ++ return -ENODEV; ++ ++ ipts = hid->driver_data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ size = sizeof(ipts_singletouch_descriptor); ++ has_native_descriptor = ipts->descriptor.address && ipts->descriptor.size > 0; ++ ++ if (has_native_descriptor) ++ size += ipts->descriptor.size; ++ else ++ size += sizeof(ipts_fallback_descriptor); ++ ++ buffer = kzalloc(size, GFP_KERNEL); ++ if (!buffer) ++ return -ENOMEM; ++ ++ memcpy(buffer, ipts_singletouch_descriptor, sizeof(ipts_singletouch_descriptor)); ++ ++ if (has_native_descriptor) { ++ memcpy(&buffer[sizeof(ipts_singletouch_descriptor)], ipts->descriptor.address, ++ ipts->descriptor.size); ++ } else { ++ memcpy(&buffer[sizeof(ipts_singletouch_descriptor)], ipts_fallback_descriptor, ++ sizeof(ipts_fallback_descriptor)); + } -+ fault = handle_mm_fault(vma, addr & PAGE_MASK, -+ mm_flags | FAULT_FLAG_VMA_LOCK, regs); -+ vma_end_read(vma); + -+ if (!(fault & VM_FAULT_RETRY)) { -+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); -+ goto done; ++ ret = hid_parse_report(hid, buffer, size); ++ kfree(buffer); ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to parse HID descriptor: %d\n", ret); ++ return ret; + } -+ count_vm_vma_lock_event(VMA_LOCK_RETRY); + -+ /* Quick path to respond to signals */ -+ if (fault_signal_pending(fault, regs)) { -+ if (!user_mode(regs)) -+ goto no_context; -+ return 0; ++ return 0; ++} ++ ++static int ipts_hid_get_feature(struct ipts_context *ipts, unsigned char reportnum, __u8 *buf, ++ size_t size, enum ipts_feedback_data_type type) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ mutex_lock(&ipts->feature_lock); ++ ++ memset(buf, 0, size); ++ buf[0] = reportnum; ++ ++ memset(&ipts->feature_report, 0, sizeof(ipts->feature_report)); ++ reinit_completion(&ipts->feature_event); ++ ++ ret = ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE, type, buf, size); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to send hid2me feedback: %d\n", ret); ++ goto out; + } -+lock_mmap: -+#endif /* CONFIG_PER_VMA_LOCK */ - /* - * As per x86, we may deadlock here. However, since the kernel only - * validly references user space from well defined areas of the code, -@@ -628,6 +661,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, - } - mmap_read_unlock(mm); - -+#ifdef CONFIG_PER_VMA_LOCK -+done: -+#endif - /* - * Handle the "normal" (no error) case first. 
- */ -diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c -index af46aa88422b..531177a4ee08 100644 ---- a/arch/powerpc/mm/fault.c -+++ b/arch/powerpc/mm/fault.c -@@ -474,6 +474,40 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, - if (is_exec) - flags |= FAULT_FLAG_INSTRUCTION; - -+#ifdef CONFIG_PER_VMA_LOCK -+ if (!(flags & FAULT_FLAG_USER)) -+ goto lock_mmap; + -+ vma = lock_vma_under_rcu(mm, address); -+ if (!vma) -+ goto lock_mmap; ++ ret = wait_for_completion_timeout(&ipts->feature_event, msecs_to_jiffies(5000)); ++ if (ret == 0) { ++ dev_warn(ipts->dev, "GET_FEATURES timed out!\n"); ++ ret = -EIO; ++ goto out; ++ } + -+ if (unlikely(access_pkey_error(is_write, is_exec, -+ (error_code & DSISR_KEYFAULT), vma))) { -+ vma_end_read(vma); -+ goto lock_mmap; ++ if (!ipts->feature_report.address) { ++ ret = -EFAULT; ++ goto out; + } + -+ if (unlikely(access_error(is_write, is_exec, vma))) { -+ vma_end_read(vma); -+ goto lock_mmap; ++ if (ipts->feature_report.size > size) { ++ ret = -ETOOSMALL; ++ goto out; + } + -+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); -+ vma_end_read(vma); ++ ret = ipts->feature_report.size; ++ memcpy(buf, ipts->feature_report.address, ipts->feature_report.size); + -+ if (!(fault & VM_FAULT_RETRY)) { -+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); -+ goto done; ++out: ++ mutex_unlock(&ipts->feature_lock); ++ return ret; ++} ++ ++static int ipts_hid_set_feature(struct ipts_context *ipts, unsigned char reportnum, __u8 *buf, ++ size_t size, enum ipts_feedback_data_type type) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ buf[0] = reportnum; ++ ++ ret = ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE, type, buf, size); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send hid2me feedback: %d\n", ret); ++ ++ return ret; ++} ++ ++static int ipts_hid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, ++ size_t size, unsigned char rtype, int reqtype) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ enum ipts_feedback_data_type type = IPTS_FEEDBACK_DATA_TYPE_VENDOR; ++ ++ if (!hid) ++ return -ENODEV; ++ ++ ipts = hid->driver_data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!buf) ++ return -EFAULT; ++ ++ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) ++ type = IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) ++ type = IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) ++ type = IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES; ++ else ++ return -EIO; ++ ++ // Implemente mode switching report for older devices without native HID support ++ if (type == IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES && reportnum == IPTS_HID_REPORT_SET_MODE) { ++ ret = ipts_hid_switch_mode(ipts, buf[1]); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to switch modes: %d\n", ret); ++ return ret; ++ } + } -+ count_vm_vma_lock_event(VMA_LOCK_RETRY); + -+ if (fault_signal_pending(fault, regs)) -+ return user_mode(regs) ? 
0 : SIGBUS; ++ if (reqtype == HID_REQ_GET_REPORT) ++ return ipts_hid_get_feature(ipts, reportnum, buf, size, type); ++ else ++ return ipts_hid_set_feature(ipts, reportnum, buf, size, type); ++} + -+lock_mmap: -+#endif /* CONFIG_PER_VMA_LOCK */ ++static int ipts_hid_output_report(struct hid_device *hid, __u8 *data, size_t size) ++{ ++ struct ipts_context *ipts = NULL; + - /* When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunately, in the case of an -@@ -550,6 +584,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, - - mmap_read_unlock(current->mm); - -+#ifdef CONFIG_PER_VMA_LOCK -+done: -+#endif - if (unlikely(fault & VM_FAULT_ERROR)) - return mm_fault_error(regs, address, fault); - -diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig -index ae248a161b43..70a46acc70d6 100644 ---- a/arch/powerpc/platforms/powernv/Kconfig -+++ b/arch/powerpc/platforms/powernv/Kconfig -@@ -16,6 +16,7 @@ config PPC_POWERNV - select PPC_DOORBELL - select MMU_NOTIFIER - select FORCE_SMP -+ select ARCH_SUPPORTS_PER_VMA_LOCK - default y - - config OPAL_PRD -diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig -index 21b22bf16ce6..4ebf2ef2845d 100644 ---- a/arch/powerpc/platforms/pseries/Kconfig -+++ b/arch/powerpc/platforms/pseries/Kconfig -@@ -22,6 +22,7 @@ config PPC_PSERIES - select HOTPLUG_CPU - select FORCE_SMP - select SWIOTLB -+ select ARCH_SUPPORTS_PER_VMA_LOCK - default y - - config PARAVIRT -diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig -index 9809c74e1240..548b5b587003 100644 ---- a/arch/s390/Kconfig -+++ b/arch/s390/Kconfig -@@ -120,6 +120,7 @@ config S390 - select ARCH_SUPPORTS_DEBUG_PAGEALLOC - select ARCH_SUPPORTS_HUGETLBFS - select ARCH_SUPPORTS_NUMA_BALANCING -+ select ARCH_SUPPORTS_PER_VMA_LOCK - select ARCH_USE_BUILTIN_BSWAP - select ARCH_USE_CMPXCHG_LOCKREF - select ARCH_WANTS_DYNAMIC_TASK_STRUCT -diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c -index a2632fd97d00..b65144c392b0 100644 ---- a/arch/s390/mm/fault.c -+++ b/arch/s390/mm/fault.c -@@ -407,6 +407,30 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) - access = VM_WRITE; - if (access == VM_WRITE) - flags |= FAULT_FLAG_WRITE; -+#ifdef CONFIG_PER_VMA_LOCK -+ if (!(flags & FAULT_FLAG_USER)) -+ goto lock_mmap; -+ vma = lock_vma_under_rcu(mm, address); -+ if (!vma) -+ goto lock_mmap; -+ if (!(vma->vm_flags & access)) { -+ vma_end_read(vma); -+ goto lock_mmap; ++ if (!hid) ++ return -ENODEV; ++ ++ ipts = hid->driver_data; ++ ++ return ipts_control_hid2me_feedback(ipts, IPTS_FEEDBACK_CMD_TYPE_NONE, ++ IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT, data, size); ++} ++ ++static struct hid_ll_driver ipts_hid_driver = { ++ .start = ipts_hid_start, ++ .stop = ipts_hid_stop, ++ .open = ipts_hid_start, ++ .close = ipts_hid_stop, ++ .parse = ipts_hid_parse, ++ .raw_request = ipts_hid_raw_request, ++ .output_report = ipts_hid_output_report, ++}; ++ ++int ipts_hid_input_data(struct ipts_context *ipts, u32 buffer) ++{ ++ int ret = 0; ++ u8 *temp = NULL; ++ struct ipts_hid_header *frame = NULL; ++ struct ipts_data_header *header = NULL; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->hid) ++ return -ENODEV; ++ ++ header = (struct ipts_data_header *)ipts->resources.data[buffer].address; ++ ++ if (!header) ++ return -EFAULT; ++ ++ if (header->size == 0) ++ return 0; ++ ++ if (header->type == 
IPTS_DATA_TYPE_HID) ++ return hid_input_report(ipts->hid, HID_INPUT_REPORT, header->data, header->size, 1); ++ ++ if (header->type == IPTS_DATA_TYPE_GET_FEATURES) { ++ ipts->feature_report.address = header->data; ++ ipts->feature_report.size = header->size; ++ ++ complete_all(&ipts->feature_event); ++ return 0; + } -+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); -+ vma_end_read(vma); -+ if (!(fault & VM_FAULT_RETRY)) { -+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); -+ goto out; ++ ++ if (header->type != IPTS_DATA_TYPE_FRAME) ++ return 0; ++ ++ if (header->size + 3 + sizeof(struct ipts_hid_header) > IPTS_HID_REPORT_DATA_SIZE) ++ return -ERANGE; ++ ++ temp = kzalloc(IPTS_HID_REPORT_DATA_SIZE, GFP_KERNEL); ++ if (!temp) ++ return -ENOMEM; ++ ++ /* ++ * Synthesize a HID report matching the devices that natively send HID reports ++ */ ++ temp[0] = IPTS_HID_REPORT_DATA; ++ ++ frame = (struct ipts_hid_header *)&temp[3]; ++ frame->type = IPTS_HID_FRAME_TYPE_RAW; ++ frame->size = header->size + sizeof(*frame); ++ ++ memcpy(frame->data, header->data, header->size); ++ ++ ret = hid_input_report(ipts->hid, HID_INPUT_REPORT, temp, IPTS_HID_REPORT_DATA_SIZE, 1); ++ kfree(temp); ++ ++ return ret; ++} ++ ++int ipts_hid_init(struct ipts_context *ipts, struct ipts_device_info info) ++{ ++ int ret = 0; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (ipts->hid) ++ return 0; ++ ++ ipts->hid = hid_allocate_device(); ++ if (IS_ERR(ipts->hid)) { ++ int err = PTR_ERR(ipts->hid); ++ ++ dev_err(ipts->dev, "Failed to allocate HID device: %d\n", err); ++ return err; ++ } ++ ++ ipts->hid->driver_data = ipts; ++ ipts->hid->dev.parent = ipts->dev; ++ ipts->hid->ll_driver = &ipts_hid_driver; ++ ++ ipts->hid->vendor = info.vendor; ++ ipts->hid->product = info.product; ++ ipts->hid->group = HID_GROUP_MULTITOUCH; ++ ++ snprintf(ipts->hid->name, sizeof(ipts->hid->name), "IPTS %04X:%04X", info.vendor, ++ info.product); ++ ++ ret = hid_add_device(ipts->hid); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to add HID device: %d\n", ret); ++ ipts_hid_free(ipts); ++ return ret; + } -+ count_vm_vma_lock_event(VMA_LOCK_RETRY); -+ /* Quick path to respond to signals */ -+ if (fault_signal_pending(fault, regs)) { -+ fault = VM_FAULT_SIGNAL; -+ goto out; ++ ++ return 0; ++} ++ ++int ipts_hid_free(struct ipts_context *ipts) ++{ ++ if (!ipts) ++ return -EFAULT; ++ ++ if (!ipts->hid) ++ return 0; ++ ++ hid_destroy_device(ipts->hid); ++ ipts->hid = NULL; ++ ++ return 0; ++} +diff --git a/drivers/hid/ipts/hid.h b/drivers/hid/ipts/hid.h +new file mode 100644 +index 000000000000..62bf3cd48608 +--- /dev/null ++++ b/drivers/hid/ipts/hid.h +@@ -0,0 +1,22 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2022-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_HID_H ++#define IPTS_HID_H ++ ++#include ++ ++#include "context.h" ++#include "spec-device.h" ++ ++int ipts_hid_input_data(struct ipts_context *ipts, u32 buffer); ++ ++int ipts_hid_init(struct ipts_context *ipts, struct ipts_device_info info); ++int ipts_hid_free(struct ipts_context *ipts); ++ ++#endif /* IPTS_HID_H */ +diff --git a/drivers/hid/ipts/main.c b/drivers/hid/ipts/main.c +new file mode 100644 +index 000000000000..0f20c6c08c38 +--- /dev/null ++++ b/drivers/hid/ipts/main.c +@@ -0,0 +1,127 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver 
for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "control.h" ++#include "mei.h" ++#include "receiver.h" ++#include "spec-device.h" ++ ++/* ++ * The MEI client ID for IPTS functionality. ++ */ ++#define IPTS_ID UUID_LE(0x3e8d0870, 0x271a, 0x4208, 0x8e, 0xb5, 0x9a, 0xcb, 0x94, 0x02, 0xae, 0x04) ++ ++static int ipts_set_dma_mask(struct mei_cl_device *cldev) ++{ ++ if (!cldev) ++ return -EFAULT; ++ ++ if (!dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(64))) ++ return 0; ++ ++ return dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(32)); ++} ++ ++static int ipts_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) ++ return -EFAULT; ++ ++ ret = ipts_set_dma_mask(cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to set DMA mask for IPTS: %d\n", ret); ++ return ret; + } -+lock_mmap: -+#endif /* CONFIG_PER_VMA_LOCK */ - mmap_read_lock(mm); - - gmap = NULL; -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index a825bf031f49..df21fba77db1 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -27,6 +27,7 @@ config X86_64 - # Options that are inherently 64-bit kernel only: - select ARCH_HAS_GIGANTIC_PAGE - select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 -+ select ARCH_SUPPORTS_PER_VMA_LOCK - select ARCH_USE_CMPXCHG_LOCKREF - select HAVE_ARCH_SOFT_DIRTY - select MODULES_USE_ELF_RELA -diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c -index a498ae1fbe66..e4399983c50c 100644 ---- a/arch/x86/mm/fault.c -+++ b/arch/x86/mm/fault.c -@@ -19,6 +19,7 @@ - #include /* faulthandler_disabled() */ - #include /* efi_crash_gracefully_on_page_fault()*/ - #include -+#include /* find_and_lock_vma() */ - - #include /* boot_cpu_has, ... */ - #include /* dotraplinkage, ... 
*/ -@@ -1333,6 +1334,38 @@ void do_user_addr_fault(struct pt_regs *regs, - } - #endif - -+#ifdef CONFIG_PER_VMA_LOCK -+ if (!(flags & FAULT_FLAG_USER)) -+ goto lock_mmap; + -+ vma = lock_vma_under_rcu(mm, address); -+ if (!vma) -+ goto lock_mmap; ++ ret = mei_cldev_enable(cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to enable MEI device: %d\n", ret); ++ return ret; ++ } + -+ if (unlikely(access_error(error_code, vma))) { -+ vma_end_read(vma); -+ goto lock_mmap; ++ ipts = devm_kzalloc(&cldev->dev, sizeof(*ipts), GFP_KERNEL); ++ if (!ipts) { ++ mei_cldev_disable(cldev); ++ return -ENOMEM; + } -+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); -+ vma_end_read(vma); + -+ if (!(fault & VM_FAULT_RETRY)) { -+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS); -+ goto done; ++ ret = ipts_mei_init(&ipts->mei, cldev); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to init MEI bus logic: %d\n", ret); ++ return ret; + } -+ count_vm_vma_lock_event(VMA_LOCK_RETRY); + -+ /* Quick path to respond to signals */ -+ if (fault_signal_pending(fault, regs)) { -+ if (!user_mode(regs)) -+ kernelmode_fixup_or_oops(regs, error_code, address, -+ SIGBUS, BUS_ADRERR, -+ ARCH_DEFAULT_PKEY); ++ ipts->dev = &cldev->dev; ++ ipts->mode = IPTS_MODE_EVENT; ++ ++ mutex_init(&ipts->feature_lock); ++ init_completion(&ipts->feature_event); ++ ++ mei_cldev_set_drvdata(cldev, ipts); ++ ++ ret = ipts_control_start(ipts); ++ if (ret) { ++ dev_err(&cldev->dev, "Failed to start IPTS: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ipts_remove(struct mei_cl_device *cldev) ++{ ++ int ret = 0; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) { ++ pr_err("MEI device is NULL!"); + return; + } -+lock_mmap: -+#endif /* CONFIG_PER_VMA_LOCK */ + - /* - * Kernel-mode access to the user address space should only occur - * on well-defined single instructions listed in the exception -@@ -1433,6 +1466,9 @@ void do_user_addr_fault(struct pt_regs *regs, - } - - mmap_read_unlock(mm); -+#ifdef CONFIG_PER_VMA_LOCK -+done: -+#endif - if (likely(!(fault & VM_FAULT_ERROR))) - return; - -diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c -index 44d1ee429eb0..881e9c82b9d1 100644 ---- a/fs/userfaultfd.c -+++ b/fs/userfaultfd.c -@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) - return ctx->features & UFFD_FEATURE_INITIALIZED; - } - ++ ipts = mei_cldev_get_drvdata(cldev); ++ ++ ret = ipts_control_stop(ipts); ++ if (ret) ++ dev_err(&cldev->dev, "Failed to stop IPTS: %d\n", ret); ++ ++ mei_cldev_disable(cldev); ++} ++ ++static struct mei_cl_device_id ipts_device_id_table[] = { ++ { .uuid = IPTS_ID, .version = MEI_CL_VERSION_ANY }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(mei, ipts_device_id_table); ++ ++static struct mei_cl_driver ipts_driver = { ++ .id_table = ipts_device_id_table, ++ .name = "ipts", ++ .probe = ipts_probe, ++ .remove = ipts_remove, ++}; ++module_mei_cl_driver(ipts_driver); ++ ++MODULE_DESCRIPTION("IPTS touchscreen driver"); ++MODULE_AUTHOR("Dorian Stoll "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hid/ipts/mei.c b/drivers/hid/ipts/mei.c +new file mode 100644 +index 000000000000..26666fd99b0c +--- /dev/null ++++ b/drivers/hid/ipts/mei.c +@@ -0,0 +1,189 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* -+ * Whether WP_UNPOPULATED is enabled on the uffd context. It is only -+ * meaningful when userfaultfd_wp()==true on the vma and when it's -+ * anonymous. 
++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus + */ -+bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "context.h" ++#include "mei.h" ++ ++static void locked_list_add(struct list_head *new, struct list_head *head, ++ struct rw_semaphore *lock) +{ -+ struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; ++ down_write(lock); ++ list_add(new, head); ++ up_write(lock); ++} + -+ if (!ctx) -+ return false; ++static void locked_list_del(struct list_head *entry, struct rw_semaphore *lock) ++{ ++ down_write(lock); ++ list_del(entry); ++ up_write(lock); ++} + -+ return ctx->features & UFFD_FEATURE_WP_UNPOPULATED; ++static void ipts_mei_incoming(struct mei_cl_device *cldev) ++{ ++ ssize_t ret = 0; ++ struct ipts_mei_message *entry = NULL; ++ struct ipts_context *ipts = NULL; ++ ++ if (!cldev) { ++ pr_err("MEI device is NULL!"); ++ return; ++ } ++ ++ ipts = mei_cldev_get_drvdata(cldev); ++ if (!ipts) { ++ pr_err("IPTS driver context is NULL!"); ++ return; ++ } ++ ++ entry = devm_kzalloc(ipts->dev, sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return; ++ ++ INIT_LIST_HEAD(&entry->list); ++ ++ do { ++ ret = mei_cldev_recv(cldev, (u8 *)&entry->rsp, sizeof(entry->rsp)); ++ } while (ret == -EINTR); ++ ++ if (ret < 0) { ++ dev_err(ipts->dev, "Error while reading response: %ld\n", ret); ++ return; ++ } ++ ++ if (ret == 0) { ++ dev_err(ipts->dev, "Received empty response\n"); ++ return; ++ } ++ ++ locked_list_add(&entry->list, &ipts->mei.messages, &ipts->mei.message_lock); ++ wake_up_all(&ipts->mei.message_queue); +} + - static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, - vm_flags_t flags) - { -@@ -1971,6 +1986,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, - #endif - #ifndef CONFIG_PTE_MARKER_UFFD_WP - uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; -+ uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED; - #endif - uffdio_api.ioctls = UFFD_API_IOCTLS; - ret = -EFAULT; -diff --git a/include/linux/mm.h b/include/linux/mm.h -index 1f79667824eb..c4c9de7d1916 100644 ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -256,6 +256,8 @@ void setup_initial_init_mm(void *start_code, void *end_code, - struct vm_area_struct *vm_area_alloc(struct mm_struct *); - struct vm_area_struct *vm_area_dup(struct vm_area_struct *); - void vm_area_free(struct vm_area_struct *); -+/* Use only if VMA has no other users */ -+void __vm_area_free(struct vm_area_struct *vma); - - #ifndef CONFIG_MMU - extern struct rb_root nommu_region_tree; -@@ -478,7 +480,8 @@ static inline bool fault_flag_allow_retry_first(enum fault_flag flags) - { FAULT_FLAG_USER, "USER" }, \ - { FAULT_FLAG_REMOTE, "REMOTE" }, \ - { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ -- { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } -+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \ -+ { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" } - - /* - * vm_fault is filled by the pagefault handler and passed to the vma's -@@ -623,6 +626,117 @@ struct vm_operations_struct { - unsigned long addr); - }; - -+#ifdef CONFIG_PER_VMA_LOCK -+/* -+ * Try to read-lock a vma. The function is allowed to occasionally yield false -+ * locked result to avoid performance overhead, in which case we fall back to -+ * using mmap_lock. The function should never yield false unlocked result. 
-+ */ -+static inline bool vma_start_read(struct vm_area_struct *vma) ++static int ipts_mei_search(struct ipts_mei *mei, enum ipts_command_code code, ++ struct ipts_response *rsp) +{ -+ /* Check before locking. A race might cause false locked result. */ -+ if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq)) -+ return false; ++ struct ipts_mei_message *entry = NULL; + -+ if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) -+ return false; ++ if (!mei) ++ return -EFAULT; ++ ++ if (!rsp) ++ return -EFAULT; ++ ++ down_read(&mei->message_lock); + + /* -+ * Overflow might produce false locked result. -+ * False unlocked result is impossible because we modify and check -+ * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq -+ * modification invalidates all existing locks. ++ * Iterate over the list of received messages, and check if there is one ++ * matching the requested command code. ++ */ ++ list_for_each_entry(entry, &mei->messages, list) { ++ if (entry->rsp.cmd == code) ++ break; ++ } ++ ++ up_read(&mei->message_lock); ++ ++ /* ++ * If entry is not the list head, this means that the loop above has been stopped early, ++ * and that we found a matching element. We drop the message from the list and return it. + */ -+ if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) { -+ up_read(&vma->vm_lock->lock); -+ return false; ++ if (!list_entry_is_head(entry, &mei->messages, list)) { ++ locked_list_del(&entry->list, &mei->message_lock); ++ ++ *rsp = entry->rsp; ++ devm_kfree(&mei->cldev->dev, entry); ++ ++ return 0; + } -+ return true; -+} + -+static inline void vma_end_read(struct vm_area_struct *vma) -+{ -+ rcu_read_lock(); /* keeps vma alive till the end of up_read */ -+ up_read(&vma->vm_lock->lock); -+ rcu_read_unlock(); ++ return -EAGAIN; +} + -+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) ++int ipts_mei_recv(struct ipts_mei *mei, enum ipts_command_code code, struct ipts_response *rsp, ++ u64 timeout) +{ -+ mmap_assert_write_locked(vma->vm_mm); ++ int ret = 0; ++ ++ if (!mei) ++ return -EFAULT; + + /* -+ * current task is holding mmap_write_lock, both vma->vm_lock_seq and -+ * mm->mm_lock_seq can't be concurrently modified. ++ * A timeout of 0 means check and return immideately. + */ -+ *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq); -+ return (vma->vm_lock_seq == *mm_lock_seq); -+} ++ if (timeout == 0) ++ return ipts_mei_search(mei, code, rsp); + -+static inline void vma_start_write(struct vm_area_struct *vma) -+{ -+ int mm_lock_seq; ++ /* ++ * A timeout of less than 0 means to wait forever. 
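++ * Summing up: 0 polls exactly once, a positive value bounds the wait in
++ * milliseconds, and a negative value blocks until a matching response
++ * arrives.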
++ */ ++ if (timeout < 0) { ++ wait_event(mei->message_queue, ipts_mei_search(mei, code, rsp) == 0); ++ return 0; ++ } + -+ if (__is_vma_write_locked(vma, &mm_lock_seq)) -+ return; ++ ret = wait_event_timeout(mei->message_queue, ipts_mei_search(mei, code, rsp) == 0, ++ msecs_to_jiffies(timeout)); + -+ down_write(&vma->vm_lock->lock); -+ vma->vm_lock_seq = mm_lock_seq; -+ up_write(&vma->vm_lock->lock); ++ if (ret > 0) ++ return 0; ++ ++ return -EAGAIN; +} + -+static inline bool vma_try_start_write(struct vm_area_struct *vma) ++int ipts_mei_send(struct ipts_mei *mei, void *data, size_t length) +{ -+ int mm_lock_seq; ++ int ret = 0; + -+ if (__is_vma_write_locked(vma, &mm_lock_seq)) -+ return true; ++ if (!mei) ++ return -EFAULT; + -+ if (!down_write_trylock(&vma->vm_lock->lock)) -+ return false; ++ if (!mei->cldev) ++ return -EFAULT; + -+ vma->vm_lock_seq = mm_lock_seq; -+ up_write(&vma->vm_lock->lock); -+ return true; -+} ++ if (!data) ++ return -EFAULT; + -+static inline void vma_assert_write_locked(struct vm_area_struct *vma) -+{ -+ int mm_lock_seq; ++ do { ++ ret = mei_cldev_send(mei->cldev, (u8 *)data, length); ++ } while (ret == -EINTR); + -+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); ++ if (ret < 0) ++ return ret; ++ ++ return 0; +} + -+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) ++int ipts_mei_init(struct ipts_mei *mei, struct mei_cl_device *cldev) +{ -+ /* When detaching vma should be write-locked */ -+ if (detached) -+ vma_assert_write_locked(vma); -+ vma->detached = detached; -+} ++ if (!mei) ++ return -EFAULT; + -+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, -+ unsigned long address); ++ if (!cldev) ++ return -EFAULT; + -+#else /* CONFIG_PER_VMA_LOCK */ ++ mei->cldev = cldev; + -+static inline void vma_init_lock(struct vm_area_struct *vma) {} -+static inline bool vma_start_read(struct vm_area_struct *vma) -+ { return false; } -+static inline void vma_end_read(struct vm_area_struct *vma) {} -+static inline void vma_start_write(struct vm_area_struct *vma) {} -+static inline bool vma_try_start_write(struct vm_area_struct *vma) -+ { return true; } -+static inline void vma_assert_write_locked(struct vm_area_struct *vma) {} -+static inline void vma_mark_detached(struct vm_area_struct *vma, -+ bool detached) {} ++ INIT_LIST_HEAD(&mei->messages); ++ init_waitqueue_head(&mei->message_queue); ++ init_rwsem(&mei->message_lock); + -+#endif /* CONFIG_PER_VMA_LOCK */ ++ mei_cldev_register_rx_cb(cldev, ipts_mei_incoming); + ++ return 0; ++} +diff --git a/drivers/hid/ipts/mei.h b/drivers/hid/ipts/mei.h +new file mode 100644 +index 000000000000..eadacae54c40 +--- /dev/null ++++ b/drivers/hid/ipts/mei.h +@@ -0,0 +1,67 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* -+ * WARNING: vma_init does not initialize vma->vm_lock. -+ * Use vm_area_alloc()/vm_area_free() if vma needs locking. 
++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus + */ - static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) - { - static const struct vm_operations_struct dummy_vm_ops = {}; -@@ -631,6 +745,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) - vma->vm_mm = mm; - vma->vm_ops = &dummy_vm_ops; - INIT_LIST_HEAD(&vma->anon_vma_chain); -+ vma_mark_detached(vma, false); - } - - /* Use when VMA is not part of the VMA tree and needs no locking */ -@@ -644,28 +759,28 @@ static inline void vm_flags_init(struct vm_area_struct *vma, - static inline void vm_flags_reset(struct vm_area_struct *vma, - vm_flags_t flags) - { -- mmap_assert_write_locked(vma->vm_mm); -+ vma_start_write(vma); - vm_flags_init(vma, flags); - } - - static inline void vm_flags_reset_once(struct vm_area_struct *vma, - vm_flags_t flags) - { -- mmap_assert_write_locked(vma->vm_mm); -+ vma_start_write(vma); - WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); - } - - static inline void vm_flags_set(struct vm_area_struct *vma, - vm_flags_t flags) - { -- mmap_assert_write_locked(vma->vm_mm); -+ vma_start_write(vma); - ACCESS_PRIVATE(vma, __vm_flags) |= flags; - } - - static inline void vm_flags_clear(struct vm_area_struct *vma, - vm_flags_t flags) - { -- mmap_assert_write_locked(vma->vm_mm); -+ vma_start_write(vma); - ACCESS_PRIVATE(vma, __vm_flags) &= ~flags; - } - -@@ -686,7 +801,7 @@ static inline void __vm_flags_mod(struct vm_area_struct *vma, - static inline void vm_flags_mod(struct vm_area_struct *vma, - vm_flags_t set, vm_flags_t clear) - { -- mmap_assert_write_locked(vma->vm_mm); -+ vma_start_write(vma); - __vm_flags_mod(vma, set, clear); - } - -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index de1e622dd366..0e1d239a882c 100644 ---- a/include/linux/mm_inline.h -+++ b/include/linux/mm_inline.h -@@ -557,6 +557,12 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, - /* The current status of the pte should be "cleared" before calling */ - WARN_ON_ONCE(!pte_none(*pte)); - -+ /* -+ * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole -+ * thing, because when zapping either it means it's dropping the -+ * page, or in TTU where the present pte will be quickly replaced -+ * with a swap pte. There's no way of leaking the bit. -+ */ - if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) - return; - -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 22b2ac82bffd..ef74ea892c5b 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -471,6 +471,10 @@ struct anon_vma_name { - char name[]; - }; - -+struct vma_lock { -+ struct rw_semaphore lock; ++ ++#ifndef IPTS_MEI_H ++#define IPTS_MEI_H ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "spec-device.h" ++ ++struct ipts_mei_message { ++ struct list_head list; ++ struct ipts_response rsp; +}; + - /* - * This struct describes a virtual memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory -@@ -480,9 +484,16 @@ struct anon_vma_name { - struct vm_area_struct { - /* The first cache line has the info for VMA tree walking. */ - -- unsigned long vm_start; /* Our start address within vm_mm. */ -- unsigned long vm_end; /* The first byte after our end address -- within vm_mm. 
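++ *
++ * Transfers that are interrupted (-EINTR) are retried internally, just
++ * like on the receive path.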
*/ -+ union { -+ struct { -+ /* VMA covers [vm_start; vm_end) addresses within mm */ -+ unsigned long vm_start; -+ unsigned long vm_end; -+ }; -+#ifdef CONFIG_PER_VMA_LOCK -+ struct rcu_head vm_rcu; /* Used for deferred freeing. */ -+#endif -+ }; - - struct mm_struct *vm_mm; /* The address space we belong to. */ - -@@ -501,6 +512,14 @@ struct vm_area_struct { - vm_flags_t __private __vm_flags; - }; - -+#ifdef CONFIG_PER_VMA_LOCK -+ int vm_lock_seq; -+ struct vma_lock *vm_lock; ++struct ipts_mei { ++ struct mei_cl_device *cldev; + -+ /* Flag to indicate areas detached from the mm->mm_mt tree */ -+ bool detached; -+#endif ++ struct list_head messages; + - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. -@@ -637,6 +656,9 @@ struct mm_struct { - * init_mm.mmlist, and are protected - * by mmlist_lock - */ -+#ifdef CONFIG_PER_VMA_LOCK -+ int mm_lock_seq; -+#endif - - - unsigned long hiwater_rss; /* High-watermark of RSS usage */ -@@ -1042,6 +1064,7 @@ typedef struct { - * mapped after the fault. - * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached. - * We should only access orig_pte if this flag set. -+ * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock. - * - * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify - * whether we would allow page faults to retry by specifying these two -@@ -1079,6 +1102,7 @@ enum fault_flag { - FAULT_FLAG_INTERRUPTIBLE = 1 << 9, - FAULT_FLAG_UNSHARE = 1 << 10, - FAULT_FLAG_ORIG_PTE_VALID = 1 << 11, -+ FAULT_FLAG_VMA_LOCK = 1 << 12, - }; - - typedef unsigned int __bitwise zap_flags_t; -diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h -index 96e113e23d04..aab8f1b28d26 100644 ---- a/include/linux/mmap_lock.h -+++ b/include/linux/mmap_lock.h -@@ -60,6 +60,29 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) - - #endif /* CONFIG_TRACING */ - -+static inline void mmap_assert_locked(struct mm_struct *mm) ++ wait_queue_head_t message_queue; ++ struct rw_semaphore message_lock; ++}; ++ ++/* ++ * ipts_mei_recv() - Receive data from a MEI device. ++ * @mei: The IPTS MEI device context. ++ * @code: The IPTS command code to look for. ++ * @rsp: The address that the received data will be copied to. ++ * @timeout: How many milliseconds the function will wait at most. ++ * ++ * A negative timeout means to wait forever. ++ * ++ * Returns: 0 on success, <0 on error, -EAGAIN if no response has been received. ++ */ ++int ipts_mei_recv(struct ipts_mei *mei, enum ipts_command_code code, struct ipts_response *rsp, ++ u64 timeout); ++ ++/* ++ * ipts_mei_send() - Send data to a MEI device. ++ * @ipts: The IPTS MEI device context. ++ * @data: The data to send. ++ * @size: The size of the data. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_mei_send(struct ipts_mei *mei, void *data, size_t length); ++ ++/* ++ * ipts_mei_init() - Initialize the MEI device context. ++ * @mei: The MEI device context to initialize. ++ * @cldev: The MEI device the context will be bound to. ++ * ++ * Returns: 0 on success, <0 on error. 
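++ *
++ * This also registers the driver's receive callback, which queues
++ * incoming responses for ipts_mei_recv().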
++ */ ++int ipts_mei_init(struct ipts_mei *mei, struct mei_cl_device *cldev); ++ ++#endif /* IPTS_MEI_H */ +diff --git a/drivers/hid/ipts/receiver.c b/drivers/hid/ipts/receiver.c +new file mode 100644 +index 000000000000..77234f9e0e17 +--- /dev/null ++++ b/drivers/hid/ipts/receiver.c +@@ -0,0 +1,249 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cmd.h" ++#include "context.h" ++#include "control.h" ++#include "hid.h" ++#include "resources.h" ++#include "spec-device.h" ++#include "thread.h" ++ ++static void ipts_receiver_next_doorbell(struct ipts_context *ipts) +{ -+ lockdep_assert_held(&mm->mmap_lock); -+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); ++ u32 *doorbell = (u32 *)ipts->resources.doorbell.address; ++ *doorbell = *doorbell + 1; +} + -+static inline void mmap_assert_write_locked(struct mm_struct *mm) ++static u32 ipts_receiver_current_doorbell(struct ipts_context *ipts) +{ -+ lockdep_assert_held_write(&mm->mmap_lock); -+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); ++ u32 *doorbell = (u32 *)ipts->resources.doorbell.address; ++ return *doorbell; +} + -+#ifdef CONFIG_PER_VMA_LOCK -+static inline void vma_end_write_all(struct mm_struct *mm) ++static void ipts_receiver_backoff(time64_t last, u32 n) +{ -+ mmap_assert_write_locked(mm); -+ /* No races during update due to exclusive mmap_lock being held */ -+ WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1); ++ /* ++ * If the last change was less than n seconds ago, ++ * sleep for a shorter period so that new data can be ++ * processed quickly. If there was no change for more than ++ * n seconds, sleep longer to avoid wasting CPU cycles. 
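++ * With the n = 5 that the receiver loops pass in, this polls every 20 ms
++ * while data arrived during the last five seconds, and falls back to one
++ * poll every 200 ms once the device has gone idle.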
++ */ ++ if (last + n > ktime_get_seconds()) ++ msleep(20); ++ else ++ msleep(200); +} -+#else -+static inline void vma_end_write_all(struct mm_struct *mm) {} -+#endif + - static inline void mmap_init_lock(struct mm_struct *mm) - { - init_rwsem(&mm->mmap_lock); -@@ -102,12 +125,14 @@ static inline bool mmap_write_trylock(struct mm_struct *mm) - static inline void mmap_write_unlock(struct mm_struct *mm) - { - __mmap_lock_trace_released(mm, true); -+ vma_end_write_all(mm); - up_write(&mm->mmap_lock); - } - - static inline void mmap_write_downgrade(struct mm_struct *mm) - { - __mmap_lock_trace_acquire_returned(mm, false, true); -+ vma_end_write_all(mm); - downgrade_write(&mm->mmap_lock); - } - -@@ -150,18 +175,6 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) - up_read_non_owner(&mm->mmap_lock); - } - --static inline void mmap_assert_locked(struct mm_struct *mm) --{ -- lockdep_assert_held(&mm->mmap_lock); -- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); --} -- --static inline void mmap_assert_write_locked(struct mm_struct *mm) --{ -- lockdep_assert_held_write(&mm->mmap_lock); -- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); --} -- - static inline int mmap_lock_is_contended(struct mm_struct *mm) - { - return rwsem_is_contended(&mm->mmap_lock); -diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h -index 3767f18114ef..0cf8880219da 100644 ---- a/include/linux/userfaultfd_k.h -+++ b/include/linux/userfaultfd_k.h -@@ -179,6 +179,7 @@ extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, - unsigned long end, struct list_head *uf); - extern void userfaultfd_unmap_complete(struct mm_struct *mm, - struct list_head *uf); -+extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); - - #else /* CONFIG_USERFAULTFD */ - -@@ -274,8 +275,30 @@ static inline bool uffd_disable_fault_around(struct vm_area_struct *vma) - return false; - } - -+static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) ++static int ipts_receiver_event_loop(struct ipts_thread *thread) +{ -+ return false; ++ int ret = 0; ++ u32 buffer = 0; ++ ++ struct ipts_context *ipts = NULL; ++ time64_t last = ktime_get_seconds(); ++ ++ if (!thread) ++ return -EFAULT; ++ ++ ipts = thread->data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "IPTS running in event mode\n"); ++ ++ while (!ipts_thread_should_stop(thread)) { ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_control_wait_data(ipts, false); ++ if (ret == -EAGAIN) ++ break; ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ continue; ++ } ++ ++ buffer = ipts_receiver_current_doorbell(ipts) % IPTS_BUFFERS; ++ ipts_receiver_next_doorbell(ipts); ++ ++ ret = ipts_hid_input_data(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to process buffer: %d\n", ret); ++ ++ ret = ipts_control_refill_buffer(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send feedback: %d\n", ret); ++ ++ ret = ipts_control_request_data(ipts); ++ if (ret) ++ dev_err(ipts->dev, "Failed to request data: %d\n", ret); ++ ++ last = ktime_get_seconds(); ++ } ++ ++ ipts_receiver_backoff(last, 5); ++ } ++ ++ ret = ipts_control_request_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to request flush: %d\n", ret); ++ return ret; ++ } ++ ++ ret = ipts_control_wait_data(ipts, true); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ ret = 
ipts_control_wait_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for flush: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ return 0; +} + - #endif /* CONFIG_USERFAULTFD */ - -+static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) ++static int ipts_receiver_doorbell_loop(struct ipts_thread *thread) +{ -+ /* Only wr-protect mode uses pte markers */ -+ if (!userfaultfd_wp(vma)) -+ return false; ++ int ret = 0; ++ u32 buffer = 0; + -+ /* File-based uffd-wp always need markers */ -+ if (!vma_is_anonymous(vma)) -+ return true; ++ u32 doorbell = 0; ++ u32 lastdb = 0; + -+ /* -+ * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED -+ * enabled (to apply markers on zero pages). -+ */ -+ return userfaultfd_wp_unpopulated(vma); ++ struct ipts_context *ipts = NULL; ++ time64_t last = ktime_get_seconds(); ++ ++ if (!thread) ++ return -EFAULT; ++ ++ ipts = thread->data; ++ ++ if (!ipts) ++ return -EFAULT; ++ ++ dev_info(ipts->dev, "IPTS running in doorbell mode\n"); ++ ++ while (true) { ++ if (ipts_thread_should_stop(thread)) { ++ ret = ipts_control_request_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to request flush: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ doorbell = ipts_receiver_current_doorbell(ipts); ++ ++ /* ++ * After filling up one of the data buffers, IPTS will increment ++ * the doorbell. The value of the doorbell stands for the *next* ++ * buffer that IPTS is going to fill. ++ */ ++ while (lastdb != doorbell) { ++ buffer = lastdb % IPTS_BUFFERS; ++ ++ ret = ipts_hid_input_data(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to process buffer: %d\n", ret); ++ ++ ret = ipts_control_refill_buffer(ipts, buffer); ++ if (ret) ++ dev_err(ipts->dev, "Failed to send feedback: %d\n", ret); ++ ++ last = ktime_get_seconds(); ++ lastdb++; ++ } ++ ++ if (ipts_thread_should_stop(thread)) ++ break; ++ ++ ipts_receiver_backoff(last, 5); ++ } ++ ++ ret = ipts_control_wait_data(ipts, true); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for data: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ ret = ipts_control_wait_flush(ipts); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to wait for flush: %d\n", ret); ++ ++ if (ret != -EAGAIN) ++ return ret; ++ else ++ return 0; ++ } ++ ++ return 0; +} + - static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry) - { - #ifdef CONFIG_PTE_MARKER_UFFD_WP -diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h -index 7f5d1caf5890..8abfa1240040 100644 ---- a/include/linux/vm_event_item.h -+++ b/include/linux/vm_event_item.h -@@ -149,6 +149,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, - #ifdef CONFIG_X86 - DIRECT_MAP_LEVEL2_SPLIT, - DIRECT_MAP_LEVEL3_SPLIT, -+#endif -+#ifdef CONFIG_PER_VMA_LOCK_STATS -+ VMA_LOCK_SUCCESS, -+ VMA_LOCK_ABORT, -+ VMA_LOCK_RETRY, -+ VMA_LOCK_MISS, - #endif - NR_VM_EVENT_ITEMS - }; -diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h -index 19cf5b6892ce..fed855bae6d8 100644 ---- a/include/linux/vmstat.h -+++ b/include/linux/vmstat.h -@@ -125,6 +125,12 @@ static inline void vm_events_fold_cpu(int cpu) - #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) - #endif - -+#ifdef CONFIG_PER_VMA_LOCK_STATS -+#define count_vm_vma_lock_event(x) count_vm_event(x) -+#else -+#define count_vm_vma_lock_event(x) do {} while (0) -+#endif ++int ipts_receiver_start(struct ipts_context *ipts) ++{ ++ int ret = 0; + - #define __count_zid_vm_events(item, zid, delta) 
\ - __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) - -diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h -index 005e5e306266..90c958952bfc 100644 ---- a/include/uapi/linux/userfaultfd.h -+++ b/include/uapi/linux/userfaultfd.h -@@ -38,7 +38,8 @@ - UFFD_FEATURE_MINOR_HUGETLBFS | \ - UFFD_FEATURE_MINOR_SHMEM | \ - UFFD_FEATURE_EXACT_ADDRESS | \ -- UFFD_FEATURE_WP_HUGETLBFS_SHMEM) -+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ -+ UFFD_FEATURE_WP_UNPOPULATED) - #define UFFD_API_IOCTLS \ - ((__u64)1 << _UFFDIO_REGISTER | \ - (__u64)1 << _UFFDIO_UNREGISTER | \ -@@ -203,6 +204,12 @@ struct uffdio_api { - * - * UFFD_FEATURE_WP_HUGETLBFS_SHMEM indicates that userfaultfd - * write-protection mode is supported on both shmem and hugetlbfs. -+ * -+ * UFFD_FEATURE_WP_UNPOPULATED indicates that userfaultfd -+ * write-protection mode will always apply to unpopulated pages -+ * (i.e. empty ptes). This will be the default behavior for shmem -+ * & hugetlbfs, so this flag only affects anonymous memory behavior -+ * when userfault write-protection mode is registered. - */ - #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) - #define UFFD_FEATURE_EVENT_FORK (1<<1) -@@ -217,6 +224,7 @@ struct uffdio_api { - #define UFFD_FEATURE_MINOR_SHMEM (1<<10) - #define UFFD_FEATURE_EXACT_ADDRESS (1<<11) - #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) -+#define UFFD_FEATURE_WP_UNPOPULATED (1<<13) - __u64 features; - - __u64 ioctls; -diff --git a/kernel/fork.c b/kernel/fork.c -index 349945168239..ebd353730887 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -455,13 +455,49 @@ static struct kmem_cache *vm_area_cachep; - /* SLAB cache for mm_struct structures (tsk->mm) */ - static struct kmem_cache *mm_cachep; - -+#ifdef CONFIG_PER_VMA_LOCK ++ if (!ipts) ++ return -EFAULT; + -+/* SLAB cache for vm_area_struct.lock */ -+static struct kmem_cache *vma_lock_cachep; ++ if (ipts->mode == IPTS_MODE_EVENT) { ++ ret = ipts_thread_start(&ipts->receiver_loop, ipts_receiver_event_loop, ipts, ++ "ipts_event"); ++ } else if (ipts->mode == IPTS_MODE_DOORBELL) { ++ ret = ipts_thread_start(&ipts->receiver_loop, ipts_receiver_doorbell_loop, ipts, ++ "ipts_doorbell"); ++ } else { ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ dev_err(ipts->dev, "Failed to start receiver loop: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} + -+static bool vma_lock_alloc(struct vm_area_struct *vma) ++int ipts_receiver_stop(struct ipts_context *ipts) +{ -+ vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); -+ if (!vma->vm_lock) -+ return false; ++ int ret = 0; + -+ init_rwsem(&vma->vm_lock->lock); -+ vma->vm_lock_seq = -1; ++ if (!ipts) ++ return -EFAULT; + -+ return true; ++ ret = ipts_thread_stop(&ipts->receiver_loop); ++ if (ret) { ++ dev_err(ipts->dev, "Failed to stop receiver loop: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; +} +diff --git a/drivers/hid/ipts/receiver.h b/drivers/hid/ipts/receiver.h +new file mode 100644 +index 000000000000..96070f34fbca +--- /dev/null ++++ b/drivers/hid/ipts/receiver.h +@@ -0,0 +1,17 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ + -+static inline void vma_lock_free(struct vm_area_struct *vma) ++#ifndef IPTS_RECEIVER_H ++#define IPTS_RECEIVER_H ++ ++#include "context.h" ++ ++int ipts_receiver_start(struct ipts_context *ipts); ++int ipts_receiver_stop(struct ipts_context *ipts); ++ ++#endif /* IPTS_RECEIVER_H */ +diff 
--git a/drivers/hid/ipts/resources.c b/drivers/hid/ipts/resources.c +new file mode 100644 +index 000000000000..80ba5885bb55 +--- /dev/null ++++ b/drivers/hid/ipts/resources.c +@@ -0,0 +1,108 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++ ++#include "resources.h" ++#include "spec-device.h" ++ ++static int ipts_resources_alloc_buffer(struct ipts_buffer *buffer, struct device *dev, size_t size) +{ -+ kmem_cache_free(vma_lock_cachep, vma->vm_lock); ++ if (!buffer) ++ return -EFAULT; ++ ++ if (buffer->address) ++ return 0; ++ ++ buffer->address = dma_alloc_coherent(dev, size, &buffer->dma_address, GFP_KERNEL); ++ ++ if (!buffer->address) ++ return -ENOMEM; ++ ++ buffer->size = size; ++ buffer->device = dev; ++ ++ return 0; +} + -+#else /* CONFIG_PER_VMA_LOCK */ ++static void ipts_resources_free_buffer(struct ipts_buffer *buffer) ++{ ++ if (!buffer->address) ++ return; + -+static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } -+static inline void vma_lock_free(struct vm_area_struct *vma) {} ++ dma_free_coherent(buffer->device, buffer->size, buffer->address, buffer->dma_address); + -+#endif /* CONFIG_PER_VMA_LOCK */ ++ buffer->address = NULL; ++ buffer->size = 0; + - struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) - { - struct vm_area_struct *vma; - - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); -- if (vma) -- vma_init(vma, mm); -+ if (!vma) -+ return NULL; ++ buffer->dma_address = 0; ++ buffer->device = NULL; ++} + -+ vma_init(vma, mm); -+ if (!vma_lock_alloc(vma)) { -+ kmem_cache_free(vm_area_cachep, vma); -+ return NULL; ++int ipts_resources_init(struct ipts_resources *res, struct device *dev, size_t ds, size_t fs) ++{ ++ int ret = 0; ++ ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_resources_alloc_buffer(&res->data[i], dev, ds); ++ if (ret) ++ goto err; + } + - return vma; - } - -@@ -469,26 +505,54 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) - { - struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - -- if (new) { -- ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); -- ASSERT_EXCLUSIVE_WRITER(orig->vm_file); -- /* -- * orig->shared.rb may be modified concurrently, but the clone -- * will be reinitialized. -- */ -- data_race(memcpy(new, orig, sizeof(*new))); -- INIT_LIST_HEAD(&new->anon_vma_chain); -- dup_anon_vma_name(orig, new); -+ if (!new) -+ return NULL; ++ for (int i = 0; i < IPTS_BUFFERS; i++) { ++ ret = ipts_resources_alloc_buffer(&res->feedback[i], dev, fs); ++ if (ret) ++ goto err; ++ } + -+ ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); -+ ASSERT_EXCLUSIVE_WRITER(orig->vm_file); -+ /* -+ * orig->shared.rb may be modified concurrently, but the clone -+ * will be reinitialized. 
-+ */ -+ data_race(memcpy(new, orig, sizeof(*new))); -+ if (!vma_lock_alloc(new)) { -+ kmem_cache_free(vm_area_cachep, new); -+ return NULL; - } -+ INIT_LIST_HEAD(&new->anon_vma_chain); -+ dup_anon_vma_name(orig, new); ++ ret = ipts_resources_alloc_buffer(&res->doorbell, dev, sizeof(u32)); ++ if (ret) ++ goto err; + - return new; - } - --void vm_area_free(struct vm_area_struct *vma) -+void __vm_area_free(struct vm_area_struct *vma) - { - free_anon_vma_name(vma); -+ vma_lock_free(vma); - kmem_cache_free(vm_area_cachep, vma); - } - -+#ifdef CONFIG_PER_VMA_LOCK -+static void vm_area_free_rcu_cb(struct rcu_head *head) -+{ -+ struct vm_area_struct *vma = container_of(head, struct vm_area_struct, -+ vm_rcu); ++ ret = ipts_resources_alloc_buffer(&res->workqueue, dev, sizeof(u32)); ++ if (ret) ++ goto err; + -+ /* The vma should not be locked while being destroyed. */ -+ VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); -+ __vm_area_free(vma); ++ ret = ipts_resources_alloc_buffer(&res->hid2me, dev, fs); ++ if (ret) ++ goto err; ++ ++ ret = ipts_resources_alloc_buffer(&res->descriptor, dev, ds + 8); ++ if (ret) ++ goto err; ++ ++ return 0; ++ ++err: ++ ++ ipts_resources_free(res); ++ return ret; +} -+#endif + -+void vm_area_free(struct vm_area_struct *vma) ++int ipts_resources_free(struct ipts_resources *res) +{ -+#ifdef CONFIG_PER_VMA_LOCK -+ call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); -+#else -+ __vm_area_free(vma); -+#endif ++ if (!res) ++ return -EFAULT; ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) ++ ipts_resources_free_buffer(&res->data[i]); ++ ++ for (int i = 0; i < IPTS_BUFFERS; i++) ++ ipts_resources_free_buffer(&res->feedback[i]); ++ ++ ipts_resources_free_buffer(&res->doorbell); ++ ipts_resources_free_buffer(&res->workqueue); ++ ipts_resources_free_buffer(&res->hid2me); ++ ipts_resources_free_buffer(&res->descriptor); ++ ++ return 0; +} +diff --git a/drivers/hid/ipts/resources.h b/drivers/hid/ipts/resources.h +new file mode 100644 +index 000000000000..6cbb24a8a054 +--- /dev/null ++++ b/drivers/hid/ipts/resources.h +@@ -0,0 +1,39 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ + - static void account_kernel_stack(struct task_struct *tsk, int account) - { - if (IS_ENABLED(CONFIG_VMAP_STACK)) { -@@ -1132,6 +1196,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, - seqcount_init(&mm->write_protect_seq); - mmap_init_lock(mm); - INIT_LIST_HEAD(&mm->mmlist); -+#ifdef CONFIG_PER_VMA_LOCK -+ mm->mm_lock_seq = 0; -+#endif - mm_pgtables_bytes_init(mm); - mm->map_count = 0; - mm->locked_vm = 0; -@@ -3074,6 +3141,9 @@ void __init proc_caches_init(void) - NULL); - - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); -+#ifdef CONFIG_PER_VMA_LOCK -+ vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); -+#endif - mmap_init(); - nsproxy_cache_init(); - } -diff --git a/mm/Kconfig b/mm/Kconfig -index cf2e47030fe8..459af2123189 100644 ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -1202,6 +1202,18 @@ config LRU_GEN_STATS - This option has a per-memcg and per-node memory overhead. 
- # } - -+config ARCH_SUPPORTS_PER_VMA_LOCK -+ def_bool n ++#ifndef IPTS_RESOURCES_H ++#define IPTS_RESOURCES_H ++ ++#include ++#include ++ ++#include "spec-device.h" ++ ++struct ipts_buffer { ++ u8 *address; ++ size_t size; ++ ++ dma_addr_t dma_address; ++ struct device *device; ++}; ++ ++struct ipts_resources { ++ struct ipts_buffer data[IPTS_BUFFERS]; ++ struct ipts_buffer feedback[IPTS_BUFFERS]; ++ ++ struct ipts_buffer doorbell; ++ struct ipts_buffer workqueue; ++ struct ipts_buffer hid2me; ++ ++ struct ipts_buffer descriptor; ++}; ++ ++int ipts_resources_init(struct ipts_resources *res, struct device *dev, size_t ds, size_t fs); ++int ipts_resources_free(struct ipts_resources *res); ++ ++#endif /* IPTS_RESOURCES_H */ +diff --git a/drivers/hid/ipts/spec-data.h b/drivers/hid/ipts/spec-data.h +new file mode 100644 +index 000000000000..e8dd98895a7e +--- /dev/null ++++ b/drivers/hid/ipts/spec-data.h +@@ -0,0 +1,100 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_SPEC_DATA_H ++#define IPTS_SPEC_DATA_H ++ ++#include ++#include ++ ++/** ++ * enum ipts_feedback_cmd_type - Commands that can be executed on the sensor through feedback. ++ */ ++enum ipts_feedback_cmd_type { ++ IPTS_FEEDBACK_CMD_TYPE_NONE = 0, ++ IPTS_FEEDBACK_CMD_TYPE_SOFT_RESET = 1, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_ARMED = 2, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_SENSING = 3, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_SLEEP = 4, ++ IPTS_FEEDBACK_CMD_TYPE_GOTO_DOZE = 5, ++ IPTS_FEEDBACK_CMD_TYPE_HARD_RESET = 6, ++}; ++ ++/** ++ * enum ipts_feedback_data_type - Defines what data a feedback buffer contains. ++ * @IPTS_FEEDBACK_DATA_TYPE_VENDOR: The buffer contains vendor specific feedback. ++ * @IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES: The buffer contains a HID set features report. ++ * @IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES: The buffer contains a HID get features report. ++ * @IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT: The buffer contains a HID output report. ++ * @IPTS_FEEDBACK_DATA_TYPE_STORE_DATA: The buffer contains calibration data for the sensor. ++ */ ++enum ipts_feedback_data_type { ++ IPTS_FEEDBACK_DATA_TYPE_VENDOR = 0, ++ IPTS_FEEDBACK_DATA_TYPE_SET_FEATURES = 1, ++ IPTS_FEEDBACK_DATA_TYPE_GET_FEATURES = 2, ++ IPTS_FEEDBACK_DATA_TYPE_OUTPUT_REPORT = 3, ++ IPTS_FEEDBACK_DATA_TYPE_STORE_DATA = 4, ++}; ++ ++/** ++ * struct ipts_feedback_header - Header that is prefixed to the data in a feedback buffer. ++ * @cmd_type: A command that should be executed on the sensor. ++ * @size: The size of the payload to be written. ++ * @buffer: The ID of the buffer that contains this feedback data. ++ * @protocol: The protocol version of the EDS. ++ * @data_type: The type of data that the buffer contains. ++ * @spi_offset: The offset at which to write the payload data to the sensor. ++ * @payload: Payload for the feedback command, or 0 if no payload is sent. ++ */ ++struct ipts_feedback_header { ++ enum ipts_feedback_cmd_type cmd_type; ++ u32 size; ++ u32 buffer; ++ u32 protocol; ++ enum ipts_feedback_data_type data_type; ++ u32 spi_offset; ++ u8 reserved[40]; ++ u8 payload[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_feedback_header) == 64); ++ ++/** ++ * enum ipts_data_type - Defines what type of data a buffer contains. ++ * @IPTS_DATA_TYPE_FRAME: Raw data frame. ++ * @IPTS_DATA_TYPE_ERROR: Error data. ++ * @IPTS_DATA_TYPE_VENDOR: Vendor specific data. 
++/**
++ * enum ipts_data_type - Defines what type of data a buffer contains.
++ * @IPTS_DATA_TYPE_FRAME: Raw data frame.
++ * @IPTS_DATA_TYPE_ERROR: Error data.
++ * @IPTS_DATA_TYPE_VENDOR: Vendor specific data.
++ * @IPTS_DATA_TYPE_HID: A HID report.
++ * @IPTS_DATA_TYPE_GET_FEATURES: The response to a GET_FEATURES HID2ME command.
++ */
++enum ipts_data_type {
++ IPTS_DATA_TYPE_FRAME = 0x00,
++ IPTS_DATA_TYPE_ERROR = 0x01,
++ IPTS_DATA_TYPE_VENDOR = 0x02,
++ IPTS_DATA_TYPE_HID = 0x03,
++ IPTS_DATA_TYPE_GET_FEATURES = 0x04,
++ IPTS_DATA_TYPE_DESCRIPTOR = 0x05,
++};
++
++/**
++ * struct ipts_data_header - Header that is prefixed to the data in a data buffer.
++ * @type: What data the buffer contains.
++ * @size: How much data the buffer contains.
++ * @buffer: Which buffer the data is in.
++ */
++struct ipts_data_header {
++ enum ipts_data_type type;
++ u32 size;
++ u32 buffer;
++ u8 reserved[52];
++ u8 data[];
++} __packed;
++
++static_assert(sizeof(struct ipts_data_header) == 64);
++
++#endif /* IPTS_SPEC_DATA_H */
+diff --git a/drivers/hid/ipts/spec-device.h b/drivers/hid/ipts/spec-device.h
+new file mode 100644
+index 000000000000..93f673d981f7
+--- /dev/null
++++ b/drivers/hid/ipts/spec-device.h
+@@ -0,0 +1,285 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2016 Intel Corporation
++ * Copyright (c) 2020-2023 Dorian Stoll
++ *
++ * Linux driver for Intel Precise Touch & Stylus
++ */
++
++#ifndef IPTS_SPEC_DEVICE_H
++#define IPTS_SPEC_DEVICE_H
++
++#include <linux/build_bug.h>
++#include <linux/types.h>
++
++/*
++ * The amount of buffers that IPTS can use for data transfer.
++ */
++#define IPTS_BUFFERS 16
++
++/*
++ * The buffer ID that is used for HID2ME feedback.
++ */
++#define IPTS_HID2ME_BUFFER IPTS_BUFFERS
++
++/**
++ * enum ipts_command_code - Commands that can be sent to the IPTS hardware.
++ * @IPTS_CMD_GET_DEVICE_INFO: Retrieves vendor information from the device.
++ * @IPTS_CMD_SET_MODE: Changes the mode that the device will operate in.
++ * @IPTS_CMD_SET_MEM_WINDOW: Configures memory buffers for passing data between device and driver.
++ * @IPTS_CMD_QUIESCE_IO: Stops the data flow from the device to the driver.
++ * @IPTS_CMD_READY_FOR_DATA: Informs the device that the driver is ready to receive data.
++ * @IPTS_CMD_FEEDBACK: Informs the device that a buffer was processed and can be refilled.
++ * @IPTS_CMD_CLEAR_MEM_WINDOW: Stops the data flow and clears the buffer addresses on the device.
++ * @IPTS_CMD_RESET_SENSOR: Resets the sensor to its default state.
++ * @IPTS_CMD_GET_DESCRIPTOR: Retrieves the HID descriptor of the device.
++ */
++enum ipts_command_code {
++ IPTS_CMD_GET_DEVICE_INFO = 0x01,
++ IPTS_CMD_SET_MODE = 0x02,
++ IPTS_CMD_SET_MEM_WINDOW = 0x03,
++ IPTS_CMD_QUIESCE_IO = 0x04,
++ IPTS_CMD_READY_FOR_DATA = 0x05,
++ IPTS_CMD_FEEDBACK = 0x06,
++ IPTS_CMD_CLEAR_MEM_WINDOW = 0x07,
++ IPTS_CMD_RESET_SENSOR = 0x0B,
++ IPTS_CMD_GET_DESCRIPTOR = 0x0F,
++};
++
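[Editorial example] A response echoes the command code that produced it, with the most significant bit set (see IPTS_RSP_BIT and struct ipts_response further down in this header). A minimal sketch of matching a response to its command (illustrative only, not part of the patch):

	/* Illustrative sketch: does this response answer the given command? */
	static bool example_is_response_to(const struct ipts_response *rsp,
					   enum ipts_command_code cmd)
	{
		return rsp->cmd == (cmd | IPTS_RSP_BIT);
	}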
++/**
++ * enum ipts_status - Possible status codes returned by the IPTS device.
++ * @IPTS_STATUS_SUCCESS: Operation completed successfully.
++ * @IPTS_STATUS_INVALID_PARAMS: Command contained an invalid payload.
++ * @IPTS_STATUS_ACCESS_DENIED: ME could not validate a buffer address.
++ * @IPTS_STATUS_CMD_SIZE_ERROR: Command contains an invalid payload.
++ * @IPTS_STATUS_NOT_READY: Buffer addresses have not been set.
++ * @IPTS_STATUS_REQUEST_OUTSTANDING: There is an outstanding command of the same type.
++ * @IPTS_STATUS_NO_SENSOR_FOUND: No sensor could be found.
++ * @IPTS_STATUS_OUT_OF_MEMORY: Not enough free memory for requested operation.
++ * @IPTS_STATUS_INTERNAL_ERROR: An unexpected error occurred.
++ * @IPTS_STATUS_SENSOR_DISABLED: The sensor has been disabled and must be reinitialized.
++ * @IPTS_STATUS_COMPAT_CHECK_FAIL: Compatibility revision check between sensor and ME failed.
++ * The host can ignore this error and attempt to continue.
++ * @IPTS_STATUS_SENSOR_EXPECTED_RESET: The sensor went through a reset initiated by the driver.
++ * @IPTS_STATUS_SENSOR_UNEXPECTED_RESET: The sensor went through an unexpected reset.
++ * @IPTS_STATUS_RESET_FAILED: Requested sensor reset failed to complete.
++ * @IPTS_STATUS_TIMEOUT: The operation timed out.
++ * @IPTS_STATUS_TEST_MODE_FAIL: Test mode pattern did not match expected values.
++ * @IPTS_STATUS_SENSOR_FAIL_FATAL: The sensor reported an error during reset sequence.
++ * Further progress is not possible.
++ * @IPTS_STATUS_SENSOR_FAIL_NONFATAL: The sensor reported an error during reset sequence.
++ * The driver can attempt to continue.
++ * @IPTS_STATUS_INVALID_DEVICE_CAPS: The device reported invalid capabilities.
++ * @IPTS_STATUS_QUIESCE_IO_IN_PROGRESS: Command cannot be completed until Quiesce IO is done.
++ */
++enum ipts_status {
++ IPTS_STATUS_SUCCESS = 0x00,
++ IPTS_STATUS_INVALID_PARAMS = 0x01,
++ IPTS_STATUS_ACCESS_DENIED = 0x02,
++ IPTS_STATUS_CMD_SIZE_ERROR = 0x03,
++ IPTS_STATUS_NOT_READY = 0x04,
++ IPTS_STATUS_REQUEST_OUTSTANDING = 0x05,
++ IPTS_STATUS_NO_SENSOR_FOUND = 0x06,
++ IPTS_STATUS_OUT_OF_MEMORY = 0x07,
++ IPTS_STATUS_INTERNAL_ERROR = 0x08,
++ IPTS_STATUS_SENSOR_DISABLED = 0x09,
++ IPTS_STATUS_COMPAT_CHECK_FAIL = 0x0A,
++ IPTS_STATUS_SENSOR_EXPECTED_RESET = 0x0B,
++ IPTS_STATUS_SENSOR_UNEXPECTED_RESET = 0x0C,
++ IPTS_STATUS_RESET_FAILED = 0x0D,
++ IPTS_STATUS_TIMEOUT = 0x0E,
++ IPTS_STATUS_TEST_MODE_FAIL = 0x0F,
++ IPTS_STATUS_SENSOR_FAIL_FATAL = 0x10,
++ IPTS_STATUS_SENSOR_FAIL_NONFATAL = 0x11,
++ IPTS_STATUS_INVALID_DEVICE_CAPS = 0x12,
++ IPTS_STATUS_QUIESCE_IO_IN_PROGRESS = 0x13,
++};
++
++/**
++ * struct ipts_command - Message that is sent to the device for calling a command.
++ * @cmd: The command that will be called.
++ * @payload: Payload containing parameters for the called command.
++ */
++struct ipts_command {
++ enum ipts_command_code cmd;
++ u8 payload[320];
++} __packed;
++
++static_assert(sizeof(struct ipts_command) == 324);
++
++/**
++ * enum ipts_mode - Configures what data the device produces and how it is sent.
++ * @IPTS_MODE_EVENT: The device will send an event once a buffer was filled.
++ * Older devices will return singletouch data in this mode.
++ * @IPTS_MODE_DOORBELL: The device will notify the driver by incrementing the doorbell value.
++ * Older devices will return multitouch data in this mode.
++ */
++enum ipts_mode {
++ IPTS_MODE_EVENT = 0x00,
++ IPTS_MODE_DOORBELL = 0x01,
++};
++
++/**
++ * struct ipts_set_mode - Payload for the SET_MODE command.
++ * @mode: Changes the mode that IPTS will operate in.
++ */
++struct ipts_set_mode {
++ enum ipts_mode mode;
++ u8 reserved[12];
++} __packed;
++
++static_assert(sizeof(struct ipts_set_mode) == 16);
++
++#define IPTS_WORKQUEUE_SIZE 8192
++#define IPTS_WORKQUEUE_ITEM_SIZE 16
++
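[Editorial example] Combining struct ipts_command and struct ipts_set_mode from above, a SET_MODE call is built by placing the payload at the start of the command's payload area. A sketch, assuming some MEI transport helper performs the actual send (illustrative only, not part of the patch):

	/* Illustrative sketch: build a SET_MODE command for doorbell mode. */
	static void example_build_set_mode(struct ipts_command *cmd)
	{
		struct ipts_set_mode *set_mode = (struct ipts_set_mode *)cmd->payload;

		memset(cmd, 0, sizeof(*cmd));
		cmd->cmd = IPTS_CMD_SET_MODE;
		set_mode->mode = IPTS_MODE_DOORBELL;
		/* ...hand cmd over to the MEI transport... */
	}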
++/**
++ * struct ipts_mem_window - Payload for the SET_MEM_WINDOW command.
++ * @data_addr_lower: Lower 32 bits of the data buffer addresses.
++ * @data_addr_upper: Upper 32 bits of the data buffer addresses.
++ * @workqueue_addr_lower: Lower 32 bits of the workqueue buffer address.
++ * @workqueue_addr_upper: Upper 32 bits of the workqueue buffer address.
++ * @doorbell_addr_lower: Lower 32 bits of the doorbell buffer address.
++ * @doorbell_addr_upper: Upper 32 bits of the doorbell buffer address.
++ * @feedback_addr_lower: Lower 32 bits of the feedback buffer addresses.
++ * @feedback_addr_upper: Upper 32 bits of the feedback buffer addresses.
++ * @hid2me_addr_lower: Lower 32 bits of the hid2me buffer address.
++ * @hid2me_addr_upper: Upper 32 bits of the hid2me buffer address.
++ * @hid2me_size: Size of the hid2me feedback buffer.
++ * @workqueue_item_size: Magic value. Must be 16.
++ * @workqueue_size: Magic value. Must be 8192.
++ *
++ * The workqueue related items in this struct are required for using
++ * GuC submission with binary processing firmware. Since this driver does
++ * not use GuC submission and instead exports raw data to userspace, these
++ * items are not actually used, but they need to be allocated and passed
++ * to the device, otherwise initialization will fail.
++ */
++struct ipts_mem_window {
++ u32 data_addr_lower[IPTS_BUFFERS];
++ u32 data_addr_upper[IPTS_BUFFERS];
++ u32 workqueue_addr_lower;
++ u32 workqueue_addr_upper;
++ u32 doorbell_addr_lower;
++ u32 doorbell_addr_upper;
++ u32 feedback_addr_lower[IPTS_BUFFERS];
++ u32 feedback_addr_upper[IPTS_BUFFERS];
++ u32 hid2me_addr_lower;
++ u32 hid2me_addr_upper;
++ u32 hid2me_size;
++ u8 reserved1;
++ u8 workqueue_item_size;
++ u16 workqueue_size;
++ u8 reserved[32];
++} __packed;
++
++static_assert(sizeof(struct ipts_mem_window) == 320);
++
++/**
++ * struct ipts_quiesce_io - Payload for the QUIESCE_IO command.
++ */
++struct ipts_quiesce_io {
++ u8 reserved[12];
++} __packed;
+
-+config PER_VMA_LOCK
-+ def_bool y
-+ depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
-+ help
-+ Allow per-vma locking during page fault handling.
++
++static_assert(sizeof(struct ipts_quiesce_io) == 12);
+
-+ This feature allows locking each virtual memory area separately when
-+ handling page faults instead of taking mmap_lock.
++
++/**
++ * struct ipts_feedback - Payload for the FEEDBACK command.
++ * @buffer: The buffer that the device should refill.
++ */
++struct ipts_feedback {
++ u32 buffer;
++ u8 reserved[12];
++} __packed;
+
- source "mm/damon/Kconfig"
-
- endmenu
-diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
-index c3547a373c9c..4965a7333a3f 100644
---- a/mm/Kconfig.debug
-+++ b/mm/Kconfig.debug
-@@ -279,3 +279,9 @@ config DEBUG_KMEMLEAK_AUTO_SCAN
-
- If unsure, say Y.
-
-+config PER_VMA_LOCK_STATS
-+ bool "Statistics for per-vma locks"
-+ depends on PER_VMA_LOCK
-+ default y
-+ help
-+ Statistics for per-vma locks.
-diff --git a/mm/filemap.c b/mm/filemap.c
-index 2723104cc06a..7d898f26755b 100644
---- a/mm/filemap.c
-+++ b/mm/filemap.c
-@@ -1706,6 +1706,8 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
- * mmap_lock has been released (mmap_read_unlock(), unless flags had both
- * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
- * which case mmap_lock is still held.
-+ * If flags had FAULT_FLAG_VMA_LOCK set, meaning the operation is performed
-+ * with VMA lock only, the VMA lock is still held.
- *
- * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
- * with the folio locked and the mmap_lock unperturbed.
-@@ -1713,6 +1715,10 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
- bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
- unsigned int flags)
- {
-+ /* Can't do this if not holding mmap_lock */
-+ if (flags & FAULT_FLAG_VMA_LOCK)
-+ return false;
+
++static_assert(sizeof(struct ipts_feedback) == 16);
+
- if (fault_flag_allow_retry_first(flags)) {
- /*
- * CAUTION!
In this case, mmap_lock is not released -diff --git a/mm/hugetlb.c b/mm/hugetlb.c -index 245038a9fe4e..4d860b53a14a 100644 ---- a/mm/hugetlb.c -+++ b/mm/hugetlb.c -@@ -6004,6 +6004,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, - int need_wait_lock = 0; - unsigned long haddr = address & huge_page_mask(h); - -+ /* TODO: Handle faults under the VMA lock */ -+ if (flags & FAULT_FLAG_VMA_LOCK) -+ return VM_FAULT_RETRY; ++/** ++ * enum ipts_reset_type - Possible ways of resetting the device. ++ * @IPTS_RESET_TYPE_HARD: Perform hardware reset using GPIO pin. ++ * @IPTS_RESET_TYPE_SOFT: Perform software reset using SPI command. ++ */ ++enum ipts_reset_type { ++ IPTS_RESET_TYPE_HARD = 0x00, ++ IPTS_RESET_TYPE_SOFT = 0x01, ++}; + - /* - * Serialize hugepage allocation and instantiation, so that we don't - * get spurious allocation failures if two CPUs race to instantiate -diff --git a/mm/init-mm.c b/mm/init-mm.c -index c9327abb771c..33269314e060 100644 ---- a/mm/init-mm.c -+++ b/mm/init-mm.c -@@ -37,6 +37,9 @@ struct mm_struct init_mm = { - .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), - .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), - .mmlist = LIST_HEAD_INIT(init_mm.mmlist), -+#ifdef CONFIG_PER_VMA_LOCK -+ .mm_lock_seq = 0, -+#endif - .user_ns = &init_user_ns, - .cpu_bitmap = CPU_BITS_NONE, - #ifdef CONFIG_IOMMU_SVA -diff --git a/mm/internal.h b/mm/internal.h -index 7920a8b7982e..0c455d6e4e3e 100644 ---- a/mm/internal.h -+++ b/mm/internal.h -@@ -105,7 +105,7 @@ void folio_activate(struct folio *folio); - - void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, - struct vm_area_struct *start_vma, unsigned long floor, -- unsigned long ceiling); -+ unsigned long ceiling, bool mm_wr_locked); - void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte); - - struct zap_details; -diff --git a/mm/khugepaged.c b/mm/khugepaged.c -index 92e6f56a932d..042007f0bfa1 100644 ---- a/mm/khugepaged.c -+++ b/mm/khugepaged.c -@@ -1049,6 +1049,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, - if (result != SCAN_SUCCEED) - goto out_up_write; - -+ vma_start_write(vma); - anon_vma_lock_write(vma->anon_vma); - - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address, -@@ -1172,7 +1173,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, - * enabled swap entries. Please see - * comment below for pte_uffd_wp(). - */ -- if (pte_swp_uffd_wp(pteval)) { -+ if (pte_swp_uffd_wp_any(pteval)) { - result = SCAN_PTE_UFFD_WP; - goto out_unmap; - } -@@ -1512,6 +1513,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, - goto drop_hpage; - } - -+ /* Lock the vma before taking i_mmap and page table locks */ -+ vma_start_write(vma); ++/** ++ * struct ipts_reset - Payload for the RESET_SENSOR command. ++ * @type: How the device should get reset. 
++ */ ++struct ipts_reset_sensor { ++ enum ipts_reset_type type; ++ u8 reserved[4]; ++} __packed; + - /* - * We need to lock the mapping so that from here on, only GUP-fast and - * hardware page walks can access the parts of the page tables that -@@ -1689,6 +1693,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff, - result = SCAN_PTE_MAPPED_HUGEPAGE; - if ((cc->is_khugepaged || is_target) && - mmap_write_trylock(mm)) { -+ /* trylock for the same lock inversion as above */ -+ if (!vma_try_start_write(vma)) -+ goto unlock_next; ++static_assert(sizeof(struct ipts_reset_sensor) == 8); + - /* - * Re-check whether we have an ->anon_vma, because - * collapse_and_free_pmd() requires that either no -diff --git a/mm/memory.c b/mm/memory.c -index 01a23ad48a04..7c8278e8b5df 100644 ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -104,6 +104,20 @@ EXPORT_SYMBOL(mem_map); - #endif - - static vm_fault_t do_fault(struct vm_fault *vmf); -+static vm_fault_t do_anonymous_page(struct vm_fault *vmf); -+static bool vmf_pte_changed(struct vm_fault *vmf); ++/** ++ * struct ipts_get_descriptor - Payload for the GET_DESCRIPTOR command. ++ * @addr_lower: The lower 32 bits of the descriptor buffer address. ++ * @addr_upper: The upper 32 bits of the descriptor buffer address. ++ * @magic: A magic value. Must be 8. ++ */ ++struct ipts_get_descriptor { ++ u32 addr_lower; ++ u32 addr_upper; ++ u32 magic; ++ u8 reserved[12]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_get_descriptor) == 24); + +/* -+ * Return true if the original pte was a uffd-wp pte marker (so the pte was -+ * wr-protected). ++ * The type of a response is indicated by a ++ * command code, with the most significant bit flipped to 1. + */ -+static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) ++#define IPTS_RSP_BIT BIT(31) ++ ++/** ++ * struct ipts_response - Data returned from the device in response to a command. ++ * @cmd: The command that this response answers (IPTS_RSP_BIT will be 1). ++ * @status: The return code of the command. ++ * @payload: The data that was produced by the command. ++ */ ++struct ipts_response { ++ enum ipts_command_code cmd; ++ enum ipts_status status; ++ u8 payload[80]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_response) == 88); ++ ++/** ++ * struct ipts_device_info - Vendor information of the IPTS device. ++ * @vendor: Vendor ID of this device. ++ * @product: Product ID of this device. ++ * @hw_version: Hardware revision of this device. ++ * @fw_version: Firmware revision of this device. ++ * @data_size: Requested size for a data buffer. ++ * @feedback_size: Requested size for a feedback buffer. ++ * @mode: Mode that the device currently operates in. ++ * @max_contacts: Maximum amount of concurrent touches the sensor can process. 
++ */ ++struct ipts_device_info { ++ u16 vendor; ++ u16 product; ++ u32 hw_version; ++ u32 fw_version; ++ u32 data_size; ++ u32 feedback_size; ++ enum ipts_mode mode; ++ u8 max_contacts; ++ u8 reserved1[3]; ++ u8 sensor_min_eds; ++ u8 sensor_maj_eds; ++ u8 me_min_eds; ++ u8 me_maj_eds; ++ u8 intf_eds; ++ u8 reserved2[11]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_device_info) == 44); ++ ++#endif /* IPTS_SPEC_DEVICE_H */ +diff --git a/drivers/hid/ipts/spec-hid.h b/drivers/hid/ipts/spec-hid.h +new file mode 100644 +index 000000000000..ea70f29ff00c +--- /dev/null ++++ b/drivers/hid/ipts/spec-hid.h +@@ -0,0 +1,35 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2020-2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#ifndef IPTS_SPEC_HID_H ++#define IPTS_SPEC_HID_H ++ ++#include ++#include ++ ++/* ++ * Made-up type for passing raw IPTS data in a HID report. ++ */ ++#define IPTS_HID_FRAME_TYPE_RAW 0xEE ++ ++/** ++ * struct ipts_hid_frame - Header that is prefixed to raw IPTS data wrapped in a HID report. ++ * @size: Size of the data inside the report, including this header. ++ * @type: What type of data does this report contain. ++ */ ++struct ipts_hid_header { ++ u32 size; ++ u8 reserved1; ++ u8 type; ++ u8 reserved2; ++ u8 data[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_hid_header) == 7); ++ ++#endif /* IPTS_SPEC_HID_H */ +diff --git a/drivers/hid/ipts/thread.c b/drivers/hid/ipts/thread.c +new file mode 100644 +index 000000000000..8b46f775c107 +--- /dev/null ++++ b/drivers/hid/ipts/thread.c +@@ -0,0 +1,85 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "thread.h" ++ ++bool ipts_thread_should_stop(struct ipts_thread *thread) +{ -+ if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) ++ if (!thread) + return false; + -+ return pte_marker_uffd_wp(vmf->orig_pte); ++ return READ_ONCE(thread->should_stop); +} - - /* - * A number of key systems in x86 including ioremap() rely on the assumption -@@ -348,7 +362,7 @@ void free_pgd_range(struct mmu_gather *tlb, - - void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, - struct vm_area_struct *vma, unsigned long floor, -- unsigned long ceiling) -+ unsigned long ceiling, bool mm_wr_locked) - { - MA_STATE(mas, mt, vma->vm_end, vma->vm_end); - -@@ -366,6 +380,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, - * Hide vma from rmap and truncate_pagecache before freeing - * pgtables - */ -+ if (mm_wr_locked) -+ vma_start_write(vma); - unlink_anon_vmas(vma); - unlink_file_vma(vma); - -@@ -380,6 +396,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt, - && !is_vm_hugetlb_page(next)) { - vma = next; - next = mas_find(&mas, ceiling - 1); -+ if (mm_wr_locked) -+ vma_start_write(vma); - unlink_anon_vmas(vma); - unlink_file_vma(vma); - } -@@ -1345,6 +1363,10 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, - unsigned long addr, pte_t *pte, - struct zap_details *details, pte_t pteval) - { -+ /* Zap on anonymous always means dropping everything */ -+ if (vma_is_anonymous(vma)) -+ return; + - if (zap_drop_file_uffd_wp(details)) - return; - -@@ -1451,8 +1473,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, - continue; - rss[mm_counter(page)]--; - } else if 
(pte_marker_entry_uffd_wp(entry)) { -- /* Only drop the uffd-wp marker if explicitly requested */ -- if (!zap_drop_file_uffd_wp(details)) -+ /* -+ * For anon: always drop the marker; for file: only -+ * drop the marker if explicitly requested. -+ */ -+ if (!vma_is_anonymous(vma) && -+ !zap_drop_file_uffd_wp(details)) - continue; - } else if (is_hwpoison_entry(entry) || - is_swapin_error_entry(entry)) { -@@ -3322,6 +3348,9 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) - struct vm_area_struct *vma = vmf->vma; - struct folio *folio = NULL; - -+ if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) -+ return VM_FAULT_RETRY; ++static int ipts_thread_runner(void *data) ++{ ++ int ret = 0; ++ struct ipts_thread *thread = data; + - if (likely(!unshare)) { - if (userfaultfd_pte_wp(vma, *vmf->pte)) { - pte_unmap_unlock(vmf->pte, vmf->ptl); -@@ -3633,6 +3662,14 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) - return 0; - } - -+static vm_fault_t do_pte_missing(struct vm_fault *vmf) ++ if (!thread) ++ return -EFAULT; ++ ++ if (!thread->threadfn) ++ return -EFAULT; ++ ++ ret = thread->threadfn(thread); ++ complete_all(&thread->done); ++ ++ return ret; ++} ++ ++int ipts_thread_start(struct ipts_thread *thread, int (*threadfn)(struct ipts_thread *thread), ++ void *data, const char *name) +{ -+ if (vma_is_anonymous(vmf->vma)) -+ return do_anonymous_page(vmf); -+ else -+ return do_fault(vmf); ++ if (!thread) ++ return -EFAULT; ++ ++ if (!threadfn) ++ return -EFAULT; ++ ++ init_completion(&thread->done); ++ ++ thread->data = data; ++ thread->should_stop = false; ++ thread->threadfn = threadfn; ++ ++ thread->thread = kthread_run(ipts_thread_runner, thread, name); ++ return PTR_ERR_OR_ZERO(thread->thread); +} + - /* - * This is actually a page-missing access, but with uffd-wp special pte - * installed. It means this pte was wr-protected before being unmapped. -@@ -3643,11 +3680,10 @@ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) - * Just in case there're leftover special ptes even after the region - * got unregistered - we can simply clear them. 
- */ -- if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma))) -+ if (unlikely(!userfaultfd_wp(vmf->vma))) - return pte_marker_clear(vmf); - -- /* do_fault() can handle pte markers too like none pte */ -- return do_fault(vmf); -+ return do_pte_missing(vmf); - } - - static vm_fault_t handle_pte_marker(struct vm_fault *vmf) -@@ -4012,6 +4048,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) - */ - static vm_fault_t do_anonymous_page(struct vm_fault *vmf) - { -+ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); - struct vm_area_struct *vma = vmf->vma; - struct folio *folio; - vm_fault_t ret = 0; -@@ -4045,7 +4082,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) - vma->vm_page_prot)); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); -- if (!pte_none(*vmf->pte)) { -+ if (vmf_pte_changed(vmf)) { - update_mmu_tlb(vma, vmf->address, vmf->pte); - goto unlock; - } -@@ -4085,7 +4122,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) - - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); -- if (!pte_none(*vmf->pte)) { -+ if (vmf_pte_changed(vmf)) { - update_mmu_tlb(vma, vmf->address, vmf->pte); - goto release; - } -@@ -4105,6 +4142,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) - folio_add_new_anon_rmap(folio, vma, vmf->address); - folio_add_lru_vma(folio, vma); - setpte: -+ if (uffd_wp) -+ entry = pte_mkuffd_wp(entry); - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); - - /* No need to invalidate - it was non-present before */ -@@ -4272,7 +4311,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) - void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) - { - struct vm_area_struct *vma = vmf->vma; -- bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte); -+ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); - bool write = vmf->flags & FAULT_FLAG_WRITE; - bool prefault = vmf->address != addr; - pte_t entry; -@@ -4503,6 +4542,8 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) - return ret; - } - -+ if (vmf->flags & FAULT_FLAG_VMA_LOCK) -+ return VM_FAULT_RETRY; - ret = __do_fault(vmf); - if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) - return ret; -@@ -4519,6 +4560,9 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) - struct vm_area_struct *vma = vmf->vma; - vm_fault_t ret; - -+ if (vmf->flags & FAULT_FLAG_VMA_LOCK) -+ return VM_FAULT_RETRY; ++int ipts_thread_stop(struct ipts_thread *thread) ++{ ++ int ret = 0; + - if (unlikely(anon_vma_prepare(vma))) - return VM_FAULT_OOM; - -@@ -4558,6 +4602,9 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf) - struct vm_area_struct *vma = vmf->vma; - vm_fault_t ret, tmp; - -+ if (vmf->flags & FAULT_FLAG_VMA_LOCK) -+ return VM_FAULT_RETRY; ++ if (!thread) ++ return -EFAULT; + - ret = __do_fault(vmf); - if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) - return ret; -@@ -4916,12 +4963,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) - } - } - -- if (!vmf->pte) { -- if (vma_is_anonymous(vmf->vma)) -- return do_anonymous_page(vmf); -- else -- return do_fault(vmf); -- } -+ if (!vmf->pte) -+ return do_pte_missing(vmf); - - if (!pte_present(vmf->orig_pte)) - return do_swap_page(vmf); -@@ -4929,6 +4972,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) - if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) - return do_numa_page(vmf); - -+ if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) -+ return 
VM_FAULT_RETRY; ++ if (!thread->thread) ++ return 0; + - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - spin_lock(vmf->ptl); - entry = vmf->orig_pte; -@@ -4965,10 +5011,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) - } - - /* -- * By the time we get here, we already hold the mm semaphore -- * -- * The mmap_lock may have been released depending on flags and our -- * return value. See filemap_fault() and __folio_lock_or_retry(). -+ * On entry, we hold either the VMA lock or the mmap_lock -+ * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in -+ * the result, the mmap_lock is not held on exit. See filemap_fault() -+ * and __folio_lock_or_retry(). - */ - static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags) -@@ -5080,24 +5126,30 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, - * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should - * still be in per-arch page fault handlers at the entry of page fault. - */ --static inline void mm_account_fault(struct pt_regs *regs, -+static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, - unsigned long address, unsigned int flags, - vm_fault_t ret) - { - bool major; - - /* -- * We don't do accounting for some specific faults: -- * -- * - Unsuccessful faults (e.g. when the address wasn't valid). That -- * includes arch_vma_access_permitted() failing before reaching here. -- * So this is not a "this many hardware page faults" counter. We -- * should use the hw profiling for that. -- * -- * - Incomplete faults (VM_FAULT_RETRY). They will only be counted -- * once they're completed. -+ * Do not account for incomplete faults (VM_FAULT_RETRY). They will be -+ * counted upon completion. ++ WRITE_ONCE(thread->should_stop, true); ++ ++ /* ++ * Make sure that the write has gone through before waiting. + */ -+ if (ret & VM_FAULT_RETRY) -+ return; ++ wmb(); + -+ /* Register both successful and failed faults in PGFAULT counters. */ -+ count_vm_event(PGFAULT); -+ count_memcg_event_mm(mm, PGFAULT); ++ wait_for_completion(&thread->done); ++ ret = kthread_stop(thread->thread); + -+ /* -+ * Do not account for unsuccessful faults (e.g. when the address wasn't -+ * valid). That includes arch_vma_access_permitted() failing before -+ * reaching here. So this is not a "this many hardware page faults" -+ * counter. We should use the hw profiling for that. - */ -- if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) -+ if (ret & VM_FAULT_ERROR) - return; - - /* -@@ -5180,21 +5232,22 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, - vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, struct pt_regs *regs) - { -+ /* Copy vma->vm_mm in case mmap_lock is dropped and vma becomes unstable. 
*/ -+ struct mm_struct *mm = vma->vm_mm; - vm_fault_t ret; - - __set_current_state(TASK_RUNNING); - -- count_vm_event(PGFAULT); -- count_memcg_event_mm(vma->vm_mm, PGFAULT); -- - ret = sanitize_fault_flags(vma, &flags); - if (ret) -- return ret; -+ goto out; - - if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, - flags & FAULT_FLAG_INSTRUCTION, -- flags & FAULT_FLAG_REMOTE)) -- return VM_FAULT_SIGSEGV; -+ flags & FAULT_FLAG_REMOTE)) { -+ ret = VM_FAULT_SIGSEGV; -+ goto out; -+ } - - /* - * Enable the memcg OOM handling for faults triggered in user -@@ -5223,13 +5276,70 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, - if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) - mem_cgroup_oom_synchronize(false); - } -- -- mm_account_fault(regs, address, flags, ret); -+out: -+ mm_account_fault(mm, regs, address, flags, ret); - - return ret; - } - EXPORT_SYMBOL_GPL(handle_mm_fault); - -+#ifdef CONFIG_PER_VMA_LOCK ++ thread->thread = NULL; ++ thread->data = NULL; ++ thread->threadfn = NULL; ++ ++ return ret; ++} +diff --git a/drivers/hid/ipts/thread.h b/drivers/hid/ipts/thread.h +new file mode 100644 +index 000000000000..a314843599fc +--- /dev/null ++++ b/drivers/hid/ipts/thread.h +@@ -0,0 +1,60 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* -+ * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be -+ * stable and not isolated. If the VMA is not found or is being modified the -+ * function returns NULL. ++ * Copyright (c) 2016 Intel Corporation ++ * Copyright (c) 2023 Dorian Stoll ++ * ++ * Linux driver for Intel Precise Touch & Stylus + */ -+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, -+ unsigned long address) -+{ -+ MA_STATE(mas, &mm->mm_mt, address, address); -+ struct vm_area_struct *vma; + -+ rcu_read_lock(); -+retry: -+ vma = mas_walk(&mas); -+ if (!vma) -+ goto inval; ++#ifndef IPTS_THREAD_H ++#define IPTS_THREAD_H ++ ++#include ++#include ++#include ++ ++/* ++ * This wrapper over kthread is necessary, because calling kthread_stop makes it impossible ++ * to issue MEI commands from that thread while it shuts itself down. By using a custom ++ * boolean variable and a completion object, we can call kthread_stop only when the thread ++ * already finished all of its work and has returned. ++ */ ++struct ipts_thread { ++ struct task_struct *thread; ++ ++ bool should_stop; ++ struct completion done; ++ ++ void *data; ++ int (*threadfn)(struct ipts_thread *thread); ++}; ++ ++/* ++ * ipts_thread_should_stop() - Returns true if the thread is asked to terminate. ++ * @thread: The current thread. ++ * ++ * Returns: true if the thread should stop, false if not. ++ */ ++bool ipts_thread_should_stop(struct ipts_thread *thread); ++ ++/* ++ * ipts_thread_start() - Starts an IPTS thread. ++ * @thread: The thread to initialize and start. ++ * @threadfn: The function to execute. ++ * @data: An argument that will be passed to threadfn. ++ * @name: The name of the new thread. ++ * ++ * Returns: 0 on success, <0 on error. ++ */ ++int ipts_thread_start(struct ipts_thread *thread, int (*threadfn)(struct ipts_thread *thread), ++ void *data, const char name[]); ++ ++/* ++ * ipts_thread_stop() - Asks the thread to terminate and waits until it has finished. ++ * @thread: The thread that should stop. ++ * ++ * Returns: The return value of the thread function. 
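++ *
++ * A usage sketch (illustrative only; the worker and its context are made up):
++ *
++ *    static int example_worker(struct ipts_thread *thread)
++ *    {
++ *            while (!ipts_thread_should_stop(thread)) {
++ *                    // ...check the doorbell, process filled buffers...
++ *            }
++ *
++ *            return 0;
++ *    }
++ *
++ *    ipts_thread_start(&thread, example_worker, ctx, "ipts-worker");
++ *    ipts_thread_stop(&thread);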
++ */
++int ipts_thread_stop(struct ipts_thread *thread);
++
++#endif /* IPTS_THREAD_H */
+diff --git a/drivers/hid/ithc/Kbuild b/drivers/hid/ithc/Kbuild
+new file mode 100644
+index 000000000000..aea83f2ac07b
+--- /dev/null
++++ b/drivers/hid/ithc/Kbuild
+@@ -0,0 +1,6 @@
++obj-$(CONFIG_HID_ITHC) := ithc.o
++
++ithc-objs := ithc-main.o ithc-regs.o ithc-dma.o ithc-debug.o
++
++ccflags-y := -std=gnu11 -Wno-declaration-after-statement
++
+diff --git a/drivers/hid/ithc/Kconfig b/drivers/hid/ithc/Kconfig
+new file mode 100644
+index 000000000000..ede713023609
+--- /dev/null
++++ b/drivers/hid/ithc/Kconfig
+@@ -0,0 +1,12 @@
++config HID_ITHC
++ tristate "Intel Touch Host Controller"
++ depends on PCI
++ depends on HID
++ help
++ Say Y here if your system has a touchscreen using Intel's
++ Touch Host Controller (ITHC / IPTS) technology.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ithc.
+diff --git a/drivers/hid/ithc/ithc-debug.c b/drivers/hid/ithc/ithc-debug.c
+new file mode 100644
+index 000000000000..57bf125c45bd
+--- /dev/null
++++ b/drivers/hid/ithc/ithc-debug.c
+@@ -0,0 +1,96 @@
++#include "ithc.h"
++
++void ithc_log_regs(struct ithc *ithc) {
++ if (!ithc->prev_regs) return;
++ u32 __iomem *cur = (__iomem void*)ithc->regs;
++ u32 *prev = (void*)ithc->prev_regs;
++ for (int i = 1024; i < sizeof *ithc->regs / 4; i++) {
++ u32 x = readl(cur + i);
++ if (x != prev[i]) {
++ pci_info(ithc->pci, "reg %04x: %08x -> %08x\n", i * 4, prev[i], x);
++ prev[i] = x;
++ }
++ }
++}
++
++static ssize_t ithc_debugfs_cmd_write(struct file *f, const char __user *buf, size_t len, loff_t *offset) {
++ struct ithc *ithc = file_inode(f)->i_private;
++ char cmd[256];
++ if (!ithc || !ithc->pci) return -ENODEV;
++ if (!len) return -EINVAL;
++ if (len >= sizeof cmd) return -EINVAL;
++ if (copy_from_user(cmd, buf, len)) return -EFAULT;
++ cmd[len] = 0;
++ if (cmd[len-1] == '\n') cmd[len-1] = 0;
++ pci_info(ithc->pci, "debug command: %s\n", cmd);
++ u32 n = 0;
++ const char *s = cmd + 1;
++ u32 a[32];
++ while (*s && *s != '\n') {
++ if (n >= ARRAY_SIZE(a)) return -EINVAL;
++ if (*s++ != ' ') return -EINVAL;
++ char *e;
++ a[n++] = simple_strtoul(s, &e, 0);
++ if (e == s) return -EINVAL;
++ s = e;
++ }
++ ithc_log_regs(ithc);
++ switch(cmd[0]) {
++ case 'x': // reset
++ ithc_reset(ithc);
++ break;
++ case 'w': // write register: offset mask value
++ if (n != 3 || (a[0] & 3)) return -EINVAL;
++ pci_info(ithc->pci, "debug write 0x%04x = 0x%08x (mask 0x%08x)\n", a[0], a[2], a[1]);
++ bitsl(((__iomem u32 *)ithc->regs) + a[0] / 4, a[1], a[2]);
++ break;
++ case 'r': // read register: offset
++ if (n != 1 || (a[0] & 3)) return -EINVAL;
++ pci_info(ithc->pci, "debug read 0x%04x = 
0x%08x\n", a[0], readl(((__iomem u32 *)ithc->regs) + a[0] / 4)); ++ break; ++ case 's': // spi command: cmd offset len data... ++ // read config: s 4 0 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ++ // set touch cfg: s 6 12 4 XX ++ if (n < 3 || a[2] > (n - 3) * 4) return -EINVAL; ++ pci_info(ithc->pci, "debug spi command %u with %u bytes of data\n", a[0], a[2]); ++ if (!CHECK(ithc_spi_command, ithc, a[0], a[1], a[2], a + 3)) ++ for (u32 i = 0; i < (a[2] + 3) / 4; i++) pci_info(ithc->pci, "resp %u = 0x%08x\n", i, a[3+i]); ++ break; ++ case 'd': // dma command: cmd len data... ++ // get report descriptor: d 7 8 0 0 ++ // enable multitouch: d 3 2 0x0105 ++ if (n < 2 || a[1] > (n - 2) * 4) return -EINVAL; ++ pci_info(ithc->pci, "debug dma command %u with %u bytes of data\n", a[0], a[1]); ++ if (ithc_dma_tx(ithc, a[0], a[1], a + 2)) pci_err(ithc->pci, "dma tx failed\n"); ++ break; ++ default: ++ return -EINVAL; + } ++ ithc_log_regs(ithc); ++ return len; ++} + -+ rcu_read_unlock(); -+ return vma; -+inval: -+ rcu_read_unlock(); -+ count_vm_vma_lock_event(VMA_LOCK_ABORT); -+ return NULL; ++static const struct file_operations ithc_debugfops_cmd = { ++ .owner = THIS_MODULE, ++ .write = ithc_debugfs_cmd_write, ++}; ++ ++static void ithc_debugfs_devres_release(struct device *dev, void *res) { ++ struct dentry **dbgm = res; ++ if (*dbgm) debugfs_remove_recursive(*dbgm); +} -+#endif /* CONFIG_PER_VMA_LOCK */ + - #ifndef __PAGETABLE_P4D_FOLDED - /* - * Allocate p4d page table. -diff --git a/mm/mmap.c b/mm/mmap.c -index d5475fbf5729..cbac45aa39ae 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -133,7 +133,7 @@ void unlink_file_vma(struct vm_area_struct *vma) - /* - * Close a vm structure and free it. - */ --static void remove_vma(struct vm_area_struct *vma) -+static void remove_vma(struct vm_area_struct *vma, bool unreachable) - { - might_sleep(); - if (vma->vm_ops && vma->vm_ops->close) -@@ -141,7 +141,10 @@ static void remove_vma(struct vm_area_struct *vma) - if (vma->vm_file) - fput(vma->vm_file); - mpol_put(vma_policy(vma)); -- vm_area_free(vma); -+ if (unreachable) -+ __vm_area_free(vma); -+ else -+ vm_area_free(vma); - } - - static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, -@@ -502,6 +505,15 @@ static inline void init_vma_prep(struct vma_prepare *vp, - */ - static inline void vma_prepare(struct vma_prepare *vp) - { -+ vma_start_write(vp->vma); -+ if (vp->adj_next) -+ vma_start_write(vp->adj_next); -+ /* vp->insert is always a newly created VMA, no need for locking */ -+ if (vp->remove) -+ vma_start_write(vp->remove); -+ if (vp->remove2) -+ vma_start_write(vp->remove2); ++int ithc_debug_init(struct ithc *ithc) { ++ struct dentry **dbgm = devres_alloc(ithc_debugfs_devres_release, sizeof *dbgm, GFP_KERNEL); ++ if (!dbgm) return -ENOMEM; ++ devres_add(&ithc->pci->dev, dbgm); ++ struct dentry *dbg = debugfs_create_dir(DEVNAME, NULL); ++ if (IS_ERR(dbg)) return PTR_ERR(dbg); ++ *dbgm = dbg; + - if (vp->file) { - uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); - -@@ -590,6 +602,7 @@ static inline void vma_complete(struct vma_prepare *vp, - - if (vp->remove) { - again: -+ vma_mark_detached(vp->remove, true); - if (vp->file) { - uprobe_munmap(vp->remove, vp->remove->vm_start, - vp->remove->vm_end); -@@ -683,12 +696,12 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, - if (vma_iter_prealloc(vmi)) - goto nomem; - -+ vma_prepare(&vp); - vma_adjust_trans_huge(vma, start, end, 0); - /* VMA iterator points to previous, so set to start if necessary */ - if 
(vma_iter_addr(vmi) != start) - vma_iter_set(vmi, start); - -- vma_prepare(&vp); - vma->vm_start = start; - vma->vm_end = end; - vma->vm_pgoff = pgoff; -@@ -723,8 +736,8 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, - return -ENOMEM; - - init_vma_prep(&vp, vma); -- vma_adjust_trans_huge(vma, start, end, 0); - vma_prepare(&vp); -+ vma_adjust_trans_huge(vma, start, end, 0); - - if (vma->vm_start < start) - vma_iter_clear(vmi, vma->vm_start, start); -@@ -994,12 +1007,12 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, - if (vma_iter_prealloc(vmi)) - return NULL; - -- vma_adjust_trans_huge(vma, vma_start, vma_end, adj_next); - init_multi_vma_prep(&vp, vma, adjust, remove, remove2); - VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && - vp.anon_vma != adjust->anon_vma); - - vma_prepare(&vp); -+ vma_adjust_trans_huge(vma, vma_start, vma_end, adj_next); - if (vma_start < vma->vm_start || vma_end > vma->vm_end) - vma_expanded = true; - -@@ -2157,7 +2170,7 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) - if (vma->vm_flags & VM_ACCOUNT) - nr_accounted += nrpages; - vm_stat_account(mm, vma->vm_flags, -nrpages); -- remove_vma(vma); -+ remove_vma(vma, false); - } - vm_unacct_memory(nr_accounted); - validate_mm(mm); -@@ -2180,7 +2193,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt, - update_hiwater_rss(mm); - unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked); - free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, -- next ? next->vm_start : USER_PGTABLES_CEILING); -+ next ? next->vm_start : USER_PGTABLES_CEILING, -+ mm_wr_locked); - tlb_finish_mmu(&tlb); - } - -@@ -2236,10 +2250,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, - if (new->vm_ops && new->vm_ops->open) - new->vm_ops->open(new); - -- vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); - init_vma_prep(&vp, vma); - vp.insert = new; - vma_prepare(&vp); -+ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); - - if (new_below) { - vma->vm_start = addr; -@@ -2283,10 +2297,12 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, - static inline int munmap_sidetree(struct vm_area_struct *vma, - struct ma_state *mas_detach) - { -+ vma_start_write(vma); - mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1); - if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) - return -ENOMEM; - -+ vma_mark_detached(vma, true); - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm -= vma_pages(vma); - -@@ -2942,9 +2958,9 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, - if (vma_iter_prealloc(vmi)) - goto unacct_fail; - -- vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); - init_vma_prep(&vp, vma); - vma_prepare(&vp); -+ vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); - vma->vm_end = addr + len; - vm_flags_set(vma, VM_SOFTDIRTY); - vma_iter_store(vmi, vma); -@@ -3077,7 +3093,7 @@ void exit_mmap(struct mm_struct *mm) - mmap_write_lock(mm); - mt_clear_in_rcu(&mm->mm_mt); - free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, -- USER_PGTABLES_CEILING); -+ USER_PGTABLES_CEILING, true); - tlb_finish_mmu(&tlb); - - /* -@@ -3088,7 +3104,7 @@ void exit_mmap(struct mm_struct *mm) - do { - if (vma->vm_flags & VM_ACCOUNT) - nr_accounted += vma_pages(vma); -- remove_vma(vma); -+ remove_vma(vma, true); - count++; - cond_resched(); - } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); -@@ -3211,6 +3227,7 @@ struct vm_area_struct 
*copy_vma(struct vm_area_struct **vmap, - get_file(new_vma->vm_file); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); -+ vma_start_write(new_vma); - if (vma_link(mm, new_vma)) - goto out_vma_link; - *need_rmap_locks = false; -@@ -3505,6 +3522,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) - * of mm/rmap.c: - * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for - * hugetlb mapping); -+ * - all vmas marked locked - * - all i_mmap_rwsem locks; - * - all anon_vma->rwseml - * -@@ -3527,6 +3545,13 @@ int mm_take_all_locks(struct mm_struct *mm) - - mutex_lock(&mm_all_locks_mutex); - -+ mas_for_each(&mas, vma, ULONG_MAX) { -+ if (signal_pending(current)) -+ goto out_unlock; -+ vma_start_write(vma); ++ struct dentry *cmd = debugfs_create_file("cmd", 0220, dbg, ithc, &ithc_debugfops_cmd); ++ if (IS_ERR(cmd)) return PTR_ERR(cmd); ++ ++ return 0; ++} ++ +diff --git a/drivers/hid/ithc/ithc-dma.c b/drivers/hid/ithc/ithc-dma.c +new file mode 100644 +index 000000000000..7e89b3496918 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-dma.c +@@ -0,0 +1,258 @@ ++#include "ithc.h" ++ ++static int ithc_dma_prd_alloc(struct ithc *ithc, struct ithc_dma_prd_buffer *p, unsigned num_buffers, unsigned num_pages, enum dma_data_direction dir) { ++ p->num_pages = num_pages; ++ p->dir = dir; ++ p->size = round_up(num_buffers * num_pages * sizeof(struct ithc_phys_region_desc), PAGE_SIZE); ++ p->addr = dmam_alloc_coherent(&ithc->pci->dev, p->size, &p->dma_addr, GFP_KERNEL); ++ if (!p->addr) return -ENOMEM; ++ if (p->dma_addr & (PAGE_SIZE - 1)) return -EFAULT; ++ return 0; ++} ++ ++struct ithc_sg_table { ++ void *addr; ++ struct sg_table sgt; ++ enum dma_data_direction dir; ++}; ++static void ithc_dma_sgtable_free(struct sg_table *sgt) { ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_sg(sgt, sg, i) { ++ struct page *p = sg_page(sg); ++ if (p) __free_page(p); ++ } ++ sg_free_table(sgt); ++} ++static void ithc_dma_data_devres_release(struct device *dev, void *res) { ++ struct ithc_sg_table *sgt = res; ++ if (sgt->addr) vunmap(sgt->addr); ++ dma_unmap_sgtable(dev, &sgt->sgt, sgt->dir, 0); ++ ithc_dma_sgtable_free(&sgt->sgt); ++} ++ ++static int ithc_dma_data_alloc(struct ithc* ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b) { ++ // We don't use dma_alloc_coherent for data buffers, because they don't have to be contiguous (we can use one PRD per page) or coherent (they are unidirectional). ++ // Instead we use an sg_table of individually allocated pages (5.13 has dma_alloc_noncontiguous for this, but we'd like to support 5.10 for now). 
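++ // One PRD (physical region descriptor) covers a single page: ithc_dma_data_buffer_put()
++ // below fills in one descriptor per scatterlist entry, so the pages need not be contiguous.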
++ struct page *pages[16]; ++ if (prds->num_pages == 0 || prds->num_pages > ARRAY_SIZE(pages)) return -EINVAL; ++ b->active_idx = -1; ++ struct ithc_sg_table *sgt = devres_alloc(ithc_dma_data_devres_release, sizeof *sgt, GFP_KERNEL); ++ if (!sgt) return -ENOMEM; ++ sgt->dir = prds->dir; ++ if (!sg_alloc_table(&sgt->sgt, prds->num_pages, GFP_KERNEL)) { ++ struct scatterlist *sg; ++ int i; ++ bool ok = true; ++ for_each_sgtable_sg(&sgt->sgt, sg, i) { ++ struct page *p = pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); // don't need __GFP_DMA for PCI DMA ++ if (!p) { ok = false; break; } ++ sg_set_page(sg, p, PAGE_SIZE, 0); ++ } ++ if (ok && !dma_map_sgtable(&ithc->pci->dev, &sgt->sgt, prds->dir, 0)) { ++ devres_add(&ithc->pci->dev, sgt); ++ b->sgt = &sgt->sgt; ++ b->addr = sgt->addr = vmap(pages, prds->num_pages, 0, PAGE_KERNEL); ++ if (!b->addr) return -ENOMEM; ++ return 0; ++ } ++ ithc_dma_sgtable_free(&sgt->sgt); ++ } ++ devres_free(sgt); ++ return -ENOMEM; ++} ++ ++static int ithc_dma_data_buffer_put(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { ++ struct ithc_phys_region_desc *prd = prds->addr; ++ prd += idx * prds->num_pages; ++ if (b->active_idx >= 0) { pci_err(ithc->pci, "buffer already active\n"); return -EINVAL; } ++ b->active_idx = idx; ++ if (prds->dir == DMA_TO_DEVICE) { ++ if (b->data_size > PAGE_SIZE) return -EINVAL; ++ prd->addr = sg_dma_address(b->sgt->sgl) >> 10; ++ prd->size = b->data_size | PRD_FLAG_END; ++ flush_kernel_vmap_range(b->addr, b->data_size); ++ } else if (prds->dir == DMA_FROM_DEVICE) { ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_dma_sg(b->sgt, sg, i) { ++ prd->addr = sg_dma_address(sg) >> 10; ++ prd->size = sg_dma_len(sg); ++ prd++; ++ } ++ prd[-1].size |= PRD_FLAG_END; ++ } ++ dma_wmb(); // for the prds ++ dma_sync_sgtable_for_device(&ithc->pci->dev, b->sgt, prds->dir); ++ return 0; ++} ++ ++static int ithc_dma_data_buffer_get(struct ithc *ithc, struct ithc_dma_prd_buffer *prds, struct ithc_dma_data_buffer *b, unsigned idx) { ++ struct ithc_phys_region_desc *prd = prds->addr; ++ prd += idx * prds->num_pages; ++ if (b->active_idx != idx) { pci_err(ithc->pci, "wrong buffer index\n"); return -EINVAL; } ++ b->active_idx = -1; ++ if (prds->dir == DMA_FROM_DEVICE) { ++ dma_rmb(); // for the prds ++ b->data_size = 0; ++ struct scatterlist *sg; ++ int i; ++ for_each_sgtable_dma_sg(b->sgt, sg, i) { ++ unsigned size = prd->size; ++ b->data_size += size & PRD_SIZE_MASK; ++ if (size & PRD_FLAG_END) break; ++ if ((size & PRD_SIZE_MASK) != sg_dma_len(sg)) { pci_err(ithc->pci, "truncated prd\n"); break; } ++ prd++; ++ } ++ invalidate_kernel_vmap_range(b->addr, b->data_size); ++ } ++ dma_sync_sgtable_for_cpu(&ithc->pci->dev, b->sgt, prds->dir); ++ return 0; ++} ++ ++int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ mutex_init(&rx->mutex); ++ u32 buf_size = DEVCFG_DMA_RX_SIZE(ithc->config.dma_buf_sizes); ++ unsigned num_pages = (buf_size + PAGE_SIZE - 1) / PAGE_SIZE; ++ pci_dbg(ithc->pci, "allocating rx buffers: num = %u, size = %u, pages = %u\n", NUM_RX_BUF, buf_size, num_pages); ++ CHECK_RET(ithc_dma_prd_alloc, ithc, &rx->prds, NUM_RX_BUF, num_pages, DMA_FROM_DEVICE); ++ for (unsigned i = 0; i < NUM_RX_BUF; i++) ++ CHECK_RET(ithc_dma_data_alloc, ithc, &rx->prds, &rx->bufs[i]); ++ writeb(DMA_RX_CONTROL2_RESET, &ithc->regs->dma_rx[channel].control2); ++ lo_hi_writeq(rx->prds.dma_addr, &ithc->regs->dma_rx[channel].addr); ++ 
writeb(NUM_RX_BUF - 1, &ithc->regs->dma_rx[channel].num_bufs); ++ writeb(num_pages - 1, &ithc->regs->dma_rx[channel].num_prds); ++ u8 head = readb(&ithc->regs->dma_rx[channel].head); ++ if (head) { pci_err(ithc->pci, "head is nonzero (%u)\n", head); return -EIO; } ++ for (unsigned i = 0; i < NUM_RX_BUF; i++) ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, &rx->bufs[i], i); ++ writeb(head ^ DMA_RX_WRAP_FLAG, &ithc->regs->dma_rx[channel].tail); ++ return 0; ++} ++void ithc_dma_rx_enable(struct ithc *ithc, u8 channel) { ++ bitsb_set(&ithc->regs->dma_rx[channel].control, DMA_RX_CONTROL_ENABLE | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_DATA); ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[1].status, DMA_RX_STATUS_ENABLED, DMA_RX_STATUS_ENABLED); ++} ++ ++int ithc_dma_tx_init(struct ithc *ithc) { ++ struct ithc_dma_tx *tx = &ithc->dma_tx; ++ mutex_init(&tx->mutex); ++ tx->max_size = DEVCFG_DMA_TX_SIZE(ithc->config.dma_buf_sizes); ++ unsigned num_pages = (tx->max_size + PAGE_SIZE - 1) / PAGE_SIZE; ++ pci_dbg(ithc->pci, "allocating tx buffers: size = %u, pages = %u\n", tx->max_size, num_pages); ++ CHECK_RET(ithc_dma_prd_alloc, ithc, &tx->prds, 1, num_pages, DMA_TO_DEVICE); ++ CHECK_RET(ithc_dma_data_alloc, ithc, &tx->prds, &tx->buf); ++ lo_hi_writeq(tx->prds.dma_addr, &ithc->regs->dma_tx.addr); ++ writeb(num_pages - 1, &ithc->regs->dma_tx.num_prds); ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ return 0; ++} ++ ++static int ithc_dma_rx_process_buf(struct ithc *ithc, struct ithc_dma_data_buffer *data, u8 channel, u8 buf) { ++ if (buf >= NUM_RX_BUF) { ++ pci_err(ithc->pci, "invalid dma ringbuffer index\n"); ++ return -EINVAL; ++ } ++ ithc_set_active(ithc); ++ u32 len = data->data_size; ++ struct ithc_dma_rx_header *hdr = data->addr; ++ u8 *hiddata = (void *)(hdr + 1); ++ if (len >= sizeof *hdr && hdr->code == DMA_RX_CODE_RESET) { ++ CHECK(ithc_reset, ithc); ++ } else if (len < sizeof *hdr || len != sizeof *hdr + hdr->data_size) { ++ if (hdr->code == DMA_RX_CODE_INPUT_REPORT) { ++ // When the CPU enters a low power state during DMA, we can get truncated messages. ++ // Typically this will be a single touch HID report that is only 1 byte, or a multitouch report that is 257 bytes. ++ // See also ithc_set_active(). ++ } else { ++ pci_err(ithc->pci, "invalid dma rx data! 
channel %u, buffer %u, size %u, code %u, data size %u\n", channel, buf, len, hdr->code, hdr->data_size); ++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); ++ } ++ } else if (hdr->code == DMA_RX_CODE_REPORT_DESCRIPTOR && hdr->data_size > 8) { ++ CHECK(hid_parse_report, ithc->hid, hiddata + 8, hdr->data_size - 8); ++ WRITE_ONCE(ithc->hid_parse_done, true); ++ wake_up(&ithc->wait_hid_parse); ++ } else if (hdr->code == DMA_RX_CODE_INPUT_REPORT) { ++ CHECK(hid_input_report, ithc->hid, HID_INPUT_REPORT, hiddata, hdr->data_size, 1); ++ } else if (hdr->code == DMA_RX_CODE_FEATURE_REPORT) { ++ bool done = false; ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ if (ithc->hid_get_feature_buf) { ++ if (hdr->data_size < ithc->hid_get_feature_size) ithc->hid_get_feature_size = hdr->data_size; ++ memcpy(ithc->hid_get_feature_buf, hiddata, ithc->hid_get_feature_size); ++ ithc->hid_get_feature_buf = NULL; ++ done = true; ++ } ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ if (done) wake_up(&ithc->wait_hid_get_feature); ++ else CHECK(hid_input_report, ithc->hid, HID_FEATURE_REPORT, hiddata, hdr->data_size, 1); ++ } else { ++ pci_dbg(ithc->pci, "unhandled dma rx data! channel %u, buffer %u, size %u, code %u\n", channel, buf, len, hdr->code); ++ print_hex_dump_debug(DEVNAME " data: ", DUMP_PREFIX_OFFSET, 32, 1, hdr, min(len, 0x400u), 0); + } ++ return 0; ++} + -+ mas_set(&mas, 0); - mas_for_each(&mas, vma, ULONG_MAX) { - if (signal_pending(current)) - goto out_unlock; -@@ -3616,6 +3641,7 @@ void mm_drop_all_locks(struct mm_struct *mm) - if (vma->vm_file && vma->vm_file->f_mapping) - vm_unlock_mapping(vma->vm_file->f_mapping); - } -+ vma_end_write_all(mm); - - mutex_unlock(&mm_all_locks_mutex); - } -diff --git a/mm/mprotect.c b/mm/mprotect.c -index 36351a00c0e8..204194155863 100644 ---- a/mm/mprotect.c -+++ b/mm/mprotect.c -@@ -276,7 +276,15 @@ static long change_pte_range(struct mmu_gather *tlb, - } else { - /* It must be an none page, or what else?.. */ - WARN_ON_ONCE(!pte_none(oldpte)); -- if (unlikely(uffd_wp && !vma_is_anonymous(vma))) { ++static int ithc_dma_rx_unlocked(struct ithc *ithc, u8 channel) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ unsigned n = rx->num_received; ++ u8 head_wrap = readb(&ithc->regs->dma_rx[channel].head); ++ while (1) { ++ u8 tail = n % NUM_RX_BUF; ++ u8 tail_wrap = tail | ((n / NUM_RX_BUF) & 1 ? 0 : DMA_RX_WRAP_FLAG); ++ writeb(tail_wrap, &ithc->regs->dma_rx[channel].tail); ++ // ringbuffer is full if tail_wrap == head_wrap ++ // ringbuffer is empty if tail_wrap == head_wrap ^ WRAP_FLAG ++ if (tail_wrap == (head_wrap ^ DMA_RX_WRAP_FLAG)) return 0; ++ ++ // take the buffer that the device just filled ++ struct ithc_dma_data_buffer *b = &rx->bufs[n % NUM_RX_BUF]; ++ CHECK_RET(ithc_dma_data_buffer_get, ithc, &rx->prds, b, tail); ++ rx->num_received = ++n; ++ ++ // process data ++ CHECK(ithc_dma_rx_process_buf, ithc, b, channel, tail); ++ ++ // give the buffer back to the device ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &rx->prds, b, tail); ++ } ++} ++int ithc_dma_rx(struct ithc *ithc, u8 channel) { ++ struct ithc_dma_rx *rx = &ithc->dma_rx[channel]; ++ mutex_lock(&rx->mutex); ++ int ret = ithc_dma_rx_unlocked(ithc, channel); ++ mutex_unlock(&rx->mutex); ++ return ret; ++} + -+ /* -+ * Nobody plays with any none ptes besides -+ * userfaultfd when applying the protections. 
-+ */ -+ if (likely(!uffd_wp)) -+ continue; ++static int ithc_dma_tx_unlocked(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { ++ pci_dbg(ithc->pci, "dma tx command %u, size %u\n", cmdcode, datasize); ++ struct ithc_dma_tx_header *hdr; ++ u8 padding = datasize & 3 ? 4 - (datasize & 3) : 0; ++ unsigned fullsize = sizeof *hdr + datasize + padding; ++ if (fullsize > ithc->dma_tx.max_size || fullsize > PAGE_SIZE) return -EINVAL; ++ CHECK_RET(ithc_dma_data_buffer_get, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ ++ ithc->dma_tx.buf.data_size = fullsize; ++ hdr = ithc->dma_tx.buf.addr; ++ hdr->code = cmdcode; ++ hdr->data_size = datasize; ++ u8 *dest = (void *)(hdr + 1); ++ memcpy(dest, data, datasize); ++ dest += datasize; ++ for (u8 p = 0; p < padding; p++) *dest++ = 0; ++ CHECK_RET(ithc_dma_data_buffer_put, ithc, &ithc->dma_tx.prds, &ithc->dma_tx.buf, 0); ++ ++ bitsb_set(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND); ++ CHECK_RET(waitb, ithc, &ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); ++ writel(DMA_TX_STATUS_DONE, &ithc->regs->dma_tx.status); ++ return 0; ++} ++int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *data) { ++ mutex_lock(&ithc->dma_tx.mutex); ++ int ret = ithc_dma_tx_unlocked(ithc, cmdcode, datasize, data); ++ mutex_unlock(&ithc->dma_tx.mutex); ++ return ret; ++} + -+ if (userfaultfd_wp_use_markers(vma)) { - /* - * For file-backed mem, we need to be able to - * wr-protect a none pte, because even if the -@@ -320,23 +328,46 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) - return 0; - } - --/* Return true if we're uffd wr-protecting file-backed memory, or false */ -+/* -+ * Return true if we want to split THPs into PTE mappings in change -+ * protection procedure, false otherwise. -+ */ - static inline bool --uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags) -+pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) - { -+ /* -+ * pte markers only resides in pte level, if we need pte markers, -+ * we need to split. We cannot wr-protect shmem thp because file -+ * thp is handled differently when split by erasing the pmd so far. -+ */ - return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); - } - - /* -- * If wr-protecting the range for file-backed, populate pgtable for the case -- * when pgtable is empty but page cache exists. When {pte|pmd|...}_alloc() -- * failed we treat it the same way as pgtable allocation failures during -- * page faults by kicking OOM and returning error. 
-+ * Return true if we want to populate pgtables in change protection -+ * procedure, false otherwise -+ */ -+static inline bool -+pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) -+{ -+ /* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */ -+ if (!(cp_flags & MM_CP_UFFD_WP)) -+ return false; +diff --git a/drivers/hid/ithc/ithc-dma.h b/drivers/hid/ithc/ithc-dma.h +new file mode 100644 +index 000000000000..d9f2c19a13f3 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-dma.h +@@ -0,0 +1,67 @@ ++#define PRD_SIZE_MASK 0xffffff ++#define PRD_FLAG_END 0x1000000 ++#define PRD_FLAG_SUCCESS 0x2000000 ++#define PRD_FLAG_ERROR 0x4000000 ++ ++struct ithc_phys_region_desc { ++ u64 addr; // physical addr/1024 ++ u32 size; // num bytes, PRD_FLAG_END marks last prd for data split over multiple prds ++ u32 unused; ++}; + -+ /* Populate if the userfaultfd mode requires pte markers */ -+ return userfaultfd_wp_use_markers(vma); ++#define DMA_RX_CODE_INPUT_REPORT 3 ++#define DMA_RX_CODE_FEATURE_REPORT 4 ++#define DMA_RX_CODE_REPORT_DESCRIPTOR 5 ++#define DMA_RX_CODE_RESET 7 ++ ++struct ithc_dma_rx_header { ++ u32 code; ++ u32 data_size; ++ u32 _unknown[14]; ++}; ++ ++#define DMA_TX_CODE_SET_FEATURE 3 ++#define DMA_TX_CODE_GET_FEATURE 4 ++#define DMA_TX_CODE_OUTPUT_REPORT 5 ++#define DMA_TX_CODE_GET_REPORT_DESCRIPTOR 7 ++ ++struct ithc_dma_tx_header { ++ u32 code; ++ u32 data_size; ++}; ++ ++struct ithc_dma_prd_buffer { ++ void *addr; ++ dma_addr_t dma_addr; ++ u32 size; ++ u32 num_pages; // per data buffer ++ enum dma_data_direction dir; ++}; ++ ++struct ithc_dma_data_buffer { ++ void *addr; ++ struct sg_table *sgt; ++ int active_idx; ++ u32 data_size; ++}; ++ ++struct ithc_dma_tx { ++ struct mutex mutex; ++ u32 max_size; ++ struct ithc_dma_prd_buffer prds; ++ struct ithc_dma_data_buffer buf; ++}; ++ ++struct ithc_dma_rx { ++ struct mutex mutex; ++ u32 num_received; ++ struct ithc_dma_prd_buffer prds; ++ struct ithc_dma_data_buffer bufs[NUM_RX_BUF]; ++}; ++ ++int ithc_dma_rx_init(struct ithc *ithc, u8 channel, const char *devname); ++void ithc_dma_rx_enable(struct ithc *ithc, u8 channel); ++int ithc_dma_tx_init(struct ithc *ithc); ++int ithc_dma_rx(struct ithc *ithc, u8 channel); ++int ithc_dma_tx(struct ithc *ithc, u32 cmdcode, u32 datasize, void *cmddata); ++ +diff --git a/drivers/hid/ithc/ithc-main.c b/drivers/hid/ithc/ithc-main.c +new file mode 100644 +index 000000000000..09512b9cb4d3 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-main.c +@@ -0,0 +1,534 @@ ++#include "ithc.h" ++ ++MODULE_DESCRIPTION("Intel Touch Host Controller driver"); ++MODULE_LICENSE("Dual BSD/GPL"); ++ ++// Lakefield ++#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT1 0x98d0 ++#define PCI_DEVICE_ID_INTEL_THC_LKF_PORT2 0x98d1 ++// Tiger Lake ++#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1 0xa0d0 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2 0xa0d1 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1 0x43d0 ++#define PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2 0x43d1 ++// Alder Lake ++#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1 0x7ad8 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2 0x7ad9 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1 0x51d0 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2 0x51d1 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1 0x54d0 ++#define PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2 0x54d1 ++// Raptor Lake ++#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1 0x7a58 ++#define PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2 0x7a59 ++// Meteor Lake ++#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT1 0x7e48 ++#define PCI_DEVICE_ID_INTEL_THC_MTL_PORT2 
0x7e4a ++ ++static const struct pci_device_id ithc_pci_tbl[] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_LKF_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_LP_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_TGL_H_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_S_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_P_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_ADL_M_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_RPL_S_PORT2) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT1) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THC_MTL_PORT2) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(pci, ithc_pci_tbl); ++ ++// Module parameters ++ ++static bool ithc_use_polling = false; ++module_param_named(poll, ithc_use_polling, bool, 0); ++MODULE_PARM_DESC(poll, "Use polling instead of interrupts"); ++ ++static bool ithc_use_rx0 = false; ++module_param_named(rx0, ithc_use_rx0, bool, 0); ++MODULE_PARM_DESC(rx0, "Use DMA RX channel 0"); ++ ++static bool ithc_use_rx1 = true; ++module_param_named(rx1, ithc_use_rx1, bool, 0); ++MODULE_PARM_DESC(rx1, "Use DMA RX channel 1"); ++ ++static bool ithc_log_regs_enabled = false; ++module_param_named(logregs, ithc_log_regs_enabled, bool, 0); ++MODULE_PARM_DESC(logregs, "Log changes in register values (for debugging)"); ++ ++// Sysfs attributes ++ ++static bool ithc_is_config_valid(struct ithc *ithc) { ++ return ithc->config.device_id == DEVCFG_DEVICE_ID_TIC; ++} ++ ++static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ return sprintf(buf, "0x%04x", ithc->config.vendor_id); ++} ++static DEVICE_ATTR_RO(vendor); ++static ssize_t product_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ return sprintf(buf, "0x%04x", ithc->config.product_id); ++} ++static DEVICE_ATTR_RO(product); ++static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ return sprintf(buf, "%u", ithc->config.revision); ++} ++static DEVICE_ATTR_RO(revision); ++static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { ++ struct ithc *ithc = dev_get_drvdata(dev); ++ if (!ithc || !ithc_is_config_valid(ithc)) return -ENODEV; ++ u32 v = ithc->config.fw_version; ++ return sprintf(buf, "%i.%i.%i.%i", v >> 24, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff); ++} ++static DEVICE_ATTR_RO(fw_version); ++ ++static const struct attribute_group *ithc_attribute_groups[] = { ++ &(const struct attribute_group){ ++ .name = DEVNAME, ++ .attrs = (struct attribute 
*[]){ ++ &dev_attr_vendor.attr, ++ &dev_attr_product.attr, ++ &dev_attr_revision.attr, ++ &dev_attr_fw_version.attr, ++ NULL ++ }, ++ }, ++ NULL ++}; ++ ++// HID setup ++ ++static int ithc_hid_start(struct hid_device *hdev) { return 0; } ++static void ithc_hid_stop(struct hid_device *hdev) { } ++static int ithc_hid_open(struct hid_device *hdev) { return 0; } ++static void ithc_hid_close(struct hid_device *hdev) { } ++ ++static int ithc_hid_parse(struct hid_device *hdev) { ++ struct ithc *ithc = hdev->driver_data; ++ u64 val = 0; ++ WRITE_ONCE(ithc->hid_parse_done, false); ++ CHECK_RET(ithc_dma_tx, ithc, DMA_TX_CODE_GET_REPORT_DESCRIPTOR, sizeof val, &val); ++ if (!wait_event_timeout(ithc->wait_hid_parse, READ_ONCE(ithc->hid_parse_done), msecs_to_jiffies(1000))) return -ETIMEDOUT; ++ return 0; +} + -+/* -+ * Populate the pgtable underneath for whatever reason if requested. -+ * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable -+ * allocation failures during page faults by kicking OOM and returning -+ * error. - */ - #define change_pmd_prepare(vma, pmd, cp_flags) \ - ({ \ - long err = 0; \ -- if (unlikely(uffd_wp_protect_file(vma, cp_flags))) { \ -+ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ - if (pte_alloc(vma->vm_mm, pmd)) \ - err = -ENOMEM; \ - } \ -@@ -351,7 +382,7 @@ uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags) - #define change_prepare(vma, high, low, addr, cp_flags) \ - ({ \ - long err = 0; \ -- if (unlikely(uffd_wp_protect_file(vma, cp_flags))) { \ -+ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ - low##_t *p = low##_alloc(vma->vm_mm, high, addr); \ - if (p == NULL) \ - err = -ENOMEM; \ -@@ -404,7 +435,7 @@ static inline long change_pmd_range(struct mmu_gather *tlb, - - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { - if ((next - addr != HPAGE_PMD_SIZE) || -- uffd_wp_protect_file(vma, cp_flags)) { -+ pgtable_split_needed(vma, cp_flags)) { - __split_huge_pmd(vma, pmd, addr, false, NULL); - /* - * For file-backed, the pmd could have been -diff --git a/mm/mremap.c b/mm/mremap.c -index 411a85682b58..dd541e59edda 100644 ---- a/mm/mremap.c -+++ b/mm/mremap.c -@@ -623,6 +623,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, - return -ENOMEM; - } - -+ vma_start_write(vma); - new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, - &need_rmap_locks); -diff --git a/mm/rmap.c b/mm/rmap.c -index 8632e02661ac..cfdaa56cad3e 100644 ---- a/mm/rmap.c -+++ b/mm/rmap.c -@@ -25,21 +25,22 @@ - * mapping->invalidate_lock (in filemap_fault) - * page->flags PG_locked (lock_page) - * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) -- * mapping->i_mmap_rwsem -- * anon_vma->rwsem -- * mm->page_table_lock or pte_lock -- * swap_lock (in swap_duplicate, swap_info_get) -- * mmlist_lock (in mmput, drain_mmlist and others) -- * mapping->private_lock (in block_dirty_folio) -- * folio_lock_memcg move_lock (in block_dirty_folio) -- * i_pages lock (widely used) -- * lruvec->lru_lock (in folio_lruvec_lock_irq) -- * inode->i_lock (in set_page_dirty's __mark_inode_dirty) -- * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) -- * sb_lock (within inode_lock in fs/fs-writeback.c) -- * i_pages lock (widely used, in set_page_dirty, -- * in arch-dependent flush_dcache_mmap_lock, -- * within bdi.wb->list_lock in __sync_single_inode) -+ * vma_start_write -+ * mapping->i_mmap_rwsem -+ * anon_vma->rwsem -+ * 
mm->page_table_lock or pte_lock -+ * swap_lock (in swap_duplicate, swap_info_get) -+ * mmlist_lock (in mmput, drain_mmlist and others) -+ * mapping->private_lock (in block_dirty_folio) -+ * folio_lock_memcg move_lock (in block_dirty_folio) -+ * i_pages lock (widely used) -+ * lruvec->lru_lock (in folio_lruvec_lock_irq) -+ * inode->i_lock (in set_page_dirty's __mark_inode_dirty) -+ * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) -+ * sb_lock (within inode_lock in fs/fs-writeback.c) -+ * i_pages lock (widely used, in set_page_dirty, -+ * in arch-dependent flush_dcache_mmap_lock, -+ * within bdi.wb->list_lock in __sync_single_inode) - * - * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) - * ->tasklist_lock -diff --git a/mm/vmstat.c b/mm/vmstat.c -index 1ea6a5ce1c41..4f1089a1860e 100644 ---- a/mm/vmstat.c -+++ b/mm/vmstat.c -@@ -1399,6 +1399,12 @@ const char * const vmstat_text[] = { - "direct_map_level2_splits", - "direct_map_level3_splits", - #endif -+#ifdef CONFIG_PER_VMA_LOCK_STATS -+ "vma_lock_success", -+ "vma_lock_abort", -+ "vma_lock_retry", -+ "vma_lock_miss", -+#endif - #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ - }; - #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ -diff --git a/tools/testing/selftests/mm/userfaultfd.c b/tools/testing/selftests/mm/userfaultfd.c -index 7f22844ed704..e030d63c031a 100644 ---- a/tools/testing/selftests/mm/userfaultfd.c -+++ b/tools/testing/selftests/mm/userfaultfd.c -@@ -1444,6 +1444,43 @@ static int pagemap_test_fork(bool present) - return result; - } - -+static void userfaultfd_wp_unpopulated_test(int pagemap_fd) -+{ -+ uint64_t value; ++static int ithc_hid_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { ++ struct ithc *ithc = hdev->driver_data; ++ if (!buf || !len) return -EINVAL; ++ u32 code; ++ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_OUTPUT_REPORT; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT) code = DMA_TX_CODE_SET_FEATURE; ++ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT) code = DMA_TX_CODE_GET_FEATURE; ++ else { ++ pci_err(ithc->pci, "unhandled hid request %i %i for report id %i\n", rtype, reqtype, reportnum); ++ return -EINVAL; ++ } ++ buf[0] = reportnum; ++ if (reqtype == HID_REQ_GET_REPORT) { ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ ithc->hid_get_feature_buf = buf; ++ ithc->hid_get_feature_size = len; ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ int r = CHECK(ithc_dma_tx, ithc, code, 1, buf); ++ if (!r) { ++ r = wait_event_interruptible_timeout(ithc->wait_hid_get_feature, !ithc->hid_get_feature_buf, msecs_to_jiffies(1000)); ++ if (!r) r = -ETIMEDOUT; ++ else if (r < 0) r = -EINTR; ++ else r = 0; ++ } ++ mutex_lock(&ithc->hid_get_feature_mutex); ++ ithc->hid_get_feature_buf = NULL; ++ if (!r) r = ithc->hid_get_feature_size; ++ mutex_unlock(&ithc->hid_get_feature_mutex); ++ return r; ++ } ++ CHECK_RET(ithc_dma_tx, ithc, code, len, buf); ++ return 0; ++} ++ ++static struct hid_ll_driver ithc_ll_driver = { ++ .start = ithc_hid_start, ++ .stop = ithc_hid_stop, ++ .open = ithc_hid_open, ++ .close = ithc_hid_close, ++ .parse = ithc_hid_parse, ++ .raw_request = ithc_hid_raw_request, ++}; ++ ++static void ithc_hid_devres_release(struct device *dev, void *res) { ++ struct hid_device **hidm = res; ++ if (*hidm) hid_destroy_device(*hidm); ++} ++ ++static int ithc_hid_init(struct ithc *ithc) { ++ 
struct hid_device **hidm = devres_alloc(ithc_hid_devres_release, sizeof *hidm, GFP_KERNEL); ++ if (!hidm) return -ENOMEM; ++ devres_add(&ithc->pci->dev, hidm); ++ struct hid_device *hid = hid_allocate_device(); ++ if (IS_ERR(hid)) return PTR_ERR(hid); ++ *hidm = hid; ++ ++ strscpy(hid->name, DEVFULLNAME, sizeof(hid->name)); ++ strscpy(hid->phys, ithc->phys, sizeof(hid->phys)); ++ hid->ll_driver = &ithc_ll_driver; ++ hid->bus = BUS_PCI; ++ hid->vendor = ithc->config.vendor_id; ++ hid->product = ithc->config.product_id; ++ hid->version = 0x100; ++ hid->dev.parent = &ithc->pci->dev; ++ hid->driver_data = ithc; ++ ++ ithc->hid = hid; ++ return 0; ++} ++ ++// Interrupts/polling ++ ++static void ithc_activity_timer_callback(struct timer_list *t) { ++ struct ithc *ithc = container_of(t, struct ithc, activity_timer); ++ cpu_latency_qos_update_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); ++} ++ ++void ithc_set_active(struct ithc *ithc) { ++ // When CPU usage is very low, the CPU can enter various low power states (C2-C10). ++ // This disrupts DMA, causing truncated DMA messages. ERROR_FLAG_DMA_UNKNOWN_12 will be set when this happens. ++ // The amount of truncated messages can become very high, resulting in user-visible effects (laggy/stuttering cursor). ++ // To avoid this, we use a CPU latency QoS request to prevent the CPU from entering low power states during touch interactions. ++ cpu_latency_qos_update_request(&ithc->activity_qos, 0); ++ mod_timer(&ithc->activity_timer, jiffies + msecs_to_jiffies(1000)); ++} ++ ++static int ithc_set_device_enabled(struct ithc *ithc, bool enable) { ++ u32 x = ithc->config.touch_cfg = (ithc->config.touch_cfg & ~(u32)DEVCFG_TOUCH_MASK) | DEVCFG_TOUCH_UNKNOWN_2 ++ | (enable ? DEVCFG_TOUCH_ENABLE | DEVCFG_TOUCH_UNKNOWN_3 | DEVCFG_TOUCH_UNKNOWN_4 : 0); ++ return ithc_spi_command(ithc, SPI_CMD_CODE_WRITE, offsetof(struct ithc_device_config, touch_cfg), sizeof x, &x); ++} ++ ++static void ithc_disable_interrupts(struct ithc *ithc) { ++ writel(0, &ithc->regs->error_control); ++ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_IRQ, 0); ++ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); ++ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_IRQ_UNKNOWN_1 | DMA_RX_CONTROL_IRQ_ERROR | DMA_RX_CONTROL_IRQ_UNKNOWN_4 | DMA_RX_CONTROL_IRQ_DATA, 0); ++ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_IRQ, 0); ++} ++ ++static void ithc_clear_dma_rx_interrupts(struct ithc *ithc, unsigned channel) { ++ writel(DMA_RX_STATUS_ERROR | DMA_RX_STATUS_UNKNOWN_4 | DMA_RX_STATUS_HAVE_DATA, &ithc->regs->dma_rx[channel].status); ++} ++ ++static void ithc_clear_interrupts(struct ithc *ithc) { ++ writel(0xffffffff, &ithc->regs->error_flags); ++ writel(ERROR_STATUS_DMA | ERROR_STATUS_SPI, &ithc->regs->error_status); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ ithc_clear_dma_rx_interrupts(ithc, 0); ++ ithc_clear_dma_rx_interrupts(ithc, 1); ++ writel(DMA_TX_STATUS_DONE | DMA_TX_STATUS_ERROR | DMA_TX_STATUS_UNKNOWN_2, &ithc->regs->dma_tx.status); ++} ++ ++static void ithc_process(struct ithc *ithc) { ++ ithc_log_regs(ithc); ++ ++ // read and clear error bits ++ u32 err = readl(&ithc->regs->error_flags); ++ if (err) { ++ if (err & ~ERROR_FLAG_DMA_UNKNOWN_12) pci_err(ithc->pci, "error flags: 0x%08x\n", err); ++ writel(err, &ithc->regs->error_flags); ++ } + -+ /* Test applying pte marker to anon unpopulated */ -+ wp_range(uffd, 
(uint64_t)area_dst, page_size, true); -+ value = pagemap_read_vaddr(pagemap_fd, area_dst); -+ pagemap_check_wp(value, true); ++ // process DMA rx ++ if (ithc_use_rx0) { ++ ithc_clear_dma_rx_interrupts(ithc, 0); ++ ithc_dma_rx(ithc, 0); ++ } ++ if (ithc_use_rx1) { ++ ithc_clear_dma_rx_interrupts(ithc, 1); ++ ithc_dma_rx(ithc, 1); ++ } + -+ /* Test unprotect on anon pte marker */ -+ wp_range(uffd, (uint64_t)area_dst, page_size, false); -+ value = pagemap_read_vaddr(pagemap_fd, area_dst); -+ pagemap_check_wp(value, false); ++ ithc_log_regs(ithc); ++} ++ ++static irqreturn_t ithc_interrupt_thread(int irq, void *arg) { ++ struct ithc *ithc = arg; ++ pci_dbg(ithc->pci, "IRQ! err=%08x/%08x/%08x, cmd=%02x/%08x, rx0=%02x/%08x, rx1=%02x/%08x, tx=%02x/%08x\n", ++ readl(&ithc->regs->error_control), readl(&ithc->regs->error_status), readl(&ithc->regs->error_flags), ++ readb(&ithc->regs->spi_cmd.control), readl(&ithc->regs->spi_cmd.status), ++ readb(&ithc->regs->dma_rx[0].control), readl(&ithc->regs->dma_rx[0].status), ++ readb(&ithc->regs->dma_rx[1].control), readl(&ithc->regs->dma_rx[1].status), ++ readb(&ithc->regs->dma_tx.control), readl(&ithc->regs->dma_tx.status)); ++ ithc_process(ithc); ++ return IRQ_HANDLED; ++} ++ ++static int ithc_poll_thread(void *arg) { ++ struct ithc *ithc = arg; ++ unsigned sleep = 100; ++ while (!kthread_should_stop()) { ++ u32 n = ithc->dma_rx[1].num_received; ++ ithc_process(ithc); ++ if (n != ithc->dma_rx[1].num_received) sleep = 20; ++ else sleep = min(200u, sleep + (sleep >> 4) + 1); ++ msleep_interruptible(sleep); ++ } ++ return 0; ++} + -+ /* Test zap on anon marker */ -+ wp_range(uffd, (uint64_t)area_dst, page_size, true); -+ if (madvise(area_dst, page_size, MADV_DONTNEED)) -+ err("madvise(MADV_DONTNEED) failed"); -+ value = pagemap_read_vaddr(pagemap_fd, area_dst); -+ pagemap_check_wp(value, false); ++// Device initialization and shutdown ++ ++static void ithc_disable(struct ithc *ithc) { ++ bitsl_set(&ithc->regs->control_bits, CONTROL_QUIESCE); ++ CHECK(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, CONTROL_IS_QUIESCED); ++ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); ++ bitsb(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND, 0); ++ bitsb(&ithc->regs->dma_tx.control, DMA_TX_CONTROL_SEND, 0); ++ bitsb(&ithc->regs->dma_rx[0].control, DMA_RX_CONTROL_ENABLE, 0); ++ bitsb(&ithc->regs->dma_rx[1].control, DMA_RX_CONTROL_ENABLE, 0); ++ ithc_disable_interrupts(ithc); ++ ithc_clear_interrupts(ithc); ++} ++ ++static int ithc_init_device(struct ithc *ithc) { ++ ithc_log_regs(ithc); ++ bool was_enabled = (readl(&ithc->regs->control_bits) & CONTROL_NRESET) != 0; ++ ithc_disable(ithc); ++ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_READY, CONTROL_READY); ++ ithc_set_spi_config(ithc, 10, 0); ++ bitsl_set(&ithc->regs->dma_rx[0].unknown_init_bits, 0x80000000); // seems to help with reading config ++ ++ if (was_enabled) if (msleep_interruptible(100)) return -EINTR; ++ bitsl(&ithc->regs->control_bits, CONTROL_QUIESCE, 0); ++ CHECK_RET(waitl, ithc, &ithc->regs->control_bits, CONTROL_IS_QUIESCED, 0); ++ for (int retries = 0; ; retries++) { ++ ithc_log_regs(ithc); ++ bitsl_set(&ithc->regs->control_bits, CONTROL_NRESET); ++ if (!waitl(ithc, &ithc->regs->state, 0xf, 2)) break; ++ if (retries > 5) { ++ pci_err(ithc->pci, "too many retries, failed to reset device\n"); ++ return -ETIMEDOUT; ++ } ++ pci_err(ithc->pci, "invalid state, retrying reset\n"); ++ bitsl(&ithc->regs->control_bits, CONTROL_NRESET, 0); ++ if (msleep_interruptible(1000)) return 
-EINTR; ++ } ++ ithc_log_regs(ithc); ++ ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[0].status, DMA_RX_STATUS_UNKNOWN_4, DMA_RX_STATUS_UNKNOWN_4); ++ ++ // read config ++ for (int retries = 0; ; retries++) { ++ ithc_log_regs(ithc); ++ memset(&ithc->config, 0, sizeof ithc->config); ++ CHECK_RET(ithc_spi_command, ithc, SPI_CMD_CODE_READ, 0, sizeof ithc->config, &ithc->config); ++ u32 *p = (void *)&ithc->config; ++ pci_info(ithc->pci, "config: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", ++ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); ++ if (ithc_is_config_valid(ithc)) break; ++ if (retries > 10) { ++ pci_err(ithc->pci, "failed to read config, unknown device ID 0x%08x\n", ithc->config.device_id); ++ return -EIO; ++ } ++ pci_err(ithc->pci, "failed to read config, retrying\n"); ++ if (msleep_interruptible(100)) return -EINTR; ++ } ++ ithc_log_regs(ithc); + -+ /* Test fault in after marker removed */ -+ *area_dst = 1; -+ value = pagemap_read_vaddr(pagemap_fd, area_dst); -+ pagemap_check_wp(value, false); -+ /* Drop it to make pte none again */ -+ if (madvise(area_dst, page_size, MADV_DONTNEED)) -+ err("madvise(MADV_DONTNEED) failed"); ++ CHECK_RET(ithc_set_spi_config, ithc, DEVCFG_SPI_MAX_FREQ(ithc->config.spi_config), DEVCFG_SPI_MODE(ithc->config.spi_config)); ++ CHECK_RET(ithc_set_device_enabled, ithc, true); ++ ithc_log_regs(ithc); ++ return 0; ++} + -+ /* Test read-zero-page upon pte marker */ -+ wp_range(uffd, (uint64_t)area_dst, page_size, true); -+ *(volatile char *)area_dst; -+ /* Drop it to make pte none again */ -+ if (madvise(area_dst, page_size, MADV_DONTNEED)) -+ err("madvise(MADV_DONTNEED) failed"); ++int ithc_reset(struct ithc *ithc) { ++ // FIXME This should probably do devres_release_group()+ithc_start(). But because this is called during DMA ++ // processing, that would have to be done asynchronously (schedule_work()?). And with extra locking? 
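++ // A rough sketch of that deferred variant (purely illustrative; 'reset_work' and
++ // 'ithc_reset_work_fn' are hypothetical names, not part of this driver):
++ //
++ // static void ithc_reset_work_fn(struct work_struct *work) {
++ // struct ithc *ithc = container_of(work, struct ithc, reset_work);
++ // struct pci_dev *pci = ithc->pci;
++ // devres_release_group(&pci->dev, ithc_start);
++ // ithc_start(pci);
++ // }
++ //
++ // Note the hazard hinted at above: releasing the devres group frees *ithc itself
++ // (it is devm_kzalloc'ed in ithc_start), so 'pci' must be saved beforehand and the
++ // work item must not touch *ithc afterwards - hence the "extra locking?" question.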
++ pci_err(ithc->pci, "reset\n"); ++ CHECK(ithc_init_device, ithc); ++ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0); ++ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1); ++ ithc_log_regs(ithc); ++ pci_dbg(ithc->pci, "reset completed\n"); ++ return 0; +} + - static void userfaultfd_pagemap_test(unsigned int test_pgsize) - { - struct uffdio_register uffdio_register; -@@ -1462,7 +1499,7 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize) - /* Flush so it doesn't flush twice in parent/child later */ - fflush(stdout); - -- uffd_test_ctx_init(0); -+ uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED); - - if (test_pgsize > page_size) { - /* This is a thp test */ -@@ -1482,6 +1519,10 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize) - - pagemap_fd = pagemap_open(); - -+ /* Smoke test WP_UNPOPULATED first when it's still empty */ -+ if (test_pgsize == page_size) -+ userfaultfd_wp_unpopulated_test(pagemap_fd); ++static void ithc_stop(void *res) { ++ struct ithc *ithc = res; ++ pci_dbg(ithc->pci, "stopping\n"); ++ ithc_log_regs(ithc); ++ if (ithc->poll_thread) CHECK(kthread_stop, ithc->poll_thread); ++ if (ithc->irq >= 0) disable_irq(ithc->irq); ++ CHECK(ithc_set_device_enabled, ithc, false); ++ ithc_disable(ithc); ++ del_timer_sync(&ithc->activity_timer); ++ cpu_latency_qos_remove_request(&ithc->activity_qos); ++ // clear dma config ++ for(unsigned i = 0; i < 2; i++) { ++ CHECK(waitl, ithc, &ithc->regs->dma_rx[i].status, DMA_RX_STATUS_ENABLED, 0); ++ lo_hi_writeq(0, &ithc->regs->dma_rx[i].addr); ++ writeb(0, &ithc->regs->dma_rx[i].num_bufs); ++ writeb(0, &ithc->regs->dma_rx[i].num_prds); ++ } ++ lo_hi_writeq(0, &ithc->regs->dma_tx.addr); ++ writeb(0, &ithc->regs->dma_tx.num_prds); ++ ithc_log_regs(ithc); ++ pci_dbg(ithc->pci, "stopped\n"); ++} + - /* Touch the page */ - *area_dst = 1; - wp_range(uffd, (uint64_t)area_dst, test_pgsize, true); -@@ -1526,7 +1567,7 @@ static int userfaultfd_stress(void) - struct uffdio_register uffdio_register; - struct uffd_stats uffd_stats[nr_cpus]; - -- uffd_test_ctx_init(0); -+ uffd_test_ctx_init(UFFD_FEATURE_WP_UNPOPULATED); - - if (posix_memalign(&area, page_size, page_size)) - err("out of memory"); --- -2.40.0 - -From d89736ae49cf981dff598347d19170c0e8dba99d Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 17 Apr 2023 18:37:51 +0200 -Subject: [PATCH 10/12] sched - -Signed-off-by: Peter Jung ---- - arch/x86/kernel/itmt.c | 23 +-- - arch/x86/kernel/smpboot.c | 4 +- - include/linux/sched.h | 3 + - include/linux/sched/sd_flags.h | 5 +- - kernel/sched/core.c | 4 +- - kernel/sched/debug.c | 1 + - kernel/sched/fair.c | 265 ++++++++++++++++++++------------- - kernel/sched/features.h | 1 + - kernel/sched/pelt.c | 60 ++++++++ - kernel/sched/pelt.h | 42 +++++- - kernel/sched/sched.h | 23 ++- - 11 files changed, 294 insertions(+), 137 deletions(-) - -diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c -index 9ff480e94511..6510883c5e81 100644 ---- a/arch/x86/kernel/itmt.c -+++ b/arch/x86/kernel/itmt.c -@@ -174,32 +174,19 @@ int arch_asym_cpu_priority(int cpu) - - /** - * sched_set_itmt_core_prio() - Set CPU priority based on ITMT -- * @prio: Priority of cpu core -- * @core_cpu: The cpu number associated with the core -+ * @prio: Priority of @cpu -+ * @cpu: The CPU number - * - * The pstate driver will find out the max boost frequency - * and call this function to set a priority proportional -- * to the max boost frequency. CPU with higher boost -+ * to the max boost frequency. 
CPUs with higher boost - * frequency will receive higher priority. - * - * No need to rebuild sched domain after updating - * the CPU priorities. The sched domains have no - * dependency on CPU priorities. - */ --void sched_set_itmt_core_prio(int prio, int core_cpu) -+void sched_set_itmt_core_prio(int prio, int cpu) - { -- int cpu, i = 1; -- -- for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) { -- int smt_prio; -- -- /* -- * Ensure that the siblings are moved to the end -- * of the priority chain and only used when -- * all other high priority cpus are out of capacity. -- */ -- smt_prio = prio * smp_num_siblings / (i * i); -- per_cpu(sched_core_priority, cpu) = smt_prio; -- i++; -- } -+ per_cpu(sched_core_priority, cpu) = prio; - } -diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c -index 9013bb28255a..cea297d97034 100644 ---- a/arch/x86/kernel/smpboot.c -+++ b/arch/x86/kernel/smpboot.c -@@ -547,7 +547,7 @@ static int x86_core_flags(void) - #ifdef CONFIG_SCHED_SMT - static int x86_smt_flags(void) - { -- return cpu_smt_flags() | x86_sched_itmt_flags(); -+ return cpu_smt_flags(); - } - #endif - #ifdef CONFIG_SCHED_CLUSTER -@@ -578,7 +578,7 @@ static struct sched_domain_topology_level x86_hybrid_topology[] = { - #ifdef CONFIG_SCHED_MC - { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) }, - #endif -- { cpu_cpu_mask, SD_INIT_NAME(DIE) }, -+ { cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(DIE) }, - { NULL, }, - }; - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 63d242164b1a..6d398b337b0d 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -557,6 +557,9 @@ struct sched_entity { - u64 prev_sum_exec_runtime; - - u64 nr_migrations; -+ u64 prev_sleep_sum_runtime; -+ /* average duration of a task */ -+ u64 dur_avg; - - #ifdef CONFIG_FAIR_GROUP_SCHED - int depth; -diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h -index 57bde66d95f7..fad77b5172e2 100644 ---- a/include/linux/sched/sd_flags.h -+++ b/include/linux/sched/sd_flags.h -@@ -132,12 +132,9 @@ SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) - /* - * Place busy tasks earlier in the domain - * -- * SHARED_CHILD: Usually set on the SMT level. Technically could be set further -- * up, but currently assumed to be set from the base domain -- * upwards (see update_top_cache_domain()). - * NEEDS_GROUPS: Load balancing flag. 
- */ --SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) -+SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS) - - /* - * Prefer to place tasks in a sibling domain -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 0d18c3969f90..17bb9637f314 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -724,7 +724,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) - if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) - update_irq_load_avg(rq, irq_delta + steal); - #endif -- update_rq_clock_pelt(rq, delta); -+ update_rq_clock_task_mult(rq, delta); - } - - void update_rq_clock(struct rq *rq) -@@ -4434,6 +4434,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) - p->se.prev_sum_exec_runtime = 0; - p->se.nr_migrations = 0; - p->se.vruntime = 0; -+ p->se.dur_avg = 0; -+ p->se.prev_sleep_sum_runtime = 0; - INIT_LIST_HEAD(&p->se.group_node); - - #ifdef CONFIG_FAIR_GROUP_SCHED -diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index 1637b65ba07a..8d64fba16cfe 100644 ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -1024,6 +1024,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, - __PS("nr_involuntary_switches", p->nivcsw); - - P(se.load.weight); -+ P(se.dur_avg); - #ifdef CONFIG_SMP - P(se.avg.load_sum); - P(se.avg.runnable_sum); -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 96c66b50ee48..0f92281fbed9 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -1082,6 +1082,23 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) - * Scheduling class queueing methods: - */ - -+static inline bool is_core_idle(int cpu) -+{ -+#ifdef CONFIG_SCHED_SMT -+ int sibling; ++static void ithc_clear_drvdata(void *res) { ++ struct pci_dev *pci = res; ++ pci_set_drvdata(pci, NULL); ++} ++ ++static int ithc_start(struct pci_dev *pci) { ++ pci_dbg(pci, "starting\n"); ++ if (pci_get_drvdata(pci)) { ++ pci_err(pci, "device already initialized\n"); ++ return -EINVAL; ++ } ++ if (!devres_open_group(&pci->dev, ithc_start, GFP_KERNEL)) return -ENOMEM; ++ ++ struct ithc *ithc = devm_kzalloc(&pci->dev, sizeof *ithc, GFP_KERNEL); ++ if (!ithc) return -ENOMEM; ++ ithc->irq = -1; ++ ithc->pci = pci; ++ snprintf(ithc->phys, sizeof ithc->phys, "pci-%s/" DEVNAME, pci_name(pci)); ++ init_waitqueue_head(&ithc->wait_hid_parse); ++ init_waitqueue_head(&ithc->wait_hid_get_feature); ++ mutex_init(&ithc->hid_get_feature_mutex); ++ pci_set_drvdata(pci, ithc); ++ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_clear_drvdata, pci); ++ if (ithc_log_regs_enabled) ithc->prev_regs = devm_kzalloc(&pci->dev, sizeof *ithc->prev_regs, GFP_KERNEL); ++ ++ CHECK_RET(pcim_enable_device, pci); ++ pci_set_master(pci); ++ CHECK_RET(pcim_iomap_regions, pci, BIT(0), DEVNAME " regs"); ++ CHECK_RET(dma_set_mask_and_coherent, &pci->dev, DMA_BIT_MASK(64)); ++ CHECK_RET(pci_set_power_state, pci, PCI_D0); ++ ithc->regs = pcim_iomap_table(pci)[0]; ++ ++ if (!ithc_use_polling) { ++ CHECK_RET(pci_alloc_irq_vectors, pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); ++ ithc->irq = CHECK(pci_irq_vector, pci, 0); ++ if (ithc->irq < 0) return ithc->irq; ++ } ++ ++ CHECK_RET(ithc_init_device, ithc); ++ CHECK(devm_device_add_groups, &pci->dev, ithc_attribute_groups); ++ if (ithc_use_rx0) CHECK_RET(ithc_dma_rx_init, ithc, 0, ithc_use_rx1 ? DEVNAME "0" : DEVNAME); ++ if (ithc_use_rx1) CHECK_RET(ithc_dma_rx_init, ithc, 1, ithc_use_rx0 ? 
DEVNAME "1" : DEVNAME); ++ CHECK_RET(ithc_dma_tx_init, ithc); ++ ++ CHECK_RET(ithc_hid_init, ithc); ++ ++ cpu_latency_qos_add_request(&ithc->activity_qos, PM_QOS_DEFAULT_VALUE); ++ timer_setup(&ithc->activity_timer, ithc_activity_timer_callback, 0); ++ ++ // add ithc_stop callback AFTER setting up DMA buffers, so that polling/irqs/DMA are disabled BEFORE the buffers are freed ++ CHECK_RET(devm_add_action_or_reset, &pci->dev, ithc_stop, ithc); ++ ++ if (ithc_use_polling) { ++ pci_info(pci, "using polling instead of irq\n"); ++ // use a thread instead of simple timer because we want to be able to sleep ++ ithc->poll_thread = kthread_run(ithc_poll_thread, ithc, DEVNAME "poll"); ++ if (IS_ERR(ithc->poll_thread)) { ++ int err = PTR_ERR(ithc->poll_thread); ++ ithc->poll_thread = NULL; ++ return err; ++ } ++ } else { ++ CHECK_RET(devm_request_threaded_irq, &pci->dev, ithc->irq, NULL, ithc_interrupt_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DEVNAME, ithc); ++ } ++ ++ if (ithc_use_rx0) ithc_dma_rx_enable(ithc, 0); ++ if (ithc_use_rx1) ithc_dma_rx_enable(ithc, 1); ++ ++ // hid_add_device can only be called after irq/polling is started and DMA is enabled, because it calls ithc_hid_parse which reads the report descriptor via DMA ++ CHECK_RET(hid_add_device, ithc->hid); ++ ++ CHECK(ithc_debug_init, ithc); ++ ++ pci_dbg(pci, "started\n"); ++ return 0; ++} + -+ for_each_cpu(sibling, cpu_smt_mask(cpu)) { -+ if (cpu == sibling) -+ continue; ++static int ithc_probe(struct pci_dev *pci, const struct pci_device_id *id) { ++ pci_dbg(pci, "device probe\n"); ++ return ithc_start(pci); ++} + -+ if (!idle_cpu(sibling)) -+ return false; -+ } -+#endif ++static void ithc_remove(struct pci_dev *pci) { ++ pci_dbg(pci, "device remove\n"); ++ // all cleanup is handled by devres ++} + -+ return true; ++static int ithc_suspend(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm suspend\n"); ++ devres_release_group(dev, ithc_start); ++ return 0; +} + - #ifdef CONFIG_NUMA - #define NUMA_IMBALANCE_MIN 2 - -@@ -1718,23 +1735,6 @@ struct numa_stats { - int idle_cpu; - }; - --static inline bool is_core_idle(int cpu) --{ --#ifdef CONFIG_SCHED_SMT -- int sibling; -- -- for_each_cpu(sibling, cpu_smt_mask(cpu)) { -- if (cpu == sibling) -- continue; -- -- if (!idle_cpu(sibling)) -- return false; -- } --#endif -- -- return true; --} -- - struct task_numa_env { - struct task_struct *p; - -@@ -6333,6 +6333,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) - - static void set_next_buddy(struct sched_entity *se); - -+static inline void dur_avg_update(struct task_struct *p, bool task_sleep) -+{ -+ u64 dur; ++static int ithc_resume(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm resume\n"); ++ return ithc_start(pci); ++} + -+ if (!task_sleep) -+ return; ++static int ithc_freeze(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm freeze\n"); ++ devres_release_group(dev, ithc_start); ++ return 0; ++} + -+ dur = p->se.sum_exec_runtime - p->se.prev_sleep_sum_runtime; -+ p->se.prev_sleep_sum_runtime = p->se.sum_exec_runtime; -+ update_avg(&p->se.dur_avg, dur); ++static int ithc_thaw(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm thaw\n"); ++ return ithc_start(pci); +} + - /* - * The dequeue_task method is called before nr_running is - * decreased. 
We remove the task from the rbtree and -@@ -6405,6 +6417,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) - - dequeue_throttle: - util_est_update(&rq->cfs, p, task_sleep); -+ dur_avg_update(p, task_sleep); - hrtick_update(rq); - } - -@@ -6538,6 +6551,23 @@ static int wake_wide(struct task_struct *p) - return 1; - } - -+/* -+ * If a task switches in and then voluntarily relinquishes the -+ * CPU quickly, it is regarded as a short duration task. -+ * -+ * SIS_SHORT tries to wake up the short wakee on current CPU. This -+ * aims to avoid race condition among CPUs due to frequent context -+ * switch. Besides, the candidate short task should not be the one -+ * that wakes up more than one tasks, otherwise SIS_SHORT might -+ * stack too many tasks on current CPU. -+ */ -+static inline int is_short_task(struct task_struct *p) -+{ -+ return sched_feat(SIS_SHORT) && !p->wakee_flips && -+ p->se.dur_avg && -+ ((p->se.dur_avg * 8) < sysctl_sched_min_granularity); ++static int ithc_restore(struct device *dev) { ++ struct pci_dev *pci = to_pci_dev(dev); ++ pci_dbg(pci, "pm restore\n"); ++ return ithc_start(pci); +} + - /* - * The purpose of wake_affine() is to quickly determine on which CPU we can run - * soonest. For the purpose of speed we only consider the waking and previous -@@ -6574,6 +6604,11 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) - if (available_idle_cpu(prev_cpu)) - return prev_cpu; - -+ /* The only running task is a short duration one. */ -+ if (cpu_rq(this_cpu)->nr_running == 1 && -+ is_short_task(rcu_dereference(cpu_curr(this_cpu)))) -+ return this_cpu; ++static struct pci_driver ithc_driver = { ++ .name = DEVNAME, ++ .id_table = ithc_pci_tbl, ++ .probe = ithc_probe, ++ .remove = ithc_remove, ++ .driver.pm = &(const struct dev_pm_ops) { ++ .suspend = ithc_suspend, ++ .resume = ithc_resume, ++ .freeze = ithc_freeze, ++ .thaw = ithc_thaw, ++ .restore = ithc_restore, ++ }, ++ //.dev_groups = ithc_attribute_groups, // could use this (since 5.14), however the attributes won't have valid values until config has been read anyway ++}; + - return nr_cpumask_bits; ++static int __init ithc_init(void) { ++ return pci_register_driver(&ithc_driver); ++} ++ ++static void __exit ithc_exit(void) { ++ pci_unregister_driver(&ithc_driver); ++} ++ ++module_init(ithc_init); ++module_exit(ithc_exit); ++ +diff --git a/drivers/hid/ithc/ithc-regs.c b/drivers/hid/ithc/ithc-regs.c +new file mode 100644 +index 000000000000..85d567b05761 +--- /dev/null ++++ b/drivers/hid/ithc/ithc-regs.c +@@ -0,0 +1,64 @@ ++#include "ithc.h" ++ ++#define reg_num(r) (0x1fff & (u16)(__force u64)(r)) ++ ++void bitsl(__iomem u32 *reg, u32 mask, u32 val) { ++ if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask); ++ writel((readl(reg) & ~mask) | (val & mask), reg); ++} ++ ++void bitsb(__iomem u8 *reg, u8 mask, u8 val) { ++ if (val & ~mask) pr_err("register 0x%x: invalid value 0x%x for bitmask 0x%x\n", reg_num(reg), val, mask); ++ writeb((readb(reg) & ~mask) | (val & mask), reg); ++} ++ ++int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val) { ++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); ++ u32 x; ++ if (readl_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { ++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%08x val 0x%08x\n", reg_num(reg), mask, val); ++ return -ETIMEDOUT; ++ } ++ pci_dbg(ithc->pci, "done waiting\n"); ++ return 0; ++} ++ ++int 
waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val) { ++ pci_dbg(ithc->pci, "waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); ++ u8 x; ++ if (readb_poll_timeout(reg, x, (x & mask) == val, 200, 1000*1000)) { ++ pci_err(ithc->pci, "timed out waiting for reg 0x%04x mask 0x%02x val 0x%02x\n", reg_num(reg), mask, val); ++ return -ETIMEDOUT; ++ } ++ pci_dbg(ithc->pci, "done waiting\n"); ++ return 0; ++} ++ ++int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode) { ++ pci_dbg(ithc->pci, "setting SPI speed to %i, mode %i\n", speed, mode); ++ if (mode == 3) mode = 2; ++ bitsl(&ithc->regs->spi_config, ++ SPI_CONFIG_MODE(0xff) | SPI_CONFIG_SPEED(0xff) | SPI_CONFIG_UNKNOWN_18(0xff) | SPI_CONFIG_SPEED2(0xff), ++ SPI_CONFIG_MODE(mode) | SPI_CONFIG_SPEED(speed) | SPI_CONFIG_UNKNOWN_18(0) | SPI_CONFIG_SPEED2(speed)); ++ return 0; ++} ++ ++int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data) { ++ pci_dbg(ithc->pci, "SPI command %u, size %u, offset %u\n", command, size, offset); ++ if (size > sizeof ithc->regs->spi_cmd.data) return -EINVAL; ++ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ writeb(command, &ithc->regs->spi_cmd.code); ++ writew(size, &ithc->regs->spi_cmd.size); ++ writel(offset, &ithc->regs->spi_cmd.offset); ++ u32 *p = data, n = (size + 3) / 4; ++ for (u32 i = 0; i < n; i++) writel(p[i], &ithc->regs->spi_cmd.data[i]); ++ bitsb_set(&ithc->regs->spi_cmd.control, SPI_CMD_CONTROL_SEND); ++ CHECK_RET(waitl, ithc, &ithc->regs->spi_cmd.status, SPI_CMD_STATUS_BUSY, 0); ++ if ((readl(&ithc->regs->spi_cmd.status) & (SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR)) != SPI_CMD_STATUS_DONE) return -EIO; ++ if (readw(&ithc->regs->spi_cmd.size) != size) return -EMSGSIZE; ++ for (u32 i = 0; i < n; i++) p[i] = readl(&ithc->regs->spi_cmd.data[i]); ++ writel(SPI_CMD_STATUS_DONE | SPI_CMD_STATUS_ERROR, &ithc->regs->spi_cmd.status); ++ return 0; ++} ++ +diff --git a/drivers/hid/ithc/ithc-regs.h b/drivers/hid/ithc/ithc-regs.h +new file mode 100644 +index 000000000000..1a96092ed7ee +--- /dev/null ++++ b/drivers/hid/ithc/ithc-regs.h +@@ -0,0 +1,186 @@ ++#define CONTROL_QUIESCE BIT(1) ++#define CONTROL_IS_QUIESCED BIT(2) ++#define CONTROL_NRESET BIT(3) ++#define CONTROL_READY BIT(29) ++ ++#define SPI_CONFIG_MODE(x) (((x) & 3) << 2) ++#define SPI_CONFIG_SPEED(x) (((x) & 7) << 4) ++#define SPI_CONFIG_UNKNOWN_18(x) (((x) & 3) << 18) ++#define SPI_CONFIG_SPEED2(x) (((x) & 0xf) << 20) // high bit = high speed mode? ++ ++#define ERROR_CONTROL_UNKNOWN_0 BIT(0) ++#define ERROR_CONTROL_DISABLE_DMA BIT(1) // clears DMA_RX_CONTROL_ENABLE when a DMA error occurs ++#define ERROR_CONTROL_UNKNOWN_2 BIT(2) ++#define ERROR_CONTROL_UNKNOWN_3 BIT(3) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_9 BIT(9) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_10 BIT(10) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_12 BIT(12) ++#define ERROR_CONTROL_IRQ_DMA_UNKNOWN_13 BIT(13) ++#define ERROR_CONTROL_UNKNOWN_16(x) (((x) & 0xff) << 16) // spi error code irq? 
++#define ERROR_CONTROL_SET_DMA_STATUS BIT(29) // sets DMA_RX_STATUS_ERROR when a DMA error occurs ++ ++#define ERROR_STATUS_DMA BIT(28) ++#define ERROR_STATUS_SPI BIT(30) ++ ++#define ERROR_FLAG_DMA_UNKNOWN_9 BIT(9) ++#define ERROR_FLAG_DMA_UNKNOWN_10 BIT(10) ++#define ERROR_FLAG_DMA_UNKNOWN_12 BIT(12) // set when we receive a truncated DMA message ++#define ERROR_FLAG_DMA_UNKNOWN_13 BIT(13) ++#define ERROR_FLAG_SPI_BUS_TURNAROUND BIT(16) ++#define ERROR_FLAG_SPI_RESPONSE_TIMEOUT BIT(17) ++#define ERROR_FLAG_SPI_INTRA_PACKET_TIMEOUT BIT(18) ++#define ERROR_FLAG_SPI_INVALID_RESPONSE BIT(19) ++#define ERROR_FLAG_SPI_HS_RX_TIMEOUT BIT(20) ++#define ERROR_FLAG_SPI_TOUCH_IC_INIT BIT(21) ++ ++#define SPI_CMD_CONTROL_SEND BIT(0) // cleared by device when sending is complete ++#define SPI_CMD_CONTROL_IRQ BIT(1) ++ ++#define SPI_CMD_CODE_READ 4 ++#define SPI_CMD_CODE_WRITE 6 ++ ++#define SPI_CMD_STATUS_DONE BIT(0) ++#define SPI_CMD_STATUS_ERROR BIT(1) ++#define SPI_CMD_STATUS_BUSY BIT(3) ++ ++#define DMA_TX_CONTROL_SEND BIT(0) // cleared by device when sending is complete ++#define DMA_TX_CONTROL_IRQ BIT(3) ++ ++#define DMA_TX_STATUS_DONE BIT(0) ++#define DMA_TX_STATUS_ERROR BIT(1) ++#define DMA_TX_STATUS_UNKNOWN_2 BIT(2) ++#define DMA_TX_STATUS_UNKNOWN_3 BIT(3) // busy? ++ ++#define DMA_RX_CONTROL_ENABLE BIT(0) ++#define DMA_RX_CONTROL_IRQ_UNKNOWN_1 BIT(1) // rx1 only? ++#define DMA_RX_CONTROL_IRQ_ERROR BIT(3) // rx1 only? ++#define DMA_RX_CONTROL_IRQ_UNKNOWN_4 BIT(4) // rx0 only? ++#define DMA_RX_CONTROL_IRQ_DATA BIT(5) ++ ++#define DMA_RX_CONTROL2_UNKNOWN_5 BIT(5) // rx0 only? ++#define DMA_RX_CONTROL2_RESET BIT(7) // resets ringbuffer indices ++ ++#define DMA_RX_WRAP_FLAG BIT(7) ++ ++#define DMA_RX_STATUS_ERROR BIT(3) ++#define DMA_RX_STATUS_UNKNOWN_4 BIT(4) // set in rx0 after using CONTROL_NRESET when it becomes possible to read config (can take >100ms) ++#define DMA_RX_STATUS_HAVE_DATA BIT(5) ++#define DMA_RX_STATUS_ENABLED BIT(8) ++ ++#define COUNTER_RESET BIT(31) ++ ++struct ithc_registers { ++ /* 0000 */ u32 _unknown_0000[1024]; ++ /* 1000 */ u32 _unknown_1000; ++ /* 1004 */ u32 _unknown_1004; ++ /* 1008 */ u32 control_bits; ++ /* 100c */ u32 _unknown_100c; ++ /* 1010 */ u32 spi_config; ++ /* 1014 */ u32 _unknown_1014[3]; ++ /* 1020 */ u32 error_control; ++ /* 1024 */ u32 error_status; // write to clear ++ /* 1028 */ u32 error_flags; // write to clear ++ /* 102c */ u32 _unknown_102c[5]; ++ struct { ++ /* 1040 */ u8 control; ++ /* 1041 */ u8 code; ++ /* 1042 */ u16 size; ++ /* 1044 */ u32 status; // write to clear ++ /* 1048 */ u32 offset; ++ /* 104c */ u32 data[16]; ++ /* 108c */ u32 _unknown_108c; ++ } spi_cmd; ++ struct { ++ /* 1090 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() ++ /* 1098 */ u8 control; ++ /* 1099 */ u8 _unknown_1099; ++ /* 109a */ u8 _unknown_109a; ++ /* 109b */ u8 num_prds; ++ /* 109c */ u32 status; // write to clear ++ } dma_tx; ++ /* 10a0 */ u32 _unknown_10a0[7]; ++ /* 10bc */ u32 state; // is 0xe0000402 (dev config val 0) after CONTROL_NRESET, 0xe0000461 after first touch, 0xe0000401 after DMA_RX_CODE_RESET ++ /* 10c0 */ u32 _unknown_10c0[8]; ++ /* 10e0 */ u32 _unknown_10e0_counters[3]; ++ /* 10ec */ u32 _unknown_10ec[5]; ++ struct { ++ /* 1100/1200 */ u64 addr; // cannot be written with writeq(), must use lo_hi_writeq() ++ /* 1108/1208 */ u8 num_bufs; ++ /* 1109/1209 */ u8 num_prds; ++ /* 110a/120a */ u16 _unknown_110a; ++ /* 110c/120c */ u8 control; ++ /* 110d/120d */ u8 head; ++ /* 110e/120e */ u8 tail; ++ /* 110f/120f */ u8 control2; 
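++ // Annotation (assumption, not confirmed by hardware docs): head/tail above appear
++ // to be the ring indices over the NUM_RX_BUF receive buffers; DMA_RX_CONTROL2_RESET
++ // resets them, and DMA_RX_WRAP_FLAG (bit 7) presumably distinguishes a full ring
++ // from an empty one when the low index bits match. See the RX path in ithc-dma.c.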
++ /* 1110/1210 */ u32 status; // write to clear ++ /* 1114/1214 */ u32 _unknown_1114; ++ /* 1118/1218 */ u64 _unknown_1118_guc_addr; ++ /* 1120/1220 */ u32 _unknown_1120_guc; ++ /* 1124/1224 */ u32 _unknown_1124_guc; ++ /* 1128/1228 */ u32 unknown_init_bits; // bit 2 = guc related, bit 3 = rx1 related, bit 4 = guc related ++ /* 112c/122c */ u32 _unknown_112c; ++ /* 1130/1230 */ u64 _unknown_1130_guc_addr; ++ /* 1138/1238 */ u32 _unknown_1138_guc; ++ /* 113c/123c */ u32 _unknown_113c; ++ /* 1140/1240 */ u32 _unknown_1140_guc; ++ /* 1144/1244 */ u32 _unknown_1144[23]; ++ /* 11a0/12a0 */ u32 _unknown_11a0_counters[6]; ++ /* 11b8/12b8 */ u32 _unknown_11b8[18]; ++ } dma_rx[2]; ++}; ++static_assert(sizeof(struct ithc_registers) == 0x1300); ++ ++#define DEVCFG_DMA_RX_SIZE(x) ((((x) & 0x3fff) + 1) << 6) ++#define DEVCFG_DMA_TX_SIZE(x) (((((x) >> 14) & 0x3ff) + 1) << 6) ++ ++#define DEVCFG_TOUCH_MASK 0x3f ++#define DEVCFG_TOUCH_ENABLE BIT(0) ++#define DEVCFG_TOUCH_UNKNOWN_1 BIT(1) ++#define DEVCFG_TOUCH_UNKNOWN_2 BIT(2) ++#define DEVCFG_TOUCH_UNKNOWN_3 BIT(3) ++#define DEVCFG_TOUCH_UNKNOWN_4 BIT(4) ++#define DEVCFG_TOUCH_UNKNOWN_5 BIT(5) ++#define DEVCFG_TOUCH_UNKNOWN_6 BIT(6) ++ ++#define DEVCFG_DEVICE_ID_TIC 0x43495424 // "$TIC" ++ ++#define DEVCFG_SPI_MAX_FREQ(x) (((x) >> 1) & 0xf) // high bit = use high speed mode? ++#define DEVCFG_SPI_MODE(x) (((x) >> 6) & 3) ++#define DEVCFG_SPI_UNKNOWN_8(x) (((x) >> 8) & 0x3f) ++#define DEVCFG_SPI_NEEDS_HEARTBEAT BIT(20) ++#define DEVCFG_SPI_HEARTBEAT_INTERVAL (((x) >> 21) & 7) ++#define DEVCFG_SPI_UNKNOWN_25 BIT(25) ++#define DEVCFG_SPI_UNKNOWN_26 BIT(26) ++#define DEVCFG_SPI_UNKNOWN_27 BIT(27) ++#define DEVCFG_SPI_DELAY (((x) >> 28) & 7) ++#define DEVCFG_SPI_USE_EXT_READ_CFG BIT(31) ++ ++struct ithc_device_config { ++ u32 _unknown_00; // 00 = 0xe0000402 (0xe0000401 after DMA_RX_CODE_RESET) ++ u32 _unknown_04; // 04 = 0x00000000 ++ u32 dma_buf_sizes; // 08 = 0x000a00ff ++ u32 touch_cfg; // 0c = 0x0000001c ++ u32 _unknown_10; // 10 = 0x0000001c ++ u32 device_id; // 14 = 0x43495424 = "$TIC" ++ u32 spi_config; // 18 = 0xfda00a2e ++ u16 vendor_id; // 1c = 0x045e = Microsoft Corp. ++ u16 product_id; // 1e = 0x0c1a ++ u32 revision; // 20 = 0x00000001 ++ u32 fw_version; // 24 = 0x05008a8b = 5.0.138.139 ++ u32 _unknown_28; // 28 = 0x00000000 ++ u32 fw_mode; // 2c = 0x00000000 ++ u32 _unknown_30; // 30 = 0x00000000 ++ u32 _unknown_34; // 34 = 0x0404035e (u8,u8,u8,u8 = version?) 
++ u32 _unknown_38; // 38 = 0x000001c0 (0x000001c1 after DMA_RX_CODE_RESET) ++ u32 _unknown_3c; // 3c = 0x00000002 ++}; ++ ++void bitsl(__iomem u32 *reg, u32 mask, u32 val); ++void bitsb(__iomem u8 *reg, u8 mask, u8 val); ++#define bitsl_set(reg, x) bitsl(reg, x, x) ++#define bitsb_set(reg, x) bitsb(reg, x, x) ++int waitl(struct ithc *ithc, __iomem u32 *reg, u32 mask, u32 val); ++int waitb(struct ithc *ithc, __iomem u8 *reg, u8 mask, u8 val); ++int ithc_set_spi_config(struct ithc *ithc, u8 speed, u8 mode); ++int ithc_spi_command(struct ithc *ithc, u8 command, u32 offset, u32 size, void *data); ++ +diff --git a/drivers/hid/ithc/ithc.h b/drivers/hid/ithc/ithc.h +new file mode 100644 +index 000000000000..6a9b0d480bc1 +--- /dev/null ++++ b/drivers/hid/ithc/ithc.h +@@ -0,0 +1,60 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEVNAME "ithc" ++#define DEVFULLNAME "Intel Touch Host Controller" ++ ++#undef pr_fmt ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#define CHECK(fn, ...) ({ int r = fn(__VA_ARGS__); if (r < 0) pci_err(ithc->pci, "%s: %s failed with %i\n", __func__, #fn, r); r; }) ++#define CHECK_RET(...) do { int r = CHECK(__VA_ARGS__); if (r < 0) return r; } while(0) ++ ++#define NUM_RX_BUF 16 ++ ++struct ithc; ++ ++#include "ithc-regs.h" ++#include "ithc-dma.h" ++ ++struct ithc { ++ char phys[32]; ++ struct pci_dev *pci; ++ int irq; ++ struct task_struct *poll_thread; ++ struct pm_qos_request activity_qos; ++ struct timer_list activity_timer; ++ ++ struct hid_device *hid; ++ bool hid_parse_done; ++ wait_queue_head_t wait_hid_parse; ++ wait_queue_head_t wait_hid_get_feature; ++ struct mutex hid_get_feature_mutex; ++ void *hid_get_feature_buf; ++ size_t hid_get_feature_size; ++ ++ struct ithc_registers __iomem *regs; ++ struct ithc_registers *prev_regs; // for debugging ++ struct ithc_device_config config; ++ struct ithc_dma_rx dma_rx[2]; ++ struct ithc_dma_tx dma_tx; ++}; ++ ++int ithc_reset(struct ithc *ithc); ++void ithc_set_active(struct ithc *ithc); ++int ithc_debug_init(struct ithc *ithc); ++void ithc_log_regs(struct ithc *ithc); ++ +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c +index d6037a328669..a290ebc77aea 100644 +--- a/drivers/i2c/i2c-core-acpi.c ++++ b/drivers/i2c/i2c-core-acpi.c +@@ -628,6 +628,28 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, + return (ret == 1) ? 0 : -EIO; } -@@ -6948,6 +6983,20 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool - /* overloaded LLC is unlikely to have idle cpu/core */ - if (nr == 1) - return -1; ++static int acpi_gsb_i2c_write_raw_bytes(struct i2c_client *client, ++ u8 *data, u8 data_len) ++{ ++ struct i2c_msg msgs[1]; ++ int ret = AE_OK; + -+ /* -+ * If the scan number suggested by SIS_UTIL is smaller -+ * than 60% of llc_weight, it indicates a util_avg% higher -+ * than 50%. System busier than this could lower its bar to -+ * choose a compromised "idle" CPU. This co-exists with -+ * !has_idle_core to not stack too many tasks on one CPU. 
-+ */ -+ if (!has_idle_core && this == target && -+ (5 * nr < 3 * sd->span_weight) && -+ cpu_rq(target)->nr_running <= 1 && -+ is_short_task(p) && -+ is_short_task(rcu_dereference(cpu_curr(target)))) -+ return target; ++ msgs[0].addr = client->addr; ++ msgs[0].flags = client->flags; ++ msgs[0].len = data_len + 1; ++ msgs[0].buf = data; ++ ++ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); ++ ++ if (ret < 0) { ++ dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret); ++ return ret; ++ } ++ ++ /* 1 transfer must have completed successfully */ ++ return (ret == 1) ? 0 : -EIO; ++} ++ + static acpi_status + i2c_acpi_space_handler(u32 function, acpi_physical_address command, + u32 bits, u64 *value64, +@@ -729,6 +751,19 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command, } - } - -@@ -9288,96 +9337,65 @@ group_type group_classify(unsigned int imbalance_pct, - } + break; - /** -- * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks -- * @dst_cpu: Destination CPU of the load balancing -- * @sds: Load-balancing data with statistics of the local group -- * @sgs: Load-balancing statistics of the candidate busiest group -- * @sg: The candidate busiest group -- * -- * Check the state of the SMT siblings of both @sds::local and @sg and decide -- * if @dst_cpu can pull tasks. -+ * sched_use_asym_prio - Check whether asym_packing priority must be used -+ * @sd: The scheduling domain of the load balancing -+ * @cpu: A CPU - * -- * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of -- * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks -- * only if @dst_cpu has higher priority. -+ * Always use CPU priority when balancing load between SMT siblings. When -+ * balancing load between cores, it is not sufficient that @cpu is idle. Only -+ * use CPU priority if the whole core is idle. - * -- * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more -- * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority. -- * Bigger imbalances in the number of busy CPUs will be dealt with in -- * update_sd_pick_busiest(). -- * -- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings -- * of @dst_cpu are idle and @sg has lower priority. -- * -- * Return: true if @dst_cpu can pull tasks, false otherwise. -+ * Returns: True if the priority of @cpu must be followed. False otherwise. ++ case ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES: ++ if (action == ACPI_READ) { ++ dev_warn(&adapter->dev, ++ "protocol 0x%02x not supported for client 0x%02x\n", ++ accessor_type, client->addr); ++ ret = AE_BAD_PARAMETER; ++ goto err; ++ } else { ++ status = acpi_gsb_i2c_write_raw_bytes(client, ++ gsb->data, info->access_length); ++ } ++ break; ++ + default: + dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", + accessor_type, client->addr); +diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c +index 09489380afda..0f02411a60f1 100644 +--- a/drivers/input/misc/soc_button_array.c ++++ b/drivers/input/misc/soc_button_array.c +@@ -507,8 +507,8 @@ static const struct soc_device_data soc_device_MSHW0028 = { + * Both, the Surface Pro 4 (surfacepro3_button.c) and the above mentioned + * devices use MSHW0040 for power and volume buttons, however the way they + * have to be addressed differs. 
Make sure that we only load this driver
- * for the correct devices by checking the OEM Platform Revision provided by
- * the _DSM method.
+ * for the correct devices by checking if the OEM Platform Revision DSM call
+ * exists.
 */
 #define MSHW0040_DSM_REVISION 0x01
 #define MSHW0040_DSM_GET_OMPR 0x02 // get OEM Platform Revision
@@ -519,31 +519,14 @@ static const guid_t MSHW0040_DSM_UUID =
 static int soc_device_check_MSHW0040(struct device *dev)
 {
 acpi_handle handle = ACPI_HANDLE(dev);
- union acpi_object *result;
- u64 oem_platform_rev = 0; // valid revisions are nonzero
-
- // get OEM platform revision
- result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
- MSHW0040_DSM_REVISION,
- MSHW0040_DSM_GET_OMPR, NULL,
- ACPI_TYPE_INTEGER);
-
- if (result) {
- oem_platform_rev = result->integer.value;
- ACPI_FREE(result);
- }
-
- /*
- * If the revision is zero here, the _DSM evaluation has failed. This
- * indicates that we have a Pro 4 or Book 1 and this driver should not
- * be used.
- */
- if (oem_platform_rev == 0)
- return -ENODEV;
+ bool exists;

- dev_dbg(dev, "OEM Platform Revision %llu\n", oem_platform_rev);
+ // check if OEM platform revision DSM call exists
+ exists = acpi_check_dsm(handle, &MSHW0040_DSM_UUID,
+ MSHW0040_DSM_REVISION,
+ BIT(MSHW0040_DSM_GET_OMPR));

- return 0;
+ return exists ? 
0 : -ENODEV; } -+/** -+ * sched_asym - Check if the destination CPU can do asym_packing load balance -+ * @env: The load balancing environment -+ * @sds: Load-balancing data with statistics of the local group -+ * @sgs: Load-balancing statistics of the candidate busiest group -+ * @group: The candidate busiest group -+ * -+ * @env::dst_cpu can do asym_packing if it has higher priority than the -+ * preferred CPU of @group. -+ * -+ * SMT is a special case. If we are balancing load between cores, @env::dst_cpu -+ * can do asym_packing balance only if all its SMT siblings are idle. Also, it -+ * can only do it if @group is an SMT group and has exactly on busy CPU. Larger -+ * imbalances in the number of CPUS are dealt with in find_busiest_group(). -+ * -+ * If we are balancing load within an SMT core, or at DIE domain level, always -+ * proceed. -+ * -+ * Return: true if @env::dst_cpu can do with asym_packing load balance. False -+ * otherwise. -+ */ - static inline bool - sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, - struct sched_group *group) - { -- /* Only do SMT checks if either local or candidate have SMT siblings */ -- if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || -- (group->flags & SD_SHARE_CPUCAPACITY)) -- return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); -+ /* Ensure that the whole local core is idle, if applicable. */ -+ if (!sched_use_asym_prio(env->sd, env->dst_cpu)) -+ return false; + /* +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 7c2f4bd33582..3ebd2260cdab 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -37,6 +37,8 @@ + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) + #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) + #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) ++#define IS_IPTS(pdev) ((pdev)->vendor == PCI_VENDOR_ID_INTEL && \ ++ ((pdev)->device == 0x9d3e)) + #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) + + #define IOAPIC_RANGE_START (0xfee00000) +@@ -287,12 +289,14 @@ int intel_iommu_enabled = 0; + EXPORT_SYMBOL_GPL(intel_iommu_enabled); + + static int dmar_map_gfx = 1; ++static int dmar_map_ipts = 1; + static int intel_iommu_superpage = 1; + static int iommu_identity_mapping; + static int iommu_skip_te_disable; + + #define IDENTMAP_GFX 2 + #define IDENTMAP_AZALIA 4 ++#define IDENTMAP_IPTS 16 + + const struct iommu_ops intel_iommu_ops; + +@@ -2588,6 +2592,9 @@ static int device_def_domain_type(struct device *dev) + + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) + return IOMMU_DOMAIN_IDENTITY; ++ ++ if ((iommu_identity_mapping & IDENTMAP_IPTS) && IS_IPTS(pdev)) ++ return IOMMU_DOMAIN_IDENTITY; + } + + return 0; +@@ -2977,6 +2984,9 @@ static int __init init_dmars(void) + if (!dmar_map_gfx) + iommu_identity_mapping |= IDENTMAP_GFX; + ++ if (!dmar_map_ipts) ++ iommu_identity_mapping |= IDENTMAP_IPTS; + -+ /* -+ * CPU priorities does not make sense for SMT cores with more than one -+ * busy sibling. 
-+ */ -+ if (group->flags & SD_SHARE_CPUCAPACITY) { -+ if (sgs->group_weight - sgs->idle_cpus != 1) -+ return false; -+ } + check_tylersburg_isoch(); - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); + ret = si_domain_init(hw_pass_through); +@@ -4819,6 +4829,17 @@ static void quirk_iommu_igfx(struct pci_dev *dev) + dmar_map_gfx = 0; } -@@ -9567,10 +9585,22 @@ static bool update_sd_pick_busiest(struct lb_env *env, - * contention when accessing shared HW resources. - * - * XXX for now avg_load is not computed and always 0 so we -- * select the 1st one. -+ * select the 1st one, except if @sg is composed of SMT -+ * siblings. - */ -- if (sgs->avg_load <= busiest->avg_load) -+ -+ if (sgs->avg_load < busiest->avg_load) - return false; + ++static void quirk_iommu_ipts(struct pci_dev *dev) ++{ ++ if (!IS_IPTS(dev)) ++ return; + -+ if (sgs->avg_load == busiest->avg_load) { -+ /* -+ * SMT sched groups need more help than non-SMT groups. -+ * If @sg happens to also be SMT, either choice is good. -+ */ -+ if (sds->busiest->flags & SD_SHARE_CPUCAPACITY) -+ return false; -+ } ++ if (risky_device(dev)) ++ return; + - break; - - case group_has_spare: -@@ -10045,7 +10075,6 @@ static void update_idle_cpu_scan(struct lb_env *env, ++ pci_info(dev, "Passthrough IOMMU for IPTS\n"); ++ dmar_map_ipts = 0; ++} + /* G4x/GM45 integrated gfx dmar support is totally busted. */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx); +@@ -4854,6 +4875,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); - static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) ++/* disable IPTS dmar support */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9D3E, quirk_iommu_ipts); ++ + static void quirk_iommu_rwbf(struct pci_dev *dev) { -- struct sched_domain *child = env->sd->child; - struct sched_group *sg = env->sd->groups; - struct sg_lb_stats *local = &sds->local_stat; - struct sg_lb_stats tmp_sgs; -@@ -10086,8 +10115,13 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd - sg = sg->next; - } while (sg != env->sd->groups); + if (risky_device(dev)) +diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c +index df9e261af0b5..bc2a0aefedf2 100644 +--- a/drivers/iommu/intel/irq_remapping.c ++++ b/drivers/iommu/intel/irq_remapping.c +@@ -390,6 +390,22 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev) + data.busmatch_count = 0; + pci_for_each_dma_alias(dev, set_msi_sid_cb, &data); -- /* Tag domain that child domain prefers tasks go to siblings first */ -- sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; + /* -+ * Indicate that the child domain of the busiest group prefers tasks -+ * go to a child's sibling domains first. NB the flags of a sched group -+ * are those of the child domain. ++ * The Intel Touch Host Controller is at 00:10.6, but for some reason ++ * the MSI interrupts have request id 01:05.0. ++ * Disable id verification to work around this. ++ * FIXME Find proper fix or turn this into a quirk. 
+ */ -+ if (sds->busiest) -+ sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING); ++ if (dev->vendor == PCI_VENDOR_ID_INTEL && (dev->class >> 8) == PCI_CLASS_INPUT_PEN) { ++ switch(dev->device) { ++ case 0x98d0: case 0x98d1: // LKF ++ case 0xa0d0: case 0xa0d1: // TGL LP ++ case 0x43d0: case 0x43d1: // TGL H ++ set_irte_sid(irte, SVT_NO_VERIFY, SQ_ALL_16, 0); ++ return 0; ++ } ++ } ++ + /* + * DMA alias provides us with a PCI device and alias. The only case + * where the it will return an alias on a different bus than the +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index bdc65d50b945..08723c01d727 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -92,6 +92,7 @@ + #define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ + + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ ++#define MEI_DEV_ID_ICP_LP_3 0x34E4 /* Ice Lake Point LP 3 (iTouch) */ + #define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */ + + #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 5bf0d50d55a0..c13864512229 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -97,6 +97,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP_3, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c +index 5eb131ab916f..67f074a126d1 100644 +--- a/drivers/net/wireless/ath/ath10k/core.c ++++ b/drivers/net/wireless/ath/ath10k/core.c +@@ -38,6 +38,9 @@ static bool fw_diag_log; + /* frame mode values are mapped as per enum ath10k_hw_txrx_mode */ + unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + ++static char *override_board = ""; ++static char *override_board2 = ""; ++ + unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | + BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); + +@@ -50,6 +53,9 @@ module_param(fw_diag_log, bool, 0644); + module_param_named(frame_mode, ath10k_frame_mode, uint, 0644); + module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); + ++module_param(override_board, charp, 0644); ++module_param(override_board2, charp, 0644); ++ + MODULE_PARM_DESC(debug_mask, "Debugging mask"); + MODULE_PARM_DESC(uart_print, "Uart target debugging"); + MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); +@@ -59,6 +65,9 @@ MODULE_PARM_DESC(frame_mode, + MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); + MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging"); + ++MODULE_PARM_DESC(override_board, "Override for board.bin file"); ++MODULE_PARM_DESC(override_board2, "Override for board-2.bin file"); ++ + static const struct ath10k_hw_params ath10k_hw_params_list[] = { + { + .id = QCA988X_HW_2_0_VERSION, +@@ -911,6 +920,42 @@ static int ath10k_init_configure_target(struct ath10k *ar) + return 0; + } ++static const char *ath10k_override_board_fw_file(struct ath10k *ar, ++ const char *file) ++{ ++ if (strcmp(file, "board.bin") == 0) { ++ if (strcmp(override_board, "") == 0) ++ return file; ++ ++ if (strcmp(override_board, "none") == 0) { ++ dev_info(ar->dev, "firmware override: pretending 'board.bin' does not exist\n"); ++ return 
NULL; ++ } ++ ++ dev_info(ar->dev, "firmware override: replacing 'board.bin' with '%s'\n", ++ override_board); ++ ++ return override_board; ++ } ++ ++ if (strcmp(file, "board-2.bin") == 0) { ++ if (strcmp(override_board2, "") == 0) ++ return file; ++ ++ if (strcmp(override_board2, "none") == 0) { ++ dev_info(ar->dev, "firmware override: pretending 'board-2.bin' does not exist\n"); ++ return NULL; ++ } ++ ++ dev_info(ar->dev, "firmware override: replacing 'board-2.bin' with '%s'\n", ++ override_board2); ++ ++ return override_board2; ++ } ++ ++ return file; ++} ++ + static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, + const char *dir, + const char *file) +@@ -925,6 +970,19 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, + if (dir == NULL) + dir = "."; - if (env->sd->flags & SD_NUMA) -@@ -10397,7 +10431,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env) - goto out_balanced; ++ /* HACK: Override board.bin and board-2.bin files if specified. ++ * ++ * Some Surface devices perform better with a different board ++ * configuration. To this end, one would need to replace the board.bin ++ * file with the modified config and remove the board-2.bin file. ++ * Unfortunately, that's not a solution that we can easily package. So ++ * we add module options to perform these overrides here. ++ */ ++ ++ file = ath10k_override_board_fw_file(ar, file); ++ if (!file) ++ return ERR_PTR(-ENOENT); ++ + snprintf(filename, sizeof(filename), "%s/%s", dir, file); + ret = firmware_request_nowarn(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c +index 9a698a16a8f3..5e1a341f63df 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c +@@ -368,6 +368,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct pcie_service_card *card; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + + pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", +@@ -409,6 +410,12 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + return -1; } -- /* Try to move all excess tasks to child's sibling domain */ -+ /* -+ * Try to move all excess tasks to a sibling domain of the busiest -+ * group's child domain. ++ /* disable bridge_d3 for Surface gen4+ devices to fix fw crashing ++ * after suspend + */ - if (sds.prefer_sibling && local->group_type == group_has_spare && - busiest->sum_nr_running > local->sum_nr_running + 1) - goto force_balance; -@@ -10499,8 +10536,15 @@ static struct rq *find_busiest_queue(struct lb_env *env, - nr_running == 1) - continue; ++ if (card->quirks & QUIRK_NO_BRIDGE_D3) ++ parent_pdev->bridge_d3 = false; ++ + return 0; + } -- /* Make sure we only pull tasks from a CPU of lower priority */ -+ /* -+ * Make sure we only pull tasks from a CPU of lower priority -+ * when balancing between SMT siblings. -+ * -+ * If balancing between cores, let lower priority CPUs help -+ * SMT cores with more than one busy sibling. 
-+ */ - if ((env->sd->flags & SD_ASYM_PACKING) && -+ sched_use_asym_prio(env->sd, i) && - sched_asym_prefer(i, env->dst_cpu) && - nr_running == 1) - continue; -@@ -10589,12 +10633,19 @@ static inline bool - asym_active_balance(struct lb_env *env) +@@ -1762,9 +1769,21 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) + static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) { - /* -- * ASYM_PACKING needs to force migrate tasks from busy but -- * lower priority CPUs in order to pack all tasks in the -- * highest priority CPUs. -+ * ASYM_PACKING needs to force migrate tasks from busy but lower -+ * priority CPUs in order to pack all tasks in the highest priority -+ * CPUs. When done between cores, do it only if the whole core if the -+ * whole core is idle. -+ * -+ * If @env::src_cpu is an SMT core with busy siblings, let -+ * the lower priority @env::dst_cpu help it. Do not follow -+ * CPU priority. - */ - return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && -- sched_asym_prefer(env->dst_cpu, env->src_cpu); -+ sched_use_asym_prio(env->sd, env->dst_cpu) && -+ (sched_asym_prefer(env->dst_cpu, env->src_cpu) || -+ !sched_use_asym_prio(env->sd, env->src_cpu)); - } + struct pcie_service_card *card = adapter->card; ++ struct pci_dev *pdev = card->dev; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; - static inline bool -@@ -11328,9 +11379,13 @@ static void nohz_balancer_kick(struct rq *rq) - * When ASYM_PACKING; see if there's a more preferred CPU - * currently idle; in which case, kick the ILB to move tasks - * around. -+ * -+ * When balancing betwen cores, all the SMT siblings of the -+ * preferred CPU must be idle. - */ - for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { -- if (sched_asym_prefer(i, cpu)) { -+ if (sched_use_asym_prio(sd, i) && -+ sched_asym_prefer(i, cpu)) { - flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; - goto unlock; - } -diff --git a/kernel/sched/features.h b/kernel/sched/features.h -index ee7f23c76bd3..efdc29c42161 100644 ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -62,6 +62,7 @@ SCHED_FEAT(TTWU_QUEUE, true) - */ - SCHED_FEAT(SIS_PROP, false) - SCHED_FEAT(SIS_UTIL, true) -+SCHED_FEAT(SIS_SHORT, true) ++ /* Trigger a function level reset of the PCI bridge device, this makes ++ * the firmware of PCIe 88W8897 cards stop reporting a fixed LTR value ++ * that prevents the system from entering package C10 and S0ix powersaving ++ * states. ++ * We need to do it here because it must happen after firmware ++ * initialization and this function is called after that is done. 
++ */ ++ if (card->quirks & QUIRK_DO_FLR_ON_BRIDGE) ++ pci_reset_function(parent_pdev); ++ + /* Write the RX ring read pointer in to reg->rx_rdptr */ + if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | + tx_wrap)) { +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c +index dd6d21f1dbfd..99b024ecbade 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c +@@ -13,7 +13,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Pro 5", +@@ -22,7 +24,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Pro 5 (LTE)", +@@ -31,7 +35,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Pro 6", +@@ -39,7 +45,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Book 1", +@@ -47,7 +55,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Book 2", +@@ -55,7 +65,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Laptop 1", +@@ -63,7 +75,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), + }, + { + .ident = "Surface Laptop 2", +@@ -71,7 +85,9 @@ static const struct dmi_system_id mwifiex_quirk_table[] = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"), + }, +- .driver_data = (void *)QUIRK_FW_RST_D3COLD, ++ .driver_data = (void *)(QUIRK_FW_RST_D3COLD | ++ QUIRK_DO_FLR_ON_BRIDGE | ++ QUIRK_NO_BRIDGE_D3), 
+ },
+ {}
+ };
+@@ -89,6 +105,11 @@ void mwifiex_initialize_quirks(struct pcie_service_card *card)
+ dev_info(&pdev->dev, "no quirks enabled\n");
+ if (card->quirks & QUIRK_FW_RST_D3COLD)
+ dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
++ if (card->quirks & QUIRK_DO_FLR_ON_BRIDGE)
++ dev_info(&pdev->dev, "quirk do_flr_on_bridge enabled\n");
++ if (card->quirks & QUIRK_NO_BRIDGE_D3)
++ dev_info(&pdev->dev,
++ "quirk no_bridge_d3 enabled\n");
+ }
+
+ static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+index d6ff964aec5b..c14eb56eb911 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
++++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h
+@@ -4,6 +4,8 @@
+ #include "pcie.h"
+
+ #define QUIRK_FW_RST_D3COLD BIT(0)
++#define QUIRK_DO_FLR_ON_BRIDGE BIT(1)
++#define QUIRK_NO_BRIDGE_D3 BIT(2)
+
+ void mwifiex_initialize_quirks(struct pcie_service_card *card);
+ int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 57ddcc59af30..497cbadd2c6c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -507,6 +507,9 @@ static void pci_device_shutdown(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
++ if (pci_dev->no_shutdown)
++ return;
++
+ pm_runtime_resume(dev);
+
+ if (drv && drv->shutdown)
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 25edf55de985..6ab563cc58f6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6124,3 +6124,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
++
++static const struct dmi_system_id no_shutdown_dmi_table[] = {
++ /*
++ * Systems on which some devices should not be touched during shutdown.
++ */ ++ { ++ .ident = "Microsoft Surface Pro 9", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Pro 9"), ++ }, ++ }, ++ { ++ .ident = "Microsoft Surface Laptop 5", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 5"), ++ }, ++ }, ++ {} ++}; ++ ++static void quirk_no_shutdown(struct pci_dev *dev) ++{ ++ if (!dmi_check_system(no_shutdown_dmi_table)) ++ return; ++ ++ dev->no_shutdown = 1; ++ pci_info(dev, "disabling shutdown ops for [%04x:%04x]\n", ++ dev->vendor, dev->device); ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x461e, quirk_no_shutdown); // Thunderbolt 4 USB Controller ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x461f, quirk_no_shutdown); // Thunderbolt 4 PCI Express Root Port ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x462f, quirk_no_shutdown); // Thunderbolt 4 PCI Express Root Port ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x466d, quirk_no_shutdown); // Thunderbolt 4 NHI ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x46a8, quirk_no_shutdown); // GPU +diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig +index b629e82af97c..68656e8f309e 100644 +--- a/drivers/platform/surface/Kconfig ++++ b/drivers/platform/surface/Kconfig +@@ -149,6 +149,13 @@ config SURFACE_AGGREGATOR_TABLET_SWITCH + Select M or Y here, if you want to provide tablet-mode switch input + events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio. + ++config SURFACE_BOOK1_DGPU_SWITCH ++ tristate "Surface Book 1 dGPU Switch Driver" ++ depends on SYSFS ++ help ++ This driver provides a sysfs switch to set the power-state of the ++ discrete GPU found on the Microsoft Surface Book 1. ++ + config SURFACE_DTX + tristate "Surface DTX (Detachment System) Driver" + depends on SURFACE_AGGREGATOR +diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile +index 53344330939b..7efcd0cdb532 100644 +--- a/drivers/platform/surface/Makefile ++++ b/drivers/platform/surface/Makefile +@@ -12,6 +12,7 @@ obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o + obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o + obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o + obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o ++obj-$(CONFIG_SURFACE_BOOK1_DGPU_SWITCH) += surfacebook1_dgpu_switch.o + obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o + obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o + obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o +diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c +index ca4602bcc7de..490b9731068a 100644 +--- a/drivers/platform/surface/surface3-wmi.c ++++ b/drivers/platform/surface/surface3-wmi.c +@@ -37,6 +37,13 @@ static const struct dmi_system_id surface3_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), ++ DMI_MATCH(DMI_SYS_VENDOR, "OEMB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"), ++ }, ++ }, #endif + { } + }; +diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c +index c219b840d491..69c4352e8406 100644 +--- a/drivers/platform/surface/surface_gpe.c ++++ b/drivers/platform/surface/surface_gpe.c +@@ -41,6 +41,11 @@ static const struct property_entry lid_device_props_l4F[] = { + {}, + }; + ++static const struct property_entry lid_device_props_l52[] = 
{ ++ PROPERTY_ENTRY_U32("gpe", 0x52), ++ {}, ++}; + -+__read_mostly unsigned int sched_pelt_lshift; + static const struct property_entry lid_device_props_l57[] = { + PROPERTY_ENTRY_U32("gpe", 0x57), + {}, +@@ -107,6 +112,18 @@ static const struct dmi_system_id dmi_lid_device_table[] = { + }, + .driver_data = (void *)lid_device_props_l4B, + }, ++ { ++ /* ++ * We match for SKU here due to product name clash with the ARM ++ * version. ++ */ ++ .ident = "Surface Pro 9", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_9_2038"), ++ }, ++ .driver_data = (void *)lid_device_props_l52, ++ }, + { + .ident = "Surface Book 1", + .matches = { +diff --git a/drivers/platform/surface/surfacebook1_dgpu_switch.c b/drivers/platform/surface/surfacebook1_dgpu_switch.c +new file mode 100644 +index 000000000000..8b816ed8f35c +--- /dev/null ++++ b/drivers/platform/surface/surfacebook1_dgpu_switch.c +@@ -0,0 +1,162 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later + -+#ifdef CONFIG_SYSCTL -+static unsigned int sysctl_sched_pelt_multiplier = 1; ++#include ++#include ++#include ++#include + -+int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer, -+ size_t *lenp, loff_t *ppos) ++ ++#ifdef pr_fmt ++#undef pr_fmt ++#endif ++#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ ++ ++ ++static const guid_t dgpu_sw_guid = GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4, ++ 0x95, 0xed, 0xab, 0x16, 0x65, 0x49, 0x80, 0x35); ++ ++#define DGPUSW_ACPI_PATH_DSM "\\_SB_.PCI0.LPCB.EC0_.VGBI" ++#define DGPUSW_ACPI_PATH_HGON "\\_SB_.PCI0.RP05.HGON" ++#define DGPUSW_ACPI_PATH_HGOF "\\_SB_.PCI0.RP05.HGOF" ++ ++ ++static int sb1_dgpu_sw_dsmcall(void) +{ -+ static DEFINE_MUTEX(mutex); -+ unsigned int old; -+ int ret; ++ union acpi_object *ret; ++ acpi_handle handle; ++ acpi_status status; + -+ mutex_lock(&mutex); -+ old = sysctl_sched_pelt_multiplier; -+ ret = proc_dointvec(table, write, buffer, lenp, ppos); -+ if (ret) -+ goto undo; -+ if (!write) -+ goto done; ++ status = acpi_get_handle(NULL, DGPUSW_ACPI_PATH_DSM, &handle); ++ if (status) ++ return -EINVAL; + -+ switch (sysctl_sched_pelt_multiplier) { -+ case 1: -+ fallthrough; -+ case 2: -+ fallthrough; -+ case 4: -+ WRITE_ONCE(sched_pelt_lshift, -+ sysctl_sched_pelt_multiplier >> 1); -+ goto done; -+ default: -+ ret = -EINVAL; ++ ret = acpi_evaluate_dsm_typed(handle, &dgpu_sw_guid, 1, 1, NULL, ACPI_TYPE_BUFFER); ++ if (!ret) ++ return -EINVAL; ++ ++ ACPI_FREE(ret); ++ return 0; ++} ++ ++static int sb1_dgpu_sw_hgon(void) ++{ ++ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; ++ acpi_status status; ++ ++ status = acpi_evaluate_object(NULL, DGPUSW_ACPI_PATH_HGON, NULL, &buf); ++ if (status) { ++ pr_err("failed to run HGON: %d\n", status); ++ return -EINVAL; + } + -+undo: -+ sysctl_sched_pelt_multiplier = old; -+done: -+ mutex_unlock(&mutex); ++ if (buf.pointer) ++ ACPI_FREE(buf.pointer); + -+ return ret; ++ pr_info("turned-on dGPU via HGON\n"); ++ return 0; +} + -+static struct ctl_table sched_pelt_sysctls[] = { -+ { -+ .procname = "sched_pelt_multiplier", -+ .data = &sysctl_sched_pelt_multiplier, -+ .maxlen = sizeof(unsigned int), -+ .mode = 0644, -+ .proc_handler = sched_pelt_multiplier, -+ }, -+ {} -+}; -+ -+static int __init sched_pelt_sysctl_init(void) ++static int sb1_dgpu_sw_hgof(void) +{ -+ register_sysctl_init("kernel", sched_pelt_sysctls); ++ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; ++ acpi_status status; ++ ++ status = acpi_evaluate_object(NULL, 
DGPUSW_ACPI_PATH_HGOF, NULL, &buf); ++ if (status) { ++ pr_err("failed to run HGOF: %d\n", status); ++ return -EINVAL; ++ } ++ ++ if (buf.pointer) ++ ACPI_FREE(buf.pointer); ++ ++ pr_info("turned-off dGPU via HGOF\n"); + return 0; +} -+late_initcall(sched_pelt_sysctl_init); -+#endif -diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h -index 3a0e0dc28721..9b35b5072bae 100644 ---- a/kernel/sched/pelt.h -+++ b/kernel/sched/pelt.h -@@ -61,6 +61,14 @@ static inline void cfs_se_util_change(struct sched_avg *avg) - WRITE_ONCE(avg->util_est.enqueued, enqueued); - } - -+static inline u64 rq_clock_task_mult(struct rq *rq) ++ ++ ++static ssize_t dgpu_dsmcall_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t len) +{ -+ lockdep_assert_rq_held(rq); -+ assert_clock_updated(rq); ++ int status, value; + -+ return rq->clock_task_mult; -+} ++ status = kstrtoint(buf, 0, &value); ++ if (status < 0) ++ return status; + - static inline u64 rq_clock_pelt(struct rq *rq) - { - lockdep_assert_rq_held(rq); -@@ -72,7 +80,7 @@ static inline u64 rq_clock_pelt(struct rq *rq) - /* The rq is idle, we can sync to clock_task */ - static inline void _update_idle_rq_clock_pelt(struct rq *rq) - { -- rq->clock_pelt = rq_clock_task(rq); -+ rq->clock_pelt = rq_clock_task_mult(rq); - - u64_u32_store(rq->clock_idle, rq_clock(rq)); - /* Paired with smp_rmb in migrate_se_pelt_lag() */ -@@ -121,6 +129,27 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) - rq->clock_pelt += delta; - } - -+extern unsigned int sched_pelt_lshift; ++ if (value != 1) ++ return -EINVAL; + -+/* -+ * absolute time |1 |2 |3 |4 |5 |6 | -+ * @ mult = 1 --------****************--------****************- -+ * @ mult = 2 --------********----------------********--------- -+ * @ mult = 4 --------****--------------------****------------- -+ * clock task mult -+ * @ mult = 2 | | |2 |3 | | | | |5 |6 | | | -+ * @ mult = 4 | | | | |2|3| | | | | | | | | | |5|6| | | | | | | -+ * -+ */ -+static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta) ++ status = sb1_dgpu_sw_dsmcall(); ++ ++ return status < 0 ? status : len; ++} ++ ++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t len) +{ -+ delta <<= READ_ONCE(sched_pelt_lshift); ++ bool power; ++ int status; + -+ rq->clock_task_mult += delta; ++ status = kstrtobool(buf, &power); ++ if (status < 0) ++ return status; + -+ update_rq_clock_pelt(rq, delta); ++ if (power) ++ status = sb1_dgpu_sw_hgon(); ++ else ++ status = sb1_dgpu_sw_hgof(); ++ ++ return status < 0 ? status : len; +} + - /* - * When rq becomes idle, we have to check if it has lost idle time - * because it was fully busy. A rq is fully used when the /Sum util_sum -@@ -147,7 +176,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) - * rq's clock_task. 
- */
- if (util_sum >= divider)
-- rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
-+ rq->lost_idle_time += rq_clock_task_mult(rq) - rq->clock_pelt;
-
- _update_idle_rq_clock_pelt(rq);
- }
-@@ -218,13 +247,18 @@ update_irq_load_avg(struct rq *rq, u64 running)
- return 0;
- }
--static inline u64 rq_clock_pelt(struct rq *rq)
-+static inline u64 rq_clock_task_mult(struct rq *rq)
- {
- return rq_clock_task(rq);
- }
-+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+ return rq_clock_task_mult(rq);
+}
+
- static inline void
--update_rq_clock_pelt(struct rq *rq, s64 delta) { }
-+update_rq_clock_task_mult(struct rq *rq, s64 delta) { }
-
- static inline void
- update_idle_rq_clock_pelt(struct rq *rq) { }
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 3e8df6d31c1e..7331d436ebc4 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -1018,6 +1018,7 @@ struct rq {
- u64 clock;
- /* Ensure that all clocks are in the same cache line */
- u64 clock_task ____cacheline_aligned;
-+ u64 clock_task_mult;
- u64 clock_pelt;
- unsigned long lost_idle_time;
- u64 clock_pelt_idle;
-@@ -1772,6 +1773,13 @@ queue_balance_callback(struct rq *rq,
- for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
- __sd; __sd = __sd->parent)
-
-+/* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */
-+#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) |
-+static const unsigned int SD_SHARED_CHILD_MASK =
-+#include <linux/sched/sd_flags.h>
-+0;
-+#undef SD_FLAG
+
- /**
- * highest_flag_domain - Return highest sched_domain containing flag.
- * @cpu: The CPU whose highest level of sched domain is to
-@@ -1779,16 +1787,25 @@ queue_balance_callback(struct rq *rq,
- * @flag: The flag to check for the highest sched_domain
- * for the given CPU.
- *
-- * Returns the highest sched_domain of a CPU which contains the given flag.
-+ * Returns the highest sched_domain of a CPU which contains @flag. If @flag has
-+ * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag.
- */
- static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
- {
- struct sched_domain *sd, *hsd = NULL;
-
- for_each_domain(cpu, sd) {
-- if (!(sd->flags & flag))
-+ if (sd->flags & flag) {
-+ hsd = sd;
-+ continue;
-+ }
+
-+ /*
-+ * Stop the search if @flag is known to be shared at lower
-+ * levels. It will not be found further up.
-+ */ -+ if (flag & SD_SHARED_CHILD_MASK) - break; -- hsd = sd; - } ++static struct platform_driver sb1_dgpu_sw = { ++ .probe = sb1_dgpu_sw_probe, ++ .remove = sb1_dgpu_sw_remove, ++ .driver = { ++ .name = "surfacebook1_dgpu_switch", ++ .acpi_match_table = sb1_dgpu_sw_match, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++module_platform_driver(sb1_dgpu_sw); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("Discrete GPU Power-Switch for Surface Book 1"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/platform/surface/surfacepro3_button.c b/drivers/platform/surface/surfacepro3_button.c +index 2755601f979c..4240c98ca226 100644 +--- a/drivers/platform/surface/surfacepro3_button.c ++++ b/drivers/platform/surface/surfacepro3_button.c +@@ -149,7 +149,8 @@ static int surface_button_resume(struct device *dev) + /* + * Surface Pro 4 and Surface Book 2 / Surface Pro 2017 use the same device + * ID (MSHW0040) for the power/volume buttons. Make sure this is the right +- * device by checking for the _DSM method and OEM Platform Revision. ++ * device by checking for the _DSM method and OEM Platform Revision DSM ++ * function. + * + * Returns true if the driver should bind to this device, i.e. the device is + * either MSWH0028 (Pro 3) or MSHW0040 on a Pro 4 or Book 1. +@@ -157,30 +158,11 @@ static int surface_button_resume(struct device *dev) + static bool surface_button_check_MSHW0040(struct acpi_device *dev) + { + acpi_handle handle = dev->handle; +- union acpi_object *result; +- u64 oem_platform_rev = 0; // valid revisions are nonzero +- +- // get OEM platform revision +- result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID, +- MSHW0040_DSM_REVISION, +- MSHW0040_DSM_GET_OMPR, +- NULL, ACPI_TYPE_INTEGER); +- +- /* +- * If evaluating the _DSM fails, the method is not present. This means +- * that we have either MSHW0028 or MSHW0040 on Pro 4 or Book 1, so we +- * should use this driver. We use revision 0 indicating it is +- * unavailable. 
+- */ +- +- if (result) { +- oem_platform_rev = result->integer.value; +- ACPI_FREE(result); +- } +- +- dev_dbg(&dev->dev, "OEM Platform Revision %llu\n", oem_platform_rev); + +- return oem_platform_rev == 0; ++ // make sure that OEM platform revision DSM call does not exist ++ return !acpi_check_dsm(handle, &MSHW0040_DSM_UUID, ++ MSHW0040_DSM_REVISION, ++ BIT(MSHW0040_DSM_GET_OMPR)); + } + + +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 934b3d997702..2c6604c6e8e1 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -220,6 +220,9 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Microsoft Surface Dock Ethernet (RTL8153 GigE) */ + { USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM }, + ++ /* Microsoft Surface Go 3 Type-Cover */ ++ { USB_DEVICE(0x045e, 0x09b5), .driver_info = USB_QUIRK_DELAY_INIT }, ++ + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/include/linux/pci.h b/include/linux/pci.h +index a5dda515fcd1..69f6fc707ae5 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -464,6 +464,7 @@ struct pci_dev { + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ + unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ + unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ ++ unsigned int no_shutdown:1; /* Do not touch device on shutdown */ + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; /* pci_enable_device has been called */ + +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c +index 7c7cbb6362ea..81a8ff40e86e 100644 +--- a/sound/soc/codecs/rt5645.c ++++ b/sound/soc/codecs/rt5645.c +@@ -3717,6 +3717,15 @@ static const struct dmi_system_id dmi_platform_data[] = { + }, + .driver_data = (void *)&intel_braswell_platform_data, + }, ++ { ++ .ident = "Microsoft Surface 3", ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), ++ DMI_MATCH(DMI_SYS_VENDOR, "OEMB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"), ++ }, ++ .driver_data = (void *)&intel_braswell_platform_data, ++ }, + { + /* + * Match for the GPDwin which unfortunately uses somewhat +diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +index 6beb00858c33..d82d77387a0a 100644 +--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c ++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c +@@ -27,6 +27,14 @@ static const struct dmi_system_id cht_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"), + }, + }, ++ { ++ .callback = cht_surface_quirk_cb, ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), ++ DMI_MATCH(DMI_SYS_VENDOR, "OEMB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"), ++ }, ++ }, + { } + }; - return hsd; -- 2.40.0 -From 6ba6433df40db59f2d25083d7ddbf7fad065825e Mon Sep 17 00:00:00 2001 +From dfd409a7ff5e935c78a89d4da47f6fe4f5c41cda Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:38:04 +0200 -Subject: [PATCH 11/12] zram +Date: Sat, 22 Apr 2023 11:46:32 +0200 +Subject: [PATCH 12/13] zram Signed-off-by: Peter Jung --- @@ -17705,10 +26583,10 @@ index c5254626f051..ca7a15bd4845 100644 -- 2.40.0 -From 56a4ed844bc68f1920a53b06740409e03f47f847 Mon Sep 17 00:00:00 2001 +From 3e19b1053d5a2324f7255db192c3e520c4d33495 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 17 Apr 2023 18:38:21 +0200 -Subject: [PATCH 12/12] zstd: import 1.5.5 +Date: Sat, 22 Apr 2023 11:46:46 +0200 
+Subject: [PATCH 13/13] zstd: import 1.5.5 Signed-off-by: Peter Jung --- diff --git a/Files b/Files index e7dd0edf..e93a49c7 100644 --- a/Files +++ b/Files @@ -437,7 +437,8 @@ │   ├── 0014-zstd-import-1.5.5.patch │   ├── 0015-v4l2-core-add-v4l2loopback.patch │   ├── all -│   │   └── 0001-cachyos-base-all.patch +│   │   ├── 0001-cachyos-base-all.patch +│   │   └── 0001-cachyos-base-all.tar.gz │   ├── misc │   │   ├── 0001-Add-latency-priority-for-CFS-class.patch │   │   ├── 0001-aufs-6.2-merge-v20230227.patch @@ -472,8 +473,9 @@ │   ├── 0008-maple-lru.patch │   ├── 0009-Per-VMA-locks.patch │   ├── 0010-sched.patch -│   ├── 0011-zram.patch -│   ├── 0012-zstd-import-1.5.5.patch +│   ├── 0011-Surface.patch +│   ├── 0012-zram.patch +│   ├── 0013-zstd-import-1.5.5.patch │   ├── all │   │   └── 0001-cachyos-base-all.patch │   ├── misc @@ -493,4 +495,4 @@ ├── Files └── README.md -45 directories, 449 files +45 directories, 451 files