From 1caa5f0625854b97ecc3a2cf195e8ad33e883915 Mon Sep 17 00:00:00 2001 From: bishe <123456789@163.com> Date: Sun, 23 Feb 2025 18:42:21 +0800 Subject: [PATCH] fix about netG --- checkpoints/ROMA_UNSB_001/loss_log.txt | 19 + checkpoints/ROMA_UNSB_001/train_opt.txt | 7 +- .../__pycache__/ncsn_networks.cpython-39.pyc | Bin 0 -> 22578 bytes models/__pycache__/networks.cpython-39.pyc | Bin 50516 -> 51012 bytes .../roma_unsb_model.cpython-39.pyc | Bin 19497 -> 19898 bytes models/ncsn_networks.py | 719 ++++++++++++++++++ models/networks.py | 9 + models/roma_unsb_model.py | 25 +- .../__pycache__/base_options.cpython-39.pyc | Bin 7577 -> 7598 bytes options/base_options.py | 2 +- scripts/train.sh | 2 +- 11 files changed, 770 insertions(+), 13 deletions(-) create mode 100644 models/__pycache__/ncsn_networks.cpython-39.pyc create mode 100644 models/ncsn_networks.py diff --git a/checkpoints/ROMA_UNSB_001/loss_log.txt b/checkpoints/ROMA_UNSB_001/loss_log.txt index 8d9f361..19fcafc 100644 --- a/checkpoints/ROMA_UNSB_001/loss_log.txt +++ b/checkpoints/ROMA_UNSB_001/loss_log.txt @@ -6,3 +6,22 @@ ================ Training Loss (Sun Feb 23 16:06:44 2025) ================ ================ Training Loss (Sun Feb 23 16:09:38 2025) ================ ================ Training Loss (Sun Feb 23 16:44:56 2025) ================ +================ Training Loss (Sun Feb 23 16:49:46 2025) ================ +================ Training Loss (Sun Feb 23 16:51:03 2025) ================ +================ Training Loss (Sun Feb 23 16:51:23 2025) ================ +================ Training Loss (Sun Feb 23 18:04:02 2025) ================ +================ Training Loss (Sun Feb 23 18:04:39 2025) ================ +================ Training Loss (Sun Feb 23 18:05:17 2025) ================ +================ Training Loss (Sun Feb 23 18:06:40 2025) ================ +================ Training Loss (Sun Feb 23 18:11:48 2025) ================ +================ Training Loss (Sun Feb 23 18:13:31 2025) ================ +================ Training Loss (Sun Feb 23 18:14:11 2025) ================ +================ Training Loss (Sun Feb 23 18:14:29 2025) ================ +================ Training Loss (Sun Feb 23 18:16:27 2025) ================ +================ Training Loss (Sun Feb 23 18:16:44 2025) ================ +================ Training Loss (Sun Feb 23 18:20:39 2025) ================ +================ Training Loss (Sun Feb 23 18:21:44 2025) ================ +================ Training Loss (Sun Feb 23 18:35:27 2025) ================ +================ Training Loss (Sun Feb 23 18:39:21 2025) ================ +================ Training Loss (Sun Feb 23 18:40:15 2025) ================ +================ Training Loss (Sun Feb 23 18:41:15 2025) ================ diff --git a/checkpoints/ROMA_UNSB_001/train_opt.txt b/checkpoints/ROMA_UNSB_001/train_opt.txt index b15424d..d7766e4 100644 --- a/checkpoints/ROMA_UNSB_001/train_opt.txt +++ b/checkpoints/ROMA_UNSB_001/train_opt.txt @@ -43,6 +43,7 @@ n_epochs: 100 n_epochs_decay: 100 n_layers_D: 3 + n_mlp: 3 name: ROMA_UNSB_001 [default: experiment_name] nce_T: 0.07 nce_idt: False [default: True] @@ -52,7 +53,7 @@ nce_includes_all_negatives_from_minibatch: False netD: basic netF: mlp_sample netF_nc: 256 - netG: resnet_9blocks + netG: resnet_9blocks_cond ngf: 64 no_antialias: False no_antialias_up: False @@ -63,7 +64,7 @@ nce_includes_all_negatives_from_minibatch: False normG: instance num_patches: 256 num_threads: 4 - num_timesteps: 10 + num_timesteps: 10 [default: 5] output_nc: 3 phase: train 
 pool_size: 0
@@ -77,7 +78,7 @@ nce_includes_all_negatives_from_minibatch: False
 serial_batches: False
 stylegan2_G_num_downsampling: 1
 suffix: 
-tau: 0.1
+tau: 0.1 [default: 0.01]
 update_html_freq: 1000
 use_idt: False
 verbose: False
diff --git a/models/__pycache__/ncsn_networks.cpython-39.pyc b/models/__pycache__/ncsn_networks.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..283037180f935e25088b7fa8c4e536f35370bb07
Binary files /dev/null and b/models/__pycache__/ncsn_networks.cpython-39.pyc differ
diff --git a/models/__pycache__/networks.cpython-39.pyc b/models/__pycache__/networks.cpython-39.pyc
index 800e7d35afde24d86786a9b7dbdc241f6864e501..40fbb5c3111eb70879d74788f829d845264fd328 100644
Binary files a/models/__pycache__/networks.cpython-39.pyc and b/models/__pycache__/networks.cpython-39.pyc differ
zYWwh_lv`^aB=FkNI-+=R^IBv4cyIsFh1S#Y*?AF{ajC#U zi)!;{;~90qQ|-hxBqFEd8F^GLbGyK;iwT?Y&hYTIx45uS%e=g;c8$AA*R;$>2{#b)t_A)7jRE|!hR$Al9d<{O1rMhKJn z)n@&xIV%~zNbDD2NY6PV;xR2AO^8_bOz=_hEcLxXj4o>8Lt?GOt`Yk!u^Ys061z_9 zJ78t++&L|o#@#GtBqKd-WeZkkBPV#X@#)Cq#>-Hu@4=4YuIb^<%GcuN zUhb7Pa+%AdCw(zao&fRq_uf@*ol82YRH@cs9&;?dpB6gJ6Plz=e zcOLp89330;@!rzxSQih`g;ZQ5wqk3i^=zRao73|N_1s+iYgzHrii5YuC-^|=qw$jx z|BG>SqSyU5($l_H7Wj_w^n`a^6AiYgnlf!q$L3BevFO45$L_}P&cu%J%Csw!*-yu` znKMx;*UD@GisMOgT{K*i!82}Ze@yK*NHy0pT1IRpt(W@OkhFzZ2N^Y`2cqeDRTaO1 zD(I-(ynSRydm?&B3KH8$te04y@!d&(*9KE08?)lgH)24viWUJTb^}7vfDP>dPN52!FA;Z{OgcCFFq-) IJJQ1c4Ip=AqyPW_ delta 3852 zcmb_fYj7J^6~1>@kJZDnWJ#7}JCWNCQYCfNIid$*U7N47P7gP`|19jcw3}qnknEAT1!RAe&>Yac&qFza7tNLpXo& zUIw*k?Mespv%o5aDYk2!N*AkRK^9sSj2!?ZQ9^$=OuZAP>?zSIZ*Kh*$XoAkif#L zB22$;BPM>=nEe|uZB;SjTPGTYW%hyW2N})a)MMHKSZW+(Vnu-Vq&BsJJ37ef;q6Cw zP1_Q!@JHIR^h5q`+mr3jz>8MDhHCqU>O|{VP=7a!04(cpqZ&Q~c%54!5{ZlxUI;3BK~LIz@rDm*3gp7wAa&Pu;Hw zbiRD0_jT&5*q797PUES*r|2qwukQps&8L&EHvIwSQIDeXqq+$y;Z%pvevb$G`{?ia zWPhAK%2WMw#cNJto*ad$pVx^2`BKCZEkJdcwhe6XnYJBl$ggQTz{a|8>JX_&j55J| ztuE7tvuyg@nn!^HptzU_Csiq{hzx_&GqDIbq>O-r zG>;L{B{&IDDCSrR!eAOAHz0DOk>T!Lu#+ZS7+|4{?GQ1VVXRxXj;_aO zDUmhdVw1^FtwvjkPy|nuxX}*l6gx_Ui4k)jYxLxef=Bl-`x)CBRo{o@Ju!kBcN!y6 zQt(=P+2X6wy?RX}F}5=avIxJDt>DwWOtkz_gjC&EYpx)`3V+v0QE}ylA=DQox*zDp zrw5Eq+(m@wwYnXKcHti3aQh)M_~u;r?#wj$br0^|^d*=*M$D{iiL!D5QQezS%mX%2 zc7{ksU`6+8)qp}B?92^&CUOrHTC47d&>p?6Bo+ees0H;J_}m+ygC^+ET|fudLAy6V z2RA^w6#!k_f_gn{3u-egMz)~74cd$c&=G z-vfksZAD@uDAMZYd?|&nnP}G?BP^)( zI43Ngw4liSNajQtShK0qrbX9m=R#UQ88il=FkNR5=@9e`VFc6xl6sQ)GPH!unq2qv zZ+QC6To47Gkd1H&`FMEUhl}>;^`>tR_>d)v!zK}62sZbaE$CFhBkx5IJV)T^w0u>x zf~1dLjNT+0{0zW{fYqK6)|@KV&z)JsZ?Q=6|3f1H`#-Zt@(wHl^?wgEk|Bi%q#CJ& zUY!3Fl@P^`JP7=e+~5y~+Ho)ZQ3O)?ZwbVqVrBeq`~m!b4+LVdLLvHZh-B+Yu#Ti3 zNZ8~KZA>RD0M`OwQsi17w*a2HHGA>=56p8|eGHv>$G;pyET}rOpk4+DZ?7T`XmwCy#+jXaecbkj++P=PyIT-p|5cqp%T)hkr^?6YDt6nr~P<;a?o}Rng zT)#8t!Z?oV4Nw5lyP&31T3XJisio72qv}_&@4Kk3^N+`(j@Qw=!hf^BuW|GKqK461 z52_++sb%#gK0JDazQDgSdeT-4UjEi-l3wDzu}ieXOJl9{Y5v2p2cjS00D64hmd!2X z)qh}1IF(lg-h1FUb@Im!OgDWUK;~~i4}lw(*ZK(|8WI|41Aq6xj=0!Kn?JD$J7_m` zQ@^m;3W;v&7RrO;lQjGRj8~sRbqv*OsGR)iiOfWXHi`USsp_ z#njnTEF~YFy#My9lQX;gc;{1}L}e|;i!)(QE3W%RbiTx|%p^|xu>DtT*MhY%)q<8& zyU<&Q{S9dCKo!Iw=i-38crKq;e-5sM$GTO9F%WaD#?k6R)q<)GRXa}}ipI8~X_lU; z%=`7vV8~!Ob?8kR+>YIUhIV#BAmn&LRR73lXPde!;$k|dUctG|Oqg&@;E37ENdm2I z{_Jdv;}!pX){M+t~J^RRbzfYvQd)Ie9Vla9YjZan79(Iq$_y%6YiXXH$!6 zAG+Muxsn%hn%cuh=e|wnzw^sEO1JaMk+@?QE^>u`awJO!c=qUh_HJ-I!oPp?%MSBC z{sA977Ih9{R5#`4@2ltMj|CG6X^&JdwMsrIDtV= 0) + super(ResnetGenerator_ncsn, self).__init__() + self.opt = opt + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + self.ngf = ngf + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if no_antialias: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2) + # nn.AvgPool2d(kernel_size=2, stride=2) + ] + self.model_res = nn.ModuleList() + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + self.model_res += [ResnetBlock_cond(ngf * mult, 
padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias,temb_dim=4*ngf,z_dim=4*ngf)] + + model_upsample = [] + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if no_antialias_up: + model_upsample += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model_upsample += [ + Upsample(ngf * mult), + # nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model_upsample += [nn.ReflectionPad2d(3)] + model_upsample += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model_upsample += [nn.Tanh()] + + self.model = nn.Sequential(*model) + self.model_upsample = nn.Sequential(*model_upsample) + mapping_layers = [PixelNorm(), + nn.Linear(self.ngf*4, self.ngf*4), + nn.LeakyReLU(0.2)] + for _ in range(opt.n_mlp): + mapping_layers.append(nn.Linear(self.ngf*4, self.ngf*4)) + mapping_layers.append(nn.LeakyReLU(0.2)) + self.z_transform = nn.Sequential(*mapping_layers) + modules_emb = [] + modules_emb += [nn.Linear(self.ngf,self.ngf*4)] + + nn.init.zeros_(modules_emb[-1].bias) + modules_emb += [nn.LeakyReLU(0.2)] + modules_emb += [nn.Linear(self.ngf*4,self.ngf*4)] + + nn.init.zeros_(modules_emb[-1].bias) + modules_emb += [nn.LeakyReLU(0.2)] + self.time_embed = nn.Sequential(*modules_emb) + + def forward(self, x, time_cond,z,layers=[], encode_only=False): + z_embed = self.z_transform(z) + # print(z_embed.shape) + temb = get_timestep_embedding(time_cond, self.ngf) + time_embed = self.time_embed(temb) + if len(layers) > 0: + feat = x + feats = [] + for layer_id, layer in enumerate(self.model): + feat = layer(feat) + if layer_id in layers: + feats.append(feat) + + for layer_id, layer in enumerate(self.model_res): + feat = layer(feat,time_embed,z_embed) + if layer_id+len(self.model) in layers: + feats.append(feat) + if layer_id+len(self.model) == layers[-1] and encode_only: + return feats + return feat, feats + else: + + out = self.model(x) + for layer in self.model_res: + out = layer(out,time_embed,z_embed) + out = self.model_upsample(out) + return out +################################################################################## +# Basic Blocks +################################################################################## +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + +class ResnetBlock_cond(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias,temb_dim,z_dim): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock_cond, self).__init__() + self.conv_block,self.adaptive,self.conv_fin = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias,temb_dim,z_dim) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias,temb_dim,z_dim): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + + self.conv_block = nn.ModuleList() + self.conv_fin = nn.ModuleList() + p = 0 + if padding_type == 'reflect': + self.conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + self.conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + self.conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + self.adaptive = AdaptiveLayer(dim,z_dim) + self.conv_fin += [nn.ReLU(True)] + if use_dropout: + self.conv_fin += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + self.conv_fin += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + self.conv_fin += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + self.conv_fin += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + self.Dense_time = nn.Linear(temb_dim, dim) + # self.Dense_time.weight.data = default_init()(self.Dense_time.weight.data.shape) + nn.init.zeros_(self.Dense_time.bias) + + self.style = nn.Linear(z_dim, dim * 2) + + self.style.bias.data[:dim] = 1 + self.style.bias.data[dim:] = 0 + + return self.conv_block,self.adaptive,self.conv_fin + + def forward(self, x,time_cond,z): + + time_input = self.Dense_time(time_cond) + for n,layer in enumerate(self.conv_block): + out = layer(x) + if n==0: + out += time_input[:, :, None, None] + out = self.adaptive(out,z) + for layer in self.conv_fin: + out = layer(out) + """Forward function (with skip connections)""" + out = x + out # add skip connections + return out +############################################################################### +# Helper Functions +############################################################################### +def get_filter(filt_size=3): + if(filt_size == 1): + a = np.array([1., ]) + elif(filt_size == 2): + a = np.array([1., 1.]) + elif(filt_size == 3): + a = np.array([1., 2., 1.]) + elif(filt_size == 4): + a = np.array([1., 3., 3., 1.]) + elif(filt_size == 5): + a = np.array([1., 4., 6., 4., 1.]) + elif(filt_size == 6): + a = np.array([1., 5., 10., 10., 5., 1.]) + elif(filt_size == 7): + a = np.array([1., 6., 15., 20., 15., 6., 1.]) + + filt = torch.Tensor(a[:, None] * a[None, :]) + filt = filt / torch.sum(filt) + + return filt + + +class Downsample(nn.Module): + def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0): + super(Downsample, self).__init__() + self.filt_size = filt_size + self.pad_off = pad_off + self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))] + self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes] + self.stride = stride + self.off = int((self.stride - 1) / 2.) 
+ self.channels = channels + + filt = get_filter(filt_size=self.filt_size) + self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1))) + + self.pad = get_pad_layer(pad_type)(self.pad_sizes) + + def forward(self, inp): + if(self.filt_size == 1): + if(self.pad_off == 0): + return inp[:, :, ::self.stride, ::self.stride] + else: + return self.pad(inp)[:, :, ::self.stride, ::self.stride] + else: + return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1]) + + +class Upsample2(nn.Module): + def __init__(self, scale_factor, mode='nearest'): + super().__init__() + self.factor = scale_factor + self.mode = mode + + def forward(self, x): + return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode) + + +class Upsample(nn.Module): + def __init__(self, channels, pad_type='repl', filt_size=4, stride=2): + super(Upsample, self).__init__() + self.filt_size = filt_size + self.filt_odd = np.mod(filt_size, 2) == 1 + self.pad_size = int((filt_size - 1) / 2) + self.stride = stride + self.off = int((self.stride - 1) / 2.) + self.channels = channels + + filt = get_filter(filt_size=self.filt_size) * (stride**2) + self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1))) + + self.pad = get_pad_layer(pad_type)([1, 1, 1, 1]) + + def forward(self, inp): + ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:] + if(self.filt_odd): + return ret_val + else: + return ret_val[:, :, :-1, :-1] + + +def get_pad_layer(pad_type): + if(pad_type in ['refl', 'reflect']): + PadLayer = nn.ReflectionPad2d + elif(pad_type in ['repl', 'replicate']): + PadLayer = nn.ReplicationPad2d + elif(pad_type == 'zero'): + PadLayer = nn.ZeroPad2d + else: + print('Pad type [%s] not recognized' % pad_type) + return PadLayer + + +class Identity(nn.Module): + def forward(self, x): + return x + + +def get_norm_layer(norm_type='instance'): + """Return a normalization layer + + Parameters: + norm_type (str) -- the name of the normalization layer: batch | instance | none + + For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). + For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. + """ + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + def norm_layer(x): return Identity() + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def init_weights(net, init_type='normal', init_gain=0.02, debug=False): + """Initialize network weights. + + Parameters: + net (network) -- network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + + We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might + work better for some applications. Feel free to try yourself. 
+ """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if debug: + print(classname) + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) # apply the initialization function + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True): + """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + if initialize_weights: + init_weights(net, init_type, init_gain=init_gain, debug=debug) + return net \ No newline at end of file diff --git a/models/networks.py b/models/networks.py index ebae8aa..74343e6 100644 --- a/models/networks.py +++ b/models/networks.py @@ -7,6 +7,7 @@ from torch.optim import lr_scheduler import numpy as np import random from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator +from .ncsn_networks import NLayerDiscriminator_ncsn, ResnetGenerator_ncsn ############################################################################### # Helper Functions @@ -266,6 +267,8 @@ def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, in elif netG == 'resnet_cat': n_blocks = 8 net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu') + elif netG == 'resnet_9blocks_cond': + net = ResnetGenerator_ncsn(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt) else: raise NotImplementedError('Generator model name [%s] is not recognized' % netG) return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG)) @@ -977,6 +980,7 @@ class ResnetGenerator(nn.Module): feats = [] for layer_id, layer in enumerate(self.model): # print(layer_id, layer) + print(feat.shape) feat = layer(feat) if layer_id in layers: # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) @@ -984,6 +988,11 @@ class ResnetGenerator(nn.Module): else: # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) pass + print(f"layer_id: {layer_id}, type(layer_id): {type(layer_id)}") + print(f"len(list(self.model)) - 1: {len(list(self.model)) - 1}, 
type(len(list(self.model)) - 1): {type(len(list(self.model)) - 1)}")
+            # Print layers to see what it is. If layers is still a tensor here, we need to understand why.
+            print(f"layers: {layers}, type(layers): {type(layers)}")
+            print(f"encode_only: {encode_only}, type(encode_only): {type(encode_only)}")
             if layer_id == len(list(self.model)) - 1 and encode_only:
                 # print('encoder only return features')
                 return feats  # return intermediate features alone; stop in the last layers
diff --git a/models/roma_unsb_model.py b/models/roma_unsb_model.py
index 11ce30c..b9c2ad7 100644
--- a/models/roma_unsb_model.py
+++ b/models/roma_unsb_model.py
@@ -221,8 +221,10 @@
         parser.add_argument('--atten_layers', type=str, default='1,3,5', help='compute Cross-Similarity on which layers')

-        parser.add_argument('--tau', type=float, default=0.1, help='used in unsb')
-        parser.add_argument('--num_timesteps', type=int, default=10, help='used in unsb')
+        parser.add_argument('--tau', type=float, default=0.01, help='Entropy parameter')
+        parser.add_argument('--num_timesteps', type=int, default=5, help='number of timesteps in the SB sampling loop')
+
+        parser.add_argument('--n_mlp', type=int, default=3, help='number of layers in the latent z mapping network of the conditional generator')

         parser.set_defaults(pool_size=0)  # no image pooling

@@ -260,7 +262,8 @@
         else:
             self.model_names = ['G']
-
+
+        print(f'input_nc = {self.opt.input_nc}')
         # create the networks
         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)

@@ -269,7 +272,7 @@
             self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
             self.netE = networks.define_D(opt.output_nc*4, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)

-            self.resize = tfs.Resize(size=(384,384))
+            self.resize = tfs.Resize(size=(384,384), antialias=True)

             # add the pretrained ViT
             self.netPreViT = timm.create_model("vit_base_patch16_384", pretrained=True).to(self.device)

@@ -397,16 +400,20 @@
         """Run the forward pass to generate the output images"""

         if self.opt.isTrain:
+            print(f'before resize: {self.real_A0.shape}')
             real_A0 = self.resize(self.real_A0)
             real_A1 = self.resize(self.real_A1)
             real_B0 = self.resize(self.real_B0)
             real_B1 = self.resize(self.real_B1)

             # use the ViT
+
+            print(f'before vit: {real_A0.shape}')
             self.mutil_real_A0_tokens = self.netPreViT(real_A0, self.atten_layers, get_tokens=True)
             self.mutil_real_A1_tokens = self.netPreViT(real_A1, self.atten_layers, get_tokens=True)
+            print(f'before cat: len = {len(self.mutil_real_A0_tokens)}\n{self.mutil_real_A0_tokens[0].shape}')
-            self.mutil_real_A0_tokens = torch.cat(self.mutil_real_A0_tokens, dim=1).to(self.device)
-            self.mutil_real_A1_tokens = torch.cat(self.mutil_real_A1_tokens, dim=1).to(self.device)
+            self.mutil_real_A0_tokens = torch.cat(self.mutil_real_A0_tokens, dim=0).unsqueeze(0).to(self.device)
+            self.mutil_real_A1_tokens = torch.cat(self.mutil_real_A1_tokens, dim=0).unsqueeze(0).to(self.device)

             # run the SB module once

@@ -436,6 +443,7 @@
                 inter = (delta / denom).reshape(-1, 1, 1, 1)
                 scale = (delta * (1 - delta / denom)).reshape(-1, 1, 1, 1)

+                print(f'before noisy: {self.mutil_real_A0_tokens.shape}')
                 # apply the random-noise update to Xt and Xt2
                 Xt = self.mutil_real_A0_tokens if (t == 0) else (1 - inter) * Xt + inter * Xt_1.detach() + \
                     (scale * tau).sqrt() * torch.randn_like(Xt).to(self.mutil_real_A0_tokens.device)
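For readers of this hunk: the update above moves Xt a fraction inter toward the current prediction and adds Gaussian noise whose variance is scale * tau. Below is a minimal standalone sketch of that one step; the helper name sb_noise_step is illustrative, the names delta, denom and tau follow this file, and the time schedule that produces delta and denom is defined elsewhere in the model.

import torch

def sb_noise_step(Xt, Xt_pred, delta, denom, tau):
    # inter: fraction of the remaining bridge covered this step
    # scale: variance factor of the injected noise
    inter = delta / denom
    scale = delta * (1.0 - delta / denom)
    return (1.0 - inter) * Xt + inter * Xt_pred.detach() + (scale * tau) ** 0.5 * torch.randn_like(Xt)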
@@ -454,7 +462,8 @@
             self.real_A_noisy = Xt.detach()
             self.real_A_noisy2 = Xt2.detach()
             # save the noisy_map
-            self.noisy_map = self.real_A_noisy - self.real_A0
+            print(f'after noisy map: {self.real_A_noisy.shape}')
+            self.noisy_map = self.real_A_noisy - self.mutil_real_A0_tokens

             # ============ Step 3: concatenate the inputs and run network inference =============
             bs = self.mutil_real_A0_tokens.size(0)
@@ -518,7 +527,7 @@
         if self.opt.phase == 'train':
             # gradient of the real image
-            real_gradient = torch.autograd.grad(self.real_B.sum(), self.real_B, create_graph=True)[0]
+            real_gradient = torch.autograd.grad(self.real_B0.sum(), self.real_B0, create_graph=True)[0]
             # gradient of the generated image
             fake_gradient = torch.autograd.grad(self.fake_B.sum(), self.fake_B, create_graph=True)[0]
             # gradient maps
diff --git a/options/__pycache__/base_options.cpython-39.pyc b/options/__pycache__/base_options.cpython-39.pyc
index 066b359a3662685d097d5c97675e70b877251737..55ab7e1a65961dcda63124aa2f74d66d46317a27 100644
Binary files a/options/__pycache__/base_options.cpython-39.pyc and b/options/__pycache__/base_options.cpython-39.pyc differ
diff --git a/options/base_options.py b/options/base_options.py
index 5837dd5..f9de39b 100644
--- a/options/base_options.py
+++ b/options/base_options.py
@@ -36,7 +36,7 @@
         parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
         parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
         parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
-        parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks','resnet_9blocks_mask', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat'], help='specify generator architecture')
+        parser.add_argument('--netG', type=str, default='resnet_9blocks_cond', choices=['resnet_9blocks','resnet_9blocks_mask', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat', 'resnet_9blocks_cond'], help='specify generator architecture')
         parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
         parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G')
         parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D')
diff --git a/scripts/train.sh b/scripts/train.sh
index 1c60f02..93a5f96 100755
--- a/scripts/train.sh
+++ b/scripts/train.sh
@@ -30,4 +30,4 @@ python train.py \
     --eta_ratio 0.1 \
     --tau 0.1 \
     --num_timesteps 10 \
-    --input_nc 1
+    --input_nc 3
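A quick way to sanity-check the new resnet_9blocks_cond wiring introduced by this patch is a standalone forward pass along the following lines. This is a sketch, not part of the commit: the Opt stub, the tensor shapes, and the assumption that the conditional generator only reads n_mlp from the options are inferred from the calls visible above.

import torch
from models import networks

class Opt:
    # minimal stand-in for the parsed training options read by the conditional generator
    n_mlp = 3

# positional arguments mirror the networks.define_G(...) call in RomaUnsbModel.__init__
netG = networks.define_G(3, 3, 64, 'resnet_9blocks_cond', 'instance', False,
                         'normal', 0.02, False, False, [], Opt())

x = torch.randn(1, 3, 256, 256)               # dummy input image batch
time_cond = torch.zeros(1, dtype=torch.long)  # timestep index in [0, num_timesteps)
z = torch.randn(1, 4 * 64)                    # latent code; width matches the ngf*4 z_transform MLP
out = netG(x, time_cond, z)
print(out.shape)                              # should be torch.Size([1, 3, 256, 256])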