From fe4eb0e57f8dde2b970ccff4537b2e27b2e0000f Mon Sep 17 00:00:00 2001 From: Abel Shields Date: Mon, 4 Jan 2021 17:47:34 +0000 Subject: [PATCH 1/2] Initial VIN implementation --- data/algorithms/vin_pretrained_16x16.pth | Bin 0 -> 13874 bytes data/algorithms/vin_pretrained_28x28.pth | Bin 0 -> 13870 bytes data/algorithms/vin_pretrained_8x8.pth | Bin 0 -> 13870 bytes src/algorithms/algorithm_manager.py | 10 +- .../testing/way_point_navigation_testing.py | 2 +- src/algorithms/configuration/configuration.py | 4 +- .../LSTM_CAE_tile_by_tile.py | 6 +- .../LSTM_CNN_tile_by_tile_obsolete.py | 2 +- .../{lstm => learning}/LSTM_tile_by_tile.py | 4 +- src/algorithms/{lstm => learning}/ML_model.py | 4 +- src/algorithms/learning/VIN/.gitignore | 135 ++++++ src/algorithms/learning/VIN/LICENSE | 29 ++ src/algorithms/learning/VIN/README.md | 143 ++++++ src/algorithms/learning/VIN/VIN.py | 284 +++++++++++ src/algorithms/learning/VIN/dataset/README.MD | 8 + .../VIN/dataset}/__init__.py | 0 .../learning/VIN/dataset/dataset.py | 67 +++ .../VIN/dataset/make_training_data.py | 142 ++++++ .../VIN/dataset/make_training_data_og.py | 116 +++++ .../learning/VIN/domains/__init__.py | 0 .../learning/VIN/domains/gridworld.py | 452 ++++++++++++++++++ .../VIN/domains/gridworld_before_mem.py | 435 +++++++++++++++++ .../learning/VIN/domains/gridworld_og.py | 385 +++++++++++++++ .../VIN/download_weights_and_datasets.sh | 9 + src/algorithms/learning/VIN/general_test16.py | 340 +++++++++++++ src/algorithms/learning/VIN/general_test28.py | 340 +++++++++++++ src/algorithms/learning/VIN/general_test8.py | 339 +++++++++++++ .../learning/VIN/generators/__init__.py | 0 .../learning/VIN/generators/obstacle_gen.py | 93 ++++ src/algorithms/learning/VIN/model.py | 68 +++ src/algorithms/learning/VIN/requirements.txt | 4 + .../learning/VIN/results/16x16_1.png | Bin 0 -> 14540 bytes .../learning/VIN/results/16x16_2.png | Bin 0 -> 14722 bytes .../learning/VIN/results/28x28_1.png | Bin 0 -> 14453 bytes 
.../learning/VIN/results/28x28_2.png | Bin 0 -> 14751 bytes src/algorithms/learning/VIN/results/8x8_1.png | Bin 0 -> 11951 bytes src/algorithms/learning/VIN/results/8x8_2.png | Bin 0 -> 13256 bytes src/algorithms/learning/VIN/results/8x8_3.png | Bin 0 -> 12684 bytes src/algorithms/learning/VIN/scrap.py | 14 + src/algorithms/learning/VIN/test.py | 346 ++++++++++++++ src/algorithms/learning/VIN/test16.py | 339 +++++++++++++ src/algorithms/learning/VIN/test28.py | 339 +++++++++++++ src/algorithms/learning/VIN/test64.py | 339 +++++++++++++ src/algorithms/learning/VIN/test8.py | 338 +++++++++++++ .../learning/VIN/test_house_expo.py | 340 +++++++++++++ src/algorithms/learning/VIN/test_nonzeros.py | 23 + src/algorithms/learning/VIN/test_og.py | 173 +++++++ src/algorithms/learning/VIN/train.py | 142 ++++++ src/algorithms/learning/VIN/train_og.py | 142 ++++++ src/algorithms/learning/VIN/trained/README.md | 4 + .../learning/VIN/utility/__init__.py | 0 src/algorithms/learning/VIN/utility/utils.py | 34 ++ src/algorithms/learning/__init__.py | 0 .../a_star_heuristic_augmentation.py | 2 +- .../{lstm => learning}/a_star_waypoint.py | 4 +- .../combined_online_LSTM.py | 2 +- .../{lstm => learning}/map_processing.py | 0 src/algorithms/{lstm => learning}/trainer.py | 0 src/analyzer/analyzer.py | 4 +- src/generator/generator.py | 4 +- src/main.py | 2 +- src/run_trainer.py | 12 +- .../services/resources/directories.py | 2 +- .../map/display/online_lstm_map_display.py | 2 +- 64 files changed, 5996 insertions(+), 32 deletions(-) create mode 100644 data/algorithms/vin_pretrained_16x16.pth create mode 100644 data/algorithms/vin_pretrained_28x28.pth create mode 100644 data/algorithms/vin_pretrained_8x8.pth rename src/algorithms/{lstm => learning}/LSTM_CAE_tile_by_tile.py (98%) rename src/algorithms/{lstm => learning}/LSTM_CNN_tile_by_tile_obsolete.py (98%) rename src/algorithms/{lstm => learning}/LSTM_tile_by_tile.py (98%) rename src/algorithms/{lstm => learning}/ML_model.py (99%) create 
mode 100755 src/algorithms/learning/VIN/.gitignore create mode 100644 src/algorithms/learning/VIN/LICENSE create mode 100644 src/algorithms/learning/VIN/README.md create mode 100644 src/algorithms/learning/VIN/VIN.py create mode 100644 src/algorithms/learning/VIN/dataset/README.MD rename src/algorithms/{lstm => learning/VIN/dataset}/__init__.py (100%) create mode 100644 src/algorithms/learning/VIN/dataset/dataset.py create mode 100644 src/algorithms/learning/VIN/dataset/make_training_data.py create mode 100644 src/algorithms/learning/VIN/dataset/make_training_data_og.py create mode 100644 src/algorithms/learning/VIN/domains/__init__.py create mode 100644 src/algorithms/learning/VIN/domains/gridworld.py create mode 100644 src/algorithms/learning/VIN/domains/gridworld_before_mem.py create mode 100644 src/algorithms/learning/VIN/domains/gridworld_og.py create mode 100755 src/algorithms/learning/VIN/download_weights_and_datasets.sh create mode 100644 src/algorithms/learning/VIN/general_test16.py create mode 100644 src/algorithms/learning/VIN/general_test28.py create mode 100644 src/algorithms/learning/VIN/general_test8.py create mode 100644 src/algorithms/learning/VIN/generators/__init__.py create mode 100644 src/algorithms/learning/VIN/generators/obstacle_gen.py create mode 100644 src/algorithms/learning/VIN/model.py create mode 100644 src/algorithms/learning/VIN/requirements.txt create mode 100644 src/algorithms/learning/VIN/results/16x16_1.png create mode 100644 src/algorithms/learning/VIN/results/16x16_2.png create mode 100644 src/algorithms/learning/VIN/results/28x28_1.png create mode 100644 src/algorithms/learning/VIN/results/28x28_2.png create mode 100644 src/algorithms/learning/VIN/results/8x8_1.png create mode 100644 src/algorithms/learning/VIN/results/8x8_2.png create mode 100644 src/algorithms/learning/VIN/results/8x8_3.png create mode 100644 src/algorithms/learning/VIN/scrap.py create mode 100644 src/algorithms/learning/VIN/test.py create mode 100644 
src/algorithms/learning/VIN/test16.py create mode 100644 src/algorithms/learning/VIN/test28.py create mode 100644 src/algorithms/learning/VIN/test64.py create mode 100644 src/algorithms/learning/VIN/test8.py create mode 100644 src/algorithms/learning/VIN/test_house_expo.py create mode 100644 src/algorithms/learning/VIN/test_nonzeros.py create mode 100644 src/algorithms/learning/VIN/test_og.py create mode 100644 src/algorithms/learning/VIN/train.py create mode 100644 src/algorithms/learning/VIN/train_og.py create mode 100644 src/algorithms/learning/VIN/trained/README.md create mode 100644 src/algorithms/learning/VIN/utility/__init__.py create mode 100644 src/algorithms/learning/VIN/utility/utils.py create mode 100644 src/algorithms/learning/__init__.py rename src/algorithms/{lstm => learning}/a_star_heuristic_augmentation.py (98%) rename src/algorithms/{lstm => learning}/a_star_waypoint.py (98%) rename src/algorithms/{lstm => learning}/combined_online_LSTM.py (98%) rename src/algorithms/{lstm => learning}/map_processing.py (100%) rename src/algorithms/{lstm => learning}/trainer.py (100%) diff --git a/data/algorithms/vin_pretrained_16x16.pth b/data/algorithms/vin_pretrained_16x16.pth new file mode 100644 index 0000000000000000000000000000000000000000..574bc7b0398075305f1e027b791f8a3e8c229255 GIT binary patch literal 13874 zcma)?c|28L_xOz&GK54@rW6fIgL3!UmsB(m5|xrlLdh&ig_0>zGKU5#ipmu3S^Jt& zG$@rKDw?Eu)}Z`6-{*NgzwhtA-+i6edF_4fIq$vJ-s_yb*V-#WEMC@oAlve%_Lc&> z(MAzsHhtnl=iUCI296{A0(^sf-F&@Wx4Q=fdinbJ|Lb2z*`QEAch^9#o$i7D;=BDN z3>?M1e1iNXmr6KF273Ai1o;nJD&{ER?d#*=FSS%6*iqiwD=5g@-PPS^lh+0xf9crZ zp%&bRxZdt=K||O;S=)e3?g8$b7I?V@$@+`B_=_31_=_7jiiWra`3AUo8o34sd3guQ zx(2v!4EFNgv#h1J?}i|Ue_xdKALb}+GI#oP z)1i~Ul;beB;7uEN)BUC6MV*%g*)J6_h!>TO7Zr^c6(2f+hH5hZa}3o)gMya(%ecGv z%NjUJdm4qfdwF;U`49ITYSUBF^IuqFlR2i-r_c45`v)r@FP0$tAFf!MNW7@n{{<`m zKVXF+u%dzEFi)e6UK;}aNBo18H8!4YI^D!{`kWd5O8)YW&jvP0pw4}N_vH%Xd?Pa1Lve8*HnhvB6 z!ku3V?DkV#HojPkdWhNLI{FpEfA3@Z!;RT5sebZ2Ap$%FrLcYdX!x`#lKEs;&<$?e 
zaY9WNHS-l?>qC6NXQUM?f3zMZG#&&!lWzP|h}`n9k*rTK4LgR7p^F1Q;un#Lg2i8^ z(GAJ%5b2*wM zZ@*>CZ)6nn3K6G!l2?HEvy;rYT$e3x6DOo%AN{4ogEQR}-J&*;zXP-ARW~skGj8$SaIbXqQ<0;&fn8N1N z?}MeK8>qd&3d~dv;Qkx8d27P=V)#ZSxWE4x28=bNl4awmZKww;-87dZ$o|{D0_v?% zyeN(Lgr<;c>!UbT{~ildT~BmEjX@=IJZ36PV#G(0nl53mT`>~;^9Dej#X&RA1Z^-5 zO4$rSlhr}o`Y8i0_a1}ik!Nx0*x}6g;YYgZs61)AX9JIf(rm`2=%HuNptD#$&WnuT zJginy$5G9Au5&c+;K=C$Tdfwsk~i_yRPYvC{7nVD@m}zy=?r{NvZ1?zBv2K#nR0z8 zXKQzrOqwml+`IT3~S5R-c1a%*YQum!``nW=k z);s0!`&Ax8!>V^AcbFNtZ@xAAwo=HSTX|9MoSZ0j0(yT%S>cIqsAV-yp#~ zYzQTHlbvx`s5-IvJco_Te2-bnEO5!!L!_iy3`TJd_|onN1on@CbBp)k2=^3n#d#9T zTsDDT=nI9v(NV(VtKM)G-`;a>b{@3(wiKhQG-;l=DfNChf{e7dO@p?iaSu%LNW>Cj z*c7C}Tl^y!yvE3~kxM1Hp5fB;Nt_&YUU;8BLct6Qn1J|njRKpXt8j~0fa`vBI&4e^ zI9+}v(7ES^W9)}>(cUK1w_6;#Z>o`mUSIN4L4=Bl{KTf&Zs?n6DD*HH;5?=q;gWBo zSzzG|xcq1p)7fpoNY5wk(merA*icF88pcA%7H7;_+0UuDl?qao%VCf6a!hwQg=)RY zc;VFrVP=hx>v}yK_{v&XH=fV-L?ZSkrr?abs<5TcnCcZqK%qe_JP0sk+b%d`__|2g z`0fF8e`46Nwi1mtPhsT#6Lz&d7R*;XfgdYnQLtn(lqNV+sReUjN|ymCvP>Y>SIUWb znFf&AW9jm^GBVl72u^Okz=<7pFg|{N3R64|(-K7?R2>z;8jXppIM0K-KShgXM@!K7588bDBvHJp z+=1KXsKDsh>+m5&44!TlrKeTKG3zl4*g3A3d*AvVV<*(HDOUp7nc4DePoo0uC^&-F zv4%`(p%_z1%Mxyhsjhw>v;+xm%seh6*#^58n-4#o|?8Ug(Y!e^zUL>x;8u! 
zDlDC-rHCh9{GJQu9qbuFyv#QUnCy8PCEf-#4TnH}VKTQ2#RY0_#f=~ zIFs1_I)?2_uHg3JIjs3kDywgOh|PY9_}uUT?2phx|D9vu$ai;o>S!MM?Wlnuc@;B* z7tT+8%tegb-^DFd zY{9=zB2envJlb}C3Dl}IGj)?RRJ`&H%Nxhx3ERokqJIjv;I{#lczpsZFW9iD!AD4m zm>Ba^Jb~BbN3u)PlvqXga9WXa0_QD@M}xZ}?B(*jn%k|v@uKrtT=(b|ZW7+W+fAlS z;;9llPUg-H*gG9r-ptCss-n{JZ)8z>BTsCHKF(gBAZWjZkiRGvPptYu zE*o9IBRkeWR?BE=fAJM0*JeZ6fxVckIEuAD5#je1MxsEm`^dLA)?uf}3Nv8Se6%$g^*e)lwe{ zA$f%~weVcV?U?t4>+g$%?%oo}`WXOQS4HrBi>1i!&m#EZhZI!GPp770XTj(}4!jfR z;h3#K@U3J&N{l&ylggv1OXE{gW495D8g!t25l}_!~Il{CGAX zcOOfpMd3~z5%{Yxk<{Br;YX_vXguCrXuHFPC7MgJ&GlWlzi<&ra?*o&V~${^-zzRD za~6{p_W|L%Ot77q!$hWuG70xMI8iMHjcq}!V22EizBrHgmPW8Wt>1`Uh6C&#uR&Xj z_ThU`8T!GQM=y`%1Y0U{z;;nGuiURqaEntywQGT3XKe~Ijt`?fN#@L)|HSk|VL2`w zu39@|-bi-3)P$u4zQ)c#Nw#Ct8dmT36^-AUkn?XW;IE<`4J>;>{^VG**#pOs-~Wfp zZHOd`UrR%++yThn)yQ4xs3-e(x#QWB5jgBbFwfbr6tYeo#IB3A+_*|Vj4FIgY(sTP zC^u+2DX|4JK6D#EMB!UeESIK}_!99Vk#J$v$ zIw?LRgYt@4UHTT5t;#c<+BpW|mMPLSaR(eV+X%vs3sLsSD=<-T!n0}JtC13(ReC7(HuIBST-<*hHX{WKyOOIw&MWB38A?^zb z2BS(Xj2K!oTg{jVZ7&P3`ph>h!$dTDnZ^DX2Y}Q2L0G7nfj3qtqm)P&W`#e(;h|2Z zrk>$U|NR&^wDCOG|MoZ&X}f~U1M~1}ST|RsS0?zgDgZh(MQFyV0yZIOkT@k?BWjPQ zQ@5r*++(RtecsE^1EW_^Ptr~|rl*)53b_Z9PPNfng{!D~QakM5oy`?4@#XA2vbp@m zvrsW9pDnUYA%;1~OY^$Ud!=51ZycZEGn+Di%~EuA&__;0sT178Du(=0Mpl9PMhu>LYqQ2 zDtT-zefa1$Y-~)TlSQJrW6vXDrT2GQdV3RnWmF1h*6c?4?@~-vRG02qcvm1(kbolF z$D^^l7F+)ypOf7vMXU2?vN6z3oKMVVuU(I`Mkjlsd8iTH)|O-MTyfSg_av@!xIv`{ zqp5@PL-smP$ige7=EZw|q8AU;)9opr=vPPGQs{oH&#wYOuM9OdzJa|>ipqF#&sp16sd+G0ote#_9+ z`KzI4j~G3Cr4{>6MdP@MlX38O1{SFJk%ZN2aAl$!_$zq8y~#hob)q-lS7RTDi@oEb z_b!Kx<>RRO5g^wO$G{m~dt6zPhb9K9I7L4i6&H=BQN=3YFvA%Iv2kF#V;D>f$iT&M zHQ4EYLilIca7-CpM~3-_a8N&%89s7_-wp4%&F&L;*OS%h&AcfPXj;Kduziai9+NR- z!Xy;&T?6f}p9v=F6p_0-o4M)nS7F)CwTNfubBSrOpm)=>+RoC1PT9E-dK-8|!~78x z+b`sn+FRm=YrC0TLmNKn$l>vZYq6Fws{H9&6xq0*GobjZ-`pIEsk`k+{+opb6kJ}A z$eohZS+WD|MqQ|`Q81+;3;(cu{aWthunKA_{TnQTpM%AtNHp-2r&W`mR$EYOZdAro z{y3CI|0ff0i_|I9U-=elPJa~yB<#bME4dKnnhyItJaF901TJiU32kX}q7LJ3a4)VO 
zBED1ak*0Hp(c-NaJkJuanp$-@)n_b-RFj1_9+sx5Mic0nm~7C}Q>5E{r_su-Urd*^ zr-AywJvzZQlDcSh!rkmks9co>zw`28UhFbhGOeATBFut6Yd=<-%gwEx_+6bIinV}! zp2}2N^Ek>JK8*E6Je(ihg$3hNz_4QtRyB5GrkXhyKXEv**|rKdUPHmmp56ShQG-x+ zQ5SBx#Gr1^CrIepg8ObsBZi98owkuoJTs9v4?h4dZzE{F(GKd}S_MDT6>-PsBOq|l zL!x#LWu~kZ)-5T-k58t+@BQPLQ_MIlQ#;63gnS>0g9-8S+6;D8co2LdF5%sFCE?WO zDE4HVK8ur4qKkDJaiuX2C*7&VQ2vdY*af2O=!z;#IQ$9CB){VdG-f4*4=_#P8Y$Ux z7K$R(Y0Gs*@V(%|K)!)i~Ok_d&$`|2#L+#U=&rP#>5N_6t+eKh!P6d6`|5!(J-g}1lLz$#!A?OCZ0 zq*n)i?cRcm`&VF~`+Q;IYiC%PHb~@k>@dSTxw=zkC40183FQi1S>i->_HddU#E-gw zuWp=UuKJI#E2AB!IcN%GILgc=T1&87 zzl_tT+c0H20k6v|6EEwHWojA)EUfM_%DmBG%?pn3mdB{lq+Vqzzb%cQRVWA37 zSX!|6Xc;&J7Lm2-bLq~Zc--=pzT~XT00+J=xfNl8w0kkS5a|mJH9=(?b%@n*Bro$drRn!@_QaJkiQGD~@8t?eFiLm6-YTR0>NOR(|;k=X*o^_l> z8y{v@pPqja>kKNH&E$A?=Ew>Bt`UwOMt{RE5;mZEZaaKBp+I*%{zl{?-SBx#5`>o= z=PdVD!PJ;87(Z?~@1o5pR=Zf4826??y{s84SnEL!`@99s)!T4krW!8Z{t|Nh=Fym* z`#hblYOYAC3uWrBL!Oibm3SP7w&PBbT~8U3nW7wGx;zvftmB4c^h3`&ExO(( zmzdq}Bg^~q>Fxn>Y&4AHepxP|1^stT@2t^>Gs{lEmdDLxmqkC$%&5n0EdpUs>3Nh> zScRv|L($o<25sxC&@}NeUYlFQU974{%R6as_sSt2P0D7M4fwb#$&7nzAVq8Jey}s= zBxp%Qk^oAs)K0R_WLab7g_|P{nWolNZhrnY+`F#^F5ZZQ^L~R|RmD|$Yl|&`idc5o zFb0X4HcEOLqiE_aa3A*zL(U9B+`J+b=~PAA2ql5#m~!;)J&6`ClUV+d(QMk9R4(?* zHfA5J12<-SbvmuEjiDhWk^+x~JHWL23~hd@OpUz`L5im&y)-ur6M~HKtnL_= z)XaoWeB{}ROchd;?S*3Vjk&6Y+hP3AY_#0;9CLi4KyGNZPdNRCWBw`3J9rq|@YW8U z%Ka{w47tPCV<3X2e5c?|w z&##LhX8Lb=U9OYShGdY{#UZe1Y%EQla}hii{SbJXALV9zL-?>vip}{>xqIfb`CYe9 zlg#8YNcO*l{qt9_g7XE~5Mzi=lPQdj*Tk<*58*_k6ckU}&U6Mm$tFJq^14L~O}Z^u z?~`1(|0f%yPc0D?7z(N-lXJN{`CU-_`UyVF`vV?DpWw)sM3BfjEtq!fHmOj34XV5` zxc;dq_1?k2?7R!cRT;oS`Bd^XWdv4~4Xx2{8M3F7w3*(lS7>upz~xT&VtWsnGD8_R z%vvDDd~eSr{j1GQ<6cZ;Z7nm2!rJv@VS64)zdVZhtWu;Oj<#V~RyX7GExCiu2e8Gc zgd=Os>7;9Zu+l3AK7Sg?D5kJeohgFjEhoA8)uzm|?=aJM55zrrg)~W0UYM2E4)tHP znW%?UU3~2k+{tMO-K=w%rH%s5UaiTNi05&$9?!u#kVYaT~@cj1a1f$z)r+ zUa;Kw9Oizc7$s!NAvcmwjV2DO3rH|x%Ij)bQ{pILM57co8A=G9)|{p5u9~u$X$nGv zuL}IQ7adeI)m%8oIDkdyT!+?!!M6G1&?`V<(GYX*5wE^zO`a*=b9=Cfz6!!Laf~^A|FFq)y 
zHQNqw#p0Fl(O}32%~!*edO|i>$c&>7@JEOxW8V z55n-DI7)R1XZzd_0=E5zAnmg(!>Je^R5#$tv-@gGglkz|Obz>%@fk;LO+@+TF>Kb4 z`&gcr4sO?m-t}9D=(XGXP`Zl8e%Xq$bEmS=xI~S6Q+I_E%x)x>ykUYWBX_>EMhF;> z2Hx_7OGJ8v8a=IPf}a*Y#w&FvG2OBRgSExr`NT-N;C?oA9?^m$sT#~-ZX(DOS6gssCwH#hMztV-msQ zmyKe!??hQ>&@{R~Q5@Ba>#=^wH>-}cs!47|)Uq(d;2sg&(6R>?R@i~c&%bE(y#kVI z_S5i9o**2N2hv~61-_R{A=u-+R{o1GL8(Rr+vRL<bhyhD=ltmgI+v^AKBN1QXshLQl*7JeM`2Xde`t)G3U zctL)A$|jSPW$6l)WYh{4r~Jo{;m;&ThI}@PzMmVP% zc?{p2N#|--d*NUBPA{$-!)gZ8csJlRx3otejN5|9*E3x()#g4PdO8vgMCL-wmMjQ( zUw}Rl=W)f61%zp7N%c*OaFozUN@%~slcgz~Uv(oxVtGRLiirk(?2sjexVxW^E} zlOrkfm8Oz1^^l-Cfv$3V$mti@2{`4GG_|Dw!X}DBz|9@t-x>#I^Piw%&J~2ojzVFx z6#k4+gbx=ZFv|ETHhnh6F~{b!F}~rXDRU$}>amI}dXNB4(X!|*-NYsR-p%PvYT#*F ze@43R1kjo$?)#qYeD&YSa5`i$SeJ)^V(k-97*_=ewLY)UI(wvUQ&)=wjDWOOo0%W2S!%VlA1 z$LAqg^r?A#0}S-a!%N#~0>vqD++CgqTX|mzEH)Z}l!FYtRPu_mpOB2Ddw6tAQ5b&8 zpNr4C_VLUQpy1Z)I7pixiKi9z&{&%TT>A(WZ20_5pyB!#O#Nl)>Ai8FGF+BL936z> zWF;o1`~sWbJ2HNr58L+hJD%Hr6_fQ(u{#fosijXM{kvbH?y0*Gb1nGJ#55h*o>@)E zi>YVvn;l_vU=>7NQ5S9wp2Y8U*h7|mTQA(%Qp)~F`LJ~B6?BAuj%l&%UvyhHj~zZ& zN$bMyLuZ&4t*p97COAr?1p0{rA?-)_hSXU_w#_}9e{UM z4d`tk&tjL@(8uIDXgBe>MuSW=PpL*LQ6CtnTMOZ4tsvvPf)+);;*KtS$muPq<4$_7 zLFw`?+@R6UtM{)bYeKtmSpNu4(%}la>X-&j0tT7}E=)3IU2LyX^V zPCc}gsr#!LRM%i1nvaM=GI<33DbRz0vJm=hg&*Cy>jV^>ok48^bumA1FSie}q1`r` z%kX?cRLA&1nokz?nRud!Z3YgLRut}9nZj+nDFKcp&U0rkHf3&ypJ4yKE%^OS5e)AZ zqt`m8f~D0e__IudB^b!??EE`nR<;kfYW!C2<4VT8)jbP)4AR*O_d8^+?iMaBxg4b( zOX220Nv7F9Kz7?5#Fd+hS?a=eQ}wFJ^@LrVdY=$yMP@G}3JFa|LCSD9L}d7a+UN10YaY)2?v5a< zU7o_xx(?i2mdxGDsROyyUpRxBL{K@Z%w;b91GDPh(n@nZD1wE!yxbHz*E!N1Z|`9v z_2fQP9ze6M4$ieG8}F-5MUm+dP#69gYk3Q>F+_~cb+%^~!#9xat<9*mdR*;?tz($| zK?kO@<~hDu_Y>PjEN6>-CD|UIA`;`U9_sGc(^&1p#80Te?mu~eu|LP-#Vy~t;|IlP z)YCq=@ca~NbxYOs@&NIUgX6E%5i+g;dkk=beTg0oC+3H2fr(U z_v@#StHNR;{V)`oZi~^ilX|iFqa4$AkK{l4W(d_WD)6FEo&A;Wg4Cn)*npk^%ldMN z>k1JW%2}AjlQ}1iy()Gv-rbIu$T9^dBgKjR=fyPO*FCH^tj6&AL;TmNy4>$85peC; z50qIki)+eV!NyuWMb#u@tnbtWM}840TF8cD 
z&)50YL5U?qTYfEx7`Fz)Z5@a51g=5y60Sc{@D1xp&_#pg#W+G<{FSeQ6>f;y9NsIj;=Tzox?W@tUTcyQARZ=0#v{oeU#y zra+qLXm)!$heOc`=FqkctXH3fN8NSYFsj8;yGkJHRUZ10`!H|Ec)DUeM^{^nY8JrAF8$^&xPy^asxl3O{a@@o3!gCddLG!Ez5ao{F?f_s*H76Q)Cgu90; zx!D({Q0d_jT;H8zWRAQQZH+oY8h*Bd{TL^-;>)0@;2E^bsL)O$Gk$L8T*#Py6q4!OgCF;2jqVIZbJB;}mjbuRjs@ z=XDTg5`o&6AHw#Ev9v|*5a;)GHhh@;7($ND{a; zeR(_CGWxO``p5Nhrsee z6!-JRaqQDgMzegr@T$lH*!E!#wNOeS%PKZvM_?s5w&xPZOYgXIOC7kmJ~v2!mL5J1 zyM$(BJtm}AaS;yVht^BE5NYg#=|6vC){J4o^_DyF%bkd7y&fNQzt@4jJ0sZgAv^ju zlxtI1IE&q0Z4Fh+USqCw8OtvQ_9A>ZtGZ=Nv_CL#Ii$@-rN1M`{>t(tV_jfo#Y^}) zkVnpG4r474gCeF$==)BF25&k5p{D^_tWzPXzgBQDw2G)GTY;*?a3)tF!mrDgU?a54 zhW4bdnZGy7p!@E~^Y4pIqbq6+AwZ!PjL)azYVSC#F;E%uUs_pM+@{Kt3(BZlf&`sx zm;e*!0=CtqzzRDHD9qZ=jX&MSX3E8JGtP=Z?$z~JGzNGfV#l$gS%vJ*o6b4|{-QAY z1#{~Yt)MgkHvlLPu-i50&WoWnX z6+~<|hv`}zl;p?prs*$$yG{GRdB@QDHMO6tF|va)5iX-pi6Eetsvnllc0uy*^RnE0tdj~Jq72xM@P55auhaQj9gtN#VT%0xwo^&uy zrdo|n?0Cz`R7o*Orv&yMecNXI}nP?bOHpA)ADF!uq&*#QpS^J zedN-l6ewu;g^5JK%^xg+q8szDrK8$(>yK7cpLh~uQ^&J(Q7vYy&&Sp<3EZ?pgZ!); zn)^=t#1BSQ@T~Tm!1+IU$?2OAB1c5i*Y6exX9XS-Hm^vjRZ!5PXyoLt0cj z?R$vO$i;xK)Vqa!I~z~^GPELhx6ld zYWiQO@C&A^qKV&2IB#lLv$Q~k*=+Qv`Sj9=eBI(9ylR|Vv$bLd|GZx{>uGO-2#;IB zJD-c`ny9;+=rcE%Cp%2Ia{enks4#|6PeDOk`ycL{Idl{hZR?J)OJP6dpb zy~vi~Yl+4>B-+i7N$3tWw0gOg6TcD9g}LRDyv139-=Bc|@+s$?ue2d2J=+CG$LI2{ z-tgy^#l9ztdUCmTgYJk!3(0Wv7&l==Oq%T+bMW6tB!CdKA9To-Qsx#h6qZlH}dvqoHcc+xx(!nmCD;a zUxKu!e7*CYTyB6~|l^C0{0u79L#s$aL(BOT16Z#EErk8!3u*Ag8O;xNFV| zWP`pZ*&_0ej2z)2czo^{mz40CbDgWq-6CRG5U`014j$+3RTpp>Z{n(EFP0QE#i;N; z4gNNrGJ^4X#)`wZ?D3fIwvQ~by2_jX`6Kr;%al`3t*M@%@t6}z&n5*sn@lA&I=Fz* zd`{BEi1*XISCD9!#@YSneOA?;X*N~wfKbbYb2vUQNe(Resb;#hM0}WMO!D zsaZgUr`dJ+ETP}}dBShXD&(x6f^e0hJ(HA=5y}?zm^r0w7JeC2sPS8|miJCL8ZCPgwrsboxXITj+@PF(uRdUAP?uQ~NDHXLLK~siiqSPs!!y zty0tfsDSTXTFI^*3a2Lx7V~wN&Jw!UJ;#*yPG&PT7O>*0p)gD%3ySXD$KyuXU{djo zT>l)yQd3UQ83m7V)+rP0srEKo^f(rx6S`0~#uhHHcwF_9Pj@7p!byE|`0m#S*`$sp z(i;7Z-?QcnU(rAw-vvD9uU~Xn==7-r-$r oRoaBJm< z{1OQD?j7c{ox7>-4r}4nzwg->!~IPC!UP(kU`|yE6v1qN4Z{;5{3l+em~So3cfwBL 
zBE#(QryTEID`TfIgR2)A0oPCYnVk|n1V=Ns zm}&5)@h#=wV0F?{HelGyMy<9aZr-ti;QUC`94^Ce(>ug}+pb;H{2`sQe;^_JT52eK QZ>&OVKMDAkp8NCv53lULwEzGB literal 0 HcmV?d00001 diff --git a/data/algorithms/vin_pretrained_28x28.pth b/data/algorithms/vin_pretrained_28x28.pth new file mode 100644 index 0000000000000000000000000000000000000000..d3ac1b05ec80829d5ac56cfe73e7ea5f04dd1551 GIT binary patch literal 13870 zcma*Nc|29$_xPV#GKb7V6rusWI$*{RJiCOL093R&=CJ{u|>ksu2MmPQBgsDUVb6#1ARim#S^2) zdhi|N2Ko6$jbS4toFmrzMfk0s8|WJ)5iZ~vE@;u*EkjG{QGvhF5e{U{Iul zSA^fX=)j=$UQvD_k)aV1V|^RAibak+-cw7FOn#=B5_mlK;g@B?%@={0~>~=(r>S!T$xz z{~xgQ7+A)@RWxA6xHOVog@L5f3y}Dn=gs@f2Jya zO!diresRmfb;r!e^IwMgzf&Rj@7exGK%sxP{}~41f2`o&9f8qF->OMN zeV#C3)B6xgNyy|wFp z9AcA9LvT~Q7ON_KAOD^xM;X2$Y_qMPNp0E8J-Z(8T3C-x{v1p__zvYhEyL^^ENCTk zLaTH&=tj4~-ruHl!HcimFN4ex62o(qsCy6K+ ze&+?*ijC;N+7S&EmUyVnA>DEU+{vmbcHTi9WT(vx;%?tVN(K^0p7%Ur!i|H4Uryk# z;dVIgs>lj@Xp&xbIkB`kLMHqig7(a7%!fuV2)VPJE{sZoPB&fdjhw3xJ3|3;b02_& z@_F9A#|N=0AfC71V}7;Svp8nB&zHIRjti9?Z^5hRAw;|`#J%b}QD`*>1r$1XP;(e% zMXks&hdlHi)ZuMSs>G>E#%ybtKPcrHk`KKm@TyFRh>A(E?nhK`VdHboLHTHQ*(O!6 zY2t!oM;Qn!*}=NEDlqg;l)q!LBr#^4p)vamcube1@o|svxyg6Tk33E{Hzl$%`2(n|+pw8>S@*!d3)#P4NzKJ0> z8yDl9>RgWZ1|2X+3uR$jKCF=OqMO&N;`%4cQ0z?wW$&!Si_aFr3`tk?)Xv9gu9cL{ z>SHvd^?=`S3B%9Ez+8<;2(9ix`PrASHT@T)(sC@yzllk&G;#H)H=0N3QMTm^NRJG` zWFt+YUe(Q&Ik}rr{riq-s{0N3E8XGbX%SX&b~5tj^r1?R6&vwcj`Y3w1NrG!(NR`| z5G^j#QA?B7lZc+FFno9yfc?T#Ve$Kq zsJkH-o^*s^nuI7eU7t;+C_kW5ArB$y^=&YV-3ui-ui)@KKU?!Ed(zr(0+W1Jlg6|X zDA_j%to~Jwmdk8>s>p>&H^m^`F`mS5PJ`U?D|Bga9z0OF3R1dqwD5*9 zx;hlY)*4A7dr6U`8h<3a%aS2BV*%_iy$2Pqe=zu|i`jd683vE73#asFvCciZCyh0npGEAF(q`wpO60dY#DHk624`=X3n%Q@w(U&WSX|xdj&OT?Ch$T*%|Ud%?RVjp!z-*v*9<i zgfC1WMp?#q+S(WDb52ql?@&p_>Ttbr{tc8xEd~{f> z$!<$t2uYit!-_Xw7*}ma&Kl1JP+%p)S$W+F%Hv(I)#X0im;TC}mx_mXM-w3BV>$i| zn#u9^`O0bE7lbA%F+faYxoU$YIQa&pp|dBjD~d0}rkoONFHdGaw-%z}v&rlp!zgy} zd=k6+$|Rhz*`F0zoXEBw5++gGuYgXs5P8|NpSk1lnOxi)Ps7DIIPy@J`23cFb?!!3 z{+mnG6YXVVn%SJ4SGofGDqH{(fdy{1QwI6QFM;{=h0#S*WS-<0!}6hK15;xS{PEv$#!x 
z&E4*dH>4Y|(Q9YzU* z-Xm~N^$zkbO=JhW^uf(+2HWv9AES@Ft%;DI0%BKZ!lM1VSyPihoOJXq1cj}~mE%?r zJIh$^*Woj~85<>8V}T4v@Ti2GWfZm)8Zt#!CXu2nF6o~-oxNP;fqxz>CBBD`!MFwr z#hb1`tw9BIeVre3TyF=BSH17Sa<^ow{OjrHR|1yLf4Y%jq3fDJ@qw-s~HUJ->|A2Cs~Cpg32t@O`ETfW7& zvv?wEK5Q>nVTYsIAVk(0Vnt2K(_U+IpK~07LMM@gl31MI-_E?9d=>USe#>Znx(DaW z!r^s(8pO#}F+1C>Y`1P#COhVR1vXNP=ofS{nT9t&r`ZTPV)bEzwm2gaPzN5jkCNK` zQ{h6RK2gd41TRNQIY;*P)9M$w7&{`3D?A!WZQvm7b~VL%F(KBrQ-N8Vb;YXS<}Y60 zXJ=d!+J%KrV==ffn|*WdDQ3!;u#5M_p?1jw@E+jeN^d1h-FN^WE%W4`ce_O&hk4@o zdNF)+m&ai;{cuThKRxm@5XD3K=n`Q^SiVM%rLIZLX7OJ1j|>7!n{zeG&ikNxz$%QY zPRG`&GyI;OmvlqnH=h69US3ds1AY6@53X%kh-+<5)ol9E0{2!Iqs1#TR<=n3KBRSG z?AK|GdCnrvkEC#1XcGf2l~Q;q`YTudZYs8y>4N5TJ+RqxgFZj>3|tm_!Fz=U#JRnk zw|~-M$Nm=lUUimHDu@8BHwu&tRpN%}PpCt+3a-$JhlXjJ$a9~&xWD%ol93L&xYvsA z-@}l!Qf)Xadl`3SS1^i#dhCy#F~Bcgh+XfOTTijs4aXf#cvEINkh3B+G~KrjMpIMi z&%Uw!T>lo+tkq60SqYHCcLLE@N*(P}zCZ+b9e#@oU{YQ#pkDe0B(Fyi<}|;;B2jZZ zQCN?A?km7C(`-yAJju8GQh?v`85rp`!0fhzXuYQbUF#5X#Z!4w9jP3ypB8ad@51Y4 zLNIHME{*AVjc@1uX6EWUlOEO4Fo->mw_CZLN|AFN!HH-xHy=}dpIo%BOU5#PF58f|CT2wira9!ehbzw*Me^K zW%yxa0;BZn5?XY7!277bxNYOQ+B+&8*tq31Bc`B@J@fQY$ITvF?*zmAW}w@xOEIZy zIT7%T#3$k^xK}%liSyfym%hIu_ZPnB9)G=t8@RiJD{7x&AEA@U-5Hn6owqlYTd0!G z9q(hvta&{Sb0^(mrqc>Mw>g+Ev*ZTdzFLU%I@|{DI~!1{LIxLXJVYUGh%>Dz7vvso zq0*&_ba2B3I^T4#+V;={=4+cGQ+i4sOD^`p`ejF9i-jUP@vjmb4vXh*H8WN--tdaJ|9LF9J7gF=+IPEOukb3{~q)Mq33aZxwo%EFHPn<@!u0M?{ z1maoG3$}1mV-@0$v& z&LRTjSPYix-3FtwCgww04IYbm46~FjqJQFDcz@tGC*YzfZTj|wuI<)fr%5MagES8g z$jxMA4w#cpcVFAqr5C{KaX!}neGWg`m1xL0L(+70h&j9OEu=Wg(&Par_V?Wfpsjxx zG$Jc_g;7tS{(%{;QPgH5Uc@0Bo5?2q(X1`^vtbYZ8l|PlLF`0K$Iojn(;>~900|ms z${k_y3o|I4>&^aX>%-ugPHgB}FD58V6d#X#haKE3I6bZvQuYk8A1vN*zNZ?4nDj~L zeG&_e-eM43dJY>joj`t@8g2!9n7Vl*xwGXoEO^if1}DGLTD~R8iWVTA>u!R>#>r%` z?}V*fo+EkqVL$MG=8~AOC(vJ?4CCItL+`0&cyXl_Ueer&iI-&=Z>wr>8b1iXhV^t9Z0?l1^icgWblUWd-Qy(B z?!E8;qn=B%Z!-6znCUvk>_InJykRjC9vb;Fy35ydcR>LT=VldjEq@GGMifZdI|3uA^0-FQ9|Y5%F~Rk5AoPrI{qDKJoeF(u2tLE8 
zJe`OlLi?!2#V$M|=?epP;<#k!G0xD}hBnnb_%rq;E*-xX{3hd3unamLSDGUVR@%HT5y=bRO6=&IH!|5!P?{1zU5}Skd@y{-!B0 z*e|C*ltufoc=Z_?Wgf#yzB57(3qS0hcD^Q~X*rowGLgMDi=bb`JNo$9H#D-$VwAfw z;Iu~)ihO8gyyTa2pQYUb*@QTb@{cc^Rga!9xig<|#88X58(|BtB6cye_rJ&XWyhH` z-F!~6v^{=SiG?!fRlvj=kS{kZF>^{S?1@=wTgRJ7vSy8gld`v=u&fM(b~eKg)3cb; z-$Os{HiD+I&uDZY1M2F>Vl#s?K=HvTP)e?W))!4EnLm@&x@ySI?dl}NIT03Fi=bg$ zi_M9id6@6M5L`_6^DcFagL7uagfrm|ewwut`>vg%uGZ67xBMaArX4jf+~p19#cXip zlpc;j)lv4Ak|w_F+RfON<-p?wYUGQ&KYV?*gRcK=2p9h(QajISFuT}~1bv#oTp4g7 zx`!{?EH0P^7ia%qcD37K$)SU^UAT*USG&cO7|gDg_!|hL-+dT4zsu0I;wM_!M}x6s zDpk)a!I@i@fTGX^Jn?*fjbKYIT-=yzBeBU2Pv!bjgPk9l!{&zk#Y?Z!8FVMw-*jT; zhbci~#uIq1TaCP;NiYbnV9Sg)?4;9~pmoRKU5YpyI2QuP9t2~=#R=?-8--9MxD2JY zs?^r(c#2An3Dn_|8y@km!qPJZRHtDQ;f+cV)lyT&zrP5|B68ty?J#ZVR|kw0L&?2; zwAig1ZHLVv;oWvRq%6aKT@j1nDqQS|D#BZK-Wb-e#$Mt`u*d5>Xj()uhTjcEBT~f1 zG}a=S70QlZF`u2~n8`aZETAF5>yPBmA;tLw59K2+nFzVBNpTQ{z)+oaYIa z)bt8+OqBD`#mE^abhxlpwl}fo#(Z}3xYKy}pef9_R0120%CpT|8t_};ME>aYEasNF z7wm8f#rD&eASHA*d39$)k!4d<9ypVnIt$9P{&&#p-i^M>Og642wZb{#(8;Bge1?k!KM1PsCz*NCbcM| zT>oq4#+>b_yDl2zX9Y76kEda=mox?pe&t?tTm*e{4{;WKvu5Xf6T{1k4&Za`R@j^( z#n!cq#Zrn5(Aak+>+3TG4y#UJTNG}s;<}u8tU%&q<0M{+`546LPG2{Hy*TbQ}9i>6`HJm z1Rc$d$TO40X?w)Tc|nq{If|5zfi>s5yjXLZ=`s|u)DdKqd4nUIZV0$Jh1e{hD_8{UA-A$*`+ zLrVnWm^nT-@uYee%;vo36}v05ddV8B(qAohyuCLHK6XcqC5m+7LM33 zJPZS^h2Zh00k$Qb<$e!2NS&AGFnx!Si5U?FvTZ7^3Jry+JHFQRs#c@=vTHE+=r5Sj zI~^UI3gE|RI%d_T;>8$qaJ(DMeBXH!${xzw=H>2T&gX_=bm0X!5IIaM!dsX*pN=qU z!)3H#y#hO>uK<_t+6G(3?8M*y2!_vUgKwYj((yTUOnF!ZJ@tWy&C|vB^Ty(wbrCM` zy)PWOcOTG^#1rsnl`}cD^cJ(Ebd-5BeFY?~PUdu-(G2b;gqrlXv>HauEZPqcuhV`_$tp^uxvlgEX>DkA6-CYc?6`UK1A6I z7La`DHT`_>8#Qj933F^?&|u*TI5j((uRXn=C(F&luJ4+dkkHO79AeQBhp<3R596Ja z!K`pUZ`Zzfwz4<`Zp#srm1%|#ska%q@s}9>+eJ{(JcK5z)&r8zKwypp~DHy zqaSe%-a3~st@FpTT2{hX=4Hs5N!Mc~qr^^zE}Ac3$=l!l31t*dGRvP1VX#plka%@+ zva^efx>l17XvZD5y~3_=>`V=1L$GM`Q*t@v35ly#AXy8GQ0C@jHn_V5H9|*Gx;&TR zn}25vq(xwI=nR?}8LiHo`3B_AXr91AeltgqeB4)K*#-x_pyiiF*a#H=}}iqozy6 
zgj>h<2NCjO{U^HHK!!bix*n>}7_d7xEvDig?|8k^TbPlKA+YS!WY5Tn;;UuZ%$11! za4FuJ<_?wdv-)_<;Um+T>5=a6v0*Q2ztCdz|6YUs8^4*gNb_L@{ zr!k)2_c71h29TQd;_+R@HF>3StoF1c_(ozHwP3wa(<756zAWb|HEgH7K^oXLu*T~9 z-)NkiC`6`gx(v@A7S}jNPDkg3vB(jUAxB<5LX)>0jQsCVvi;2s`e$@LyX^Kx(D4gp z|JV%I3W~31H~h)tT)4N0E!303i_;z0uFgyNeL))bx=XWDejFvC20zi;j-c4WhrF%R zqmePng_kRvprP$C%&TtUyl=b3RrijCSGW3LYsPYn&2NW?TNGIJABn{0&T)3(>pN&D z<4jygH{L$*3hH9d!HslnGHaGDdD`wlwCq0dIvzQxP~6E>`Vq3IIBhG z`!6StbwlVA@PXPt@ipkJi{I&XA}rnx0=1!Iu{<7C_3dJOJ7YQ3!aeYpF9{M`#&O%e zrO*$vLtww!A2=hL%Gum61?@{O(#wT6VXj9W7AxwIlI|859Q;W;mt28Ab|$Qyg&ng% zP=zQ?Iu6spn8@vQv(*rk0F{l`z~n**JZn}bh7AY7akkA!b z7!(xcr|+xEqX4OPjfH-g02SKmu{%Xs~a{!U3q-aQT;1yHDE zxwcl1L`b%-95%Y%0xO3^CiBnHY|M!}u82tL`>uf{kCz>fv(my33%zpfu@| zOk$=klpt+xH?UOx2YW^fPQ zx-7?}8hEkzs2)x1##gcC0k~iJhFleQF0ADNxH;Liao@bUf2S< zV(B2&nZ=}*s}QfL=itSx2$bHO4o($IpjQ4qO3ii0$&+gFjMp5@K2eIfhQ<87pCs9o zZR2r;_X<|@xGXK4{tkCJuc5;MdLU9=hz@6zabVD&z1#5>U3KP7mq}E@(+HREe9dz@G=xqzYp@{28#%!nv3N@#p34ztes9{$&i!;3 z#l?Ks&7lWvmWDSlYk!xaxr+^$w9I7O&&cp6o^eAlqm|f^_ZrrFI^#&rMc{8c29@Kd z1N%}O?l<)?dVw36wok+Jw`Y8f+qRmp{^g=AyVa}c{A$WB{T!Q>w|po0PN5Egcd zEhcGLg7AxzP`W$@0v%;JPRVQ7LmLZG zO!hTy`5ng!epp1Vd~k$z>15Q`djp#0&oJ@6d4$P(IW`t<1^n^9AVVO3a?AeH++I&V>B$2zd#U1Aa#iCoVB%Sfm z6?SzAliOTr=8XLaeBq^#SSf9`VO9n82>ZZ@ZtTHcLp?~JJLY?wQDf<-F4YZAM*D4b z{8wK-;{8?a*z}c$C24o?nbSGkvvD@mywO9c7Rn^ZEW}+No@}UZB5qos$+|2Shoyxz zba{#@!L8z8&JiIAdosxu(g_EL_rUO2e%7Y30z?BWc_7cQ?>Ec9@s*R=hd(UATv?qA zg;;`XxH`EsAp^_m!?5bz9UADFLM3mW$F}C^+Fr{rJZhI}^IFJ;T{JAt$sG}6;nruQ zS<1GJ?Om_8LXK=Gfv3nI(OmzU#6RsTh}zSjXOpU8t(+C^|F*aeAU zrQi^}iu+=}BvIY3NqUqO=t+Aua;>FltUhNsGhXEy@6XdvGG$5zq58c(U3vVC3hMO-6lB+KJIBN~w zaCS(%2NHM|S9!-F-|-g>#Ao;?=o!sP+X<;ko^ZKe5?>s<&E6OP&2x8`B&GJR(DKP% zSazra&XMhqs^<(_gRODnc|qd+w}mQrtpYLsW^kN)0_D2vIpN2K@c80=loiyXohx;@ zBOkV+?8^P5m z;Xa>3uzkM++`f8^o`a?Az|S{OKB z%>j6*$Bne@M&;#+oKllv)y}UFN&w;5#Q*i1Fxy?OmW2MKK>c&<7j;#98bx0ATj*TS!y%_ zo6;T0x3`l}H|iv~S*!w0(E#e_KOO30ro;MGhOD4#33w%Ff~VFF{9GoF)7W#|j8#)` 
zvC$FEi`QjXl`leG>BM4p+k4!Z*@{nn2(xTeHd;KsgTE6y8I7aW7%hF1shzPL50qX4 z6`^I+#;*mE2j3F;s+O8leq5X!e45d1+{)BXpTOGlN3e4F7Eae)ik0_SUUon|6BT2Q z$A}sJnzaK?1z!QDW)t`oA_+e6D)4$|J9+YT0Os0P!S}IxhVH|Kkb3_tO7sV_a`=I{ z^4pO0)%Ig%{!*n82`g}%@IcL$;rXol5?xd@aiphy&c~qa6#C-CNv?$VWcKmw8Aw#G zVQbAV=HM0)Vp8Wq^w+My2-9_Av6k_8c+405{q`*SRc*udN4AZ{2sYu#^kJs+GG&Z5 ztpTl_hqwjW_Pj*hUFb3Y2bC{*MkCa3fO*IXm@84p`PMJWHdmRkGD{sGjlT`tRrjJ< z&|5Irm9n$Qu7M-`4=ej%=eh%=?~@^m zwnvCVo>SfS?G~h9hh^Pbxd`r+1U_GtInSM0%BzjM=ggJi<=39~=R=Rrc2e*~gu8wJ zCX)I$pPxJ>r|#MBeByC@E|fSbaj#Cj$}v89l3Twpy^fdR%(wjPSr@ozF}J2Hk@QV0 zCT0~kiN*mfmTRKT6}pLJC~F+|!a>bCg;inPx0XFb+1rNe)R$RTaP>s!# zO*u?R`TQ%dr)NC-xso7J~&wDa)I_E{v zN(d5IK#RZagV)VryobU^IBVl3a9$RVQgyG@Hov1@aTEnAI9n}ZX^;t@hTp$Vi&i!> zCBI(sEG3g{Vng$I7fpzJL=k|Bv(+_bnOE5n($H?nz2 z&*XSRPz$}^n`OQ`HiL3*+*hgLn@OGCb%WO^nYrn=L0ctWRlaDJRkugR+v zI2{Kl%83JX;;Ye=NI>>lYKw_kUPS{@f1Y zlMUW>-e-?;on-E?(*j~3!~GeEV-3I1@(9s!b+dC+(C6v}%;%r^Ez1`jdBkOoorT=~ zBG&bfFSuD>;9Cc&5N3NRzfF7&*GSWluU@XkZ`jwt70K3yDenU8sHr$sM<$WWWplVc z?pU#Jlbr0<2ZZxw^Od=_kFwZ!r?>1_bcQV1xCym=pAvz*D)RBp2wd4C!)-Bf;g7C% z;Ht)G^RN3G+p(c#T<9)h?-mR5AMI-7T9#<>zil&P@1Iy=S4dSs@U14-uTZDK^w}t!;_XSrQW-25lYV$$@x23^%L?-MX>fw#!+sU=o#|nvIW8IvD}Gv)3|GPpW-@gG~`~2%A|ui+wp<2 zBuPo=MC-d{*sZh6Hc)|~=I(Jet6peAs#Y>BN&QW}S*^3ZQz6Mtmi)_huXsSt7Io5w z+DxpR@{RHHkAtbo19Y$~5uOW7VXJh^_-Qzvn>K$7=qm|u7c5<4Tj`XBh5A*fl$AxZ zm!x5%-zhY%FvA}fufa`WuWh1bEb~*Un7@AhPn2JDja_2>hxBDi*p`Z) zSZF_@#OjB90(o~e0s;&X+bzzX8uV>JYaS-Y-O zvIW2=-_P~*dJl#c9ZeN34%aw?Lkpe63mkVZJipY$w2e9n;R9t#* zE!o!L&+gv&3jz-xA+jqIaI~(3({kI9T+gnAfFu`oqO<|K`NR)`X3lhUG%h<*C4GW$f#1+6QDVP)87*tvj^ zzQSjq{#BAq@W@~ziUio9*8%8ibBIj*(!}I+TeE8<-63e6J!@xK!z`F|3gS;jp|$K1 VXw|s}UQ5@rJcT8^?W^bF{{w{Yff@h+ literal 0 HcmV?d00001 diff --git a/data/algorithms/vin_pretrained_8x8.pth b/data/algorithms/vin_pretrained_8x8.pth new file mode 100644 index 0000000000000000000000000000000000000000..a8ad96a378140967b72ed1681672f2d7a05b8644 GIT binary patch literal 13870 zcma)@c~ni&7xU2`b!f&9gVkr|Yn#VO z&3P+5@VzBm(!@3zyy!E@R>* 
z9TXZFF6$!WCc7?hZA4_aoQss3Oz_&!RpIh3GEr_rf`cL>gZ;hyL;ZrhL&FskqXv5L z9pDE0`$i66*D1}5@bi!G_j3&LjZ_Mk^bD6W@eG$Xag*HO8@V>ZH_+TGDl#Z|osw6C zzfV+9u%B0?f9Sfk5lRDno4CoZ8+gWhm4CSGzaCba5xmwra^b&zR0@}KQ?QgKYZNdlt7ds?LDp~2@T&YZnBuT0Nfz|#W zu<`&{#l%f6(A+1;dtJEd|FDzo?CfkOPO=Oi`d_SC(g5tA*niyY9FT42fNbgmh+zY= zMf^v$;s1jSs65He-qt2uA5IpL$CUDE;62e`~EZK-L~0hyRDH^FOkc z)kN!ov9t^y@n5p;|L0)p4Pf<6+!OmKkh$_G|_I7Rk*=_xrRxS^Z%u_eXgaOq@`TOs54Ie$=MxOt0RE7VJnbd$;{v)9D|79YXe|o{chD7#X9`Vo4{vYq~ zH_w##*C#Y30K|XJlV3OX!;LRHKwJ2hE^59HFQWBebm0ToGb9w^O1{!h5!ujlWhF0F zLt=J4QynAjd=0=~!Nd1S*U6oI0OmD&JKRom1S9q<{QyCV2Rf+oF>D+yn?pV=P@Kv$cCuCCR$VGu!()qs5$8| z30GbLzGcQ_jcp|A6u06s$sv5`Mm>J3-3$c9X>3iQ9&oqMpo&izt}c~Dj{+ftf8NP2 zEgO#oLo6Y2igTOZ5{2Kz_@^{Z0KT)p-}eDH9>(R{7lwa}s8R#jO!~D-9(D^)wlT=dGl9&CM^!%46I3RdXwG&=Wg2?7`#Rd$n+O zJi@YrEp)!saPHyC2WaJZ2#0yqK;{ifwu(lvIJ|^@G_Ha1dOR%qw|{Xbg?wFvJpb_P z1-fa%bcay~%-GWK@o=-tiM{i5J$p6l9f@3(1^M>^*l5NL7S~Ng!ThnL>9_-{;pR$j z_Yc92tnctCS4Hf4aJTUFtOV4brQ?vOz8|K%o{xL=wy;ZDl3`wo1+kDqy!H9Wiw9D2t($m~0gKlS5iRR>{pn$J>_Tt{s>Q~CSqwd{pKnZ$kfELJEG zv&q-&Fy_)R{^+D(!YP$v_;t?E;nv1#FdL?@rPLTaT{|JVErzcr$6$+2Jc&`& za7gruBx_p_iH@u0(EB%Q=@^ zmx&2lML3=J_&UctA*dTt%DdTH@13$8D?2rrAtrs5E^JNlMq@E3=bV@E%Q*mHsv0JKrwh<8v}FG_{*-aM=StzxyH^X)1QBPGhFY zZ9_YwXyW^6C^~)AWMALSFtY~A2M8*ngy1&sB0co; zF4!;5AKkxDjK7|_$xtccD10(ux=qm*-EN-XHnvz&nff#s{=^sGWHb}mZ+f&& zbe;3@mZM*!f@#``VmKo81G=WC!0sio760P!u7+G#k6kF@*l%weg7ZrJxurI8nbSc;i z#$w(EQ&9cBnKo%&qYK~p!`4+pV1!Q$cWrDQBfhLeX2=i1G|%reCv_W0|Go;w*W>_q z*y6UX_qM+#odf3uO6-C33K*x_OTwl}Lh9j7zzN+b`_T{-CcGsTYH1MuNFSc6O@&E( z5%bdg5_)TVBAYc2^HDn`*oI;9SP&doIZh!FH=nr&_78T$$C5TthTcqQaJon@ol*y* zBMqG7yH>CnQAqWkePAltBG52i0*Tk$nGJXDV%6Po&~~_AuYr{t4qHG7q9b7<@=r-Dw zb{6Vg_mQq>UFPNPI2m29PTGookx?JEkhn8G(Bt09*v2fx9YYGh zYnVVZ=^-WSH`}sp^_hY{7d-LSVht>nzf6+vnn1zCFWlYL8Z>Q7qd;wC9tpoJggXY# z(0_9{bt#C#2`^95{h!0x5UFkCp7tGq%phrp;XVJOD>YlB4TbbNGTCK=S_LHt~XlCH`7=U zUz_8+qCl-p+{x6wJkX5v zg4qv8@Yl4S(UR4Lu;%Up@w6SqIJ@&Y@jN^P?NZkfAHJT6P_)Ed(ULgw*(K@ zGUW2rqGT%VwgeWqv 
zmnosyi&6L`U7H?#AIdrB`e4Ccd%E-VM%);p&3C!BQkfq|c>QnM>_d}7nC7*ra+qNk z4n7kH2MUVd(FYsuq?szbF$+gk4RbKx6hl=bKS8(T1^OsHsG<*JAY|$_GFTxMGNl;$ zo*Bij%of6r5nAxZXCC>`D-9PCEudd@Ghv?=ik@HG1^LCBpv`qHOjOUO$3u$fdTtE0 zd1waFX@F9%Tj++k1?bu$P1o9FQ^O}`u+d>K#;L?`yI%{Sg4M&hRod9=oX;Hl*h73; z;%HiOKIM%D;elIs>5x|$a0;csTgRMtx+x*l70RnUR2xw{c2d2XS5_!H1__;fatIErpGyM_=!b=_nH8fx&sW2OlCv0 zIv57-Aze}5q2~~ZLX_aCex^e_ISZlC; zDI4Hx%WI5$uwS^lwv+spK7bR0XTZ_x*2Kj4GUsz~2l4OR#n|kTBwEssX`r7X4C4&# zx5qu?G8|)ZkyjJFKQ$Bny3V3L(^8o5<9_V>C3TQ^=_R@Bx|GbiyMwW)?BU*r&4zhN zYoJ-Th8*@dM>T7Q;vq zHVTQ#>NPl5NlY`cQJ|ol#Kr8bp%<0b6M20p`1tlOZaOZ7t0E7>sM#rSZ`KFKdwM=K zHdTRzW8$%)Egd{73uyEMGe)-hEX*$PB`H@g6V)n?PI62l7YBc||Cv|@nI9KGOOYuK zc{ZM$oN}6rp8kx!bxmfPW7dG(#GUlgi}R>9L=CRR*^uuF-mom#1dpx0%>~OHAgWyv zM8-Xblf3f@bKW@9x!pPCbz^e|_M4lbu=O-IIWb=RhV~FY1y5R-VUDL{HWIbDl?3;_ z;g0{!r5AKNxkjJKFsX$j{)%@P9koJ{@(3qlm6;?`eM@nTp*)CMF2RV$h_5>*ppvQ_8Ra+_@JD0K3_y9p7CaSbz0EfLW+H#Q^(5& zTd{^8V!(A_Yvtp^YV0qGP}ZlyfGu?x#)vI9VA7vQ&`%TKyQu=StZ!j+pBjRmZv;Jg zaw{lAjo>Xx-e9+vG5=@Few6qUjkPlZ@JB)q-s(Al32R5Q->kX_m*niFJFk_}8CO-sPP?Re4|NWIHSZ;x z3olSRIRR>|i^3b5?3gpIOUP%X0Q_m0O0zcOl@nPfz;AvOPM2#GO&X-c-1wV}-#V)3 zgzwG#?J?4<(kV6U9ihbc&sT?zWOZu2_$_9BewGZ6*15cAQVttPR4Q446 zXq=(vWDSY+Oonl&?Gv4L`$-;|Z%6N)9b9TvAao9z3eMyR#5??i7KaH!tHuLVd7%P? 
zc}T#E4@rXRQ!C-dtQNY@Wh>|Z_Z2g}Q31RP&ylc)V?pBLS7yv)Ww5>y3P*3s@OEzb zjHqEI{G3@MuGx`Hy9|%RtX&7m_=CT=@?;s3Ff@MZe4<9ND!E8q@S}M}D@DV#Z<(hT%Sd)v&$ zXi`qyC#SJ{P&z6cmMk@6qimb7@974}IB^1A=Dc7s6Sb)I zHB%y~<^(^+?tx#rr>IV&DOnh6L3;n3fffl1+?&JFF~c6h_Mpk^i@3Ac;FZYE>X;3e z)PB*v^a+rVUQEcHJrqaBLWkBanr~A`_s&Wq*=0Y-pQiDG>{IzzXZ4fhU6~3+Df5B9 zaDj*v1n}#Ck1+X5mPqrB4O-;x16}_kWcvDWC@>WQeLDpl_AG;=kr&9N7H^PVI1wxr zPC@AS+qieZZ45YQ#(2!yhI0#l&~9-e?|XL`HZ8QosIXA%&@`jVyTWO&j1}51`pXH% ztReCvqVNmjgX7}L2jY7hSE#icvTqv;g6hrrXP=zF_=Pr2Ie%FEJ^l$+`u`+%`&?0t zEE&jaRWtYFlSz|AC6{@uf*T$g0&`Q9NYM2gqUcuwPWx#!cFx+4);%-XD>v-nYS&4e z9ehdXdTk{vvsi%bS@Xc;-e0cUN|tUj<;YpNK_Z|1reu5e5{zqjNETLQ5yx^@y2m93 zkIZ_D3oW|X6_qM{nUWFTT20NwT*YDd!p)A|_h%69Qc%ThE1n1s-lO2T`5;UlX!&wC zS-!QLsWa<@gn2F~asNE;HChUybPGtp{XU@i$2e)JP@H{#B>p_3O#Y0krkZkN$=yX0 zDxP~T!8OB|)59qP`Gwq4X4a;~FyvvWC^c9KR~`9DHwJxXDxRjp_qJVl_eDAJ@+l@V zyJ~55pe6?OyytY14s-ozh$?(7H{YoN{bH5r?&${z9(2WuVfV2rz8V_W8;IwhiGYf{ zU@n&N!^B;8=}oVlbj_{}(2%eei`-cvVc|}iiq0_^w~i6VULB0C=_1iD77}6Z7qq_> zi|u=Uw?^hIu*emfhunDazZ;n z1O}hlOs{sC!o`Id{1|f{+5|L6b+%U=N=u`CIf?F z8U;b_FX8Z-96GVFk*O}p2kC-oqB}R#!J^HImz;c^u}FT0=50;lk_QLzK&UJ{kvhrU zzL?GRnLVU4Pg+rx(}U4DCz*DzF3=jNfWn6haP1vCOsTv|FD&h1Zr-)xeH>-**NH25 zdWSuURKh*rY|2H#{KrOCE_rTU5{%wn3!a3bH4%hOB;E zO#^y4PH}w=6&+hZ9Q6(0$W#N!S#pE>{X2^)TH8X^zD?rVqH$!qgFjq!Tf|*T8B9LT zKSvIwCvcW>g;ZN_AIVytN+up!h6`VQr5+z{Q(@39rp&Ag-cdD_JU<)Pc@5aPK|=Ux zP8e5T-9)E`Yl7zT5j4wvJ9omqjP6@y3x$foH1hdes{Z%~U8J#%j@6OkCY$VMc%vfH z?Km6PXiNwBt^jh~Pf5JAcQ?_vd5b)Cdx&kLY>CKjJ27}2PZ@)`xUu04XZXQJu;5u0 zRqpSk~8o$U8df zun!&bQx%^?-NwS;*>rrBJ309II|g_Ef#VHZIh&|$tV#1-{7@STZB4{sVcSVa+%+1% zTn@pETfdoK=Ds*-oF)|Ks^ged`^k@^h}Yi4lOOV`bbCnxDp)J9U1tlS-|q;Vp8Srx z6t2uZfA*OTvq(k{htDu~dkpm491q4Ci=o+h15{cnf#(+v)ZV0!J!7Ad=tbS+*tk9L zN%t^(oVprE#GPUzcKyc2A(D`t8{lwPRvJbvYeIt<8@w{_z5Om9XS|o6NWOc?^Qugn4jw5X3QzVu)tPgJ8eFl&Fy5D`yRlYggJDY&pe0CpAW;@ORuo>d?2R3 z8^V=Yj>Y6k1?aT=OSLtexXzG9%rk8yFHS1ZxETeQ7L^JrCuCs5l6dmx%SC|Vvvg*F z6+3)Bu!8aM>@M0^xE#P?!YpAi}5 
zwiwU9c?%7rt`ePfXK|-RhOj(yG1L`|MiZq9s(HkTNvoJZM+(wGIYkkIUhJa*M&a~n z(jMqKS49GvKVOk`uP5!MJ7Hg;Ayk))1pl*o%+cxf?9U^&A^k}vcUt(2yB)KJ)D)J| zbz|bdqyG=1Gc%2T{H;Z_dn?GhZ9lndHxASE3p>#I;9pF>WXFm++97e~ebiXJSlAsT z$!>{%NtGXIVlo#`#4h&4XVxXM!Oa2Y)^$?}76g$mlR=?>Fs$kBrU|DrU`*IP%F0y3 ziK)x5dmy(v_1h)kyg~&?5sgIY=Q<#Fdk$6CcujU_G?0?cc%aU^VcX8Xl=T?`m+btZ z;oS(fzpEY=r;n$*#vR5G-4HT%c_;Zb_$mK3uODYTafT4(8vL%Yo}Bb-voG#&B=yA~ zsOMfafwsZ(EA|^pxYanHlXG5(%UdVHo{Z7>?Rh;-`BexLhZT_5d>JX6!}0m7eHLgPMVO!@ zLhT7D(5!R>exK~44>1A8s;4pLgYv1QW(qu|dpSMT1Jvex0Z|%sfZST0FDjY~7^u@h zKdC+D8h8;p0Zo%gn(6lLoQ>!?} zwZ~2-h6ls1SbUxf3w3UwpM)gYHB^iB2{}PiFK>YD?%zSlT?6v2#?rpx#SphqnvA}a zNR~$Lg_;sAxaFx2J=ITW(TRJ^SA%Pef142`|9%V&`=`LD1@B4Y#ji9e1i@;zlWGR*+6<6&UQQNWm4<{FH`str=#R~F?E|RV{*JCuDCK6>&)~e}>zThNl<{mjfML2P{S~8tQ&g5?%jYWSvz{Z? zIx0_Nh#F}L?i8JseCsSz%AG0qf^F9@qdY{QL4EIaC%0T(dTKj-16T+Zapz77c^wT?1>@ zvr9>dZz_&hx&p6FUB}5Zb%3VFGrE|*qqnpQg{o$9Af7Bh`P@w6wxED(@=l?-i$vt? z_)X-D{%&$~{YF$)J<0{1KP#B+W=5+HFz7juhjg8=kbN9b#}#kWL5ur8gcVWIaD6?G zeWU);2bXi1V;9Tl&s+lb&`guYA7YjZPhwN!HS%%&I1+w)0vfEybeHDynO&L`fkwSjFH-+$JhnaNgY$idWimcbZPUGb? z2+T2sGmdhQFsYv??7D@9W-d^mvRJ5Ht&W|s`e4)DMyJGeki5VCl)YWXj4T$y%}RCf zvCzb9ck8_<9{IlKKhDb@w682q1oQ+t4CG4$l@uqLXw9Dr;>Q%vuzPB_4jL`K|xT zt0$3UN{kVmZF z0kd`#1G|jUxL@yUxCvP!Fgnqhctj>%$ft zw=9Pq9F$0kE)T(k>>Su~;vJbHH5FUiorNWTrE$^`5zUW!LS`LmA>)KUX_lQO2^lBN zke<(E{`jL*`OjvuWZ?pCmi#X=DJvPL#$AQHDYd-Z+o^Q^)mPxyn_H>#t{_*F*>Ze@gt zF7rl^uQLd4+T?Sx_e~g4rV6_GjbW<;M5JL=9rlDK;tVZG;-5K(UfmQ#U#i;SvBp8D zxyYRfuQuUE>`}q1M@uoU*8;06t`5}5UHyG=1bt z{>Ju_f$BGSMWbn7?N?4(!*lR)BaqAGLr-#<`2o?+$1QM1$bhg4f6s|!Jh63Fe`s9BtOcA;c~md$7KjBoEC=q z`iJ=3BXyXv^$$F1OR4XdkQv-}(~lfkytk6 z7%z2%9xcjdOD+sy&#aFkr?<3|I<}DEJVo4^*fi>);|M2?rgBHHmTMTCMgN+la=C3! 
z=)DP^aB1QvCc@?(I+gFDj)RRMVE;(*o^=PY|J^kbqjsA9a9z!mR{UUk%ci50x(vEc zsH6UcZREJ=d9wAmB1z7?NiIzTe5xOev*nH1-#0$e1Kl08>v@52eldjt!4}Rb=P0v4 zB8-NfF{9rqmeI{INrcU}pe0(%&|~NXZcmyW`J2*1Cl9PQJl`H8($Dup!$nr97$apKpoaaI=k z3@@TvL(JjGx>Wdk`Yb*kKU`c<-2#5TOYp=HE0{5VC6zz@mFu`a392(on7SB$)L2o@ zrH79vWw-t5*EdS!NUMP2@r&fS+&u&%7hEl$fTe%uh^?DNRCs$eIxcJ_59^26Ydd=4 zs4{JsYc-6xo4AqZhwsuEvrSNJ?14248fom`(^M#!kI|tSFnehaciXxF-7akwnvL8E zE9$?{-J;1vc*LHY*)^V8nkhm7`;>fjD&;PT!fE4s30NgBPfljqkTd6G*m?5j;PF{W zFj161!{CpM``g7rw}UG|wpk4hu6w{-Y%UT!Fpz^^O;gC)V>gKM+d;TPYA&!ktKrbK z5)wQ(m5wj&1o@SN@ZQ24I`XUsu&R5h-wB?*85N6r?=WO%(=4*E$d5bnb%jm?oi#@o;AG19kWvkVAvtXVZPd`e@pSPvmHdCsTD&hDQ5M z!cnWb$z#=Fkh1HGV<8Djh!rME@#@|C=OsX`wK5i10r5mw_17Cs7Gz)q!Cn1 zc$UyTw2MyeNrshY(`euF?PM807y_+|Nb+!H@YPZTy^k(bwDBOfxAPDgX*Lml)^$>% z6pIUWfz8e@p`Qxu;c#ZV@Z-`b7+DaAABMTmcULbk&b{}@$>D}%$l+e%Q{2hCl*`7| zVnbN{d^Y?_x=o6|9mG_jG~SbHr$<*?;HA&CXwVZPeza~IU{MY|`f?yQotjCHYUgwF z)g-y0CO_z!>a(6P(6wEm|91Mr2W6H#zlo{oOnl)Zqe$5YN!5J7$wdHpPPWH*o{Gs3%0K^D}XoEW<8!UIew) zx5$g+NR;U+gNZVZ!tCeDu=GC&^HgKg$AZ#pH(nJ=9jle-$+zPrJQng?{E-!g&w zw>olM<^wpAbEvRuH25S3z_PY+ta-^eoa}Uj?UI=TQO9C&s_huq8F+|sx%!Nb^s<0? 
zzipWM{$|>fe3uEIb({>j@srj%n<77TBIbLrg9QAQNoW) zA9E6Pk~cBtm*3(2U?bXn&xF10cwGDvccEg&2q^z+hhe>UncS1BY3JshB+gV9kN-|5 zD-S#%b=sZur9m~9>{Uyoyw=0kl;AquijfE}KZdUv~V@hrAuV*IhoQ44t zBCV(a`jxCFj*e!XSI`EuA?X&xd~Ny1B#D4$$#p=udW?T=SfEs znT@ixY4lqZZ=;!NgsPmJIPwPtSJ@P#c8gSc7A9!$!O;Uet^ z=Jeb9c9D$7UM_XVb^Bq9t*>O&u3;1p^8)lW=N3;k73B&21T(Kk7R&>iQfAB4Jm%-n(bVhcY$i@=jXx>?A%hZcp2pyarF^NPvkzVV;Eu(uZ>8Su41wPpd^IL+$C)b!P?7 z4Tq4E&l%C7ozt(F#@Ptcr|XIg>?Il31G$`5REc2pR$E5e*n#`unabRoHc?RZ=^0Zp z>l{-%ej*fSZ{;i;bjVWqDfSnNo(Rle$a3TFXmaNZ4l>c#MBE8zqB)>rd7}j zVH#Za{)wXK5hdJ$lP&}C`W}_JIF_8TuprmP!2-L}v$?S@I~aL9Ciotj$Bmh7MZV@o zF*{=P86}TWuBX$FQSf#Wy&YP^yviGR58!MM_kFDeW9n5Z_^`caL%g5g?TIQI#8 z5jsh@bkZJn@9g7zZNCctq5FXF#0BJyO2zDQYd1F2Z!wNpBjO*l%@#kt`58XlGiSFx zUWd6l9&GVpV|JaQig3nPMK+?TO!#2CCa>;rLU<~)3~ol=;>pc3ymZ|wJ}`G9e@MZd z-!i0{onM~B9-4T9pEcA-EGQi&W@bmhR3~epd9pYA?1eQn>4xz3VS~jZ3d{J%{g&|e z^&H{Ym@BMN(iP$K3l6MzOFi#1r6Wbou6`B~2p)2eA1k)m| zF)aTA#0|QSCJ*JnbkuGfd~qmSQJF|NO>ekArU=gbIRIOq$_ZQ6E+FI5646gFgjao; z#J<0)&93=e$9GE~;eVQ>uuey+Fr{-I9G@M@Ce+Lo=XVzKg4tV{W!ly3qwJkTt$?Nc zZhdfZ)M1^iUc&u6f3~bym){aOg7;2IWOsf%#P6TDlpQhkG8l_5;rG>F*`@$j;h*l2 zkYw$~mn~l*R*76hx%UG4(EU1d&EYZ>ZeKEx!#>3-=GD*?)l_VsFUt=1>%+5hros^G wanO2u1TPV{75Z*T!c&8>ko4G)9PZr4?i5(_&z96u|AmpzyhBYmZfq6&9}8)rwg3PC literal 0 HcmV?d00001 diff --git a/src/algorithms/algorithm_manager.py b/src/algorithms/algorithm_manager.py index 1447e7cc9..bf69dbe9e 100644 --- a/src/algorithms/algorithm_manager.py +++ b/src/algorithms/algorithm_manager.py @@ -30,9 +30,10 @@ from algorithms.classic.sample_based.rrt_star import RRT_Star from algorithms.classic.sample_based.rrt_connect import RRT_Connect from algorithms.classic.graph_based.wavefront import Wavefront -from algorithms.lstm.LSTM_tile_by_tile import OnlineLSTM -from algorithms.lstm.a_star_waypoint import WayPointNavigation -from algorithms.lstm.combined_online_LSTM import CombinedOnlineLSTM +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.a_star_waypoint import 
WayPointNavigation +from algorithms.learning.combined_online_LSTM import CombinedOnlineLSTM +from algorithms.learning.VIN.VIN import VINAlgorithm if HAS_OMPL: from algorithms.classic.sample_based.ompl_rrt import OMPL_RRT @@ -103,7 +104,8 @@ def _static_init_(cls): "Dijkstra": (Dijkstra, DijkstraTesting, ([], {})), "Bug1": (Bug1, BasicTesting, ([], {})), "Bug2": (Bug2, BasicTesting, ([], {})), - "Potential Field": (PotentialField, BasicTesting, ([], {})) + "Potential Field": (PotentialField, BasicTesting, ([], {})), + "VIN": (VINAlgorithm, BasicTesting, ([], {"load_name": "vin_pretrained"})) } if HAS_OMPL: diff --git a/src/algorithms/classic/testing/way_point_navigation_testing.py b/src/algorithms/classic/testing/way_point_navigation_testing.py index 39cdaae92..dbce9939c 100644 --- a/src/algorithms/classic/testing/way_point_navigation_testing.py +++ b/src/algorithms/classic/testing/way_point_navigation_testing.py @@ -3,7 +3,7 @@ import numpy as np from algorithms.basic_testing import BasicTesting -from algorithms.lstm.combined_online_LSTM import CombinedOnlineLSTM +from algorithms.learning.combined_online_LSTM import CombinedOnlineLSTM from simulator.services.debug import DebugLevel diff --git a/src/algorithms/configuration/configuration.py b/src/algorithms/configuration/configuration.py index c62ca1be2..8c1978fdd 100644 --- a/src/algorithms/configuration/configuration.py +++ b/src/algorithms/configuration/configuration.py @@ -4,8 +4,8 @@ from algorithms.algorithm import Algorithm from algorithms.basic_testing import BasicTesting from algorithms.configuration.maps.map import Map -from algorithms.lstm.LSTM_tile_by_tile import BasicLSTMModule -from algorithms.lstm.ML_model import MLModel +from algorithms.learning.LSTM_tile_by_tile import BasicLSTMModule +from algorithms.learning.ML_model import MLModel from simulator.services.debug import DebugLevel from structures import Point diff --git a/src/algorithms/lstm/LSTM_CAE_tile_by_tile.py 
b/src/algorithms/learning/LSTM_CAE_tile_by_tile.py similarity index 98% rename from src/algorithms/lstm/LSTM_CAE_tile_by_tile.py rename to src/algorithms/learning/LSTM_CAE_tile_by_tile.py index 8812271e0..282969a8c 100644 --- a/src/algorithms/lstm/LSTM_CAE_tile_by_tile.py +++ b/src/algorithms/learning/LSTM_CAE_tile_by_tile.py @@ -15,9 +15,9 @@ from algorithms.basic_testing import BasicTesting from algorithms.configuration.maps.map import Map -from algorithms.lstm.LSTM_tile_by_tile import BasicLSTMModule, OnlineLSTM -from algorithms.lstm.ML_model import MLModel, EvaluationResults -from algorithms.lstm.map_processing import MapProcessing +from algorithms.learning.LSTM_tile_by_tile import BasicLSTMModule, OnlineLSTM +from algorithms.learning.ML_model import MLModel, EvaluationResults +from algorithms.learning.map_processing import MapProcessing from simulator.services.services import Services from utility.constants import DATA_PATH diff --git a/src/algorithms/lstm/LSTM_CNN_tile_by_tile_obsolete.py b/src/algorithms/learning/LSTM_CNN_tile_by_tile_obsolete.py similarity index 98% rename from src/algorithms/lstm/LSTM_CNN_tile_by_tile_obsolete.py rename to src/algorithms/learning/LSTM_CNN_tile_by_tile_obsolete.py index 359ba691d..b52a1bddb 100644 --- a/src/algorithms/lstm/LSTM_CNN_tile_by_tile_obsolete.py +++ b/src/algorithms/learning/LSTM_CNN_tile_by_tile_obsolete.py @@ -10,7 +10,7 @@ from torch import nn from algorithms.basic_testing import BasicTesting -from algorithms.lstm.online_lstm import BasicLSTMModule, OnlineLSTM +from algorithms.learning.online_lstm import BasicLSTMModule, OnlineLSTM from simulator.services.services import Services diff --git a/src/algorithms/lstm/LSTM_tile_by_tile.py b/src/algorithms/learning/LSTM_tile_by_tile.py similarity index 98% rename from src/algorithms/lstm/LSTM_tile_by_tile.py rename to src/algorithms/learning/LSTM_tile_by_tile.py index 13ca0fd13..2f8520fb6 100644 --- a/src/algorithms/lstm/LSTM_tile_by_tile.py +++ 
b/src/algorithms/learning/LSTM_tile_by_tile.py @@ -10,8 +10,8 @@ from algorithms.basic_testing import BasicTesting from algorithms.configuration.entities.goal import Goal from algorithms.configuration.maps.map import Map -from algorithms.lstm.ML_model import MLModel, SingleTensorDataset, PackedDataset -from algorithms.lstm.map_processing import MapProcessing +from algorithms.learning.ML_model import MLModel, SingleTensorDataset, PackedDataset +from algorithms.learning.map_processing import MapProcessing from simulator.services.services import Services from simulator.views.map.display.entities_map_display import EntitiesMapDisplay from simulator.views.map.display.online_lstm_map_display import OnlineLSTMMapDisplay diff --git a/src/algorithms/lstm/ML_model.py b/src/algorithms/learning/ML_model.py similarity index 99% rename from src/algorithms/lstm/ML_model.py rename to src/algorithms/learning/ML_model.py index d28587346..c1d2b5e74 100644 --- a/src/algorithms/lstm/ML_model.py +++ b/src/algorithms/learning/ML_model.py @@ -12,7 +12,7 @@ from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence, pack_sequence, PackedSequence from torch.utils import data from torch.utils.data import DataLoader, TensorDataset, Dataset, Subset -from algorithms.lstm.map_processing import MapProcessing +from algorithms.learning.map_processing import MapProcessing from simulator.services.debug import DebugLevel from simulator.services.services import Services @@ -154,7 +154,7 @@ class PackedDataset(Dataset): lengths: torch.Tensor def __init__(self, seq: List[torch.Tensor]) -> None: - from algorithms.lstm.LSTM_tile_by_tile import BasicLSTMModule + from algorithms.learning.LSTM_tile_by_tile import BasicLSTMModule ls = list(map(lambda el: el.shape[0], seq)) self.perm = BasicLSTMModule.get_sort_by_lengths_indices(ls) diff --git a/src/algorithms/learning/VIN/.gitignore b/src/algorithms/learning/VIN/.gitignore new file mode 100755 index 000000000..80247da36 --- 
/dev/null +++ b/src/algorithms/learning/VIN/.gitignore @@ -0,0 +1,135 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# npz +*.npz + +# pth +*.pth +*.pth.1 + +vin_my_implementation.code-workspace + + +.vscode/launch.json +/resources/training_maps/ +/resources/testing_maps/ + +/resources/logs/ +/resources/test_maps/ +/resources/16_100k/ +/resources/16_60k/ +/resources/16_60k_pt2/ + +/resources/house_expo/ +/resources/house_expo_old/ +/resources/house_expo_100x100/ + + + + + + +resources/100k_no_block.tar.gz diff --git a/src/algorithms/learning/VIN/LICENSE b/src/algorithms/learning/VIN/LICENSE new file mode 100644 index 000000000..07a851703 --- /dev/null +++ b/src/algorithms/learning/VIN/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause 
License + +Copyright (c) 2020, Hussein Ali Jaafar +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/algorithms/learning/VIN/README.md b/src/algorithms/learning/VIN/README.md new file mode 100644 index 000000000..101577f46 --- /dev/null +++ b/src/algorithms/learning/VIN/README.md @@ -0,0 +1,143 @@ + +## Introduction + +This is a modified implementation of Kent Sommer's PyTorch Value Iteration Networks implementation, meant to work with PathBench. 
+ +Read about Kent Sommers Implementation: [Here](https://github.com/kentsommer/pytorch-value-iteration-networks "Pytorch implementation of Value Iteration Networks") + +Read about PathBench: [Here](https://github.com/husseinalijaafar/PathBench "A Benchmarking Platform for Classic and Learned Path Planning Algorithms") + +Read the original paper:[Here](https://arxiv.org/abs/1602.02867) + +Read about similar implemenations, which have made this modified implemenation possible +* [@kentsommer](https://github.com/kentsommer) ([PyTorch implementation](https://github.com/kentsommer/pytorch-value-iteration-networks)) +* [@avivt](https://github.com/avivt) ([Paper Author](https://arxiv.org/abs/1602.02867), [MATLAB implementation](https://github.com/avivt/VIN)) +* [@zuoxingdong](https://github.com/zuoxingdong) ([Tensorflow implementation](https://github.com/zuoxingdong/VIN_TensorFlow), [Pytorch implementation](https://github.com/zuoxingdong/VIN_PyTorch_Visdom)) +* [@TheAbhiKumar](https://github.com/TheAbhiKumar) ([Tensorflow implementation](https://github.com/TheAbhiKumar/tensorflow-value-iteration-networks)) +* [@onlytailei](https://github.com/onlytailei) ([Pytorch implementation](https://github.com/onlytailei/Value-Iteration-Networks-PyTorch)) + +## What has been modified? +In order to run on PathBench generated maps, I had to modify a few areas of the code. The main difference is within the `test.py` file and `gridworld` class. The maps (resources>maps), which are JSON files structured with the goal position, agent position and grid. +I have also added a few metrics, such as path deviation from optimal path, and time. + +The PathBench maps are structured where 0 is obstacle and 1 is free space. +Sample Map: + +## Installation +Instructions: +1. Install Packages +2. Download Training Data +3. Train on Training Data +4. Run `test.py`. 
+ +This repository requires following packages: +- [SciPy](https://www.scipy.org/install.html) >= 0.19.0 +- [Python](https://www.python.org/) >= 2.7 (if using Python 3.x: python3-tk should be installed) +- [Numpy](https://pypi.python.org/pypi/numpy) >= 1.12.1 +- [Matplotlib](https://matplotlib.org/users/installing.html) >= 2.0.0 +- [PyTorch](http://pytorch.org/) >= 0.1.11 + +Use `pip` to install the necessary dependencies: +``` +pip install -U -r requirements.txt +``` +Note that PyTorch cannot be installed directly from PyPI; refer to http://pytorch.org/ for custom installation instructions specific to your needs. + +#### Downloading Training Data +WIP + +To aquire the training data (NOT for PathBench Maps), either run the training data generator `make_training_data.py` in the dataset folder (resource intensive), or run the shell script to download them: +1. `cd` into main directory +2. `chmod +x download_weights_and_datasets.sh` +3. `./download_weights_and_datasets` + +It should download the training data. +#### PathBench Training Data + +*Outdated- Please await updated training files* + +To download PathBench training data, visit [GDrive](https://drive.google.com/file/d/11D-QCf5qZ4qusv66XhxOqqdcyHl5RLHk/view?usp=sharing) +Currently, only the 90000 map training data is uploaded. I will upload more as I generate them. + +To generate your own training data, add the json files with correct structure to a specified path, and pass that path in the `make_training_data.py` file. It is fairly straight forward. 
+ +## How to train +#### 8x8 gridworld +```bash +python train.py --datafile dataset/gridworld_8x8.npz --imsize 8 --lr 0.005 --epochs 30 --k 10 --batch_size 128 +``` +#### 16x16 gridworld +```bash +python train.py --datafile dataset/gridworld_16x16.npz --imsize 16 --lr 0.002 --epochs 30 --k 20 --batch_size 128 +``` +#### 28x28 gridworld +```bash +python train.py --datafile dataset/gridworld_28x28.npz --imsize 28 --lr 0.002 --epochs 30 --k 36 --batch_size 128 +``` +**Flags**: +- `datafile`: The path to the data files. +- `imsize`: The size of input images. One of: [8, 16, 28] +- `lr`: Learning rate with RMSProp optimizer. Recommended: [0.01, 0.005, 0.002, 0.001] +- `epochs`: Number of epochs to train. Default: 30 +- `k`: Number of Value Iterations. Recommended: [10 for 8x8, 20 for 16x16, 36 for 28x28] +- `l_i`: Number of channels in input layer. Default: 2, i.e. obstacles image and goal image. +- `l_h`: Number of channels in first convolutional layer. Default: 150, described in paper. +- `l_q`: Number of channels in q layer (~actions) in VI-module. Default: 10, described in paper. +- `batch_size`: Batch size. Default: 128 + +## How to test / visualize paths (requires training first) +#### 8x8 gridworld +```bash +python test.py --weights trained/vin_8x8.pth --imsize 8 --k 10 +``` +#### 16x16 gridworld +```bash +python test.py --weights trained/vin_16x16.pth --imsize 16 --k 20 +``` +#### 28x28 gridworld +```bash +python test.py --weights trained/vin_28x28.pth --imsize 28 --k 36 +``` +#### 64x64 gridworld +```bash +python test.py --weights trained/vin_28x28.pth --imsize 28 --k 36 +``` +(64x64 still uses 28x28 trained data, we haven't trained VIN on 64x64 maps yet.) + +To visualize the optimal and predicted paths simply pass: +```bash +--plot +``` + +**Flags**: +- `weights`: Path to trained weights. +- `imsize`: The size of input images. One of: [8, 16, 28] +- `plot`: If supplied, the optimal and predicted paths will be plotted +- `k`: Number of Value Iterations. 
Recommended: [10 for 8x8, 20 for 16x16, 36 for 28x28] +- `l_i`: Number of channels in input layer. Default: 2, i.e. obstacles image and goal image. +- `l_h`: Number of channels in first convolutional layer. Default: 150, described in paper. +- `l_q`: Number of channels in q layer (~actions) in VI-module. Default: 10, described in paper. + +## Results +The maps that VIN trains on are NOT the PathBench maps, rather they are maps generated from Kent's implementation. This is still WIP +Therefore, when running VIN w/ PathBench maps, it is running on untrained style maps (Block map and Uniform Random Fill are unfamiliar to +the algorithm). + +Logs are saved in `resources>logs`. You can change the logging behaviour (debug vs info) in `test_pb.py`. Ensure logs don't overwrite eachother by changing the name at each run. + +### Sample Maps: +Block Map: + + + +Uniform Random Fill Map + + +House Map + + + +### Training +WIP + + diff --git a/src/algorithms/learning/VIN/VIN.py b/src/algorithms/learning/VIN/VIN.py new file mode 100644 index 000000000..3c466cdc8 --- /dev/null +++ b/src/algorithms/learning/VIN/VIN.py @@ -0,0 +1,284 @@ +import sys +import random +import numpy as np +import torch +import logging +import time +import math +from torch.autograd import Variable +from typing import Dict, List, Type + +from .utility.utils import * + +from .model import * +from .domains.gridworld import * +from .generators.obstacle_gen import * + +from algorithms.algorithm import Algorithm +from simulator.services.services import Services +from algorithms.basic_testing import BasicTesting +from structures.point import Point + +class VINConfig: + def __init__(self, l_i=2, l_h=150, l_q=10, k=10): + self.l_i = l_i + self.l_h = l_h + self.l_q = l_q + self.k = k + +class VINAlgorithm(Algorithm): + cached_models: Dict[int, Type[VIN]] + use_GPU: bool + + def __init__(self, services: Services, testing: BasicTesting = None, config: VINConfig = VINConfig(), load_name: str = "VIN"): + 
super().__init__(services, testing) + self.cached_models = {} + self.use_GPU = torch.cuda.is_available() + self._load_name = load_name + self.config = config + + def set_display_info(self): + return super().set_display_info() + [ + + ] + + def _find_path_internal(self) -> None: + mp = self._get_grid() + assert mp.size[0] == mp.size[1] and len(mp.size) == 2, \ + f"VIN only accepts square 2D maps, map size {mp.size}" + imsize = mp.size[0] + grid = np.copy(mp.grid) + self.config.imsize = imsize + model: VIN = self.load_VIN(mp.size[0]) + start: Tuple[int] = (mp.agent.position.x, mp.agent.position.y) + goal: Tuple[int] = (mp.goal.position.x, mp.goal.position.y) + + grid[mp.agent.position.x, mp.agent.position.y] = 0 #Set the start position as freespace too + grid[mp.goal.position.x, mp.goal.position.y] = 0 #Set the goal position as freespace too + + obs = obstacles([imsize, imsize], goal) + obs.dom = grid + + im = obs.get_final() + G = gridworld(im, goal[0], goal[1]) + # ======= + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, 1, start, False) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + + i = 0 + if len(states_xy[i]) > 1: + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, imsize, + imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, imsize, + imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, 
value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if self.use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = model(X_in, S1_in, S2_in, self.config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + self.move_agent(Point(nr, nc)) + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + self.move_agent(self._get_grid().goal.position) + return + self.key_frame() + + def load_VIN(self, size): + if size in self.cached_models: return self.cached_models[size] + load_fname = f"{self._load_name}_{size}x{size}.pth" + load_path = self._services.resources.model_dir._full_path() + load_fname + vin = VIN(self.config) + vin.load_state_dict(torch.load(load_path, map_location=None if self.use_GPU else torch.device("cpu"))) + if self.use_GPU: vin = vin.cuda() + self.cached_models[size] = vin + return vin + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', 
label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. + ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , 
pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? 
use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/vin_8x8.pth', + help='Path to trained weights') + parser.add_argument( + '--maps', + type=str, + default='resources/testing_maps/16x16', + help='Path to maps') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=8, help='Size of image') + parser.add_argument( + '--k', type=int, default=10, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot diff --git a/src/algorithms/learning/VIN/dataset/README.MD b/src/algorithms/learning/VIN/dataset/README.MD new file mode 100644 index 000000000..d67709e69 --- /dev/null +++ b/src/algorithms/learning/VIN/dataset/README.MD @@ -0,0 +1,8 @@ +# Gridworld datasets +To use the gridworld datasets you have two choices: +1. Download and place the .npz dataset files here + * gridworld_8x8.npz + * gridworld_16x16.npz + * gridworld_28x28.npz +2. 
Use the dataset generation script + * ```make_training_data.py``` diff --git a/src/algorithms/lstm/__init__.py b/src/algorithms/learning/VIN/dataset/__init__.py similarity index 100% rename from src/algorithms/lstm/__init__.py rename to src/algorithms/learning/VIN/dataset/__init__.py diff --git a/src/algorithms/learning/VIN/dataset/dataset.py b/src/algorithms/learning/VIN/dataset/dataset.py new file mode 100644 index 000000000..c2f579bf6 --- /dev/null +++ b/src/algorithms/learning/VIN/dataset/dataset.py @@ -0,0 +1,67 @@ +import numpy as np + +import torch +import torch.utils.data as data + + +class GridworldData(data.Dataset): + def __init__(self, + file, + imsize, + train=True, + transform=None, + target_transform=None): + assert file.endswith('.npz') # Must be .npz format + self.file = file + self.imsize = imsize + self.transform = transform + self.target_transform = target_transform + self.train = train # training set or test set + + self.images, self.S1, self.S2, self.labels = \ + self._process(file, self.train) + + def __getitem__(self, index): + img = self.images[index] + s1 = self.S1[index] + s2 = self.S2[index] + label = self.labels[index] + # Apply transform if we have one + if self.transform is not None: + img = self.transform(img) + else: # Internal default transform: Just to Tensor + img = torch.from_numpy(img) + # Apply target transform if we have one + if self.target_transform is not None: + label = self.target_transform(label) + return img, int(s1), int(s2), int(label) + + def __len__(self): + return self.images.shape[0] + + def _process(self, file, train): + """Data format: A list, [train data, test data] + Each data sample: label, S1, S2, Images, in this order. 
+ """ + with np.load(file, mmap_mode='r') as f: + if train: + images = f['arr_0'] + S1 = f['arr_1'] + S2 = f['arr_2'] + labels = f['arr_3'] + else: + images = f['arr_4'] + S1 = f['arr_5'] + S2 = f['arr_6'] + labels = f['arr_7'] + # Set proper datatypes + images = images.astype(np.float32) + S1 = S1.astype(int) # (S1, S2) location are integers + S2 = S2.astype(int) + labels = labels.astype(int) # labels are integers + # Print number of samples + if train: + print("Number of Train Samples: {0}".format(images.shape[0])) + else: + print("Number of Test Samples: {0}".format(images.shape[0])) + return images, S1, S2, labels diff --git a/src/algorithms/learning/VIN/dataset/make_training_data.py b/src/algorithms/learning/VIN/dataset/make_training_data.py new file mode 100644 index 000000000..6baf94347 --- /dev/null +++ b/src/algorithms/learning/VIN/dataset/make_training_data.py @@ -0,0 +1,142 @@ +import sys +import json +import numpy as np +from dataset import * + +sys.path.append('.') +from domains.gridworld import * +from generators.obstacle_gen import * +sys.path.remove('.') + + +def extract_action(traj): + # Given a trajectory, outputs a 1D vector of + # actions corresponding to the trajectory. 
+ n_actions = 8 + action_vecs = np.asarray([[-1., 0.], [1., 0.], [0., 1.], [0., -1.], + [-1., 1.], [-1., -1.], [1., 1.], [1., -1.]]) + action_vecs[4:] = 1 / np.sqrt(2) * action_vecs[4:] + action_vecs = action_vecs.T + state_diff = np.diff(traj, axis=0) + norm_state_diff = state_diff * np.tile( + 1 / np.sqrt(np.sum(np.square(state_diff), axis=1)), (2, 1)).T + prj_state_diff = np.dot(norm_state_diff, action_vecs) + actions_one_hot = np.abs(prj_state_diff - 1) < 0.00001 + actions = np.dot(actions_one_hot, np.arange(n_actions).T) + return actions + + +def make_data(dom_size, n_domains, max_obs, max_obs_size, n_traj, + state_batch_size,testing): + + X_l = [] + S1_l = [] + S2_l = [] + Labels_l = [] + + dom = 0.0 + while dom <= n_domains: + # goal = [np.random.randint(dom_size[0]), np.random.randint(dom_size[1])] + # Generate obstacle map + if testing: + # path = './resources/training_maps/8x8/testing/' + path = '/home/hussein/Desktop/git-projects/hjaafar_vin/value-iteration-networks-pb/resources/training_maps/8x8_150000/50000_uniform_seed50/' + mp, goal, start = open_map(dom+100000,path) + else: + # path = './resources/training_maps/8x8/training/' + path = "/home/hussein/Desktop/git-projects/hjaafar_vin/value-iteration-networks-pb/resources/training_maps/8x8_150000/50000_uniform_seed50/" + mp, goal, start = open_map(dom,path) + + + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal position as freespace too + + goal = [goal[1],goal[0]] #swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + + obs = obstacles([dom_size[0], dom_size[1]], goal, max_obs_size) + obs.dom = mp + # Add obstacles to map + # n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + # border_res = obs.add_border() + # Ensure we have valid map + # if n_obs == 0 or not border_res: + # continue + # Get final map + im = obs.get_final() + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], 
goal[1]) + # Get value prior + value_prior = G.t_get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen=False) + for i in range(n_traj): + if len(states_xy[i]) > 1: + # Get optimal actions for each state + actions = extract_action(states_xy[i]) + ns = states_xy[i].shape[0] - 1 + # Invert domain image => 0 = free, 1 = obstacle + image = 1 - im + # Resize domain and goal images and concate + image_data = np.resize(image, (1, 1, dom_size[0], dom_size[1])) + value_data = np.resize(value_prior, + (1, 1, dom_size[0], dom_size[1])) + iv_mixed = np.concatenate((image_data, value_data), axis=1) + X_current = np.tile(iv_mixed, (ns, 1, 1, 1)) + # Resize states + S1_current = np.expand_dims(states_xy[i][0:ns, 0], axis=1) + S2_current = np.expand_dims(states_xy[i][0:ns, 1], axis=1) + # Resize labels + Labels_current = np.expand_dims(actions, axis=1) + # Append to output list + X_l.append(X_current) + S1_l.append(S1_current) + S2_l.append(S2_current) + Labels_l.append(Labels_current) + dom += 1 + sys.stdout.write("\r" + str(int((dom / n_domains) * 100)) + "%") + sys.stdout.flush() + sys.stdout.write("\n") + # Concat all outputs + X_f = np.concatenate(X_l) + S1_f = np.concatenate(S1_l) + S2_f = np.concatenate(S2_l) + Labels_f = np.concatenate(Labels_l) + return X_f, S1_f, S2_f, Labels_f + + +def open_map(dom,path): + # print('dom', dom) + # print('path', path) + dom = int(dom) + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def main(dom_size=[8, 8], + n_domains=100000, + max_obs=50, + max_obs_size=2, + n_traj=1, #This basically uses 7 diff start positions , but you need to have that in the map or else it throws an error + state_batch_size=1): + # Get path to save dataset + save_path = "dataset/new_gridworld_{0}x{1}".format(dom_size[0], dom_size[1]) 
+ # Get training data + print("Now making training data...") + X_out_tr, S1_out_tr, S2_out_tr, Labels_out_tr = make_data( + dom_size, n_domains, max_obs, max_obs_size, n_traj, state_batch_size, testing=False) + # Get testing data + print("\nNow making testing data...") + X_out_ts, S1_out_ts, S2_out_ts, Labels_out_ts = make_data( + dom_size, n_domains / 6, max_obs, max_obs_size, n_traj, + state_batch_size,testing = True) + # Save dataset + np.savez_compressed(save_path, X_out_tr, S1_out_tr, S2_out_tr, + Labels_out_tr, X_out_ts, S1_out_ts, S2_out_ts, + Labels_out_ts) + + +if __name__ == '__main__': + main() diff --git a/src/algorithms/learning/VIN/dataset/make_training_data_og.py b/src/algorithms/learning/VIN/dataset/make_training_data_og.py new file mode 100644 index 000000000..cf31fd857 --- /dev/null +++ b/src/algorithms/learning/VIN/dataset/make_training_data_og.py @@ -0,0 +1,116 @@ +import sys + +import numpy as np +from dataset import * + +sys.path.append('.') +from domains.gridworld import * +from generators.obstacle_gen import * +sys.path.remove('.') + + +def extract_action(traj): + # Given a trajectory, outputs a 1D vector of + # actions corresponding to the trajectory. 
+ n_actions = 8 + action_vecs = np.asarray([[-1., 0.], [1., 0.], [0., 1.], [0., -1.], + [-1., 1.], [-1., -1.], [1., 1.], [1., -1.]]) + action_vecs[4:] = 1 / np.sqrt(2) * action_vecs[4:] + action_vecs = action_vecs.T + state_diff = np.diff(traj, axis=0) + norm_state_diff = state_diff * np.tile( + 1 / np.sqrt(np.sum(np.square(state_diff), axis=1)), (2, 1)).T + prj_state_diff = np.dot(norm_state_diff, action_vecs) + actions_one_hot = np.abs(prj_state_diff - 1) < 0.00001 + actions = np.dot(actions_one_hot, np.arange(n_actions).T) + return actions + + +def make_data(dom_size, n_domains, max_obs, max_obs_size, n_traj, + state_batch_size): + + X_l = [] + S1_l = [] + S2_l = [] + Labels_l = [] + + dom = 0.0 + while dom <= n_domains: + goal = [np.random.randint(dom_size[0]), np.random.randint(dom_size[1])] + # Generate obstacle map + obs = obstacles([dom_size[0], dom_size[1]], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + # Get final map + im = obs.get_final() + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.t_get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj) + for i in range(n_traj): + if len(states_xy[i]) > 1: + # Get optimal actions for each state + actions = extract_action(states_xy[i]) + ns = states_xy[i].shape[0] - 1 + # Invert domain image => 0 = free, 1 = obstacle + image = 1 - im + # Resize domain and goal images and concate + image_data = np.resize(image, (1, 1, dom_size[0], dom_size[1])) + value_data = np.resize(value_prior, + (1, 1, dom_size[0], dom_size[1])) + iv_mixed = np.concatenate((image_data, value_data), axis=1) + X_current = np.tile(iv_mixed, (ns, 1, 1, 1)) + # Resize states + S1_current = np.expand_dims(states_xy[i][0:ns, 0], axis=1) + S2_current = 
np.expand_dims(states_xy[i][0:ns, 1], axis=1) + # Resize labels + Labels_current = np.expand_dims(actions, axis=1) + # Append to output list + X_l.append(X_current) + S1_l.append(S1_current) + S2_l.append(S2_current) + Labels_l.append(Labels_current) + dom += 1 + sys.stdout.write("\r" + str(int((dom / n_domains) * 100)) + "%") + sys.stdout.flush() + sys.stdout.write("\n") + # Concat all outputs + X_f = np.concatenate(X_l) + S1_f = np.concatenate(S1_l) + S2_f = np.concatenate(S2_l) + Labels_f = np.concatenate(Labels_l) + return X_f, S1_f, S2_f, Labels_f + + +def main(dom_size=[28, 28], + n_domains=5000, + max_obs=50, + max_obs_size=2, + n_traj=7, + state_batch_size=1): + # Get path to save dataset + save_path = "dataset/gridworld_{0}x{1}".format(dom_size[0], dom_size[1]) + # Get training data + print("Now making training data...") + X_out_tr, S1_out_tr, S2_out_tr, Labels_out_tr = make_data( + dom_size, n_domains, max_obs, max_obs_size, n_traj, state_batch_size) + # Get testing data + print("\nNow making testing data...") + X_out_ts, S1_out_ts, S2_out_ts, Labels_out_ts = make_data( + dom_size, n_domains / 6, max_obs, max_obs_size, n_traj, + state_batch_size) + # Save dataset + np.savez_compressed(save_path, X_out_tr, S1_out_tr, S2_out_tr, + Labels_out_tr, X_out_ts, S1_out_ts, S2_out_ts, + Labels_out_ts) + + +if __name__ == '__main__': + main() diff --git a/src/algorithms/learning/VIN/domains/__init__.py b/src/algorithms/learning/VIN/domains/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/algorithms/learning/VIN/domains/gridworld.py b/src/algorithms/learning/VIN/domains/gridworld.py new file mode 100644 index 000000000..9f42cb539 --- /dev/null +++ b/src/algorithms/learning/VIN/domains/gridworld.py @@ -0,0 +1,452 @@ +import numpy as np +from scipy.sparse import csr_matrix +from scipy.sparse.csgraph import dijkstra +import logging +import gc + + +class gridworld: + """A class for making gridworlds""" + + def __init__(self, image, targetx, 
targety): + self.image = image + self.n_row = image.shape[0] + self.n_col = image.shape[1] + self.obstacles = [] + self.freespace = [] + self.targetx = targetx + self.targety = targety + self.G = [] + self.W = [] + self.R = [] + self.P = [] + self.A = [] + self.n_states = 0 + self.n_actions = 0 + self.state_map_col = [] + self.state_map_row = [] + self.set_vals() + + def set_vals(self): + # Setup function to initialize all necessary + # data + row_obs, col_obs = np.where(self.image == 0) + row_free, col_free = np.where(self.image != 0) + self.obstacles = [row_obs, col_obs] + self.freespace = [row_free, col_free] + + n_states = self.n_row * self.n_col + n_actions = 8 + self.n_states = n_states + self.n_actions = n_actions + + p_n = np.zeros((self.n_states, self.n_states),np.int8) + p_s = np.zeros((self.n_states, self.n_states),np.int8) + p_e = np.zeros((self.n_states, self.n_states),np.int8) + p_w = np.zeros((self.n_states, self.n_states),np.int8) + p_ne = np.zeros((self.n_states, self.n_states),np.int8) + p_nw = np.zeros((self.n_states, self.n_states),np.int8) + p_se = np.zeros((self.n_states, self.n_states),np.int8) + p_sw = np.zeros((self.n_states, self.n_states),np.int8) + + R = -1 * np.ones((self.n_states, self.n_actions)) + R[:, 4:self.n_actions] = R[:, 4:self.n_actions] * np.sqrt(2) + target = np.ravel_multi_index( + [self.targetx, self.targety], (self.n_row, self.n_col), order='F') + R[target, :] = 0 + + + for row in range(0, self.n_row): + for col in range(0, self.n_col): + + curpos = np.ravel_multi_index( + [row, col], (self.n_row, self.n_col), order='F') + + rows, cols = self.neighbors(row, col) + + neighbor_inds = np.ravel_multi_index( + [rows, cols], (self.n_row, self.n_col), order='F') + + p_n[curpos, neighbor_inds[ + 0]] = p_n[curpos, neighbor_inds[0]] + 1 + p_s[curpos, neighbor_inds[ + 1]] = p_s[curpos, neighbor_inds[1]] + 1 + p_e[curpos, neighbor_inds[ + 2]] = p_e[curpos, neighbor_inds[2]] + 1 + p_w[curpos, neighbor_inds[ + 3]] = p_w[curpos, 
neighbor_inds[3]] + 1 + p_ne[curpos, neighbor_inds[ + 4]] = p_ne[curpos, neighbor_inds[4]] + 1 + p_nw[curpos, neighbor_inds[ + 5]] = p_nw[curpos, neighbor_inds[5]] + 1 + p_se[curpos, neighbor_inds[ + 6]] = p_se[curpos, neighbor_inds[6]] + 1 + p_sw[curpos, neighbor_inds[ + 7]] = p_sw[curpos, neighbor_inds[7]] + 1 + + #NSEW bool matrix + Q_1 = np.logical_or.reduce((p_n, p_s)) + Q_1 = np.logical_or.reduce((Q_1, p_e)) + Q_1 = np.logical_or.reduce((Q_1, p_w)) + #Those were all the N-S-E-W matrix + # Now for the diagonal matrix (ne, nw, se, sw) + Q_rt2 = np.logical_or.reduce((p_nw, p_ne)) + Q_rt2 = np.logical_or.reduce((Q_rt2, p_se)) + Q_rt2 = np.logical_or.reduce((Q_rt2, p_sw)) + + #Now combine the two + G = np.logical_or.reduce((Q_1,Q_rt2)) #This one is G like before + gc.collect() + # combines the diagonals and the vertical-horizontals + #This is the array with true replaced with 1 + W= np.array(Q_1, dtype=np.float32) + W_rt2 = np.array(Q_rt2, dtype=np.float32) + # W_and = np.logical_and.reduce((W_1, W_rt2)) + + #This will remove all common obstacles + W+= - W_rt2 + W = np.clip(W,0,1) #This will remove all negative 1 from the interesected ones + # So the resulting matrix will have the intersected portions removed. 
+ W_rt2 *= np.sqrt(2) + # W_1 = W_1 - W_and + #combine both + W += W_rt2 + print(W) + + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + + non_obstacles = np.sort(non_obstacles) + p_n = p_n[non_obstacles, :] + p_n = np.expand_dims(p_n[:, non_obstacles], axis=2) + p_s = p_s[non_obstacles, :] + p_s = np.expand_dims(p_s[:, non_obstacles], axis=2) + p_e = p_e[non_obstacles, :] + p_e = np.expand_dims(p_e[:, non_obstacles], axis=2) + p_w = p_w[non_obstacles, :] + p_w = np.expand_dims(p_w[:, non_obstacles], axis=2) + p_ne = p_ne[non_obstacles, :] + p_ne = np.expand_dims(p_ne[:, non_obstacles], axis=2) + p_nw = p_nw[non_obstacles, :] + p_nw = np.expand_dims(p_nw[:, non_obstacles], axis=2) + p_se = p_se[non_obstacles, :] + p_se = np.expand_dims(p_se[:, non_obstacles], axis=2) + p_sw = p_sw[non_obstacles, :] + p_sw = np.expand_dims(p_sw[:, non_obstacles], axis=2) + G = G[non_obstacles, :] + G = G[:, non_obstacles] + W = W[non_obstacles, :] + W = W[:, non_obstacles] + R = R[non_obstacles, :] + + P = np.concatenate( + (p_n, p_s, p_e, p_w, p_ne, p_nw, p_se, p_sw), axis=2) + + self.G = G + self.W = W + self.P = P + self.R = R + state_map_col, state_map_row = np.meshgrid( + np.arange(0, self.n_col), np.arange(0, self.n_row)) + self.state_map_col = state_map_col.flatten('F')[non_obstacles] + + self.state_map_row = state_map_row.flatten('F')[non_obstacles] #see what self.statemaprow is before flattening + + + + + def get_graph(self): + # Returns graph + G = self.G + W = self.W[self.W != 0] + return G, W + + def get_graph_inv(self): + # Returns transpose of graph + G = self.G.T + W = self.W.T + return G, W + + def val_2_image(self, val): + # Zeros for obstacles, val for free space + im = np.zeros((self.n_row, self.n_col)) + im[self.freespace[0], self.freespace[1]] = val + return im + + def get_value_prior(self): + # Returns value prior for gridworld + s_map_col, s_map_row = np.meshgrid( + np.arange(0, 
self.n_col), np.arange(0, self.n_row)) + im = np.sqrt( + np.square(s_map_col - self.targety) + + np.square(s_map_row - self.targetx)) + return im + + def get_reward_prior(self): + # Returns reward prior for gridworld + im = -1 * np.ones((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def t_get_reward_prior(self): + # Returns reward prior as needed for + # dataset generation + im = np.zeros((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def get_state_image(self, row, col): + # Zeros everywhere except [row,col] + im = np.zeros((self.n_row, self.n_col)) + im[row, col] = 1 + return im + + def map_ind_to_state(self, row, col): + # Takes [row, col] and maps to a state + ''' + Finds the position of the two integers passed in the freespace, (self.state_map_row and self.state_map_col), + and returns the intersection of the two (row and col, to give the coordinate) as a index of self.state_map_row. + ''' + logging.debug('Trying to find row %s', row) + logging.debug('Trying to find col %s', col) + + rw = np.where(self.state_map_row == row) #in the list self.state_map_row , what position (1,2,3...) is equal to row (int) + cl = np.where(self.state_map_col == col) #i.e where is value col in the aarray self.state_map_col + ''' + + The above acts as np.nonzero, i.e where in the available space is the targetx and target y (row col) + So you find where target_x is available, and where target_y is available, and then you find the intersect, + which should be the position of the goal. 
so self.state_map_row[16] and self.state_map_row[16], i.e 16th element + of self.state_map_row, and 16th element of self.state_map_col + + ''' + logging.debug('self.state_map_row = : %s', self.state_map_row) + logging.debug('self.state_map_col = : %s', self.state_map_col) + + + logging.debug('rw = : %s ', rw) + logging.debug('cl = : %s', cl) + + + return np.intersect1d(rw, cl)[0] + + def get_coords(self, states): + # Given a state or states, returns + # [row,col] pairs for the state(s) + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + non_obstacles = np.sort(non_obstacles) + states = states.astype(int) + r, c = np.unravel_index( + non_obstacles[states], (self.n_col, self.n_row), order='F') + return r, c + + def rand_choose(self, in_vec): + # Samples + if len(in_vec.shape) > 1: + if in_vec.shape[1] == 1: + in_vec = in_vec.T + temp = np.hstack((np.zeros((1)), np.cumsum(in_vec))).astype('int') + q = np.random.rand() + x = np.where(q > temp[0:-1]) + y = np.where(q < temp[1:]) + return np.intersect1d(x, y)[0] + + def next_state_prob(self, s, a): + # Gets next state probability for + # a given action (a) + if hasattr(a, "__iter__"): + p = np.squeeze(self.P[s, :, a]) + else: + p = np.squeeze(self.P[s, :, a]).T + return p + + def sample_next_state(self, s, a): + # Gets the next state given the + # current state (s) and an + # action (a) + vec = self.next_state_prob(s, a) + result = self.rand_choose(vec) + return result + + def get_size(self): + # Returns domain size + return self.n_row, self.n_col + + def north(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.min([col + 1, self.n_col - 1]) + if 
self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def south(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def east(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def west(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def neighbors(self, row, col): + # Get valid neighbors in all valid directions + rows, cols = self.north(row, col) + new_row, new_col = self.south(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.east(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.west(row, col) + rows, cols = 
np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + return rows, cols + + def return_state_map_row(self): + return self.state_map_row, self.state_map_col + + +def trace_path(pred, source, target): + # traces back shortest path from + # source to target given pred + # (a predicessor list) + max_len = 1000 + path = np.zeros((max_len, 1)) + i = max_len - 1 + path[i] = target + while path[i] != source and i > 0: + try: + path[i - 1] = pred[int(path[i])] + i -= 1 + except Exception as e: + return [] + if i >= 0: + path = path[i:] + else: + path = None + return path + + +def sample_trajectory(M, n_states,start,gen=False): #Everything here, find agent (start ... source etc. ) + # Samples trajectories from random nodes + # in our domain (M) + G, W = M.get_graph_inv() + N = G.shape[0] + if N >= n_states: + rand_ind = np.random.permutation(N) + else: + rand_ind = np.tile(np.random.permutation(N), (1, 10)) + init_states = rand_ind[0:n_states].flatten() #TODO: This is where start is chosen. 
+ + #init_states is a list with the index to the start position in the free space (state_map_row, state_map_col) + goal_s = M.map_ind_to_state(M.targetx, M.targety) #This is not the source, rather, this is the index of the goal + + states = [] + states_xy = [] + states_one_hot = [] + + if not gen: + start_x = start[0] + start_y = start[1] + init_states = [M.map_ind_to_state(start_x,start_y)] #Because the goal and agent I provide are in the form + #x and y, but in terms of row, and col , it would be x = col and y = row + + # Get optimal path from graph + g_dense = W + g_masked = np.ma.masked_values(g_dense, 0) + g_sparse = csr_matrix(g_dense) + + + state_map_row, state_map_col = M.return_state_map_row() + + + logging.debug('init_states_index = %s', init_states[0]) + logging.info('Start position is %s %s', state_map_row[init_states[0]],state_map_col[init_states[0]]) + d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True) + for i in range(n_states): + path = trace_path(pred, goal_s, init_states[i]) #goal_s is source + path = np.flip(path, 0) + states.append(path) + for state in states: + L = len(state) + r, c = M.get_coords(state) + row_m = np.zeros((L, M.n_row)) + col_m = np.zeros((L, M.n_col)) + for i in range(L): + row_m[i, r[i]] = 1 + col_m[i, c[i]] = 1 + states_one_hot.append(np.hstack((row_m, col_m))) + states_xy.append(np.hstack((r, c))) + return states_xy, states_one_hot diff --git a/src/algorithms/learning/VIN/domains/gridworld_before_mem.py b/src/algorithms/learning/VIN/domains/gridworld_before_mem.py new file mode 100644 index 000000000..bc3c0c495 --- /dev/null +++ b/src/algorithms/learning/VIN/domains/gridworld_before_mem.py @@ -0,0 +1,435 @@ +import numpy as np +from scipy.sparse import csr_matrix +from scipy.sparse.csgraph import dijkstra +import logging + + +class gridworld: + """A class for making gridworlds""" + + def __init__(self, image, targetx, targety): + self.image = image + self.n_row = image.shape[0] + self.n_col = 
image.shape[1] + self.obstacles = [] + self.freespace = [] + self.targetx = targetx + self.targety = targety + self.G = [] + self.W = [] + self.R = [] + self.P = [] + self.A = [] + self.n_states = 0 + self.n_actions = 0 + self.state_map_col = [] + self.state_map_row = [] + self.set_vals() + + def set_vals(self): + # Setup function to initialize all necessary + # data + row_obs, col_obs = np.where(self.image == 0) + row_free, col_free = np.where(self.image != 0) + self.obstacles = [row_obs, col_obs] + self.freespace = [row_free, col_free] + + n_states = self.n_row * self.n_col + n_actions = 8 + self.n_states = n_states + self.n_actions = n_actions + + p_n = np.zeros((self.n_states, self.n_states),np.int8) + p_s = np.zeros((self.n_states, self.n_states),np.int8) + p_e = np.zeros((self.n_states, self.n_states),np.int8) + p_w = np.zeros((self.n_states, self.n_states),np.int8) + p_ne = np.zeros((self.n_states, self.n_states),np.int8) + p_nw = np.zeros((self.n_states, self.n_states),np.int8) + p_se = np.zeros((self.n_states, self.n_states),np.int8) + p_sw = np.zeros((self.n_states, self.n_states),np.int8) + print('Line 50') + R = -1 * np.ones((self.n_states, self.n_actions)) + R[:, 4:self.n_actions] = R[:, 4:self.n_actions] * np.sqrt(2) + target = np.ravel_multi_index( + [self.targetx, self.targety], (self.n_row, self.n_col), order='F') + R[target, :] = 0 + print('Line 56') + + for row in range(0, self.n_row): + for col in range(0, self.n_col): + + curpos = np.ravel_multi_index( + [row, col], (self.n_row, self.n_col), order='F') + + rows, cols = self.neighbors(row, col) + + neighbor_inds = np.ravel_multi_index( + [rows, cols], (self.n_row, self.n_col), order='F') + + p_n[curpos, neighbor_inds[ + 0]] = p_n[curpos, neighbor_inds[0]] + 1 + p_s[curpos, neighbor_inds[ + 1]] = p_s[curpos, neighbor_inds[1]] + 1 + p_e[curpos, neighbor_inds[ + 2]] = p_e[curpos, neighbor_inds[2]] + 1 + p_w[curpos, neighbor_inds[ + 3]] = p_w[curpos, neighbor_inds[3]] + 1 + p_ne[curpos, 
neighbor_inds[ + 4]] = p_ne[curpos, neighbor_inds[4]] + 1 + p_nw[curpos, neighbor_inds[ + 5]] = p_nw[curpos, neighbor_inds[5]] + 1 + p_se[curpos, neighbor_inds[ + 6]] = p_se[curpos, neighbor_inds[6]] + 1 + p_sw[curpos, neighbor_inds[ + 7]] = p_sw[curpos, neighbor_inds[7]] + 1 + print('Line 85') + G = np.logical_or.reduce((p_n, p_s, p_e, p_w, p_ne, p_nw, p_se, p_sw)) + print('G ', G.shape) + print('Line 87') + W = np.maximum( + np.maximum( + np.maximum( + np.maximum( + np.maximum(np.maximum(np.maximum(p_n, p_s), p_e), p_w), + np.sqrt(2) * p_ne), + np.sqrt(2) * p_nw), + np.sqrt(2) * p_se), + np.sqrt(2) * p_sw) + + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + print('Line 101') + non_obstacles = np.sort(non_obstacles) + p_n = p_n[non_obstacles, :] + p_n = np.expand_dims(p_n[:, non_obstacles], axis=2) + p_s = p_s[non_obstacles, :] + p_s = np.expand_dims(p_s[:, non_obstacles], axis=2) + p_e = p_e[non_obstacles, :] + p_e = np.expand_dims(p_e[:, non_obstacles], axis=2) + p_w = p_w[non_obstacles, :] + p_w = np.expand_dims(p_w[:, non_obstacles], axis=2) + p_ne = p_ne[non_obstacles, :] + p_ne = np.expand_dims(p_ne[:, non_obstacles], axis=2) + p_nw = p_nw[non_obstacles, :] + p_nw = np.expand_dims(p_nw[:, non_obstacles], axis=2) + p_se = p_se[non_obstacles, :] + p_se = np.expand_dims(p_se[:, non_obstacles], axis=2) + p_sw = p_sw[non_obstacles, :] + p_sw = np.expand_dims(p_sw[:, non_obstacles], axis=2) + G = G[non_obstacles, :] + G = G[:, non_obstacles] + W = W[non_obstacles, :] + W = W[:, non_obstacles] + R = R[non_obstacles, :] + + P = np.concatenate( + (p_n, p_s, p_e, p_w, p_ne, p_nw, p_se, p_sw), axis=2) + print('Line 127') + self.G = G + self.W = W + self.P = P + self.R = R + state_map_col, state_map_row = np.meshgrid( + np.arange(0, self.n_col), np.arange(0, self.n_row)) + self.state_map_col = state_map_col.flatten('F')[non_obstacles] + + self.state_map_row = 
state_map_row.flatten('F')[non_obstacles] #see what self.statemaprow is before flattening + + + + + def get_graph(self): + # Returns graph + G = self.G + W = self.W[self.W != 0] + return G, W + + def get_graph_inv(self): + # Returns transpose of graph + G = self.G.T + W = self.W.T + return G, W + + def val_2_image(self, val): + # Zeros for obstacles, val for free space + im = np.zeros((self.n_row, self.n_col)) + im[self.freespace[0], self.freespace[1]] = val + return im + + def get_value_prior(self): + # Returns value prior for gridworld + s_map_col, s_map_row = np.meshgrid( + np.arange(0, self.n_col), np.arange(0, self.n_row)) + im = np.sqrt( + np.square(s_map_col - self.targety) + + np.square(s_map_row - self.targetx)) + return im + + def get_reward_prior(self): + # Returns reward prior for gridworld + im = -1 * np.ones((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def t_get_reward_prior(self): + # Returns reward prior as needed for + # dataset generation + im = np.zeros((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def get_state_image(self, row, col): + # Zeros everywhere except [row,col] + im = np.zeros((self.n_row, self.n_col)) + im[row, col] = 1 + return im + + def map_ind_to_state(self, row, col): + # Takes [row, col] and maps to a state + ''' + Finds the position of the two integers passed in the freespace, (self.state_map_row and self.state_map_col), + and returns the intersection of the two (row and col, to give the coordinate) as a index of self.state_map_row. + ''' + logging.debug('Trying to find row %s', row) + logging.debug('Trying to find col %s', col) + + rw = np.where(self.state_map_row == row) #in the list self.state_map_row , what position (1,2,3...) 
is equal to row (int) + cl = np.where(self.state_map_col == col) #i.e where is value col in the aarray self.state_map_col + ''' + + The above acts as np.nonzero, i.e where in the available space is the targetx and target y (row col) + So you find where target_x is available, and where target_y is available, and then you find the intersect, + which should be the position of the goal. so self.state_map_row[16] and self.state_map_row[16], i.e 16th element + of self.state_map_row, and 16th element of self.state_map_col + + ''' + logging.debug('self.state_map_row = : %s', self.state_map_row) + logging.debug('self.state_map_col = : %s', self.state_map_col) + + + logging.debug('rw = : %s ', rw) + logging.debug('cl = : %s', cl) + + + return np.intersect1d(rw, cl)[0] + + def get_coords(self, states): + # Given a state or states, returns + # [row,col] pairs for the state(s) + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + non_obstacles = np.sort(non_obstacles) + states = states.astype(int) + r, c = np.unravel_index( + non_obstacles[states], (self.n_col, self.n_row), order='F') + return r, c + + def rand_choose(self, in_vec): + # Samples + if len(in_vec.shape) > 1: + if in_vec.shape[1] == 1: + in_vec = in_vec.T + temp = np.hstack((np.zeros((1)), np.cumsum(in_vec))).astype('int') + q = np.random.rand() + x = np.where(q > temp[0:-1]) + y = np.where(q < temp[1:]) + return np.intersect1d(x, y)[0] + + def next_state_prob(self, s, a): + # Gets next state probability for + # a given action (a) + if hasattr(a, "__iter__"): + p = np.squeeze(self.P[s, :, a]) + else: + p = np.squeeze(self.P[s, :, a]).T + return p + + def sample_next_state(self, s, a): + # Gets the next state given the + # current state (s) and an + # action (a) + vec = self.next_state_prob(s, a) + result = self.rand_choose(vec) + return result + + def get_size(self): + # Returns domain size + return self.n_row, self.n_col + + def north(self, row, 
col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def south(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def east(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def west(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def neighbors(self, 
row, col): + # Get valid neighbors in all valid directions + rows, cols = self.north(row, col) + new_row, new_col = self.south(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.east(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.west(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + return rows, cols + + def return_state_map_row(self): + return self.state_map_row, self.state_map_col + + +def trace_path(pred, source, target): + # traces back shortest path from + # source to target given pred + # (a predicessor list) + max_len = 1000 + path = np.zeros((max_len, 1)) + i = max_len - 1 + path[i] = target + while path[i] != source and i > 0: + try: + path[i - 1] = pred[int(path[i])] + i -= 1 + except Exception as e: + return [] + if i >= 0: + path = path[i:] + else: + path = None + return path + + +def sample_trajectory(M, n_states,start,gen=False): #Everything here, find agent (start ... source etc. ) + # Samples trajectories from random nodes + # in our domain (M) + G, W = M.get_graph_inv() + N = G.shape[0] + if N >= n_states: + rand_ind = np.random.permutation(N) + else: + rand_ind = np.tile(np.random.permutation(N), (1, 10)) + init_states = rand_ind[0:n_states].flatten() #TODO: This is where start is chosen. 
+ + #init_states is a list with the index to the start position in the free space (state_map_row, state_map_col) + goal_s = M.map_ind_to_state(M.targetx, M.targety) #This is not the source, rather, this is the index of the goal + + states = [] + states_xy = [] + states_one_hot = [] + + if not gen: + start_x = start[0] + start_y = start[1] + init_states = [M.map_ind_to_state(start_x,start_y)] #Because the goal and agent I provide are in the form + #x and y, but in terms of row, and col , it would be x = col and y = row + + # Get optimal path from graph + g_dense = W + g_masked = np.ma.masked_values(g_dense, 0) + g_sparse = csr_matrix(g_dense) + + + state_map_row, state_map_col = M.return_state_map_row() + + + logging.debug('init_states_index = %s', init_states[0]) + logging.info('Start position is %s %s', state_map_row[init_states[0]],state_map_col[init_states[0]]) + d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True) + for i in range(n_states): + path = trace_path(pred, goal_s, init_states[i]) #goal_s is source + path = np.flip(path, 0) + states.append(path) + for state in states: + L = len(state) + r, c = M.get_coords(state) + row_m = np.zeros((L, M.n_row)) + col_m = np.zeros((L, M.n_col)) + for i in range(L): + row_m[i, r[i]] = 1 + col_m[i, c[i]] = 1 + states_one_hot.append(np.hstack((row_m, col_m))) + states_xy.append(np.hstack((r, c))) + return states_xy, states_one_hot diff --git a/src/algorithms/learning/VIN/domains/gridworld_og.py b/src/algorithms/learning/VIN/domains/gridworld_og.py new file mode 100644 index 000000000..c60a4b7a5 --- /dev/null +++ b/src/algorithms/learning/VIN/domains/gridworld_og.py @@ -0,0 +1,385 @@ +import numpy as np +from scipy.sparse import csr_matrix +from scipy.sparse.csgraph import dijkstra + + +class gridworld: + """A class for making gridworlds""" + + def __init__(self, image, targetx, targety): + self.image = image + self.n_row = image.shape[0] + self.n_col = image.shape[1] + self.obstacles = [] + 
self.freespace = [] + self.targetx = targetx + self.targety = targety + self.G = [] + self.W = [] + self.R = [] + self.P = [] + self.A = [] + self.n_states = 0 + self.n_actions = 0 + self.state_map_col = [] + self.state_map_row = [] + self.set_vals() + + def set_vals(self): + # Setup function to initialize all necessary + # data + row_obs, col_obs = np.where(self.image == 0) + row_free, col_free = np.where(self.image != 0) + self.obstacles = [row_obs, col_obs] + self.freespace = [row_free, col_free] + + n_states = self.n_row * self.n_col + n_actions = 8 + self.n_states = n_states + self.n_actions = n_actions + + p_n = np.zeros((self.n_states, self.n_states)) + p_s = np.zeros((self.n_states, self.n_states)) + p_e = np.zeros((self.n_states, self.n_states)) + p_w = np.zeros((self.n_states, self.n_states)) + p_ne = np.zeros((self.n_states, self.n_states)) + p_nw = np.zeros((self.n_states, self.n_states)) + p_se = np.zeros((self.n_states, self.n_states)) + p_sw = np.zeros((self.n_states, self.n_states)) + + R = -1 * np.ones((self.n_states, self.n_actions)) + R[:, 4:self.n_actions] = R[:, 4:self.n_actions] * np.sqrt(2) + target = np.ravel_multi_index( + [self.targetx, self.targety], (self.n_row, self.n_col), order='F') + R[target, :] = 0 + + for row in range(0, self.n_row): + for col in range(0, self.n_col): + + curpos = np.ravel_multi_index( + [row, col], (self.n_row, self.n_col), order='F') + + rows, cols = self.neighbors(row, col) + + neighbor_inds = np.ravel_multi_index( + [rows, cols], (self.n_row, self.n_col), order='F') + + p_n[curpos, neighbor_inds[ + 0]] = p_n[curpos, neighbor_inds[0]] + 1 + p_s[curpos, neighbor_inds[ + 1]] = p_s[curpos, neighbor_inds[1]] + 1 + p_e[curpos, neighbor_inds[ + 2]] = p_e[curpos, neighbor_inds[2]] + 1 + p_w[curpos, neighbor_inds[ + 3]] = p_w[curpos, neighbor_inds[3]] + 1 + p_ne[curpos, neighbor_inds[ + 4]] = p_ne[curpos, neighbor_inds[4]] + 1 + p_nw[curpos, neighbor_inds[ + 5]] = p_nw[curpos, neighbor_inds[5]] + 1 + p_se[curpos, 
neighbor_inds[ + 6]] = p_se[curpos, neighbor_inds[6]] + 1 + p_sw[curpos, neighbor_inds[ + 7]] = p_sw[curpos, neighbor_inds[7]] + 1 + + G = np.logical_or.reduce((p_n, p_s, p_e, p_w, p_ne, p_nw, p_se, p_sw)) + + W = np.maximum( + np.maximum( + np.maximum( + np.maximum( + np.maximum(np.maximum(np.maximum(p_n, p_s), p_e), p_w), + np.sqrt(2) * p_ne), + np.sqrt(2) * p_nw), + np.sqrt(2) * p_se), + np.sqrt(2) * p_sw) + + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + + non_obstacles = np.sort(non_obstacles) + p_n = p_n[non_obstacles, :] + p_n = np.expand_dims(p_n[:, non_obstacles], axis=2) + p_s = p_s[non_obstacles, :] + p_s = np.expand_dims(p_s[:, non_obstacles], axis=2) + p_e = p_e[non_obstacles, :] + p_e = np.expand_dims(p_e[:, non_obstacles], axis=2) + p_w = p_w[non_obstacles, :] + p_w = np.expand_dims(p_w[:, non_obstacles], axis=2) + p_ne = p_ne[non_obstacles, :] + p_ne = np.expand_dims(p_ne[:, non_obstacles], axis=2) + p_nw = p_nw[non_obstacles, :] + p_nw = np.expand_dims(p_nw[:, non_obstacles], axis=2) + p_se = p_se[non_obstacles, :] + p_se = np.expand_dims(p_se[:, non_obstacles], axis=2) + p_sw = p_sw[non_obstacles, :] + p_sw = np.expand_dims(p_sw[:, non_obstacles], axis=2) + G = G[non_obstacles, :] + G = G[:, non_obstacles] + W = W[non_obstacles, :] + W = W[:, non_obstacles] + R = R[non_obstacles, :] + + P = np.concatenate( + (p_n, p_s, p_e, p_w, p_ne, p_nw, p_se, p_sw), axis=2) + + self.G = G + self.W = W + self.P = P + self.R = R + state_map_col, state_map_row = np.meshgrid( + np.arange(0, self.n_col), np.arange(0, self.n_row)) + self.state_map_col = state_map_col.flatten('F')[non_obstacles] + self.state_map_row = state_map_row.flatten('F')[non_obstacles] + + def get_graph(self): + # Returns graph + G = self.G + W = self.W[self.W != 0] + return G, W + + def get_graph_inv(self): + # Returns transpose of graph + G = self.G.T + W = self.W.T + return G, W + + def val_2_image(self, val): + # 
Zeros for obstacles, val for free space + im = np.zeros((self.n_row, self.n_col)) + im[self.freespace[0], self.freespace[1]] = val + return im + + def get_value_prior(self): + # Returns value prior for gridworld + s_map_col, s_map_row = np.meshgrid( + np.arange(0, self.n_col), np.arange(0, self.n_row)) + im = np.sqrt( + np.square(s_map_col - self.targety) + + np.square(s_map_row - self.targetx)) + return im + + def get_reward_prior(self): + # Returns reward prior for gridworld + im = -1 * np.ones((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def t_get_reward_prior(self): + # Returns reward prior as needed for + # dataset generation + im = np.zeros((self.n_row, self.n_col)) + im[self.targetx, self.targety] = 10 + return im + + def get_state_image(self, row, col): + # Zeros everywhere except [row,col] + im = np.zeros((self.n_row, self.n_col)) + im[row, col] = 1 + return im + + def map_ind_to_state(self, row, col): + # Takes [row, col] and maps to a state + rw = np.where(self.state_map_row == row) + cl = np.where(self.state_map_col == col) + return np.intersect1d(rw, cl)[0] + + def get_coords(self, states): + # Given a state or states, returns + # [row,col] pairs for the state(s) + non_obstacles = np.ravel_multi_index( + [self.freespace[0], self.freespace[1]], (self.n_row, self.n_col), + order='F') + non_obstacles = np.sort(non_obstacles) + states = states.astype(int) + r, c = np.unravel_index( + non_obstacles[states], (self.n_col, self.n_row), order='F') + return r, c + + def rand_choose(self, in_vec): + # Samples + if len(in_vec.shape) > 1: + if in_vec.shape[1] == 1: + in_vec = in_vec.T + temp = np.hstack((np.zeros((1)), np.cumsum(in_vec))).astype('int') + q = np.random.rand() + x = np.where(q > temp[0:-1]) + y = np.where(q < temp[1:]) + return np.intersect1d(x, y)[0] + + def next_state_prob(self, s, a): + # Gets next state probability for + # a given action (a) + if hasattr(a, "__iter__"): + p = np.squeeze(self.P[s, :, a]) + else: 
+ p = np.squeeze(self.P[s, :, a]).T + return p + + def sample_next_state(self, s, a): + # Gets the next state given the + # current state (s) and an + # action (a) + vec = self.next_state_prob(s, a) + result = self.rand_choose(vec) + return result + + def get_size(self): + # Returns domain size + return self.n_row, self.n_col + + def north(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def northwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.max([row - 1, 0]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def south(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = col + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southeast(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.min([col + 1, self.n_col - 1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def southwest(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = np.min([row + 1, self.n_row - 1]) + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def east(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.min([col + 1, self.n_col - 
1]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def west(self, row, col): + # Returns new [row,col] + # if we take the action + new_row = row + new_col = np.max([col - 1, 0]) + if self.image[new_row, new_col] == 0: + new_row = row + new_col = col + return new_row, new_col + + def neighbors(self, row, col): + # Get valid neighbors in all valid directions + rows, cols = self.north(row, col) + new_row, new_col = self.south(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.east(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.west(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.northwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southeast(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + new_row, new_col = self.southwest(row, col) + rows, cols = np.append(rows, new_row), np.append(cols, new_col) + return rows, cols + + +def trace_path(pred, source, target): + # traces back shortest path from + # source to target given pred + # (a predicessor list) + max_len = 1000 + path = np.zeros((max_len, 1)) + i = max_len - 1 + path[i] = target + while path[i] != source and i > 0: + try: + path[i - 1] = pred[int(path[i])] + i -= 1 + except Exception as e: + return [] + if i >= 0: + path = path[i:] + else: + path = None + return path + + +def sample_trajectory(M, n_states): + # Samples trajectories from random nodes + # in our domain (M) + G, W = M.get_graph_inv() + N = G.shape[0] + if N >= n_states: + rand_ind = np.random.permutation(N) + else: + rand_ind = np.tile(np.random.permutation(N), (1, 10)) + init_states = rand_ind[0:n_states].flatten() + goal_s = 
M.map_ind_to_state(M.targetx, M.targety) + states = [] + states_xy = [] + states_one_hot = [] + # Get optimal path from graph + g_dense = W + g_masked = np.ma.masked_values(g_dense, 0) + g_sparse = csr_matrix(g_dense) + d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True) + for i in range(n_states): + path = trace_path(pred, goal_s, init_states[i]) + path = np.flip(path, 0) + states.append(path) + for state in states: + L = len(state) + r, c = M.get_coords(state) + row_m = np.zeros((L, M.n_row)) + col_m = np.zeros((L, M.n_col)) + for i in range(L): + row_m[i, r[i]] = 1 + col_m[i, c[i]] = 1 + states_one_hot.append(np.hstack((row_m, col_m))) + states_xy.append(np.hstack((r, c))) + return states_xy, states_one_hot \ No newline at end of file diff --git a/src/algorithms/learning/VIN/download_weights_and_datasets.sh b/src/algorithms/learning/VIN/download_weights_and_datasets.sh new file mode 100755 index 000000000..8d85b3455 --- /dev/null +++ b/src/algorithms/learning/VIN/download_weights_and_datasets.sh @@ -0,0 +1,9 @@ +cd trained +wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/vin_8x8.pth' +wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/vin_16x16.pth' +wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/vin_28x28.pth' +cd ../dataset +#wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/gridworld_8x8.npz' +# wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/gridworld_16x16.npz' +# wget 'https://github.com/kentsommer/pytorch-value-iteration-networks/releases/download/v1.1/gridworld_28x28.npz' +cd .. 
diff --git a/src/algorithms/learning/VIN/general_test16.py b/src/algorithms/learning/VIN/general_test16.py new file mode 100644 index 000000000..999ff4a95 --- /dev/null +++ b/src/algorithms/learning/VIN/general_test16.py @@ -0,0 +1,340 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=3000, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/generalization/16_w_64_model',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + print('Gen started') + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/16x16/' + mp, goal, start = 
open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal position as freespace too + + goal = [goal[1],goal[0]] #swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + t0 = time.time() + for i in range(n_traj): + if len(states_xy[i]) > 1: + + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = 
torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + 
logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', (total_dist/total)) + logging.info('Total avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(total))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/30k_no_block_dataset_vin_64x64.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=16, help='Size of image') + parser.add_argument( + '--k', type=int, default=20, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/general_test28.py b/src/algorithms/learning/VIN/general_test28.py new file mode 100644 index 000000000..6ab3dc04a --- /dev/null +++ 
b/src/algorithms/learning/VIN/general_test28.py @@ -0,0 +1,340 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=3000, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/generalization/28_w_64_model',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/28x28/' + mp, goal, start = open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal 
position as freespace too + + goal = [goal[1],goal[0]] #swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + t0 = time.time() + for i in range(n_traj): + if len(states_xy[i]) > 1: + + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = 
S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy Dijkstra: %s',(100*((total-total_no_soln)/total))) + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total 
avg VIN Distance %s', (total_dist/total)) + logging.info('Total avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(total))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/30k_no_block_dataset_vin_64x64.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=28, help='Size of image') + parser.add_argument( + '--k', type=int, default=36, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/general_test8.py b/src/algorithms/learning/VIN/general_test8.py new file mode 100644 index 000000000..968191203 --- /dev/null +++ 
b/src/algorithms/learning/VIN/general_test8.py @@ -0,0 +1,339 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=3000, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/generalization/8_w_64_model',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/8x8/' + mp, goal, start = open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal 
position as freespace too + + goal = [goal[1],goal[0]] #swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + t0 = time.time() + for i in range(n_traj): + if len(states_xy[i]) > 1: + + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = 
S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', (total_dist/total)) + logging.info('Total avg Dijkstra 
Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(total))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/30k_no_block_dataset_vin_64x64.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=8, help='Size of image') + parser.add_argument( + '--k', type=int, default=10, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/generators/__init__.py b/src/algorithms/learning/VIN/generators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/src/algorithms/learning/VIN/generators/obstacle_gen.py b/src/algorithms/learning/VIN/generators/obstacle_gen.py new file mode 100644 index 000000000..e8d0b4010 --- /dev/null +++ b/src/algorithms/learning/VIN/generators/obstacle_gen.py @@ -0,0 +1,93 @@ +import numpy as np +import matplotlib.pyplot as plt + + +class obstacles: + """A class for generating obstacles in a domain""" + + def __init__(self, + domsize=None, + mask=None, + size_max=None, + dom=None, #Possibly pass it a domain?? + obs_types=None, + num_types=None): + self.domsize = domsize or [] + self.mask = mask or [] + self.dom = dom or np.zeros(self.domsize) + self.obs_types = obs_types or ["circ", "rect"] + self.num_types = num_types or len(self.obs_types) + self.size_max = size_max or np.max(self.domsize) / 4 + + def check_mask(self, dom=None): + # Ensure goal is in free space + if dom is not None: + return np.any(dom[self.mask[0], self.mask[1]]) + else: + return np.any(self.dom[self.mask[0], self.mask[1]]) + + def insert_rect(self, x, y, height, width): + # Insert a rectangular obstacle into map + im_try = np.copy(self.dom) + im_try[x:x + height, y:y + width] = 1 + return im_try + + def add_rand_obs(self, obj_type): + # Add random (valid) obstacle to map + if obj_type == "circ": + print("circ is not yet implemented... 
sorry") + elif obj_type == "rect": + rand_height = int(np.ceil(np.random.rand() * self.size_max)) + rand_width = int(np.ceil(np.random.rand() * self.size_max)) + randx = int(np.ceil(np.random.rand() * (self.domsize[1] - 1))) + randy = int(np.ceil(np.random.rand() * (self.domsize[1] - 1))) + im_try = self.insert_rect(randx, randy, rand_height, rand_width) + if self.check_mask(im_try): + return False + else: + self.dom = im_try + return True + + def add_n_rand_obs(self, n): + # Add random (valid) obstacles to map + count = 0 + for i in range(n): + obj_type = "rect" + if self.add_rand_obs(obj_type): + count += 1 + return count + + def add_border(self): + # Make full outer border an obstacle + im_try = np.copy(self.dom) + im_try[0:self.domsize[0], 0] = 1 + im_try[0, 0:self.domsize[1]] = 1 + im_try[0:self.domsize[0], self.domsize[1] - 1] = 1 + im_try[self.domsize[0] - 1, 0:self.domsize[1]] = 1 + if self.check_mask(im_try): + return False + else: + self.dom = im_try + return True + + def get_final(self): + # Process obstacle map for domain + im = np.copy(self.dom) + im = np.max(im) - im + im = im / np.max(im) + return im + + def show(self): + # Utility function to view obstacle map + plt.imshow(self.get_final(), cmap='Greys') + plt.show() + + def _print(self): + # Utility function to view obstacle map + # information + print("domsize: ", self.domsize) + print("mask: ", self.mask) + print("dom: ", self.dom) + print("obs_types: ", self.obs_types) + print("num_types: ", self.num_types) + print("size_max: ", self.size_max) diff --git a/src/algorithms/learning/VIN/model.py b/src/algorithms/learning/VIN/model.py new file mode 100644 index 000000000..2d4865795 --- /dev/null +++ b/src/algorithms/learning/VIN/model.py @@ -0,0 +1,68 @@ +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.nn.parameter import Parameter + + +class VIN(nn.Module): + def __init__(self, config): + super(VIN, self).__init__() 
+ self.config = config + self.h = nn.Conv2d( + in_channels=config.l_i, + out_channels=config.l_h, + kernel_size=(3, 3), + stride=1, + padding=1, + bias=True) + self.r = nn.Conv2d( + in_channels=config.l_h, + out_channels=1, + kernel_size=(1, 1), + stride=1, + padding=0, + bias=False) + self.q = nn.Conv2d( + in_channels=1, + out_channels=config.l_q, + kernel_size=(3, 3), + stride=1, + padding=1, + bias=False) + self.fc = nn.Linear(in_features=config.l_q, out_features=8, bias=False) + self.w = Parameter( + torch.zeros(config.l_q, 1, 3, 3), requires_grad=True) + self.sm = nn.Softmax(dim=1) + + def forward(self, X, S1, S2, config): + h = self.h(X) + r = self.r(h) + q = self.q(r) + v, _ = torch.max(q, dim=1, keepdim=True) + for i in range(0, config.k - 1): + q = F.conv2d( + torch.cat([r, v], 1), + torch.cat([self.q.weight, self.w], 1), + stride=1, + padding=1) + v, _ = torch.max(q, dim=1, keepdim=True) + + q = F.conv2d( + torch.cat([r, v], 1), + torch.cat([self.q.weight, self.w], 1), + stride=1, + padding=1) + + slice_s1 = S1.long().expand(config.imsize, 1, config.l_q, q.size(0)) + slice_s1 = slice_s1.permute(3, 2, 1, 0) + q_out = q.gather(2, slice_s1).squeeze(2) + + slice_s2 = S2.long().expand(1, config.l_q, q.size(0)) + slice_s2 = slice_s2.permute(2, 1, 0) + q_out = q_out.gather(2, slice_s2).squeeze(2) + + logits = self.fc(q_out) + return logits, self.sm(logits) diff --git a/src/algorithms/learning/VIN/requirements.txt b/src/algorithms/learning/VIN/requirements.txt new file mode 100644 index 000000000..27dabf2f9 --- /dev/null +++ b/src/algorithms/learning/VIN/requirements.txt @@ -0,0 +1,4 @@ +scipy>=0.19.0 +matplotlib>=2.0.0 +numpy>=1.12.1 +torchvision>=0.1.8 diff --git a/src/algorithms/learning/VIN/results/16x16_1.png b/src/algorithms/learning/VIN/results/16x16_1.png new file mode 100644 index 0000000000000000000000000000000000000000..9eb172316c37b478f9af6b42ed17e550d5575a10 GIT binary patch literal 14540 
zcmeHu2T)XDy5%JZ7zm1}pad0EK!VaJ5?hSqoN2%Sk`c+d5fnucB`QczKr$^zMuH-u zfRbYqCFj^phW)PN%$wPH^Y*>kt*y6JYn26bxP9+`zyEyaJLmLcRpo2+yV!Oi2tqG+ z{jxfOP~s7UqMwci9uaNq9)cg#4zhBZbnwrU?)D@2d*|KjdJYJ}cmw@Uks_09fgoIn z+~td!uCJ$i+*~!aVz=k(GpYAoKly!FR&EPbkxt#(T$jKdz;lgvWG9?Zew`8)w*Nsu05f7hN6dGc>NsQ5 zQ>^vmpitX#pU8N;D33}?8rI8fe+&r;+0V*4?s$O?9}==}cy!ciV`X04c}h2L)K!wB z>dP1EqO)Q_9teHD$tTLqt>yCeLi<6-pBF~W=YLgM67w@-eYV_^6{wh~ckbLd(eQ?9 zfUx7q)2EsN4D9{nDU-JT{nKVp^gcm&+~!TgopDA3jVb7F*JNlp6b*X-pI%}UDPuZ zNNY?*i(toK`HkMG#0dshYs{ZIb?R<0H-a==ucJKo`xFiwcvtGNyf)oe;$oM#9C-Qi z_H~9b#+rDWvC{ z<)H?BPOl}kO#M80ueGJd-9jH#CyQz$g&55RDjpxAJl7F)zBdIH+lZBwwd=#3;5G7e z`uSehgqkl*h`DmUb>~|+2F|(DG$he@b3#X6L7`bgMJh=_s8sWV*$?&Gw-cq;#*VW~ zxog4!;0F6`K@spn_4sVB_mLVdG4k^BD>Nso=(K0yoT^xK#+np_S|bDuTGDldlHR?W zC<1holY4yNv~K*DFPAfo%Z(Hw1zTSnzZUlVd3;@+Vjdu$k?6^jC(-f^;r4Y-%<(ZyMveRrY&e&(u zsfx@fvk^aB8)-HAGbT#ZDndI`Kj}v0L*8@ewBg>TH;tgr^AkR*`>a%QV$A9e*>vVy zL9gP8P+3Z(vA16638f~2&!!bU8|He*(x}X~o>f~$M(2)!3HeI=??UNKv zXctIJu{&gyC0H_b7NBINrzAZiL-ER$D|xY;%*+WxL$~GB3v7GBLPCzoi4Q&%S(>lr zR0x6Xl3>yNZajd~8`e{sI&sc5Cn0>E!zP|%YEoXbi8n^uJ@yvScu^V z3$v^~P0r03d$a3DB*6m8@fn{cjxNfDaCl6e>G58lW?>#XXWOlNCo@=jEyWnu=eZbU z`X$%YuthExnlk#nd6LSNVLEb&^x~i{VYepBxjkniy61v1ZSKfE! 
z+oZ!Q(%!#UCBm;_&Nn-#DT6q$2==;tkztgKPZ@{z$_@1JIW`99hKFL*w^uH4o_1_j zIzJA#vgX^j=iE5Y`KqXMHc`E!}dAHhUi>O=g&cC=Pw|# zzbCsHye*R@l~_ktdVEYwyu7-)Dlab&d+>H?Y-ngGbAOt0Z-(9n$JOD;$&sT3{dacdS?T0eOK}9>ym?c3LX^q<5Hug?M3*WM`~g!aHsweJxXE2N^WlM#XRKXF*rjufq~9DPJnf_IW{rHuGv3}2cH?|$9apJ9Br6(wi>Pp*Uz&I z-?eL3uJ@*U`Jkz(DZQ--56>0YtapQ|4>KcHBDURP!^6Yl!;!{qnFYQ2RzJe2xbEfv zgTh}`FwFU4C*eNXrXPGh@BMpW(nPBQT9eSFoT}Mf#(ql7YU=ApagVQ##&g~5+8l-fVuJ6KQOvuYQ+xa0NneV!lSl9RnR*I;idg(SvExbVSgfB*hnUp;tx3*FmV>DuCV z`|(C@!z^ec_gW{n=CvE8UkYFt9}Jdh`xf+=h17WLy$0Awv@KDBu4~y8 z$FDC9Trs|JmtDyZJ7SvW2AN2;w#LK5Q}g487Qh5|IAhVz*B3NU27$s8r_Y9(&(>lb z2}Q1Y>Gcdi2?+tiLKHOChC)>^x4$t>_mmXKmpF{nHR|1oNrE+2y>Q`zI8-{uu2&Lg zU&T^WTiZO=m>}vhV@QAaoT5R2ZRJG}c48)Sc8+b!f9yz+sE-Kt$#ZF|{gtsqrn z(zEDzjJSo1z;d6~$h<4YvgH(D=o7)>>j!a;FW`o^7T6g(jWt{h$#q*av*|8WD=I1) zcH)pq6Dr?OpDkUEHLH7hRIZrZWz`B8!Z%$`YBN}CFW=&k+r`MJqo}AD?$DNnOK&&! zDg9Lvz<5L~{#NxfULhfsc0-3CZi}IBFV_0~7}GZGTn8Q;T>5(0?i3%NJfLuhsa{v9 zM}ckO01Yg!P5>jPwxwknEQRj$qR(P<-*vv(iyI@+b`$FfRNN@DY}Lz6>FO~a8d|YA zDY>4}Wz(%YEUBQL?nkTD{UN!mWH{o4(Cms7KK#nnt7|qYFNH7a7`Y`SE_iHkk7rkN zgqmv2q+5#-^GSeA{AbQI+HF6!X(x*heho^$S3fZHJyg_#Y>hTCpLJs2QkNB>b!7vv zTb$2gzp6O2QE%it@F8A&#a!6iV8c#r}*Uf#`uU>t) zYxnNfWEIXBho2YR$)l|GD04ZFs#Cxo*0&T?G^coZQ*UkqoMiDRM64)L zQYb$(J-^hmGiAd#)wV#wWO_0GvX^L;YAkcq-vfBsSx5cQ*YcMt)K`V?{tDsY`?%@s z7SvAZo&UggzTcuM*rk=sjuV!s@|PGRL)C>uxGKM2xqu z$35M+A^jtL{OS$5L+ErtjfFX}lIfqvj=z7RQXC*JRNIWWwzQrguB-me`Na+Kl3V-_ z9*iC9!S6%?F*IkKxUclNP9l}eDj&Hj-`>Ko&pgJ*aOF{gu2NsbSoX4% z_kcI^4Rq9KdF;T)Sqqx-d*{nVgaF z6rnr57omqaAPF#&S6o~_M%<~X;mxJITh$E7wUkV<(wkf6;j!g4=t(8_aoQ}jO9Og# z71$Bx<`yJg{rdGQ*K_rrnYvaj* z!NKit)_E}Rm9G4HLh|<3nb)*Ugr6jmZ<~N+AHZ=u*LqVObj?5QVV_$;rvt_?c~EfX+M%3kyT{ ziZpd~b?v8qJrUCO^z;mEqxRoN$;BvO^ z$M*~^3@%ZMH}^e|qW}rK8t=?u<3nT7$s4_GYWjWJSw|A4^dm!j6qxfjB9iRA60D@| z81{|fGiX;x?Z%VQc4aR#dz+VG)AC768wo}#NE^Fx2?z?d@g~213yt6O&Ykqa!osfn zw+RVanwsmk55y-OMPw~3EY?;AgRM%pO#v$wTqRk8@87@ArA-%KB);8py~o4XAlqic zC+17O{dn*0ZVCh1x#T!+bpVrxjGS4A?LMmI+gPp44Y2*8eLHp^7^~qYXeui|L#IPV 
z=@l$6q1Jp9cQ%)rRTL?G)lL%`DmFa*pThWJ@gqvqeKf`;EG$fOcGRuZ2qs>9YZY-H z$@IZkff6QQ*%cnistKcVn~my*xQt7D9hDQCyWBo>q=TnMj!Rn`7U?5!xk{h70(n;q9YAlkX!LW z4SyamCYpj~Aj7ac7&kAh!!?8&@!t;>1Y3WicrvS5$Ji?gwvnls*#yjDj>Q0nrv4oU+K z5-R98j?SFXw#9P5EF;b8V+G(;t+I-)5)15fpfzqz(CTxci;}kHpZ!_V;yaI1oQr8h zn68(J)vk7&ON{eW$D3Ng5=D?OplV{dEtbX_-8V_?PB6Xj!X+(^*6UbUq@p7p>|}q0 zE&+vfI$kOFrMc+2Yny%M02MD>FP zokPA_95yKXX^fjh@`G%eBAtp1(fRX)N^Hi{r+c7ZqhmKLZWwpZy;=c20fE*EUnPUv)Xf487{lRIAZc4xkg1dduZ7!|j0)k-}leG7OdYI9B}E0~9k z%y9_u@~U}zm;FWP>MXpva^g9@_WSn>siU)f<;Kh>6jSUU{oWfVMiPyv-UOgS;b7jR zVg;2f(=yB<|E#~7h{e^9I_{+7BIfg#?PC;sCdY-Cq<}uM($hWDTB-ISgx2zi7V#kU! z0!w)K;j{v%LFzy-UkmPNO|hW3(|=odCn%Et$QCN4@QAFwLzE%~^5#BV^#7Vh{7u$$ zUl|B6)r)wi`0AG5?0FQkR0-p0&!_goPDgAoM#iz;_ zhW97ef;@Ur7QyHOykr$m@PR@kjsw}A?pWFG4pp@M1H*oYs&bPXO-<_I5fR$f9;Ln5 za2k;GpK@TSSoZHn$L*#-7Jkqz^$7pa1^W3Y5Ny0I976e^Z@>F+xQ=6wQ#%NX88?mf z^j@8=4XGVghi%YzCUuRY0{al#8-b|)%*nv#&(6EhdbzI8Z*T6Aeay&vGf zVgmmnx$3Te6rhMdvWy&K8l!+)@mF#9-$BK{sK)o!C?uyET|kD!0IplV{zTV_7dGAQ0X@N%BAte8V3I zxleGKY?GIhYlNX#C);je=zA9wbuZj{zR~(9QH2k8>KXLC;6`FZ%XSyK1LZk$=8STr zFINI6$sm8DlE4Nz#*5quW1UOhC>#PbIz#0VA--S?A`$F!&t zd<42^fNmJ42~AX>C$u}18WFl}T`DXGZFa_|+B2Gv3TYb@!PD49!GujYif9KT<|iKF z|H-NNZ&P!B1Db!sEPn|cf?vx%T|2&y?FV=#3JCHV|DQxHj3#XUrsigpE5WLcJr((> zYG!t#)(KoS1T96h$gVs&V9}XlI??$d85%AIq$8j^O0kli;|s9h2+FHLlpjZjVl5pU zkK9C89w4lK5{UR>hOAK3n1^_JCm8VgpxsKi%!F~<^p)Xe%ht0V?qU6P*A%L@VGKSC z)UhFwdn;fHG_+Juae>jJ!b!SRd2X&|Qc@BN(TH$fHwggd#8u!}Tc9+clo(Y*P;4}< zjX-Dl<{Z=7G$|R--fas?el-?3jB@Gh0Y@s(RAjpOI;xn!2f;L~ooxq3g?^c5ku)?; zzEPcaszcwHz7ccyYI9x-zG+<9emuezMtFk-iW~U&v3>C#YSv2?55++RD!4AN=T%D!3=Y@_*f!tvbe;Zi^$Spo61pCcV;6g;I#Y>R^{*41zG3?4dZEmXdOP ze0&@bG7pBlWYGUnSsQ!=%bsGVnEGY+(t9O66zRbQrdn?#J*&1=Ftl$x>38hARPozY zqr-nXSX2J-Z>M++iF&M8fgpOAVOkOY&uvU6p>l9uY_pt( z;k|V<;UKp14_(ij9V0dk^KG*jFxQ57N_U+royHb$j&;$4`~?hM;otCMFufmNV^$w= z_N=Ak)CZgImQy{<&Y8=&2Io_G>|s6P36n`Lu{ zxvjOFwq;tbb7pgr{}Fd2{TgHZE4X&P`Cf0r&3jt~Uk9r~XW9pdv$mQo6&Sqw- zI~_*C?9Nv;9j+$6oo=v0Kf5_!C8yTgXqTDStXw{!&Simc>(TF(_cJxA`@$s5mc&C( 
z!8yAsvDu;1#nP*k*^485p@{J#uVF9Q99jNIBUq{>*xA+K5#k-X?0@=TWwg@`xW$d$l`nJ4 zN#a(|pG_Q|wpeJtsO4ElP5Z&m;2pN!$n3y-xLf{v>3oDKq87qq!bqFqOx`2-5Yt; zqxErwld-O{sHi;hBbo*H5Pw#oEKC9yvzE}qva*KQD2$KuO}o6f8L^T4OeoU5W@VDj z2g_6LkL9)KGt-kXmXTN59Coky#zjp@(;to17pu+5h1rCjU0hS$ZPxha&9^f}qZ~cZ zpF?VNe1o4nX>LkVfZ^bgd*0;AdEY|A;qr`UaYEniGCg6-m08BIjcJk8SU5#~Jh9R9 zZdbUJ#YxTCM|~d~Pun02?h0y%s%vQQlL6~dr5!b;gX;PJPy;bbvUT|nKF(l=5$eqS zTrIlcS&Uhn6_V80N-(0!={?=>}G0cE*5&P>hCS-KZvT;Hcn>&PpG zb9f4Ouu!~7csN-dQRt0aVnWn}z%GphVb68{{{3PGs%G!PVsLq*vLS7m>%(825v|s{ zo4Whj8BORY<8S-|Q$@~Wd4@zRhv^eO=rI7XqTGNX0r4FuT z7kT3kKyQDUn8;mKUCl2cQF1e`eX zZyz%=zuB$;|0PF@pGP`n7;kR&PpEni7Mq4|zlxo?>x&g3FOrVe@^La4m5@g#Kf`n;;8j=J-r?Wa}3@rh*(%_ev!1_V<14lCG+#Fdyqa!5ib$rbqIo zua5Lx?YxsNts1OW-sX~}5w1#BDvhl?EBvXtT5+sHY{re=&^_HSC-2QN_vVS&oqB>< ztS7Vg?4k5U9$h_(ygj<|a;JcQ#mEEX5!M8B)Bb>22AaQ^B)dRW$oZ|UAETxeI0)oh z^*B6q7JVqslJ{ssPR4Uj#1{+tK}%{9?Dk<)Ep2T@*{B~$zPeieNKbbddhn4nTOGpS*i5g zHzKzcns&BFnXYr|>F<9vt4lZ3MTo5)RD}Z>x~2%{^J{XS^cuGsi1XNIc$U8>4+AtKT@laJ}_7bO#S)ZBz0TK{7LWR z8KnTw~rIk6w~Q1*^;EQ5Tx+ zKFF)|?_|D=P+!|lB+JivZ!h`>r=2DyQ_n6>#I;C&3#Ui{q-@X}=cqUym_ic6SL}As zi|Wk29-_rV%@Mal^24nc$C|1oa=&kTds zzP)av>^Lt{tGuFOn5iB*8S_0uGaX>=m%A^|WTvE?g7A}N-ODqOX>ol2>uOD%=Ca!8 zs!c5r7-j@%d4zOOvK&0ff8m1O?t_9E_ol#W2U9&7Xf>+QmToN6c4glR$SN9Pqvey@ z`RbMW)rY4$LlVhloABFh;;_#?N(v<9g()$A+Ho)4k-q_Z?pKF=+nzK?JVBBGHvxEF z+C}t%BIsbo zHs-pr4vjd^wnrQy4B|NTO06m| zWC(6d!W2=wwKf?Z8k)euubl$31<1CIu~Ht6L*MoW@Y~^+mKGj!L}*DL`RJU9xubkV z#iS{+Q{QLx(z6IU1nW?BR#>0`3HN4*PVmAHm>`HU7P9{fynM^jXs&eBiS9v|HyRK`rf6x7s289&5HxmbTFBazS>GE(<~!&9}UF3jv_ z7;yNe=EKXBa1@!ClkEJNtFPziEY!1(AAN58>~H5s(V)18f=WihUQBGf*7|#XZKaWA zTG$J=<21cL=5xrR8AA#tu}|^auio%&p6%@1B8uFhNk0N=*s<<$ zO8m&zD3Rok$pjk0ySm0n=EJ?VqjxQ?PRZ@3L9om)C4~cs9X|K%=g+psE_*sd+IY3v zZM{^xy5`L4gCfnkdo{06A`Pz8*!Y|}#Z>i`P;P}LQ~l%u!I<=*NXd?Vvq78GiIuYm z@~Qf?2}WMw^$x>N!+ddbYUo5$_OQU<k4 z7HDW}76_GMT=UR7f_@y7B5C_{x~G`ybl1Z4V9tl60Jiy{qc*o{+gH<-TraP>f#}3^ zslq($fE2Z1bO1*P$0_5}`imoSB5W%`M^AdAM#rk^VppE}I(c|~Ga8 
zpQ-$WAYzYTkf%AygfM-k!((p&%SMg{B6P3!Kn-zG??rq=(HiwXWJW=yG;sS4EfrlM zR?(My(6 zqmWW;MXeuH64SvsX+7r?>3+)EWMC%;<+VOqFSIRkJYl1LX{o<$2O`WMg{OhMY%%1- zocg?$Q}5KrDo5UmDCkYwT&a8YDU}K_!BEEga<~ocg@~#Zh-M^jWzbkZ8WH-af`mPs zr>jTnb}F+YNCIdmXnay;Yd#n#pM_sp$=SctFA(j{`%tZZv^qYgMYY`LKJ~XzA;SA8 z8ic_sL9NA>B!z?U5oq=h42LF2jwNK_O4?RF{yhxqI4Xp^(j#$H3_?I`A?bN@V9I{=*UWU){LS63-Oz#gT|g}*)ef(1OVtf zyFMdav7?%`bw=MRs>y?6T3X4e{CTRgyp9}R6X4aJxhTT`dfpl8Zc30x$GRQrTOl#Y zFBb(FQB=`1eSJu>|LNZW_p}hvVdOoSH(aL|DrtV(wUtrQc^-HQZ$A5tY>NPt3 z|Az|mB=S~o*D$LS)@zP~wx8P_sP6xiS^mGT1YS<)0AjEcM6Zg`qF2B@!yqAos(cs< z>G~hHJ1_fS?$?iMKqwo0T2npkm{E^XBd3{ey&XGtfNr%@_JNXLu^+$YWQ1&i^8`cI zh-Lvqb@=$yVP-TGT=P>zb!<8%@PCtIob~)>G zb4d?GppXuM#TAb978T0#NuhxF%2A>nD zL>UOrWkS^-@aW($4!HLYy09vafq&^LM|on*)wg3X=5D6RHr+#E5~!# zhQNM{Fx87XYZ};`)KXi}=besvgxT5Imd)>iP~P_X^^xS-G_8M6-!%KMZrcim=ryo9 z^8g;eut5W5<%9V){Kt=90=*hN2u!~uh`32;W|ntZ%D6A3%)kgtn&~l6Q&R)7*aAK% z82)1>Rl^f&uj0%hV+i~TqoYT0>K?Gq?N0j8*BXgK9)LCOJJRSL}SiRR>B^!old0%;I`L%mDV_mgpB zGTVzXPBpyAVmSRw6k`PRbFcfXHODQ1lREL@8b{;C>LLlk7vi|6I7sHf$6F12e$dJh z2a(QL*IzpYj6Uk388c1Iu&lygyFdb_E^11%OyB&E_$x`Ly?&z*wZ%zj8wydF zDw!#+9S@ms+}>O!qJwR-DW(G?WYb_ynO?iTS?^^D4{&Q4v^3Alk}+1MIf6v;=xV{iER)I2HDci~&ehbEkq=&Y+`SkeiFaIkjqe z%Pfz~fVQgzF#`1v*ds3~ILpWI;p z5d^$!jdzNTVE@9M5K691{hhalxH#&NDn~YP8GDh!A%c*Ys*!B@^(iZA)}S^r7_nBM zDkw+lpy}uDlccn3!5ok%(STh7Qb=-R!Zit;e(7}Qhif5erOLsayIDY`D&F2&7ln`~ zaC0%7^6y&v)6bM6DKA0i7f(z|`W?S4nJt@ytKmC!ssVzaNl@2(XU{f`-(&kSy2y(L zP^FfhbC07)Ud`!~QT62D`AK92;(}WI+8GUyV{H9!M+K-ms;NM2S+_YAT!s^Dg@T0P z2(D+y@j@OXHCw?ozXn&U6k&P+$FQf0Ntp5-BtX$<41|F;$+Hj{oS&bEl@dnt=V;mv z0Kx#^Evk9Ah41`%opthbGa5_((==f(HZZG5zL}f53t_4YxSK)y_U%Kz%CRyn0p38K z$FgkzUs@d8P!%Zq=0|&vqo!St=i-lIcnsb$413mjLx0ckT)?Tv{IOrJ zyScfs1cSwgTFCG_T)IQ?7dHAMV()9tN`S}G0w6G>dWGb$GIyiMaEmkH#*G_LWDHs@ zcOkk8@u=kzW$?nz+4Xi$5j4gl=avELtiXE!Q_=~57rnN%xk|>(tXA+Q;2Q;A?u$Rp z#|Sb$1eJ(F6b!W>b_s1>Ufzkm@^YYI_Amikbr;$zM{1|TenCT85cF4bbabpLRkbZJ zfWW?eVetf{*K~@moq>=iAoY)}-YTdV12h-O;XZa0{YC?9;KmRhgZh|3fQ9 
z=3kJCM#$A-AZ#1sHu4G$i9^N_KwcD@2W`5bj=?nGJ30*h9?l`T3L7lFP#fYT1lS%D zTFx;ti!~SX0BK>BbRa_GhvQQmUc%hY^+c0iJwUZezzlVUtvRg}eGa`T+*<2seicyt zqO2+FA(-QRHU>GJ2}Y$;Fw&Ml36tTPUJIrw8xh*eCbLSW;9CP7FJ8QGhRE?0Er)A` zpn!l=F-8VPN$0%TQ5Y|~@^NMR72(X;4<5XOf*gD?D&J+QW5BvmCOwn9v9ZBMXoaFK z>1@NXNBCZRub1Cb7m+~=h8)(OkqfNb|ChfGH?mE2Xl#$ZLWqK=k0)Q&O)O&t8^iTnK_{Cm&cYkH0da=;AzgGrSlSt1BK zLXf+n=^8!Vu(a3EON^#H*k zh$++m-|;_LlGE+uSbWT;clq)(h2-x~trtG*B!UoIey0>Ze)1$NGLk3a$ufHj9-)8X zwXvKAe^eqcNGhq+V`{q85DR2eQuq)Ce$M#SPoqv4VW7Aurx0jK>wZp_`+KKQ` zApENj-P5IJrE}xY#AX^l2JCh2G>@>H>3+Z7x3gt3{r#=1O;>)}+bfTvb&SkRgsBc$s{J zIo+})g}m6T6c!P|s5`o{0Y6D~=gPZ3)FSaN8_Uxz5-Z)WulVh@Y>dC6Pj<4*YUm_8 zXB67?vFgT3dXd46bY}HaRASPL`n-#q*NbP%42pW)Gal?an%MMOVX`qnR)}@f&S%3Q zK%YxB)^N6qI04JyGF!Sbkw%Jb4mqW?*rdQ_xj5D&>b{sN|CDLK$r4fHqZBTYq_zmc zJx-z9+biYPH6f?;^KJ$8dM^V(YKsI9GS-rm}1EA_aSrC*TVT!BX(azacUcQLPiR(ES5How`Unl&id>W|911{ z(n|S`(Z$=}S~K~W~$BTpAn z7KcJrYbTc`mML~R!K@NaUxhkS-@Vg4bXvJ_rl)wR&SYEtq{6d(oz}Dqu)4yx>Y0L$ zUi>_M0g-zd$Tk!2M-%m6+S8`XGw0Evl~&6>a9L$W!@E}koZ)X@$(_xGih1Ea&)1`I zKE`7AW?Ea*G$2|p&@l^MBM=D8f);AYH@z3W3QvIh&6MwW_t;mnbmd!y9yw>6$!Owv zBd2UV{qxhKua)@U%XzIYKIhzO*x3x$&NfWrkbTgaHUhq3FdFUnENQ;4*tW=kOC`GT z%kz`YV35fJKC6Ck;L)+#UhpgQwIfWt_2%a0;I4n5UUchgWZSr>v<;*JqT&Se=M!zt z9Y$PrxlSK;F{Wl_DuXu*&BVQYnYy^>L}(TnjegB4;eIXTo5YHt4W;mUNWRaG_x0$@ zH~Ksmf*`;r7T>;F>A4pdDQKDK_)SD%rO&6F({gE|P5-?^GT`F=%zlg zqhe!Ob$ymQZ%XPMYOfe5ifI!aX3*N+5G&@tU4T-mA&x^}T{ zBS#A?nq*LWf|q4@&eSlgv^(YQkLM7;Q6qRQz{68<4U8|V`T-2x=WDoxoW>d}B(2*s zpL0%kzt?#C_O0mP(~GSoZjLNsE@#IUU=1|Ebn_cq>mB9W46WzkDN{ziMrF7yjC2** z#nIBzVkzFtmweS;@4~|k4EZB&HN<8GvU%&OO0Ff{p|rCj(M*m??Yz%^KN8s@dFig`SxTV|>525>+?uCDaSw->q@5D` z;$KjlpDzf}CuXGLw>E9(lL6;u`0Q+}L^QJ(-sx1&8D$f7y7pQ=s2PGFqkHBTTV9aEO_ML0bVhoF`fd z_j;UlN+o55Po27|zQ$#%9DVt1*A~Q)W9fV?FUff-SZZ4r0wFEmd+tY-qKb<4%1lq& zd&k>+CS@g`2Zu&StpTWghofxCgyiI8lGfgz8r0H=^iKzeq|CE5g~|2GHG^yRBD}o6 za|m#A69NJP)_+zWD6sCxR>Y)dW=`b?cS1&#*!)@9W_#c?(*FCWhx9yW&$eddl%*HX zkTX6CwI2W zcSvxI;MnBj&%wYraJ~7KLUs+`_K%O3Z_J2g85L{8{lYnIddON&7|*qL6<8Noweg@C 
zpsU0!4}E4*oVUJ-*HYL>LtGAcnN44rk#P52KL~%dtDT*q0EtTHV{ zw=~(I3Wi`7bxKW9jmt)J;=;&R{W8z@7cN}jBn(yu>u76-_xkL3bru>;uDmg?JE9k! zocB#ssl(7QsMl-7h~dbQ^n0_tXg+97R$?@AZqp*~Y;P1ejjN;4AJSUp<-un})*Oww z7wh2YcF!%8*PE{kw|PI!YY0DbHr3G$77<;2 zbG>lek~#Sv*TJCLhK3F;9V3Q!q;tZ;Iyt4wkzl}61mhf?IPdkeUhfT?>#^c`A&VI% zTZ&(!gmauWApvOY09tSo08JE?l{52IZEdrAT?gr|$A}84wLCh+rKzi%8s)tj8wj|( z((92^J3H`xG`0nRpf&O8UOt0D<#2P$hFA&mQoDh3e+34xNEw(8kjL!#H60VLf~5H) zp8)g{pXD4^dj^I}&CE7C%cbBxU65YHBFYIUC$Xqj*|y{tx`M<`kj%an+6)g3jX~PW zdDCz+SmJbD?u3z_4aI#hNccr$q`ujfYOqm@8_1!d8%$CR#|r#%{w5rQ7S1 zZIb1L(9^z-)Q$}&>*W~*5O&vsp23JPhFH<`{yEph`il*zNl9`c`8TWghrM_)_Lw)j z@mCUd2Va47{+1`M7QfaIYFw-DrA;sc*v)K;C9g|t} ztO|m*-Be)Hxcd6XS7uwy5&A%WQz%v{@c=`}K3N2KslYxA9ziMu0PX(cjs<|{iL81% zYmFvhQC-8tOMB)f!rB$jXRrJ8@)H};tUF=eR+u9=T`;{~Yj9Avqn#y(n@TjzC!%n_ z_U?F5Ckz#GztD=J!@T8rCEzC)VWadE{OYT1v%&NG=(lg*v`4?iv=vzEER8kEUcGwN zed=ws%Esk5pY8YHE@600oE$%Dsx#)^1}(;v>~zWZ8$)Ev92=ci8|m&)vPT8m4PTfz5Y_*3OH1Uu}2pF z`=NtIo^w_1lUYTa1mKTADp4v1_|5A1^GcR+seVuKy^g)-N*enMu+P8b+7u7$SU-5! 
zQSReI$11K1e5MurSI}+#ca#!QitKcr9O1L_Dr*=Gg*rmdtAx+Dk(ZnM#*G`#o12>t zv$2_&QT*zpS}`U|11dbTBRz!;I+JeXq|-cAEGC1JHIL>Z&%d#rzPb|+Bvi-HJ^+Xl zJj{awY}r6O&Olb&SecFBWRdbVJay`n;gtkN;NHgeP-R|HLD#HDdOiW9ev&c=W6TnqNLglgpa^IGtX@%!*S`7Heeq74;5g0 zZ5S`_9iM>!#ziOqGv2&WL+deMYFG-CxvU|PM2im+#6JKrR_6xWitpJuk2fdRdVw3A zDx$Es=G*m2)@xxO3ZCaz(%9`3qRG1KJux)@0uv*1fvq1X-QqYYPLaoM9Fk>}sM8pG zMBSmaBh%T zvH1SRY@(|mfgrq>KRsk;wlNI5 z6EqCahi^_+DsUK9gtwwjxqi2}$ErQ^Rb?9nA*%ggm6qkxeIpI24k*|SSQ@I}2xIq} z@n!^(e%OSyI5YE>3aK#sr*sgo5z(y7O>{gl_-6n7x=W7J)ZHaY6c>SSZR{F*Gm2&( z)?9KHKP5LmYMxkm6cqaT&M9gv{R2Qd>ATNZvPCut!xU>Pj(yF51%gn0$3Ufl7TIkO zkfo6ENnX)ly!51`qz{u+Cs+Uy@mdf?BIBOdw&SjK+O&EFs&d*b2&M9$wy1&g-u(T!pCD1XH0=Pg0~g zv3msol*hyC{?JDHh?ioCvsimNLA|7S#OokMv3zq@lw}5r5_Mn^8QqtD`-J1u%awy= zOY>fc>azgMXTEgj<>#PdBM1 zd5K~0@~`(Y-IadT*De;v+Ftwd+|oBvpw`G}-YZ`+`RB*Iy!%#lfpv}4zj_U@r}N&2 zKzKA$x?;Sw5Vz9`Tr}Rmu2d^IxnQ3$hFJv5j=zV9orF|*;zn}{ z-H<3Qp)pG%{s^=v2A%>|QmUz`fikE01-Aw#p?srfR#RU;9fHvMXa9%Cj~+Gs!6EC^ zbd@m;Qa^cfzV28)D?78Ei!SM@!p(t&wIP=d2aS6Yf}e}bVWHsd#xnfd)fwUh%sRc3 z53{u!sCK6@={R{~uik9=F0}_-O%77tpUp1MyIg}1ZnluUmfW`!WCKx#Z&m!I)Hvth zp+l3-`{mg55nq8$&BdTHMe&TT>B8Y=okzDVAu*FTyC2w`$H=6Zdth1d0q(9`uNJgsYfk@xtNUYLyOT5j?UgZcMo=v};h+IG0F44P70`cKXgLz$?t{x^0k zS1dlfaEn}amI=Uca90I}y(+r4Q?S62H5nV9v6n5P>YAy)ZcEDbVTbo9bjCQh>!F0O z^_i!98!R={%pA}`>l;_co4wq<`-&O!%XRJrN*BE6{v#2o*JVoJQJX2{9xA1beWxPs zWKD`bz%yYIh4#gKTUr1BpM?6V8LTIxOF!|NJZ@n18>LVs`f>sgXzPKG0qg6NIT<|> zmZtS`h@2XZnDEx@Q=WeX?l&1k{QVAO;IY$B)fkOk2P~zk3vJdsC81@f-*>(V&h-E>pT8XKu1DYeP?^RWxlRih)ptKsi(Sjd4YxnX}k+- zO?`qIi^xFJRql)o0E8yG=_K`uX5DaxTpHp}Xq?kO12nU+06Wm)u^5C7J$4Tq8=rtj ze9y>0cR`REV#FNhixAgOAlMrZ;rP@8uqhecWJ-@DpiKeb^_#16DiQZv{h{*6UM+eS`j7?qykzz-fr_gE;YNlBC3GNuTkyk1a ze(vuk-aqehoXg=PgmXe56jq-!HzyPtZHK|VS237IQ=r0R0Epo^*J%D-bn!O_Y9I5? 
z&-3uBzB*X;8-mdDK^wdODcXA4wQCmupkvJPIX*ruptz=;qYDA)g4EVs`SamrY?|x8 zw@ppG38z9H0^l=SK-tYKY#R+W*aeM+j&YHH?WICfgzX29eb(vxhg%&jf< zb9AJti!<@O7AFF+~!4cf<*!BhPquwQ1K= zwfJ3l0cy5mG{|LeBqo2b*B>|{(H>j{!LpmOgJZo6f*p@x$H8*_I40iyB!W#n$bme|?0`u~vc@cK3j(+yLCd0t1G{;^JLM-Bhk!6bwQi088QW>eVon#91Yp zs3gUZvayT#CBr%4+)!r0Bje(2FI+rTZW8E?BbpdpOe{G>r{zz(z-hGqZ96SbGR zq`gS`!P-ZvuQ(YIQ&D~3H~;%;4T99fd+k?aFe!;s$hut_@>CmS&S?I<2;$F#X;j-; zYw&3WPK$PV>`LYyz|bID3O1s7G=`tC*7a3Mf;z>y4j{u*0yvKDrMw@^mOnl{ zT8HL(7I04LCC|79t|CfLg;oHK-5UGQ&^0~1Ht6<*U$~gjbG-IZ>5ZeabDvMOb_91! zY5|YG*5La^77;{SSFJfFQeJaajQT~kG2bLN?raSl@M(nR{f4MS9Ad!~p_D%EoQE3L z>>@@W+BI01pIX`rmXDbG=!2dN(nP_?)2>q4m3Nm^Ew20iHpTjdP1(Ss98GO`odwS=u%)2I) zl+b3}A%s_-9R61qS#ced*~>2Ddqi3m9Sp>XI;X#xNqoYOvdHvqpTG0;8>dfPRZ&TX z8aJFXOYh|MIM(G51o?0h<~h({>Uw)qA;1}Jk>e-?}E8vP}DT@ zZX$x5H~=O>8_q!Lgd9hdr@IQ2U|!J%-MJUfpTiX9p&ZF&$3vXq2p?gsqhX(k1VVJ@ zfE(Oyh(()TVCocpBk~z8E(NsZ00kRr(=!+IJmUk+!|u_Hj_eM9@@<@#FcG@J4#Ap8 z?I(>toZ|aIb=X8$)~j87aj<;H&LLoWA9l~V-oc=~u$vY*d1{g5`vW|E7q_Z?S`Ok6 zdVrJoD-ibV?f?;jOn?M(G!XW?ui~hQUlb7P#(ot#-%zKZN(Kz#{rE-&PEjxz`%JsZ zHLd-UD3kOw)1Rm%{iTs*AMzYfi#-YO1@8v)1Q|3pK8L7cKZ|Y>flVJDpql`5*fU{M zzc}3K%}Hu3F%R|~x3|LO^C?9dmt{sDuNmgqp+zk2DpLo=3(z~Q3biX0j2Q+uOV(}J zU+pVX?^O-Y%S*1W|H-&p4tM&#{23UjLW*A%>QZVO4)LBZ?F{i=AN^K%mce#O_)A*1 z?Rc~!^N^bKcQ}srtqLPjaknLfpr_|srLog$-qjgtPpFKsozk_OG35T_qk&;h{1a-J zyo1)&mt1*VHquK@-Ia}2C@T79^QL8rqSO1`wLzlUWLpL&=LE2gLJx8qpoi8+-A$># zE!ldpDs>!EoY0rFSTAkzB)?^E$-FtNFD*vX2tkfAp;`cqf4j@Fy2LPR1&>hwiuVQT z=7SF{uAshZX^LEiRQtG&Rdv|w)}?N7-;)Z8s!~R*>my%7KL=SidhS7Pz;agT0d8rO z>T0{)yK6fWT<5hBKD*HG=P!*|NIPLpo^iqi#=VW0sDCw#Q46_M|Ho-n+c&Z;EKe<@ z83kl~udpL0C^Z!;{eDurKkTN3dxZO#o8l0dQ)A>L;gJQ;fT6~u!6kM>n+Y!gwrgYu zlO^s2&zk@jVqZryhcUY85Nx_H4@Z@vMn8<6C=VyLpqsW}(_MWyikSwk+6slT^xzfP zDQYibghb!a)=q(b9CYl{^zv?r!k95EH1s5;5F9@>t|m13=*8Ndt$x4e`d4%=W6w_} zlj8UfcrA<&20s%FFCdBZ5OBfb^AC@l)s5$hb(LZBT71sxHbnn5R>ELG<&q83G}Vf? 
z&}&{CABKTLNS)TU5RUYq!pA7~BxTs@%^;QZW!+2HCFR$L&-tXmFjxX`)YsW>H=R0= zm_-=4bgci0lq|Ie?P>Yj*j>fdjeF6x);9cxqNShcVqSw{+z!7&XDw~boNs6*zVCMb zpF{3uNA`J0cqkI^>E~bDS@GE+Ei_1(z|^NKyL{XI>#K_jvVr@vmewEx4yx1LuhVj5 z%Q7dJcrh#vOus11T^yXVAuw!f8iWt6(0F-DpFmCsGEx7Q3beO&bYUFk0-Df|fB`Hz zGqS2Qv9wHsYJ{pzGVYvVGWJ8%rR%k|>E6XQO5EI6*tT1x>cX~_Z{Mczx`W_OK~JC+ zhSHh9ov#2!X!@b)F|^!C*GKmF z4pb#G`$tDd8X%4i6O7F=@H*h6f)?-?fayaHTAOp#pFVvt`MRZOZ*PzG%6}p!dS7r0 z+2wGT?6Tif7fy|~!mv#o))8pF?-WWyxbqQAly3VXq5%N8Df z4{!^_=N590;slvnN3cX1qeq6UOtKguMzAi68e z03rq;G>JHfZoCQ{^bik0(V%fIV%mYwLW-FEBlP5Z!(o%j$)UH2)87&12XxdQCYQQG zT>a}+zoBO9<@I=18HHB1(Fngh9lJ0r%nrqwg0)W2M)}!z00We5#t|Q%9p%iY0^S)Ey8@TwQx}Cd^5xL#?iySg}=OdT+o!8ym z#d8>BUepkvBXXO^|7*|i+&qu@spo9$3E&`6*jr7z&Fy6 z?c#CfnGK;iQfQb;meu`C>P;$(%lXKOGx^8^Ha=pIQkGPMMXBoSi<@!ovxzo(x$kIx z=EI%U`dMM^WUPa-rDH#XlW#eA08iEpaC2Wgi0X3ao2HgCyX%v>FtTFpE-6JgQ?z%jr0uB zZdv@@Ymt#fwlhw8^uhe5{#ZVk+`k!fUtw7H9I#@T+-Ec&Cv3Tg`Z%}|;!nqcf4C2< zDy>V8#f+i@OEU!=b@Wx(clkaZcR5fMiy_LN?vi&h$>!J3YH+Uc>DBzOM#aRk&mRL< zMife4+f0z2IC#Ya-6V9l)6IR0<;^<0;WWD4Gpj~RC_nfift$WS?bV4Y|GTU=qAqU1;h9KT6>W; zF(GPiO4*Y;5lWGgrJjL*F@pGSIATyE40K@rlD)Pts`K7!d8*SL@S*_5NTnMy#a2y; z0qaYXq>zvh0UcVTK;L-N8nY;ou1F}myZAT*Jyl9H#~W`E@E;d z$}U{9cReC9GF@tWE_l5qZkrSB-NK}(9fsU6p6wT9=@?xAC=ATB)jTqxB-AA-aiS|= ze)MRTR9%!DvYta0zlxBUiu()qwUZ|!Vvg(M9P z4be`9hK5p6(l1!IpMBpmQ}G%!WE@$C5T2U)7wgMfVblJs!HbQu1KGHT`qa{`HA#ff zp(S^!U>FjL8XZJhfZn^gg&CmrQ6wA910k z9QxyJVLHZXTydW2ptJFgY4{*0FiQ5W5}J8SZa_JrIduK+1qyI9VGJF~iO(t>aAoZgYa|JjWA@~nY!5EpB%rxJmG z!JM;Ux1y+O$P;rW%RBD-Wy|_@c}A@4*1{rqrLywAaS^=R7XwH?#XAHW*y5|5U`tL5AtM(Eg9lR;cyVo3qYpsA)756qF_@ZqsG9g{X_{yVi?7Zy?DMv%Mr!AYi_ z=y5D{x#+~B2PAH7GkI`1 znmW4V4)CR4ct>wq61XZ4A0OS>4bQT>FLon{43Xco5*>8=(y%r{huLbAtslf|&?cb@ zhEa0hTSWwEgqF^3G4IWHw#CzzIt#OQ?LE@^ZuW0+`XCKGsu6^Btu;T3JXUN=*G8fW zdUxm<8A&i=640W78$CiS;zI{!Lscc4V5$wb9C9Kc2>VkMA3LI;i~ zuE=Fte+jsp%BU$b6T8ECI_1HS{}w>hCm1nr>^YDVhnfCV20?cC$MWJ43^h^yK~p|r zB@6Y^KR)K4p8S7l$0KJl^mjhwo+T+gyj*A_W`srjsnOi`-;hRs$7Pw68v_Ydv$9G* 
zTh|OyoAjQ0t*JKv=%8+=yBPowL4f2p2Ffy2owK>NVBKxEHb30;&i*xI!b;7f2=b6x znYstpbGMR?Jn-O{Rre^c@m5qB+Loa+`T3aLJ`HoQ?LoBj1w$T~U{8YBO#{a5G5p{Q z{zv(UyWNMLD;z#{ECZsUeNE(VeyN7~B>7G*YR1aGs$v-U8NpH+pE=5={KvNwMp9jy-%Ve2qI-QC(N;1t3L-| zYR=G(s(2~`4|F`FKbh`jY-kw$EY|d-6O1{b5yJb%5OlyFKgyuq$0F*a zrmn6YuoVT8!51$M8{Pe|Yk;2tK`s-K?>L*z+^glQRp+pEs33-O{4d~VNvA~+Ia=V9 zl<^aIuYZTm3!5sp)&AgiWjk@uf4%E(FbNZm|Ef#Jl;V3u?l(D}6PQ_9B#7a^X?@&( zdCC8&yPi1nr!&T5er|LtQ%h4PzkBz6{2+XUKq>VE&qMgdG>U`+cZH&cIQ zWz7?tiSbvWMCM@j)qc^LL z>>f+0bLY=D&6&7=h=)Vl$;o3`G2}{K^$C~a9uT7k;@AEi!IkSl0z(otqOQjuI`mr7 zYaLvLJR|s;iMpr#Y&7Z8_Bj^JYC% zzjN^5!Pb=P$57OUwp(C}c})1<=OyY~|Ex=zA#ZMOURz%eQ@_YaA?p;!1_>R|0@JT_ zrcZX{bPzFAAaLS=obX%KV_FKV3y6KX3T-k#8&ESiHwL(BpW5}z#(c_bu^7DgL#95$vp^OjZzgV9-|sl)4P zYo{Ptdw}8gYdfz4hFB=&c4QfhI&l~9@becoh|P+N2@7|WjYL{c*$;jeF;dTBb?wJ& z)bi#yk)O7>faa_VIAi9U8IaT#NWc&~kS9Hc(*v;5*Sx&Xj+B)29Yie_raho{wyv(K z>VyHSW8x>P7<~^dkoiEF2hCZQlyoZ{qaG8_xd1Ov7E_?K$n#mwi|gFk>f4!?b{2rx z#=SztwxF;K`$fXv8=@+5=prA_oLY1%H72NMXlEVIPICEcOvYO>^L&PFXEhL&0ieTg zRC`p#Xp%{ab*~-IwQ{av&^dGNoGNsLUvQ%O15`-lR7od%*-)>Cfq}sixI07dIZ*FL z7`f=R8j;l33aoxHh;b)wA{iiqg3 z`E0(G)rye>{6{6s@TL>Z$xm8_TgTu@G@)Yyjx*)NO&ZQ?kZifXYp)Z00$?>iRD;SF zK0jm0NVy(00pRIGF0n%8+8}*`ZchTVtu>+RcJ$&ci?PN8l?d(IC@V_WEPmxo72 zcqAoz#~oCj^v$B0dO_%X5t`9e1C5j=cpyCs0K=;_|a-;ca9whE%;%Ff|TUUMhGzR(xhIY2g zccr14*{|j7jw&Fjf zv_YQ&G{|Wm$xryn*F2V|>{AoT?`O+41nqywp>i*D5c6}OQpoHh_18$Dy7A_Tsi6X< zuN~umk~Mg9dC$o{kaRo!?7xD3VRAQXrnl6RkO$Qp@j2IgV^Tk_F(M}dB8ilE=N`z)cnb*byd?ul3W@gq8SaE=kMN|`N zE08kSlRsGjL=-m*iv2Z|xCoIG1?jqGIDOSx@bqbU@AAHc$0{&biG)+3bpGRbvlWas5|Wi7tcGjDK$eE<-aG74j8^u685#OAGBU{?hYf(!gIeNY z`91rO{(Hzj9`mA}f!g`k7Y}^;Nl?v+80iP?-p3RVewhkg(TVQ&dA%O95@?eY5W-c9wn`9LX%@U4p8I7ru$9!n&dO5AT2qe`qrJbu^~I_uBZ1 zx=~O@<*OhQ*=v{qpQ@;Xsv(A_xV>YuHNGo}a40z`Y3%-9e%!q@fv}d!KblsdRoF1o znBdowZ>dqNlViLL=pDnKrX7;*DGrTll?WI8L~~{)C^bM)_;m(ClZBD-wu$RNz%rDl z(Qu?>TmvhN#(NTEX~ZBhB;kXNDNxfi^7HXI!wa=%8yO65tU-w8+Z229yo7Q_Ohlyf z*^?)E@R^YH?Dw~oorykb8q**%X*13Mrw;l~&Oj-+a9!caJGJhRX#EePBBcMfKjqt& 
YYHfDkD3LS=PL3dit15EYGN$+c1rQC%e*gdg literal 0 HcmV?d00001 diff --git a/src/algorithms/learning/VIN/results/28x28_1.png b/src/algorithms/learning/VIN/results/28x28_1.png new file mode 100644 index 0000000000000000000000000000000000000000..4995df1ddf8e761654c3be8fe53498efe2f4abf0 GIT binary patch literal 14453 zcmeHuc{r8*_wHkgGM1=hN+e@?OUbxRX%otrnM8)jEc2F%H%S?b%rhC==CLSA*ybW6 zWF|A)bkH0^=r*TEc8!`De1A? zWU+a+xh4kuu47>XIxl+?Fi8T&+%b2`N%yg|XP-Px#e9HA_&IWd)OmO3@R8@-mwyXy z`tp23lA~9&cU1c0$5^kq=N=2?GHc^**P<-;!ri@d=re*J$n{`^3PDT{lX4;m&Ws8N z4?Q@BkRr(82mk+x|C245G;=2jQkPCpr=(-OVq=rdN*6dVh9GZl=tZ#{JsKGq$&(Sj z%Gt?)?0-r|NoV!r>ua}Y5@aHEdvnc}kSOaG?SJ_A;&HC5tSq-^1PLOoX=_nH&VKn2 zDd4)ZF)qNvqv+w`;W=B}E50m7w@He*{&n-qWpYe_)b?^G|H+eA932Y>?~KrnjgDGP zb!8EkJJn}}X%WQbSfGVo)PQ0gJG`Ccy!{K`waOvvV#24>cYc@-_-@V;$4>bjSu8Zp z*PWc4EIi4G5bm?EutY>fS-KwyI<1e;u8c*S*o?QvIaLn$d#Gh-+Lsy_l-k=B`#L+n zsxrP!fQvNnwvwLfN|FzBSscY$+_*vB()Rv6VROEA$Y#5vLqo`*L{MjpB^e-=HdFlxBk-j`$!O7`Q}`u_LVCP&sIIFX~@X9aU^7WtQNk#Wsz`u>A5^1 zi(F=ivY2f6ozZi9rMK9oo8M>k^G&ztd-e|ytIVD4O#wbWtgEZ5?9%kq)TmRres02x z7cWOgN3TUva_)f-NHW)lpV{I53t-ZRp%0TCx5u*tJvjJo>S2(E;}Nq;U_X zx^tr6Jvx%IxVTs+nb4N8xag$%Oyu>u4@iTurDZyCZC-s{#gvbaZ)&L4FLt*u@Us=k z*=w@0{>*~c5(~{6ZMN1IJlBTPP>rTcqJtU=si$ixHGBM2AQ)5m1*=c3B2un%7tB?PCZ zpXl!DB2bFR&U`9NY1s&*W9vv(3|?MnlqLd#PR0*V0FwnyO;-b zY}s%qQ^SEA_G_n^n3!a9Z|~hZWZUgUIw7Rm0j11vTTV-i?y%7a(MsncboZ?l#$@8-zK$Y z_`b221{W%`Q`Rc8yCt0KI#A)-S8N-dSGB0T-Cw(H@ZMFO9*5CA4oLG&Sx@hIE4*j> zN1#jucUoFn79OuvyR(wdvtcmmOYBUrS)OQTk@hYtxjmGJrblYiFc^f%3@emh6{a&iZp5r@pO8vOfV;cN{Jd;{u2zPW>KR!R>I{N-8 zU9Y6$HwA57U6nB3#qb0x*hTaTI(deC*ow75FKrW({4ZY&g^Vh+!j}DZ*Zg8mn?De) zS-+lpt&oeqpHA%lD;x=uD`+gZJ;G7wRejrA-gCMAaW2H|JbJsbOBX}gbK!$di5ThX zW}F|qBd@KktzTg3?>5_)rWnlJH0EU8o~)P~|0*v}?fe&q05lJ+;WqdM1Ugq|`&Zr{ z51pX*TMtlHQ4zK7;BJW#RS8?JBcr!knd+`sYL$@CT3Tw8$%mwYC9LF_WZfRB5%=9H zL$}z-eZUpnA)S2V-0W`u>z^F!X6^VMtdt>V*y=qBkG-T1v#dUNKn9d{xdIR?Q9 zu+f?ibi4K%@j_O%rzlNB8i;$ZJG@^(&(p|bEHW$=E;ijiv{S-u-4L|Dug0g6+j^+R zhxnS_H?g8ZoK3>*vVCdgfVgE_e6De|Cq?mUu2H2$C@qym~LIgM&B 
z#I9}=SHih1Zr+@(^Cv@-Poic&j^SruIZdWT+rZPZOWE1kF7V5qbajrX_rnf&iOW~> zjjKuhB;s1@@ST%g@WDJ-_b-ltQ9=gk@XIvhPx&0nkg|QRJwy>=Lcmq zaVkeyRkd4YyHh5syu7@nG2VBhrE?ld_LoM(`I}qi%EovC`Vo=bqUX*{#e2 zjk_7|aWQPU$KZCBQYgE!y1FjlIe_?aU!(lqoR1$bZ+J^@L(n~%xPU@?OiZ?O>jfpR zwK;7I3k%cdos$u56hBav3t#&rezb?iZ<~r|NilL$_3}|Y-Ar&$z7teTX9*yP0qzwrg_(X0`TaW=ZMl33*jjlCXD5T3%{ukAcUfk8QeD*XTSHyZ#?rVggt>n>2A9_miDB5EM}9Y|?7*c__~#;ISa!vEytNx| z-8m!7lU{a!>WwTlCtB>p_L3t{uJpXrGk4#MDOLLQ;D5U*m|zW6auQD)YYxbeiWl~3 zikI@@KYv~~+Q{v>efe0Fnvs0(m=ku<1UnL4yES&Cx4+W;_E%y(+se-7yzSy>Q||YC zy;m2H965p}>W|_2V*7!LCvC5Mw>@V+zv30~fx4HImBsf`AJ@4baP8^Roz!HmrlQn0 za!s>4o)wvy&2px-zS~;b+G(Xjz6M^aGuBLrYVndsR-GTSN$AuPyYpMiB*T!yA41L# zW7>hEH>-q8%tkRu9tWnyjUYdxo5wWYufg_ zY*#r|oeI%6{i;p6b2`5^54~e3D!s1*D2tuOl+kn@a3AVkp6mqh?7jGzheg~a6U8)m zq16N-35A_>wK&LI{sD)2TKyXDpV8fG;%7A7ldI|LTf=SL6vo5P&);EiyPBuDsBil` zpq@Nkpgh?g8bT^I9PhK~23TqRt>Jh5YR_UQz02G4ekKCCd2q0i8k(9s*h5AxPX|4x zh0yJp%t(0rP3QjJGG-AZvG{Fpn%`E6`Sj}iDK_!0)vaFR@S5H2)r66wLr>U!@bzQe zy}beulW@7A&B3+h>Uke^D1;SL**T;>#FAT|lS7W4&rVL33+o~f65-@g(5M)X_u|H$ zx!I}(h15W5H~3Ud=I%zq?o;lSiFSEwYwO&e&qWr&@qRlV1Ae>P{xN`VNw5JvyE%SK zE4?Q90KTtU{OTFiC6my-oq5&Pqp2>%13>GuxjIQHv+h4NHRS@^fKjnDP^*47(Vn7h zWRwdz?{3;m;)}wDEx|JQ|T;(ZGSD!I0j=Cr{dgmyMrjZ4!!72~g zY034k0ZwA9ae1$c1f_PjmOgy=urJ1s=p5)7cl35up>{&O#9WztbCgi$XjAwK+>1?T zy6R1DZzrcB?xsnXi(TXUEk_tc5KgDrl?4I*hpKFL4wVCN6s})yeIBTEhZMx>8hcoL zeKH9^i{AA)j!J03upPfM@^*dtl-bqQh1sptoPKFo{+q})pe8oV$;pZIwa^#&SI|~I zfBt-N!y5&)!_S5mi~U=N=$bN1=B7;v>Shx52K^ zNREw<+qT6^p>=v^wX~M+`0RR5P)9c^XJe08-R=2j*gB5 zEGOos*h_~sHO$U#T6#uC;I^1sX#-P;yiI!1v~zn`S87ZQ3uFT{;yjDIi+8q!{CRkG z>Z>!-qlK2}X#<{pDl}J$U7}|bPlakfIT9*k@9ZHjFCXi}`t;KT85J_|;?0|{iM>uN zn9}-VrhoQo2#q{|5F$cD!hj%kB5?A5G;x1DF#@|KKAYOsVJn(2-TCeW?Pb;W^_d!F zr{BcH2u3I_#uaVE1?ZP77_r^5+V`>fvijA9N6lyL6w*T?NyHfGNXQVb67&^Y8Rzf5 zN#O22ch-6#VUY0MozCsEZFEG0)%uTNmJ3d)?;f+|z#^p}p3G$02CKc$U~!YZk6HT( zl%sGDMIrOqxdKNLVoGm~REkdq?-V~a@mbN<*3kg~P`+KeUDA_pk`F!pJvKx=UJ{Gk zdy0ZkP?gS-M^032r(PJ{xwDhSF>NV3ebDx@h&d#BuFmjbSju 
zPIs5qZrd68Zj7-=dE$*d#+a}gii-EY*q1^Wo+$0l^_s8NJyrGNF7$;fqv7fot9243 zlIb~-#7M8+>Rj>p=k;-;G9UZ*3+$}s8BX#Y>utKE(*E%CaS6jA_nIX(rbCBpVdVhu z#P(3%pBN9V7OW$Tn@z^o@aVZ)=olxxG0NET{ESk6>Fr;ji8fV+!qEU|cu0cnjW)lz zAJb$Dk70VYN;__L%ht6zZ@0~EHqU*2O}O(O^`>IVOI%l%R`tgB*zC;AE71JbEXB=F zJii&gSHH1HaC>W9rdJ@pDmgWk8!dYUGub(LVa;O?ZB#Lm=w#0In{;LVZ=IT2{e&({ z8B8FzZiU~aXL*DaCHnFKd-%IDKV$-TOQpX9yD-&!susn~u&^+f)(hk0Q7dx;l`1x* z-za<_RQ6+?#I+tA=0uDK}L&sX|DeDAcjr>EzqP;++`Lpn-bpP-1pCZdxP5(a#HN$MR|^tz;l zLs0hnUieKAjtS}GFLC&7ImR|0{W;9`M5ZMj{TUpcVJ{wMCQOhdPCHN?{tt5Q-zDSO zuI;s?G2p1I%>~;oRCutqRzsJl~HwQP{(TQ(DQ5ER0_BnfSrG5zUbk zN>rKeFI9hav#p2XkYVC!^LJLziFi)umcH?ghatd3RaNzBKK;ZT43e9GZg9mkYT+^r zI#xhu;Qr1vHZ~TTfB<{MloF;`y;qjjt23X86Dgs1=W#U;Np`5^{YESd=c#ZHJ~TIX z#)#VDJ(et`;1$_fS?$`oMrrTfy*t-?N=OLZ`LE~hhy*$#pHix-41I{p?V)~KBXw1d zBkv~rZ1cO(t^pl=0g?5q?i51*#fRk=7Var?965Wvfoj0HP0B5j&gG~J@wYB$Bgp(h z0k2^?6Q8eITK;&ZZQ^_EZTsRK=rVHe-o0Cg@15o$C;k0ar$&>P$g;R9Et^+fi@OTn zWKu7-Ibvt>Jewyvu&F~sZzo*2@b2h}$?l2?%Nff@d6jeGQY&4WF*1HNb5%=fKs#Ip zK4xflXKe+l5+5&bhWQHF-XMeutC1k>bJk_z3M5=c%UDxWlQMMGj@rnr&Zi=7iOQb0 zG&ZMI1tN?3=x()(c&*-=g+5?tqt$(MY2&Qr$HUkT+Vfu$#}oXFm&V(4Jv_?U?|dZ@ zJ%2t2X1n>CO<@v-i%?OyvCyhrb#*QN?aY~3VG2YJ9Zv{*IiN4%?4j+rx0f+lpPnI< zHOfNI?U4Wa$ckrV{GeLK+_{2Ek^v>SHyaz9h`6|1;6~oZkN(&pjBKsUHF6S87nwIc zJ~z%G?s6%ZSx9{3*y4i+dw?r&wXmLeU`2v9?ro-|>3s8jqaYFKBIjVM}QnX{9)3C zAHao@p1sSTjB-YpG7esmR5$dGJXD;R_A zx#;4d3jE*A&pu;!JA^by06MOk3pP*PRYs7B!}?HgppBvC#7#(4nTYH7eIKjd#QN-M z%wtC|45aX|c$Eop;P9lv((~TQr-B0|;S5yBxnTwoct(dqd<(lXmZVQbf*3vg?^4sr zIqovMVtz@>msayBkpc=bS+bx!>45D3NsgReO7+^LB>FGe+fr@4U+s2>dZ!(D7?1)c z5{A9a!w#Q}?v@j2qKGs-hoq`Ba@9>%r+6&J`iz(upxJ&qQ@q|PFD6D#^Y^i~cLJ!< zo<~Q{L;$-B&Hc)BFQrhapo*fR;)jnPyYz0x@j#nGFSA8Rw|QD1$sST+?nJ=9P}`M4 z>U7BK$k8bhVEyU5gL|7to<4jy`lNPO z^ZSC2#?_}c^me@DIFU~u%-$9S@?UQqcCZ4@4S+m~+i%)LM<;3ra8Vn0p=j=`q$F9$ zR{d9&A>Ui$`jf_2Jgb4!ps!>XSJ>p)YHXcc4y?|<7It@#Hc)o$Wtu?1A)?hwrA1t@hBZ(H&C(iDAd;8KB{+`wu0m){+1r$MovOn zSlF{jQp73*v3l0mr;pXRc<~}(>(Qe}bwwpWEAvZ9NjV&qe0KT^gPd7XOrx4s?CcBs 
zQkNm0!aOR(frXS)d=yAE9q66EmH>xTsUmm&`*w6?MAe9&LFpD&L+?4>+vBAQDe?ly z-^0<(JlO1<9O@VoUEM?1dZI>0ZvaYA4B$#wKh>CIg>H*4D=R7%^6fzgCrA^`ie9}M zb1K&|HfDbY+fWRn=P!Qu2^HO~q5P4d7hfvI=JC=QVfrik$&m(T5)l%%3r-3!pQ4T7 zsLVnN^tqd>v*Go_gfJczy!VFF_Da5AcmM6$T#nuKMs)ak!AGO;O>13ZzCnpt)=06q zGitJZbJJ6@BT8t8QHGUn*T*brV2l*ket6Fb5?~r842Fn0IUCf>f&@4}&VR3$0)GDTJHvTS$5j;&p^F|g?P@uxag=nY`gZ8t5SpS3M+m znk#)oeG}G9<}|)D$=@Yzp)d4ZE~rG1fftQSD`h_S@7oL(=vhnboG)1a5^E_C!YGY! z<~bao;(CMSjAPX0kjT&!&ox#N%A1{bxFl^-IyhK(PF!|<65C}rA1_Psn7jTAp>sL`B^p<$F)H&GkAdmfs~5#Y^air z0DACxUmF+@%vK@=!~P_S0_Sg2=1LrO1r6O^!T02-IZ;jvN6=p;xpm3$z9Ahlu>;+y zoPc12va(CI(e!*C-(S0*J4S-MQ9{TGHE-m!TfI+O=}U7A(<9#w{=3HSkMRt%Dao$d zcZK1sP)4U8JrX2ALmm<&!r(IA(B0UICMRilpr+wer`G1mV*Z1>ldb&{QQxMj73Iu0 zt?mUNK|u_N1aF?So8ew=LC2Lrt|U-`=wLXb%6%Itt^c2F3!R^RDc&MDb*{SFoNsQ> zfSX1i-U;A(@B6|fyZbU&eu40)Q=zVk2u=+u%>Vw@Jgx&40Jkpp-`y92bf{AJxr5}Y z&6CSRWXLxJV7~MG}N)2=j zddb`>)Bf{7`H2t$q=?IbKi>=CNkks}y3C*3*ON?dD*sSm)Cs|?K`=1rnYK?cgpVtj|cr|Lc z``z|PxGun6vvin}0K%TaJ>=(3nsZJRxU8*3t?`w78pJ@o zPH`&|1qCbg^EW@04;1xPuov_T*?s29^13~LZE9{_XkPQy+j@6)p`Sen#)>_(OJ_Yi zwdgnNjm2xaO02nsZq&3)9`7nYI|tv9^y$sj?R|Ay26Sgqb#Y(_FZ`STsg1RH>(;h7f4bfq0)C)j#iIenLd=$#}*J$tP)l z114r=&GgI`H=0s|iusyfheAifTUJ6xQo7u(#4-7X`ji%M2O!XOd>3zNX|b`lr@QP` zJs2csD#=Rs`+W#4pKeN#(Z$WV=^ntIicPQP&4UIDKjIpXnVxAUr807OO#xy|Elh&W z?_9&e!y}`jm|X&nJq!*wVX*z;{=Ps2Xa74w<+Iqt3#_v)=$!*;i4r#daPTJ*L16MrQ|w_K6NSx%v z_8RDA^%z1B6`AbOhJUSgP~&7)9Y5XoPux#O%E={%oTbtw-AlbGHY0~9plvfJo>`Y@ zwZf>Wao`oWY1*MybICzh`NdvuY%I4v;U`@2E%pQ|)JxM<{!8WmXF%VRL~nWx-QWKq zEXOrqhR!2F9RDI6_Cbzn z7#@ozLn>sTL=pB;r-dDevmx86w0*eSKpz+YrS#_kQ89>{+=#c(E219gAaV_fNn=NOR#47Vu(V4FB2?AtF*U;zW6hLq_@}C+XiWSI=oe z+GBZMvxGchz0jx9WjyX41_?U;E6}^@={N9lYZU1gdL2{UJMwk4+d{_}kZ+ghE2$BN zM*E5;Kfh+79fp(G-TB=O`C~|8KS&c3tT0wJu)&VD9w#To7(%3x@(T%d1DzECN-tCc z8_-Mps=eF^;xL7{(<%TC&&6KTy109+P{1e%_~cC4_a>xDAnUj8~7Vd2UJ1%2T|t~T*siN^6=g5gL+C`42!(c&E4Ofd3bpRgoK19_Pn_2=r}P3lVTPbIw_ix774194!Fhk9Z66Ew-Xg5yE$e7sn=5+%R z1YyH6(TfhvuUf@bz-3hdg$>+JiLkE_S*oC2SDv{1jF5B&=kLbEOq>r6-j=VWIl|Qv 
zJDy-%GBXV}G6M#LkpRX9er$}ShZA9oH~x+s+AVQ!!+;2EebkudR2PTTR2jFBL<3GJvyUGYM~8(aM+)fL0OxQz6PPm>-en-u{vHmD>PMcc z%?C~`T>4c7?Q!6iNNyY1%RUX3ll%w;1Js7;Hy8*MuD7}T?|tg$X|qGGFwT@ zv$s{09$(VTC}AX(VxPpT(C*W)a=lEUQ9BgAWk^~W~6jdXZTxqy#qANl2GV;-|13_ur@Sns(Ob_#oR0TuzcRGZWR9#k#pp-+15K$-{$I&2vEHa=kfb>N4T*qxqCd_z7(N z1MGbDO_ewtpBVY0p93lvn=~s8GT{cx<+=#{=DGqbiVsC189<`ApAX5wfWagrZ@N(` zS#&~m$CDzoQARqk(cB;eL1bDxdqXNxn4h2GB`8D`hMfVHk zR7sxP(hFn#IYk<2lk5#<#@vG5T_9& zjf-M+x(4x){QIA80ogXOpY7~+n&;H4jK~!rpzcG%LEJ~Z&Fd+~x>Nt!pyUz{PZ28} z+odL*qMELa;PeBeBy}fP(G`+Ey2K{oa(SLUE~$BUUQu+4b=GL*?!}$vNVjN;1}Qp{ z<7hh*lr^~RW&4Tp=*qkDA)V~U4KH~J!w&a+$Z!f!xfq+?3(YqrC!+X%y9mCBely!dqj-deGDn*0k&Ti z9qH#;lTryalzOGT9?so79C5??^UW+)x;?m)#}Lwj0Z?i;={Mt(DC(WWTt)0~#yK03 z?;=Ss*P*-=3}l3;UNxde7+7ElvwPCh6e*$1$^~ z+UAaqd_Xu)L!b6ZxM}jr0`4OAvCgp{U3ztI$yg#sN2gd323mOYz#F_p>2!MpOF7GQ z!1qd)X$dJHD>)DUn#qImoH}lzy6cLyc{p6t%G+%y6Tbzl^#L|`1*_DIFoNlQxU6){ zuzjBjRxR*^;ipslZ4`*r(Lk7i|Dz7^f8dEaaxH!RY|xVUKvo1|zU!slD?C`K5?xQI z#|xrzn*rYHsVWm0g1g2A5C&QZ>I*=}46tB4=8(y=%`3kMHZ5BHc@TNJ!M8$IO38kw zF8v`1f)QK+21{WGtd#-jy)X4r(CQ6VR@5XzwgguZif(3_UPKUF3(UC~dHDct)K(|E zl;FLHMP*ggjerVMphd(jTmXsVm#PLFT07~puOKl*3F;+-4Z;Q_PX7(m_~S7im+|tY z;A~&9O6~gFQ?L|!)OBPht!{0d1*SFToFcfdLr!4J{%k~E(F#-L%GOfbbfm6vd!lSW zYuO;`HsRAuw*u=!#g8}C212*(?RA`;olX0uS8TvGY}Q*p$LvKHc>F|u=fF_^r4o7f z-SZ?F!OO1(!NP*a<4@`a%spnVBX{e!E42o%2db{hKajz~OctJcW=|bZq)_(Uv2j3Q zlu+UJzAq9)jlif$4ODFqV+Q8Tf$ikRf^);jh#f%+j=;Q*?mDchufQw-JUQAx1!I9k z?gf%N4YXs__=nm!y7NtB(6yriZ?~>V?$wlwpADrz4X{}zxs16vMY-6(sBSg<=6+wT zUoEN+qk=P>Uia760iqzZ!Djog%~5_sWuka?H@g}8LM0B24CH8g`(Lclx-7DM93I*K zi$M!|c#Wdv8rNCUOIpy3BL6A|{cnjU>45_jb%Jz(^>YVs!f&PNIN5=V{Ga%6E-v)w zNfFL{>(b{LVPLSnbqilfj$jV{@UcG;sJN^|4uM6-DwYVYfeWmU6D`0*1D@O-@HxZC zZmD^H)m&n|kZtP|o$oEm+PMNY!DbC`fT2W+O9%Un7|>#H>!U8>qE>{&C)25EX-Bpx z{Py*~4xRnl==KBN`?;E}qqEb_%kER~d-}gEDQvEGcV}}}nDlpEuv3Tgs_TTZTdw_P z`nQY8l#tCdU^zjndH^a7q|Lwo0zop`U5JAf30w%tAQV)u_Sw2cAJl{nw3d}liD37s zz|1Y0iSbnJR*~h$XGxGR7g*_XO=_jB$}RLD1~q_tK#i-QZlV^_bK3o`ef@OM@7yX5 
zz5J6Ks=Ns4^e*IR2J8{)Pfn_cOHDx+~|9I8NC{C5hJ>>DN zK;QvN`#8X`cSG~7$X*hLt4m;$&q++gynXxDb0p+^M0j`#+Cg=y%M@4QpP|kpI(Es! zWz~xx1YiM-5UAQ96oYH{^Y=vn8BD0;VvtU*e%fq*=}(teSC%H&v=5FbSL%56%Z7nb zg&7|z37Yijmj%<{lHrH05TL6QfWT#I-O&ha=;EHsR;F=jD$zD0??Q;pCOgRhaj1!@ z*VrpJL|e&3sU(1V0K|XU&3ZC=NmIDkA{fTU%Ro%dnWpOK)cO@g$r~o zj5G$U2L3UG?5&o8Wn@T#w-TkdE=Rf__MeqG zeq4UOcDF`V`>nCDaXid&Dd@FOowvxM*`-@scM{fg6|6r4W1rJYN_4{dzR0Kvop}q> zgbRHA0u_b3ySq`3;8`D{3y5IaV2^Q&KI95s1C=PjcJNV)g6{#lvz_$Uxbu6J48E9+ z+ZqgesO-XKjd;_1=`p(9Tl$4V}5ym6>Eh(F>ldvI^RZFd%XWArF zVsF=c2ASFDBdV^iy?=UK6kcF-XZSuiiU<*WnzvrQv>hEEpN2a>oqlTO`3q}_tx;Z? zy<1XFqn9i!Ep0wT@MY)a_1r&Xe0cgdGX`k2_~FY8`KhU?IyVj5r9mpi zR~|F~Z(TFkVP-i1?Z9kx-gEa!s=D-s!ph3Z-r~?}QEC15m8z(90*5<*&{}a8wF^D9mLwcjq8u;@>eYfsNZT+Z+l*_cQTDVxL z68s##h@o=E@*@GQ+t7YwmX%$DrPR|PGHp4A)XLcv2r(Xn~bE>~I{{hXBWFD0$ ztBu7m)WRMK&V@H`|Jl_}1s&P}i7nvFIljD`^=S(=T7OYIu59*w<`WG95ga32g|B#+|dJLwS1{PfR=% z$SWu(PFUZT(9+dK>o9T7y*39(bL-wbBmD}Oth^ec4Vb4se4l#yV@8_x$nRs|p34=z z@ty(v_{KdbT-f!Od^Xa!jDA@_2LQG^NB7k9(V_JRt^=;UdSIR|tbFAGLQQfd#;e~9wyX!* z1-q5m{$8-+ln7T$a82Ko*?bOTzUl2@k-POpf&x+gj8J)2Q5V;LRC)g2c;fpmC6DaT V`Z4XYORyjWtEi@sCue&1e*stlDV+cS literal 0 HcmV?d00001 diff --git a/src/algorithms/learning/VIN/results/28x28_2.png b/src/algorithms/learning/VIN/results/28x28_2.png new file mode 100644 index 0000000000000000000000000000000000000000..4cf22e8fa14b69b070f3b7b632fea7e9108f9736 GIT binary patch literal 14751 zcmeHucTg1j)@=_#K$N5cq6Bp?AW=b(oD>HTRFZ%s5do1PInSsl!GM5(5@eJlAfTj? 
zjG&;BGlC?^ImaQq-JWyrIrsa%`|4J`KVH46cgj-(%ydup{{8maYp=aJ@REkg?wuSv z5d_(dyP$jpK`00ag6X5(0e=x|{5c3OR8ERGZCd!xoAyQk{7&a^LEi~M7|qcCF>mFP ztr3I=!6_?fyT?p*d$~VfirwCBSEs;;(kb%z-($Zl&wyhRKDXEUrQ&-vYJELzzQdjL z9NNU?zC(fE%918JH}mtf$32N%K})k@m&StB-h9fX*vZnM_BhlrOMWl^eukq5hqpFG zEWTsInuT9VosJtBC>TEF)a!QC-p;Q73wg_1mQ<0&uLjp9{IGeN0$!Q)spR3Mf(gMO z$Sx&>3f|g(_;F8lHOx z8T>t$C%^Ih{Wb(&56yG-uC&S>ZPGblTITR$^8_>*H^Aw*~`Y3+}YVln99w~RaH~7 zar9PHyvM@Aa#)9pKpPMo+%DuhM{jZSW-5fyHS#`#uyqsG$aih1V4+URWqr}Quv+0* zIlH`)%T%XM{P`zW&YgRJk8YpPf!E5XoHphb$YEX?8KV;?PWY9wx1}43Cnh9NZTid9 zoTq6peZ(fzHu)o4%yZGo$fZ@sb7QW`Wn=YD?aR|u(@lsnp?PdDA`=7m4q=!Sq>SAK zizeRdon5jqueOhsb-eG>{qNttHFoCOM62(DJ^0~)3&I?FsU0)uNGTb-zV12InfpkD zAaZ?SxK@G0E$Ms#erSFr<8^P^l2iV)$c}%cuYAjoVlUGC{Jf}D1J7;_(F?(HTkdJS zTKf9?8+Z#RG759NR;Cv+hMr1IH=y5kJ5Jc_UdG2?J1s0MtbctDlyvBme-yXSVIEA~ zBIz_-vyiddiUGiZNr%6>DaX3mc_h|tyjfGOb+|Tq>DMEn@m4((+c`1^O<8iBuR-KB zN1T6~fv#@SR2OMtJT0@WbZeqC&wVlG{D0Xwo9^c%OqUEwS6lpWolxTL= ztxUg$Vh18Bf-hNYFFij!;xlldDNHkOssmzs^@z&TT^(oOnx{HGRBV^z<>_*J&)!W< zO%?T8zHQrEVgw7NWnd5m@oFDDZDV8O(q&5;(<@yMKOfAMK0ohh+nIyYl%DhRU5M^| z3$bW7a*Iv67=0Ayx!9<(RCBt1w1hluqg9((x>W!lO3<8y9;e*`#u?bba*ZJvF~19ES9(rU4=ciyUk-; zYsGu{{VQIF98nox9BY<@dw5i3;@o%+Jt;4);^FRQQt75Gu4%TvBGMM6j*d?~qf_ z=hB{Gv}qz0tFi5s+?;aSvhG6Ybn%MuJ~lR+az8rHp>VyNo3&B8HUcM4Ha|P0v@x?i zA6YsoUb^RDs*?z#$N8Ssqub01!Ij6pRr>-fp^nAx|j)aq$MORMW ztZ8_>|MW%`1YJeX*dx)dcu7}gsYNR^Aw?@0J$LN+rw3EU$H!f_f8M)Caq;D;bLfVy zso9mXJUMxL_d$tEN)LB6GU8X2o*WRbE46QXr_UjB-4D+~+YsW;LaSIFCQle;OVgyt zA9yMy>NKoAG&JsAvvR+qU3%}XzHVvJ9vU39 z>UeL)ck-mooRe<4L1L_w+j_5w<2T;a%uIVdfwFwK4njkI_Z73(H}tP4+#Ph>9PlwS z^M6nVyD}{`Rh8R!^|kBt&mUO%*t1Sa-En6Di#In$OSu{ggl`MjpWp(HdR8+(!-yidga2zjiIAWUbad6|f>aJlrw=+V|?}uP7&fZK;Ji1H501lQmNEXJ{?*SlEcu(brFbt*<&SGqcfVXs-jGbaZsg@mRPi zv)G{IIq*arjm$_K-ni;1XY)$ww$GFOr_aMV8!Mk2;KjxHuBV`g01J%nh4d{B6x{@k z3PvIQ403OUL+K8O6CAMnsDPdTAY!pfZtL>e+8PRVjqxhHxWvRnwDhme4Ghh>mwOsB zlYa+o9~-HtG$BC=ztL`jPbc+wszGjtVQqh8oja-fuEEg0I5F3zeY{$zw1qrVnpqny 
zJ~5UY*LIhZ28Bo{nd1Y&a&6B~XlWZ79!QvELCZ=P$-W&RO$@3G8k(?Unz|5OyTYk6 z0{U6X=oUv$m6dgBXkB zTfXu*4_ke`XQd?BqBsOL?!phh-RakXY=V@zv|yoOVMO$|ke8 zO)km4yTHYBDz{x*U;hLSB|H}|{ss=2)xP_o323BcU=z)+*_W&(ZH?f!R6_T1b7v+b zBpj`JXGqdO5t++lkQsk5Su+m3{gRaxlLZ0N&o}9Yq#c`L(knlm3<_P-0eUHlXUpk< z4|W#1W!e>t9c^uE8~+@*XG`OzuqD3oYse8DEvB{0S-ddmd{`D(uhwro{ zX%Tk(%jO$3{lzOiX}^A5zxYB}A+u;c2*9Fwa^)Bndm%JN!}5YTm)RLR3?-u2-pO8C zUjDU(*$>Fw>3ZpbH~~W=qjcaNMm|feTZTzcWg7$c9J}W`1LTVr2lw0tyDt32i&1X8 z+wu!oueDzLuDpw(&tqe!811Ht#U|$5pTBw~nAx6&#(it*3I2>j2`xeXaW?_6ilx}%md?l@q<^ybaG5nS{r15#!-JSVP$x!!_`L^`#QDkLZPPWP4cQQ5fWS$*nK~sqsYUlValI@vk9Oe$FlwvkvuUz ze(&iW<=f`Fc#yu0y?d#46j#n*oF#t(mH1Elt@jLYY*Fl-fv0`D-uf@A;`O7h%{niO z_l`(!k4#aFvcpa(^UxT_40T0%)C?d#&eD%|-Ssz!!IUDYY$Ko6H2!5s84J~|7yT>XRs@Ke zpjtB_X4v1y&P=VR;znk!MI=zVETK1Jw*7;KkxLsIpVmY*Hr$m`-%a$3$6)DEG<$RI z(cql>B2Y0y^diu5=+@(*5{f2jWOZUuk#znw!T$oQvXi+KvCQUBbT3M4FTcJeQ|mAF z%`KWF5V198p2+KFGxwq;!OlR^M>%?eT<2C256u$FGX*2ndbhV$a-GMn$Sl;H2Idxu zjg@p!P1DQNg-%R%d&8;rg@_WMROoNWlyaTC1bhxERW4A|&3;C_)j~bZ8%`j}4a$b) z9S@@_3WiG0g7!RKfezFZKH$G@B@3Lzi%n{nD(Gn9HU^J* z0;@**7qsZD7?%6y+!TU!ZT?iSP8>iuLz_Z<)XJUxfSy1U;2$(1 zR!ifp7;+qb%Oh6W1JG0_=+UF*2W-Zpz3s5B3@LO0tdB8Nh#$6Y>=JB`P;axDYnHui z7dbl*O<;8O^wLmroY}n6sKPx4tUdx-y!MH3tcNt$(o~n;ojZ3>t_3A;ju;0C{}#^q zu=qUj^bz#(BE+SQH0uhozOk3)4BB}*wuN)%G4y>fuM)Ls<3}S5^g6PAqE%+)XS{y) zP3zAA%mz7DAt>u9UK@@^GZjdDI<$;%^6>CPfYm4*k8n^$*1OsIdX<_5cWG4(4ZD?< zJNcMc6(65s*Qw6Lni_SNiMDy{leSqb6WO+>=*Rn_sY zO$~HtcjVCXE9U8Y%|)c)&VS5qe$yU~@emcRkcxg<_uczXeV)!jl%tQ36E<~fOP@|= z7b+qO@0oHwq4)$;ew)1NE=Yq+Ioya_OuO7P_HZ~jQ1cnxs3gs}jM9xE zA+&XedU5O4tx3V%0UT8FN<+iLx4s1JL(9`E?|B@wmul_CUg^MuITlx&nyvuxC7W!L zoup_8GJJe|kkgvozT~MbTNI$5i1)l*>BXa+d0PPU5z#-vI#gR*J3+*}(rR_K-xFkr zDL&3oQT9cPw4v+Kyf9M7TYi(@34I;|3+8H1W$LHgd0*G?yX|Q*|4E zs5Fg%Y*I;vZCvg$CdJUPx(b`9w!OHWJ2rrJG%+zTL^2e%_};B$5{KBW=XND)H__r6 zC1w*bT{t5Py#dP5y?V*7P!2yiIhohW_?d@-2aA*OT5cgK+|PlpCX)BCusnbEjHy2r z=|~bVyNwTDY;j5Fj@09`pkln;FYL4KtfQwlA=SI~w8%IU)`AyTFqPM}xIU2ybx=R` z?c0Xjq(O^y 
zQczHX^waTCcOpg}6=4{-W#6HwwPkC9-^>L;CFSEs3A}`N{{uGXTCsN6D#jYgT5Lwn zAtmeMCySRmZCt|{oH>3{oM{0Dnqg>{f6kwgTQ^SD2mQDebZFzv$-!{;09F}Ka@(4X zEoT{5amBiMtVXhKrg1uGPI?a>Jb?PE1D%|Xwl)?QCFPdE!NDPF_fw)a=4|tJaM#I< zkBXb?OU)303Ij%_CkG=e^j+ERjt&3~i23=QIL-6%C*pPXAj5V$<&Iqqz#{5xb~j|0 zwDWDy7W>1T}o8_V%f6?r3vqSU449fuF7SKRUOcn zNEiy&h`YV~A}q9I{$M*b-S7s;CMKBxSYx34xJKU7cxYkD#1e5A^G0jn8R6u;>>HMS?ai2aV6k#z2y`<;%<_cPp zd_cLV_2sTEdl85PJYz!cj_&2}gQ%muET~tvF|%QsvUJrWIjwEQ_~uO^dz+}Q5jS_x-QFyxuPF5lZ_ywj`JkNe!l)-}aPLrBJ##~Uj!ji!#f#Z!Weqb+C^2#~COAcxqZ61eida_&F2(OZR8%JTcr z7mmjKwEH&pJc2(2J5}OXkfARUIZNY*LF@e5$A9*rTM;7Kk++Q!v+vk%Jl+c@;d0-5 zP_aC|uYoKqWoKujpL*riiXDHZilE>g{aG+iVo*Vceh_G*Ac zaF#60jLCR(?|~eSPiRFGbW~865u(;w>Dl^QWo_xjvrgfLAV-tT?*0pY=g!yA~8D&Uy}TUtCp29^>rWZFW=sQkfMKea5= zGc_}@@>c{q{Lt85wIkVg%T|Z(+_^}4{JP-A1mE@*p#jSTrY!=kIjgW@Ie#DClqG-> zVt8L5n-HsEvMN=m`Q+8qgu?}6d*$&UiyDgae}v%O`A28O15f-89)Xcl-p`MrPp$#8 z_?nWPbJBoj)_%WI>*GTIF2A2Wh8r5%3Os}y_D3&F zcw2l#e|JYg>d^h2^hdOIA#!M#lwyoVghp+9oy0hf&=Loj;drxl_Yljpj~(J&?NiH7 z)=r+7D;7EtvUBI8Ae%5lm^>59(YqUzVe;NDs8~1;&yht{1{QOAEJ8+T%~@zWkMvAa z5Uf<#6-38IC&H@=F^zW(%MikQo()VRy7wgP{p5HG&=5`yxRSYMmRFX@t|n)V+TXoU zTcvzMi*7>Ke;>l*W&Eaoe)?rrk3o}nko4)Fnc)}n%*(7e;$LG(L-|az$kDr>lPde|y<7LZTWH6L&{0h((0~ZNx1TY< z2yKt|KzQ=dRlv~VroNm|G7th6n*eR_ATBz8`z#u2>x@ z)DCKk^CxOHtNFoiphpEeB>p~0+hjI>Z;er;8Xy02rIJ#1*QHwC4U7l`U8Jtx;VsmB zkZHmOC$?bpGpZuD9)ebOT*-b@n2Z>Fxc=Z~UCfW9dJ9%z4s%wA=Xd^CCu-UDc1MZ9 zSy*&2u+7Y^O|qL+EFyVT;jZYN>tVGK zgN2+MvlYxjbn=5tNaImP=~)8Ej~kgj(?@>ZNW8w5o zO-)!F3|lZ=plWZD@8K?7xCE@Wj9z~4NxoYic-RT!h$%gM5?bf*h=`;hr>&uxId_;M z9DTwm)mHf(QPz$ZAx!l!@}x2&<_*v?fPS5rn#zYO3FL+*231R7F;Vr1S1n>R7{3uO zyD_VT5>b#jX6yIvFeTihNJ&t3l*mc8jKEsAKa>8}KWi&={C&PW!Wn2`di|1xMPkWP zy1m#BSYWOc1h&XSR1m+*L5=r`%J#6?0Z? 
zz*R2u`++T`J-Px}g$MltEKD1?=3Lnbv(dt4ijVLLk_Q>WpFe-=XTb#xJqwBOucJft zAygS%=UlrKDoR|(`veC#71|2!l2?KBS1iXeiB@8t9UpKd6}54nz8epK(ZEFo(1iq4 z(h@FLjy*S!3})~jTsoh@6(ddo*$41o`lgN5^JX~0lhlY&zA%cs5s@TZ{yMaCUD`DN zT2p~bNG8q5gFiF%#n&kYt}R%vYX(`^tR*DixFms=j#FOrJdN;o;T^)m|EM5)a?k8| zpSy{?;Q&lg*S^#jJ=*VJrCHC9Ysxs}QBudPR)&ZK10*H!9e-A2?9=1qhqbQn!yNOI6C9KrHaJ*m7(EBPHQrDh9c z{hde0-!qAJ-v7;68rd<#&Ioh{J|Qs;NaN=W!v-=V+J?y`~JP97WFWuS6;jLUm=7B%lGUMDp^u z<+TCD2!T6ydhd}r$S^hbk{8!0hOKX3GpA$!eT!w<)YPHvY4u)DpJAu=OMi3g0xIyi z$i0vrVLHGbNrN<^==gWBoRET&2kp`$+^V5Fw6`yT_~8%U&qaF#QHo zh$wqm+cIUqEdW&E%L;ex-@q_PMkX7JD||S(CW+_AP=1z%;`btzC_nGH$ zB^z@pjItPdyT5L)l81zRI7_UXvHnUzmic11iA6i(K>NbokP89)=x2y`R=w7(Q{Ji1 zd**t6t_V%I7`D@!^ zji=ml889U1_85sRn5#T+pDQdfGI`{cSKYSkp+kqR3}I-+NrrsaRC7V#eJC3OXtqrojD-aZ@x7CwUvU0&w;+<638&) zU8GEO038-kF!PQsJL z(f(B{NgW*qMs2?OOHfz|HvNJaXkX&F7{l%zSIsN4;gH?CKAGLLO~#jG3Ki#2-lA+c zF~{G>KH?yu4TEIs<|Hw%8k4PgHgxUn2gxfAt zC-Qy2GRwH|>4Iu;efLikV}ZxK7tT$VXA`DAWfN+ZX_yK?bPEp+eFHk%883D42ZV-( zQWztWftBXS0eTh&DMJV|*keW)>gC$|EBwP>y|QyVfhaaYs+=d&;SmVpDB9qsrlKPY zC^#8L@bmA%=wye;=4zE(8_Y3sy;kg?M;?86z^=g}(kSU$*s-o_X9>^t?p;0VDRJhKp?D|5^#=% z-KYeo|}gOT&U!Udw{>JxJR3vJ?Q9O%*=mW z^sgiia82i@nmT?W@X{gV4fSt^&eQPiK}-NSKdsXP(PzQ6X0UNDe0cwWotpCLIfEO`l7JSDbpAT$$SvK!so!&Y1tsHl`#+{ z2jI8z$J&!-M_kFT=)C_3;)K_Xt4fhE63hK($#XE>WjfJ$aBC-h^!BskQ1nCpNl$ z+-84wK24##W|@2Bo&O~N5}&VEjd*9wJuB~x+vYcNx>#K$yn{)ANNIHx3T6YH-@&c< z`6xTLe2QMune$A9J3#bJvln*atDfR|xAN>?viVn#ur&oPz~C5ETA+sTWPPN5{0wS% zKhq=9MyJ;kf8LGa<*1f;mC=^Nzm?Kzd79~FaA}$SQ%$@07c9Ha<5j&>J>T-VY!TRVtlgMh)*V9Z?obtb?>H*LE z{Lc857S8-ZynYkqK56LTh}o+Paf!)sc55%P_b4NO{Oz8={s-i*Zv+v|Cv%~$MxuWc0dXqGGn1F|CEE{QF?em%2l-Uhs4I_T=2UEe>6XT zm-R_Sh}0g4RL9*}nzHlg9u_U#PI>*W>tn`(d>S5iTNX#Rs^@=o1{Y(p-@;Jx*8#>yeyYavKNgj0|uQF%~soW_{ zG=(JpUY*FRcG!p+x$uTEfC=cW#Ak1sXCj0Eo?@+obx-x=PLSVH%Y{@VG7KPAj2|RIue1IK?F~~I+M3KZ)?%(6??0Ip|R{C z=NWD4J+K?6J}EYoot1Rr8Jq0SKOHB9$n&7m(qA42(_MT50KkLx*Y&I4_5J^BG1IMb8vee%2>SqN<7jsV_9|--{X-M((=euj8)*DTwq@A|=yE09 
zXH6UGPY4L;I67u+x0d3!88SHoJgKRhJ_qjXEOg0F@aKN&d=B9WMA#SR7Z#S#0TdWp zDm;AZ(Ahvif#kLgObN|WnF7opCYKv?7ErwgP%9*Z7QsT>*m;kd9$Y{iFbg+CU6gn) zn9gaul1>3<7S%x$L&LP3oE(@eJ$puk$vuIMpu2Y;b&+io9Y6o$t0xc#K?>7dpzPAK z@?X-|f7#=`a1k62J0`hdwou?a_9iH3=XT!s_^qk#LOt-3J#o&LpS&fyW7b9`n019x zK4JFwNA~A&ahVYPTQG!ERaajOo>0@!&|R7C4v&d>m+v%k3tWxkt!a@!Hk|8aR;(sF zKG;TKb`hWK+=<2MnuQ$9awb-P;aHtbFR3%V^8qi1D05VoeI^b^P`FR$k2nJ_yf807 zwHeR7c|oXX_8zLbd(7Y1TyHP6TV{1Y8&;2)!cnHl73(gcUft#5_LQUvrrozY%#?J5 zG1X;6NVQ609@PW8?gHx{Sr!I)xAm#`z|Rv)hmK2uxP!JL`8~v^DX?PP*E9L~uy(uP zvBXWC?T+4fQcIYa7)F`P9O!u^a-M5MP;wO?_jEGMS4>s1yJ6$g99`2r0Y7;^YN*6$ zqe6J<@XMjPUvqv+?vBK%C8X>uDhd8O2hqR0!TDOnzE=`25U4rJNrNy~hRGwuBa4$W zhJ*QuE?eMiLZVSPjbEJ9ECZGZkmh&J60gmP^l`I;j`|;2mj2}n|HmVaqS{>DXE4ly zYnoVr&Kv&IfjuhN{k;$RzhbF=tN=bBj?*_EqoW9biHp%^F1EI|Vye`^0m9&aq)*Jg zM>?8s2ZDqsgXT5So~Z|3$}@I9-@-%{H5EpQT8BcH!R7F?+pRa9l7?Z`Bv^L+?ZthR z2;l-+E&O`H5JB0K32F)`M6Do~iTZB1Z!dGnc%ofJ(-1@LoMKHB#z|45Xz7$eg%(M(a*)Mr{YJE%-p9-m1 z49-0Txn%H4e);kxgzo@?IP>^h=tp#)BM#JnVHox0^mvSL0;XpE7(g@nVvNLPDYxks zn9)Q}+FMy&Qc}9VU0gMA86daIh%xd2;=Iehg1Ko0;shg-6E&^G?08D{3ReEBL@}GT z(J4}|oyZ0bonKEQX*hA!vFwPz5|!`~AD589x8% zV9AAna$c6HxHq20yFIZ$D9N#Iwb+(JTDD?(rah*D?N95dV`m0%3Q& zc;Nq?QkV`u{5l(*Zv2lq_Fs*J|NE>wjE%7PVMKxO>{?y;sSYyP;AmsKYb1iuQi1_K ztm5$c5h~+MJ@U$o9ym4*GN_cv23N(S5rNVHE`Y2QMe-dkZwW@(7#i-n`K_&dZ2Sdh zN2kFZ;PH9-Fq0QJyTQ>&^`$CW<~w1bufk)tUwYL6{nqz;0LTCrP&3ZId+x%o^}oCP zE!}X;OJ9!!{}u(HCSP~cXP#T`vbPy;NkOl?qQ3#`HWbT-5ul|gf;BY_yxma1p`EV3 zotih0|0!1hz2G0+QsMT|M!n7vLM9miOJn!w1smYW0v!_* z?tWD%U7HbwP!K;TGD6sl-4ET{=UN0Qd{R!%V*sMjotgH zrBojWRF1%4!De~#M;rLdO|{M~LqLT1ac%2M69U!qn>}u0O<&N9+N^>?UlDo(SlFkG zQ&QIk7EJXcT)=#)51y&GCpw?BVbI5VO!HL>0O+7&{zg}fMl#Iy&)n`XHMa;^`C%D% z0^}jo3xfJ8fq=A6hu$XHvSEct z>C@+6okt(L0EZEy>?$!y{pA}NT^tt_tec)MubWu9)nERAL)DrP61ybML}dD#!iGS7Lc7_8<@fL3-&x{Q=)2w=ms(H&v)=X8C#~gFRPsEJ;hev= zB~PAI1Met@uqnaVbBwn(R;s=7dxL%r>P=Vzm&$Xn7J;QTH7o1No?{w0AF`f?z@~@7 z_VvYw_qEp3(i?p=aSp>6*C`3ZRfp~kYp0Y1Eus7k&}Y=w0;ZddE}+-&D(X6U^}xyOc62@h 
zHqpgJ=R&m)IyY}7!32gPgAt$(a%sPh&xIz`+X0JT)glI72();C0r$7Z3cTYTa<^KJ zyNosQ_IbiwzYVh8eB9G&N}5Uwo*BT2xX2Klz{pU36Ub_mj8`7&MNZ2oP=#k4KB75% z)@iuI$l&u&{Pt#)q)RGlh=ktV+RFbUGRbcy)*@h-UJ+RD_5%*2I;G(92I(O>y^KO`jT!S$UFiyO_V7HkG= zuze?41dTzi}dK*YFiiwFCn4t=hx@K)*-=0Cko@In2wpJWn|R#l$%ij8`8y69)oH#`2`+#_;*F;|BW|lwizv$8KutpoU?!n OMsVjelrt1f@BJ@U%iDVZ literal 0 HcmV?d00001 diff --git a/src/algorithms/learning/VIN/results/8x8_1.png b/src/algorithms/learning/VIN/results/8x8_1.png new file mode 100644 index 0000000000000000000000000000000000000000..0feacc9de0055a056dfed71d6fd516e47ec0dd11 GIT binary patch literal 11951 zcmeHtcT`mQ*5xHAm@pusAcZJO6jVTzY!;B5ksuHD{p5ronQK`2IO_rejO_JQy47q$IOX;oVI<3ao61$^FTb6?9IK@JuXPk`e?9HsL1o*D-UFJPw>fm4_!q5ND^L*BJ zCj7}uA_N3EgGk@Lq3RMdKj`j&Uya>e>}h_^u7ev*Vmm{X!nvRNSOa30b@u4X<9irq zSTwODn!88r$}PAi_@00K_$ue*)!wHuysyIdj~$}sF1;Td?R#94`XGDbo!br1@8ixd zZ*K=*!Jc7fJGgOSLyUE*`T92!xuu!;=RpOD;f3aFyiEnv@EV5K<}vUWBJ>J@kCzTo z&?3mAlaxmgr1lc^83ge$pp}Btbw_;Q|Nd+U1%e#Bg-{~MN&o-CszW zHS#gn5?{Qay?&amqMicL;b>ojk2;b z^Un8g1`QMZkNTvC!!M;%`A-K22VYg7LQ3@{;uSDK96#OY3Xj0qrZmrU=g#euLelF> zU)Mi~S)uu#H&KU*7WvIctK4B4CnF;>huL#u%&zSCv15M-d2uqrZu|4_@pXR<;aDAX z9_|z0|1?`E`L20aX4uVJx8^Z>iqU}ByG-70{Pr!pX+n9nJBNXZsWDJV{E=38;Hy`T zlg+1$OiX6qiV{ca;*|EH--Zi@4=+U2kHeMT%wcG5;6E7L5;AR*st;n-u(i$qqpofq zv#*$T_cVfh5K&cFoC?H_?jHPiQ8treSpU|n;WxIK-uz?{r@3^$1B~jM0@1?O-MP9I zl%#miH7yoVr#XLA4tdp{TwNjK=95*VA@`ZVijs1tc_z*CI)%n>+*W5&*VkR%oVq9XpK0SWjp=_eNcGkikZvk`Ff1|1OA$=f=a!-C?V{71<7qy^kYY7r;zuK$Dn|Cp4?l7W2&m9sc+mD6+pWxk~D0j@#;fr{C`np|9Jd21!I(#U1UdWA!iIIsvkj^bn!94dyc&IH!Iz-TtAn3W) zXTo&pkq-`sa|xzUoOPVROS`YlaTQv&#);41GgQjzABL+<8+z@M?PBFkMyflf@u{*A z?9%89^LuZ|8W|ax{r+?hH#c|YiY>oB)jF|aA=8t**4WUX2x)aiKp-qKQfsu;-_K81 zi0a5e>V)6DNEO4=9FiL^;~-Jc8y;Zf3x&kg)+L)rBIE2AF3b#+*}E;b2%nb*U{Ohv zR|Ak3dDlcncY5z3riOVZp0j6fGVSy!f- z?<}pg!_^VDBR>Az04ESZLM}f?uJqF!#+)uaa|$u%N$bq$%F>uc6RQ3#OW1F}B>*~1 z$L~zXQ`Q$*_N#Aiu6EKZh$XJ9I7oMVFsKU+4o-{{cUNBz=Iy!Sx#@8J{CTsHTHnBz zFEhI56vP&vU$rEp=vBFy0V0|A6{NCfJS?#)cUerT)Uz%gKbFoNpcdw6ix=gPk7=_k 
z8GNrGvB7TB@`}ABUQ$1=W~UtTKy`9GDJkhnnh2zi7$yh;KIi7q~ZTI0lsEFe@<=L`ysJ;?GW?T(CgcYKHK&TbDrcQ1E#(R*h(9t*FhVaW;*E#mMa zp|tecpeJdN!Aoc`->|+2icRgdR-v)k;@IaNA)23_Ks!dhG{^F^ykpY}7xXL-eYfsS zGHEU~JKCFc=YWy1F*gs7?7g?A+h@A6n&$YqxXOt2F#;y?W$}J)T8@+-&i;F>P6i@V z`zG>w;oP=dH1`ne6TS#pv`DE8AR9b0VM@I)f-|!v^kDFt!__?=*R%Uw??ge4O zTBkZ|eW|*V?;VD^L9Ko+O)mQV>TC~>goIvG1b+nk{7}`yZ8AwCfR2SyfJ*HSR5GjP zxju%4c!8<+65F3&F|M}XW6iLi`i4u16?L9z5gt~(d)NPhLR@yFfC+?vuG@5KL?{k- zR(dnSdxJ4YGyi6V(>xRcPsc996ATPyD?i$`KUzeh7_ht5x~mH4zuI=|uvvT3oyO*7 zRVeE#dApl=+7(VYYjb_)@_Mr1w`%&Ht6?VqDA0N_Pben9+ojg{lZL(Z;VIPX{0~tD zk=FXHX(nTZt>Z8*BNVWe=5Q-q%6NScwkiA~ibFzE7j`kpJEQi%G$)uP^|(3SHaa$4 z><}Ift?{gc0OoJ;lKeHpc@TTj9gxWH;jwC(4OvAT?w1UERX}>OOPdc>Zm6|-|7c^I z?=MlHqM-|AXJBN+wk6ykl-Z{pqo=155L+L4*0UwV&cTr-9dZFEo7+T~#uKgZ_YWeI zavqj6PS~lrl)L_DON5l48LFzVv$Nyg-lnV$0K~9wDoNYd?qS0&YP`2=3hQ=BWG@E6Nx;Rl+MP?!JZ(VD-{8CWrN$Reu7T zz;xxw{gIvcofQsvKpv=x5Xy2ImA%zMF!qA<}OBGPBxz8 ztVhF{fX$P}0WU2mD8OfH30#krN8cA}+C*cR#`b`&^I&T7y`a5Ak>vT3VHw=nvu7Pf z|M085+}GFF*w~0;XE=WRM?>&A$i9fHRs-oFJzhI(#|W#WKK9 zG{pI#458_)<9g`PQgiGmIP>dJCh6B>^=~bpP@Ti*(l-i=dj+gLs_}*}I(5c@NYBLVLaQ6*U zDh1VERo5YoIG3BzS1pzE^s0&+XVr@>`jm0_oQJ}?l`cwpc^aoqow~6e>pOiq+(^sm zuy1+&)bdxTbo~4Zz_Xo#s1l;~A2<*e8=Ld)-ATWI0F7PW$!`~R_r(aLtvD=9kPA&B zyaW$gD7DA)=ZzbmLzFI7+90d9+9;0{AES7I9X)>Of4gz2K5!B_a{RoPtY|?^SI>rD zU?8nV-n&V=>ntdNIgKu!e-4=}-QqRnBR|Qo_hYeL0Cw`jH>P9#jbFYZp#dtjh!1lC zae+ff!cZY{JtRfN)sHDY|K9A5*V22jg_!I1$C;V4fGs!9u5yctKIE@j>2Q?%_80O- zUt&{qM@g(X&*HEu-1Zfbs$P=z!2#t))6_N+>eZuJNjZQd8`)v6S#_ zA6}iVso*KO^~y9(Rw%+=sNb(i=!egKNFIYlZ#!KjiHawRDU;q4a&-0eB~6tGoOJGp0G#wbxIS`;@o5ah?FcWvH@?G9kr{8#>b! 
zg*`*V>3(>;iv2Q@ew^Y3x3rvG7XQ|upVG$9Gr4y5r1o2}&gprCuhWdEb@|q-uKg=7yer3_$u9P=?(H zgF34{-5E}t*y|~gaMf_n5w&9{PQ3r{;Zj9Cf=J_r7kYk<)(M$)u;=JiY2%9Qr{r10 z-Bh9FxvD^e_sT|?X8!OyfYxrvV|Ffpfyl^6xK+OE6(oJ&y<9ZD$V}x8MqK1Wj!A2r z+xOsmMJBD7)tN3?bU%KyCsSp)3z&RUg-)fb7^@){FENC+WZRt*fSfVT;V zE8bXbUjcoj@}_tgt1!Eoxvtj-wfe9;{d0F;wRz*jh1Lj+0#<||K2nltihCcxFP z5O$lZdEPlmNoQjvJhjPy7l;9`JOvdp6C)ohVg|SKC1+{mbuW04M1j2!GYwhj0??X( z!zDpmt^lGyC!}hVM-juLKT~uH?WLlvtCjBFz5A#iBDf1y)bIIoQ=nV8lpcL=U8=nY z6%7m$p*W*mvUSb2`a(51S2vlem@>_W0_o~WzcW3xrM7n6=Z!Y?s#&aj+K?C7QZDA2 zl7MluqJu+Gl(2O!5V*!^2lK7wcq>%qdT3pxmYA4mckQ%O zkRLM;qrW5~;23`X{t)fvrvT3mPCAG%(9eI>6Tp+<6g1kqROhI=(y5ArPuPE3R7fCO~6h_H*QSZcfe*?t%zn6LKB&**Og5 znyCHcYv@vojG9ht{|u1KUUB*Rc6Qf3M|>WQISyE`IUV*PumOR=;VWI2?MRFa3>k1= zo8hVDt_B@wM%SM9n;6n6}C?*nN*iQm6J zL4_w&+<*$KLcBbYgSd%dVWzz;&DYazth9A+Pa;EzzYDZ)Ez=RfvCLmRD#fQ2}Ah)(}Q%mgtO z_<3aM-Zn+LfaHOBSdy;Zd_$gX*IX6~pYk_7wuSkWrkv%h2x7l@85q`oCpj7wi={l3 z#!8CLkso@->Zhl_uvrZ>SAFdfDw$H>k3hX8<`dt<|Wh)ywpLSN;)WhI)cgxl@xey9OQ3AFNy0^|}hb7YA zvWRAa76MQd2b?kp=2AK+?27cXQqKlmr(&Vr;n(K-g}k<0(7HrJcbb#b zo1v1%&Ckzo)6~+EGiruSOMme*0YigG$&DQU@H>d!j?neUqRoU8aUdf?-%D$Ek#*O$ zD<1+J@IPz%D7Nb7DhH0fDx zhvd%H%nFPM=_TI~p_iDCY(0ZEE(kgKj6v$;Xy$t74PDj5K|>1U(E-XMt3zRFo-co0 z1dpB2UA_lT{NNsvG&FVypd>U1xm|S|4%t{zpP6BSm+%P$+eHIzi*TdgLSG^sO8hD% zJiAmEoLffErL=R;KAh%=DODTecHFFxh6!R@4NuCX^P-+W@4x)!f z&LB!0uzUzLTtPbsu5hN5R*F+dNDZ3*kPC7J?x9(Ko@S*mEd}bqEiSGLB&^W=xEWFB z>vX7|hZ4@gyjP)XwQL8r1G#Y*VVVPS=eG2fqxi|!V?x;O<{^X0Z(+vGzwydON$74k zyE1GOv36UA&U3r7vhaLI*g+b`g@gz>{7{7Efgm0zXm1=@AxAgQXD_*;!QcHIN)R3k zB?WQM)+tj4NunLph%5&WC8glNz(7ipr-Z%@O_X?r!=9h4r!|Q20vx5SG>FgdniPVH zR#v%sE~9&lo;=|c6RVVw{9Bb7p%d~f7c^DeARnVo)!(`D>qmbz;ZeoVT%y(nUj)yG zoRpN?upZEa+_`hdXia(|4=q9y6BArlt}J~CDEBz@=nnbWabLQ6wb@sQImM1hrWOd; zRwbe?dE@CSv{F>i=*_M#jvIa4UY6V~B$m(eZTHt~M?W?+)dK;lEX7Fa|;-ze}(4jI-c=6&z*4aF;I?O?C2^(8@UpmHAYe%9&d=f^Or0{f| zX%W@!b)#)Bf1Z?9{x+B_qp>sD%r$rzL7qNUsZ9@$cAXh1G$v(`H-d^R$7EJLH`)^< zK7QhQl<;vZFF6V=r1_NpO|8~jEm1eXV`G7H 
zLYe*fG?j}7f~0YTx%F4y9m1`^xx9gv!4C@e@86%-T8$Rfh`M|WhcCbM5Qldn$IFto z4J+O&lo(zC0Ao={4mqv=G$^(j)V_TAvfEsK{mN%X{a)eb=1Ad>kQS<^C`{AAu)}UF zO`tMDIImU`*c+(nBjUbhQC1HG>Fd;TSlJ$E*ih;8uL%AXj1*)LxzcY1&_!J_3RG5B z&LyDUVGAEe$1j)QR16zLuv~QGaJ9iLo48xJx=~t!Aq6ewpK(|UaVVfk@ti#aqG*Ot zoJ9u|kht@=@^grY*iVJCYGL}T5rmEcodxj571<`QjJ^8ZQv!VUyU0!DX|ADXG>0?6 zc9|d^K5#(g$&)9M4;9s%-V9F&KScmxApK`dB*~ELDcp9K&8@5V? z=~8L-c>(J*qE1Mx-dd6s-&r3oU;ZY#+Hfw2Q&3Q~zruYUVvkbG znA{m-=uH2MCkNggsh~m3qy7YZ#3x6S;{WW}Lfa!$=8kil;VL$5zxjqqNEsYfr-dQQ z)=N|Nsq~Q@FCPe1u8MLIGQQ-F`&x>`V_SiJ<8S>5rZJ+<@~F`T^8V`YG_0K5+%llL zs6jW1^?J8l*1@x{BlWCwhpxhXW%|!%aiJ1`Xz;R(nS2mnC{~!H_=i+BZ z8Cls*7UzLPFc+|(sYYG1O#<;%kGKe!yR_k9WCdoD)!6Y5i6Gvh{(c{niQ`6FT zu3zsXwnLoBLGWjSV4+>*R=hJ?vm*;_O_Zo}F6u&X#CsG>2AJWKR_u4Uj~!EYgK2|O z15gloD*~hj$Co$0J5_)Tlh)AC!0cVST$UQY?0oT!_Rmv3$dTJOyjkwW{vqU(&JyKD zD+baKE!v|0cbt4NO$UbDG-{*cC@U}j@mKHZ*DH?9<%5P478*)T+}&RA?g2KL2Zn9m zH3ncq`v6I-s#d$NkWf7ZoYWt2?!EucLg6PH4ecLBdt!T*|xD723+|60H+jRT?4^%x@MsN=O=z4h0)4|pxoxcc#Y zI8pnip|v%lfBvKULHwnFJDbT&kfr|^HF^td1Gqg{{XoZn+dtdv6@I828(W4wVc%0( zE<$dJ=`Y|##c=2s>_p1WHVbAWN!lReK+|up5`XT>l>+%HMQ{Co`At-Gn52F17V?kQ zdmN;!Zb&F3_q$!0!$K8_n>~f7o2sZ`VnU4iJRp>p`%O<{>hAoPXjnMp;J?*qe`!p& zbCzea^0xSThKLC3z90(WOa1N89oRN_+(^r@V-W2qbsiSoP zv!M1l1*x4kGc!Yl!jU>#GWg<$CDv<+`MI*@=9#DI0xrl!Jt1+1!6m{^)7^`F2%6fU zq1spQE>lsZ`l5D`B3PTpSy}VI!t57U_al^8dv`6A)Qu{_{KsH&B8Cl7%BqA3A=GaI z)%1O<1o;7~>p=qt0W*imLFaNn^XlZut9(r2RQm|B&3O(EEXaj~4+hV`l}q{k8BJC~ zLM^eKyX(Ci^F_@MQmFR|@aHh>Nty=NxqD}uJdeqdLHkee^8hO3TaYcKQ`yuaLPJAM!JhibC=Qb<`krw0Zs<^Ugj{*1AU7!i8zS1NEP+ve4jsmh9Q=bX4vW!e1Iy(hJ6czOuxa@3RP}*;UAiyMS6{DV~ zmjlfa8ZPR&IvJ>08`(A2_(3LM2+3WrZN3$;lAZo z$vhbQK__3Ff|QM%hg{QO_Q!E&%XNE#-@EmQ^Io-<+pCzt39A;KOTR=07HJD!AD7YSF>lhlZb>G?;4PsP=_o!Eh|cL zXE6v?2OR)}vB1@X=$=`XO`Isx$<0zHSMO}jW~uKvD)t}BAUc2mq39H(Vg&eIzP;9u zIyx{GhZ-(hOHEEej&c>w3*8`$=fJQF2<4pY?6+an+ZVid-Qi<;O3EkV(X8x$l4lDN zCj-oibgAv0L1n`nZ1p-0#vJ+{*pX7 zl`j2oqiGo9=mCK`6`c!!Iih^`6z0dEPzMIiVaB5%k5XV`VI*O%6_^DuE|da`3Le@V zCchEB0pf4{5no|W! 
z76>k6c4hgXm5@bm+vGI&+AIC7o6^$KFn7?{+S=0?#)CSM5cS`P?YUfM&yHoz@-Pcq zWuLgHjo+N*^Ij{AH|zaiVBY^R2Q~;>wLbO=XdRP)@$124HPfZH8YOv7NRN4wl1109C>?Z{5%7r93i#mz}~US&JY6SQDQ)q^nN*)WjTiz4*q zk8`peY9*Hqzx0X|gu;aL*s;?csSxKGCULG8e(pBplnj|4ViIiF{8uVo zuY5#08RoXqelG{cGqvCgAGAxXEX9`^&i%r`ESVwKDHfiqSGz%Z`?H1T5f>jM24_Bj vtO{rU1^G|i0$YFe0RP^u_!SV-! zAash?uWBF&B^E&_zR~Q4zX-Q~{Q*CyUE~zCXy89@n%lwf``&xk^<5Bz;THOjB1>E z`5m3D1k#qo2M~l4QM`It>wd!QkmvmrHY#d=QYDcfwvN zW43L$R;H+%zr^O{Pn8W%>4LMpmXQ*l246mvu88Zpr5}3=Ug1c+4GDfAG<=kd2vT>R zniD~=w`gSHPxXvg_ea3DgBAm{f{&>)D}k^g_e|8bRWJRhRs6pD$C zp8WXiWN2t;Wc_H77b_xrPCquIs!Gyrx=T@od+_0nD@dwJIICD{U!NYX2|>C#N9%*S zD_pE_O|sbi*sQ3mEFQ%P8PU|VH2b92?q40shfa1WIye--kD~<1BE6A?fL?<3-Me@D zC&ZfNU@|3-O?d@{AR{D@x5wq%n-;3aM0*DMF~Nfp+yAtaO|Y#$?um94TPhpY4rHgK z)W!DN=Na+Gy?_5+w2?vk1VT}FZ+4)xlZru-Gos1D?)+^_%l@UxF#Wf8zI45@PzVSL z8kG9u0U8p!X=!OYFSbgiy?ZBmOuWdWhL+IJW)8UF9fzPAKREXL6%+61O=3z$+ zx9=UVx(G4(Pti!DxQBtgi`Z*g=rB~7ZCLJLP-J#FETaFNQ(Bqhh>81l&E{O^pAYvC zr0{EtW2JY##Pd4N50vH}IB>vqJVw`cez=<1JN)4u2HS>ECd*!{)};IZTIL=vkrZAy z5S%~Y4PH7_Dw4jsMAU38Dcg_K7#CKr#t+{g3$x|W6-1cN8Kf)oSEOII@*%HJl-l+3 zZ_jzucCJiy3cNFFK&V*`!1~aL5qb-C@v-NKFGsfIBM-5$<#%;yNi8>DjBIiA^ji3H z4+Km4+S+}ejj2p=@3o@C{6<;aN%!SKLP9paKNfH$cIy&McQ)dydYwpTB}9<`ooFSB~S4;hODxbLC3lKK0%L4>n-7)|}95aqhDneQzw#MY#F={AD(A z*S1G{5AsS)ERHp~4ceDa^;;#!G>xglW3EhPCQrk!+89jb&(`F^+=5D_G=+#*uI=?! 
zzaG3mZQiFS?oFX(+&(*`!TVRVo8+k^`FzQQ{v+&t^-~ zLmt%CGg$3eW3regPKJ(H=~cR&KYsl3L`$NkgM&k9gkyJ3%x*dsF2(8ox0wYMKUowt z3Qej@><2{@HQe3H8lN0Z8*7Rn1abJPIXgRtxn!iLXI*=II5aG*+oe^C8(%Esv*Bt# zT&2$+r@Tp;ul;7>_-=G}g%Lk-s#BrA-=DlJYthJW_ON<&z%KrRWhlBStui}Cu+iO+ zKGvU7Fs@X3%oij|c*NrD;jyNNtG#0IhOydXF61rJxOieqF+n9$6TXS!x*0E|7$Lop zb>ZSg;*TiRoT8$l#))%MyBB85M|w`h8Kqj{W3#iet{**m^z+A$S*vgM@82JD2ifvR zg!)X$OUy%F^VOB}HJe@4ULNQ4ie%rsd84GP{MJhd$=mF&hIt#GmiX0#1KE8 zjIbUkv9ZKQ50}G^3)^&DbtpGfS63Hz`FSJJ^pb#pDw~A6mZD-v>^Vc7iHYElkZ1Vu zj!>qvpTke)&EJ)^XT%2b^73|svq=b^!BICLh*OY>DqGG_AF*jU1P|(~^+= z?4*KiU(v15$B%P6XV8%%cl%Pv+naXFKflY;AMk@BODlL`2k1LIsvNiO_YI z*&9OSz_R9jo#j?Z;2C6Fbcv8$3U+pJ!Se?(aL{*UA!>HYv3!EQ1P(Pe$~Lvp(NQHO zrH)EB!snqHNRiICN6Oc(UgZ%Nul%|TK^~p1+0$m$`TAz(kjKcmh^91usy)0?QpWxL z{o1~lc@e}~l1BF75QdO1?lv9$vn4SX4iK-jvXb0iX9daQaZDR^+u((F{|?w?9QHl}B*Mm>A>SwdLd<6jUvJ zw)GPpf<#|x-Vj<$cx%<#^FDyqPI%9P3*_+>6LEfn^v5RN>h$=IYp^+kA3|h|{T3T` zKduw8AK-z6@#4jcfm**>odP5MIr8`K-wk9YzE-&8Zt-_%xb3Z*mfYD|9w@bo^CjU% zY#{kD8x@!%l*DULMW-_~F_;R1`e2#1S8XlqkFn=?jQG88*jPUOREha2O$e&4cTQuj zkS4b`dTV2w#@gFnT_KLgnJPt+7bE<#AR(cfgpOS`f^5BafaF5YQwgI7YUMTT;mQl$G#->c(S0<-LT z3oI{E)c^oMh|WLAn|R~GwZRV&TVxX5#s)lUH{`l3NXpkEp@6?e^8r-0&aP}tVY?na zv_PuLY?r*g`SP@4ez|o6;RQ zFMDbDep-?r+0*j&?JNKo8-+U?0{JG@1`-}~(Xd>!0#zxFwc9$hhxytdU6f#mwmz%< zR>EYNs?9mvbbDG*yokNQs<&g!`knqNRmsIi1-LnJn9O>n?@eSnCL$cK2@oYtv@FlO+u6D2a{6$Z5TQSoq zK`fK7l84`jLs6-;^OERFU}}_Zf)&_EZ$w~=a+9}zYW{BwzfL|cJJs{vTE9}#o(_Oey49p%FQx0()x5pE`}cB8O-)s9uMXl>QnPp@ zC5-?EbpwKlfg=aV!?0t0RnGs_CzT!ox+-poYOa+r#yN@O~)##2GzXx8nbcw z8O_%p#}=)jLa~UuX0v^heUtR|6Nx5=AsD>a+m82X4N&BCK<`j;>J!-5@RX3qTh@uM>- zU}-{hI>G`y&h9YobAU+$0eL`+!~P3SkZSUqW*B6#_eIpvc1P(VDW~tkQTZ?2j}?&N z!eeuM$8gk+j>_dX2J)3%)y4-aM8glzD51+fT;^+-(s0yM+^sBqrcC5!;)cC>bU_tQ zal8U$5fyrGVli=$QffQmtMekaVrEP1CPW#tOBAco5*Asd)VrSHm>>m6G^7cbwXU(-TqVnK)(P^b8}aYb}kSc zyultbCnrG)nv?sn2v&U1YiT5=Y)jj9`fkW-(pI^tDGB!6w)1t^`}ZvzpSpm7q4z@z z-hqP$g*}&U!w%6M6S)B!sjZ0D>yX1i8A=lZ`-f6(f z0_9KsKDlIUZ<6AMF5Ujf=r-B*r{gm63e^C*^hxeFyIyC%9T=*!j~DNTO|PwW*VB6* 
z85I?ikdOy4YD=8%9-lLXFbD1%{p{KCIDE$8a|Y2vy;YtrwXIZ-4unHMb-lokwmBY5 z%Q6009-{8MBV7l}I|X$LDACm5TdN2_7bh}1vm+yy1>@u5GM_&`0?8&{?~NSL8f|G^ zSdW$jv9aEDteFB@gvUQWK0yrfcPO9aVB}W}7R=M5<|9D_Wtad$kR$sj3 z?I8W+8xb?^j+tFmn|XGwgunsE=4t9JBrL@fnMlbNY}4u;pV#yMDp2yA+R9|DdJS z*3;8-A0iM3MCq_*2YgrGy$^L3xW7|zuRJtfXWLK|>3fJgQq|DZ6t-$%1x14K_WM?L z-L0UpalSK^-!7bLxwq}oci;pg&50SxKvj0nANwI`#TwIdNZ%!0M;^%PsXLUyS#F6; zwJm4t(FS(pP&swl7&DcDq8pT>16=Fg@Z_)fae(R2p*0_A&gXA-F-nI72diDZ`UoBw z%aslL0}!((`iy3Y^SF{?2LzPsw>pYbTwHyUY>Zogk+UvZwV;R%NK9lpBnK$-JQ5Ov zKR+QUcaDkNRRc643|OXkaDzz5eCb%D4ee+qhcBLCr;=sF&k1KVTE>t2X0vn1sEs)X zd8u?U%sF)P9~@i0owKs;)_L=i4e)rD^JdtVL<#PQrjt`!GPcRyONuuW#kFAx+LCv+ zte6fTo`y#mcx%N#L1XcT_6Wl0j1tKqoJX9oX@zqJc`2@T5`>DFST|8tULKyd5}VGI z7KwRl(%ewxc&y=PpHe0>7622B&TzPiKoD&Di<8}tB||9${Xh#2JxDD4iWtyQgj^@p zU`szvxkAd-*&uJu69MlH3X3xy>f5ys$$IND>JTGZZ@0x>2cK$d3Oc8{U~960Ok`hu2V7f2NWoc?PoB!ooeAg51+lJ2N$IZ@PW94-5VGI{Zyt zODifmKS-`Z2^F3o0wR^LW)q;~eX=#X8}CwcPCr7f0$cvGb@blHS@*L56Ut-?{k93F zWmOIq&I5C=eAhywIm;zrQP zH!N2Hg`+cg|H1~MVJSWe>-m~R{`M+|l!X7iZ zM;C(XZ;@!+4&-n|*PcIATeKEnL@DisBAfp`% z=JU7pV}ZxK22#W$EnV~35>g{%emp@S@Z z2sZG8AU3eFO;GMJvp^4=dOL5)rtlOI(bMNpnRr+7*c}VnlvishyO25~h(F|iSn0pz z&VX(Ei5AIMJ+Mh$kHnosS=}SdSbM~%!ZoA9Q|ms*D_5G3nCeK8+??l|Lp$7*6M$l)kuQ|vffSe2TYgi%WN zBUfNnXY|L$s(T{A>M1ZXvuckbk% z`eSe~D!!sMlsMU*_SsVN?_*aa_)9`|l_^*DXM5&PpE}Yac4JN;NKOOjkW5TWXCx#H z#l*xmHb-^}U@DY4Ys;GfbBDX)3%sg_pN7f$)s9DmZ6u0URZqUBZGJfsM{}F`Jc|Up z=wTf83;KGX7goB@<}jOjziG4-+r3aVWRslL`{DS8XS}{f1p)HwTHybBK^_Zzor-+# ztl_y5)kBvSG1|?=k-32}INzDTc-0&Vg!3tl?3L{)RlklV4!=C$0lXJ4YN7&0W7sjb ziA0Y9DTC`gVYNJo?JYX>+A9mq%Z3}Uw{~NDcro|y^b7ib88In<2j~+Q04c4B-|Px1 zb)qaSoL!0+as>+aBK)?}3aVEPlGz=X_`iiNdNjo4cIVl@Rl;vC3^8ozma9*DZBUJ8 z2Nav=r42LeD_ABJ(;f+9lL@DMY+QHeB%;p!dUs0UqVDJ9{nwGyH)BoeX8W;sAd!4a z8)!`C61E$>q3gXK>t0qVuP&bUE~NLZ#1X_yf|7CJvhnxAL;ImqAVSTl_Hg5a>6sHy zcW=zCt*KgX^MzpgFJ9*G%ixv&#=^!0_xIg@ zhlJ4BS5#DlhKHAV?MvSB#*bi#?O~ZR?fvdwX4!M)Z!%?RtUosu}+(@cGgX2SHclW zWZ@VkW0-+9xA!B1t(fmKW(c;H5o=u{H+pwu09_8UERw2j`(#l!lm<8lGexj`reM>r 
z)m7u5=44?y_wHpXnEMFTw_b0?T+jU~QQVcJ>`3e15o=D~0}@2|oL; zy@b3(J#)0pxe!;*Z&rr7A16>Go#!3un&Yr~6 zQRNxgc}9x-j%%tfYa|%`g$1>eaR;|uECRIPAL|6_{SQi1vodjgp zr7TdLu9I)jFVS4(3DT-!>?t%4oRyqjSUn-;TO~mzJq(tSITKcQ@|jbp+A~)1+LoAv ztEg~T5+JgVH;vTu=kYG^mkiSEo-{}G9nh%O3^b6`Tn=pSTW>k{vMH46L#}s0< z4*NxN1zq7ilf6T@?kvhGEO6-2*0yf9v-mhoo4A76B72a)7FjI(DIk$6=S*viP{jHzM;;2T0rZ2Oo~|092xmsGxWd zD(?6_Q4pn2qBJKxwRO1eFG!#Z6`wrlriR&P=0@5)bG=V$H4Q0np*}qCwD#cF(Wi?B zjtV+s3~m;%e}2Kc?|*EAw5tU&Q&?+zTN9%5UJy&(8P%(Iii>4+)?=kQ5_&(Po>>Lg z^BzGCvY=i7>XiPzU3XKU?F&RT4kzR|Y-B%BvUrBfay4MA-PT9Ow(z?k_S2{F0_N^%KUHP<;0yU-V$q)QyU5Z!c#Mi32g{PyIUzOc3DY)D^Jl#7H+b8w&F-Ohj<(+8} zB_CD46v@)IKLhN#38aUVs~cGy!($7(Aj>xbY>d&`3OjyxFEzF?0HIbp1p&<>K*?CQ z3k?@hkXv#fLCKCm%QtI31b7%N1x+0{oDv~+YEC5i*C6m3*apy0W7~knX9>dua>poW zQdA)9X^g2kvEnBwDL92-rs-dU(3viwgV31-&_Qf4Q|)nd5X`g%#~k|&W?Dec)&q2= z%WznK$whR5=yz8UN^3QE0%{IQM%jnxg$yC2Cnr*?mN{zhy)IZ*P9l(HPbdNH>K|V zd$H|+Jba)_4RT5pz*-@*Is|Q4!Lfaab4D~(=diga!!i4yrN z`WIusx3=m+`#~65v(PWj@7PBBMG0aqyhBN7ThCg?e&D(a*+$o-VX!xUxz!GG7VqgR z*53-JTe-M(hnKj|2;{}kms~Hg%b~R$nA>CQ)HEE=T;&(ll}%n=hGh1ut5@zP)3pCW zA0>^V2DBZ!b9DvKCDI<=f!1j>?Ulmb+&(=S_FN%HlWver2))TNeLL+6+awH)u}qC_ z&cG70&s@pImR~RRh}Zb_#`S|z2

ChtG2*jx^vvZg2y!@bl(LU=eI=#&h3wh1sa| zhURWP4u40VPqO2LEcfL)-jWFQFTHQ8P!gLWl6Bl^j2_)=aLe6p(!p z*;BwXeo2hJo3k4Y0=}({h<|h~!IU1%9#OJt+;pA`3lsST>d6TVrIt(1beL70 zTIuo+6iY>lEu+gL%O1zS46MycAfAvD6R6EaR46I1verlOagtOtEGV=6F9I|i`u`pd za}eMP;yrdxbcUS*X}kWHV)oxQ{r}ZH_0VT?w)~es`TFKD=%C$zF4Mppi=h0AOmTV} zR!uFKx0?dQfX}(8mAnfP{vGs3%MNtaK;~^j}bR3ds=TPUCGz=F+FnOkXCUa2MI(MCgbh2A%~{ z7H8^P(F1Oiy$4TQFa7vrQ0ZzD^S6?Zx6&UWSi9ns08u-Ox(QV#O5-<%9Sip&*iWLn z+tfgVfqqK7j9(2`wra98C|wa99UVfT{VCxyU%$2meVFb7KBS7WmVXbhnKf97jUk04qR=dgh+lJ|?y*8g0YK<$=>aiwdv^LlVF z4K&b&KJsmk8RVvs(!;Nnt}i~XU1HS<5&w~>vGcBnZfOHSo(ID9@#1bdWe!6*>&p{% zpg>~a_{StA6+}lzV_UyFiwor%Cvi1JSZ$NjSNOi^&o2FNc3$O2P|(VfV_NHLfU0!! zJ7m?x#l?6?)Q9=>QlLrP8S7b)d{NQnnf~-skDRZwY)dNZ#It5Vsz<1T#ci|ZBY64W ztmyZU#GmP=JkPpLjIf&}o%8c%;ABi#b2_Bw&5+U$4f@LsT)dSSHUsbkTar2>~VJ!m<|KI5{w9W$8)>Yu=9S}tgbL}9(P|?2i zLi#KKXY=y&^W(^^eq_yF6R&)*_*jf-AqXeXp(|i+D)>r7?IEa^4PAE2&a3d18jWlg z_-fEbILJ_-)aMwz{#Bv^EPJa>98(&p5X84p5ZeawXmnIm8aSd%@4WyMSjEs>u`*cr zzU`)l5#vSZ>d^ENAR%bpzyEF+j2E>0n%#%%z$5s(LnzK@vGFCK%y~CXjhy>goPn?B zlK1GZj>MHE7gi!r+j_td*THbsd(5#)ebyNiCJ+N?+TaaRj=!Ppy9W}H4x|eQup!|T zKh+lV%ka3gw8W(d*jz-_ zoW6X}ork7VoBL0CzMu3}YKhgyC(8Jg;c67fB&dYbT%9ZN8lWDvA zUq}=^mspTqg|95pUoL|JhzRF%{0jV{c&SZBh==rNl8N%-Acf(Sm}da-*{n8;t`IQ z@+xDcSEx`1B_6$G#CJ0v?U9w*bjqW0w7GdIMi_JgJx4K5bYbZx<^0-|Yx{F5WEK-64$H4tlb8`u-OQN)o0f<97;Qm_y zS#9dEKusI!OGV3~ltC%pLT5u6vWaFDjmVBydu2J)Q&wh5dZhvw5f1&yQce#JEbQ%PIbYD%b|r0d!eW zL&&F3pWGuf&YJoVV<1G*q5nAKK4`zOK`5MM&z6UFGU`EkE@*M+;K4Ms$&wLgQtXvy za0c9X&@Ojvl_D|vEr29)c?@l9z-Gu4rH}xva*>;syTR9JKOjVR^ukrBP3{q6 zQO(UazzHG;*(Uz{tp{ME$_9&2pOj81s$0V032g3}(V*JBudO3Pxzv5upc*g&n1G*y zw=!KbQ>7oar-r;?mywY%1-F=_qW|#KX zV~|>T3eFZE)T=C|lK+HNtazr@3oNzZe#?Ob|Er?~uXl_)*97`?VBrIgs(|8jXJ!P- zKuSu4ppV$|bIN-e9@P#yu4uMEALKT6fyBGB#xueQ5I7>2KzvczQX{+>R-jIbcYHxD={$Vd1_A`8ZH*Vd!g#vi;gdbS} zK0{$D^SYW0rX8bd46#Gk)W?#<(>)D)lHtebIzKEmsmgm6+V#g#cMNIBBN-lQ{|RKj z(EIv&u>ESeHyWbt4x6@=%OW?s?}PDX4~Ms!W7T4y^x8M7hK2@6Bh_Gee*az$*Yqw0 z3<+mJXN5+qB{LW?P=6YH29u3@#N=BTGdqOSg#az_inawu=DQBvV?OKSanoQ|xp}a= 
z<$J?pCWtSH$3N%P+i0KzdD#+PrV<=m|LqDkO8V5hea9dDJg}T_z<cD3_zH@0DU+Od_pRfP6aju;o(tb|IVhs3jbYF_tKOn!uqofGUov4pAQPfE;LWcYf z=o&dWmc0EjbbLJxYm{L+hbM3ZTP)iyfGuwTA=ky2dn1V11b{F-F ziDbWF^qra5*&T?pgT3g64V~ra_mwIJV;iK=P7+Cv1MK*EJNT7ww!kszzn!DG zK+>XU4}zRVwj^?#0~yJ){j#hMv$uWbdvBxwJJh_ zAcv$#$PwiHq5ps3|K>=J+PER)sc+w&WfI3~+_`hdQJB%Sh6EvmnCjNo)v2W^zoDc1 z;vQnjiNl8{$p+VtE_5-GA*!;nL5yk>-`~sxk+38oZ?!&u{>*laMc9BpvdOY>$hCAP zNY%Q#yW3Uvyh6e(&o7P&;tH0y;I0rfasD91;X=oSf}958TjZ_6FIGIw>QN8-lfhBK zle7qO`vlc#1i{_m#KErohZ!Uh93xGDUZIugbkdu}#YLNqzHjxD&fLPnudTOL`Qhdh z*pQje^?5|6Q~BdfF+69_UO#i@jH|3~d?Gl|r-y!iD}7GQTo*5@ zE5%$c6jC%XNzRHNe616a^6(+)8&MYpU0vPHNfn>^>Nqtju&YDIj~q#Ko$2W|+GX32 z_Wzcwn{Q^-n*6k4WBk-HI=cBKX|8Qi1lf0xifV-6z03{=;sh;2D|R=hMa`px9o}|i8|u$)&-Rz{h>GfI z8+#S5&J7w5Uxpj3vkZ#!Ot7oJy7dYy)ZXNi5MgHsX^^QdgP7~P+440buZ6GlJ@4-Y z40tXnl`nl43$K`4B>m{VaMxwJBkNjz#d51+Pq|0YY~`M}QoKa@=M{bdoTSuSbe8Y_ z)Tam;nmqlky|$rjkqs$hVg4z?_q35_2Vlaor-Msjw)$K7WnHbKZ9!HAAw5*Jjv@KeCO= z^~fk`G~lUkK}?x_eSKEu%%aYC8YWS;oX*VQ>VQn2z1?Slf$b4|`fT!-%)b*#T&C%c z9!)ZBh>#C;o$X7v&MubeFLe`edvpZP7e}@q;p}`!Vd%V=Y*%sZ?A03UBU;JB&O+ABD%Px7QcY6JS?~dl$qcX0;RS^CrY%@S=(&6pelL8B=iW87)vk@fAxNmlvIYL?vu_(|zj>dkbmaazo_YB_dA zJ>RUUz^sYk@#Dvny>^xDSw#aGS^7m&a6%n$R0VgxQptZ!!&QRmq@<jv!u_CwPeRhk%Tv8uBBB!~CY#TMli5FUTW~(oO zyQIX4dqk90nCfcYUuvCepr)eYj-sKWYK0B4$)mew_StqYH8ZpN^pK*yxjDzNls<>V zb8ly@Jxl*eN=iz4LW1PUlPBwIYw^qFiHV7(7m*cTQj1*7?m}?MT*a24Wh>jtukKq1 z>G{KIM%5G)p7pmN3F-?A3!+ZrF9xvFeDa~4)9o)#UJ45f)5!b&)wI-YPU{uB(wJ-M zvE#>6ohH7wZ#p?SI?{oA1N6Zw+p(vHhlgSRdc}^WbuN!wn)VUMDSoI@P*C&~JEh6I zIH6{>`{W3#0>9io#0PvoSu<11a$)4_R7+C8-Fx?h?)M4r)c9=6)`=RHIJf*rlyaGP zrl1fIN@hlfr?qrQc)5}L=d@2Uq zN2!^F?Gp%_8yg!b$}aBZMh~tsgJm=j9XEK3>PR=X^h6rhwUk!FvkgnFTHZ=qb!Hoq zv1i16{P+>u+wtY;QJ1ZmqEkL?H;jzp^78WNPM%CpRf>tw8wb#KEfonm_3Vgbwc##? 
zsv)m_CtlXkF|Y5wTV2EG!aDa^1i_orNlL-TS5{8DRTIo2Ahb7D_v&04E5FdaZ--zR zk!64uBbj(a^`2rRzmWGbE&dQ3Eg&=M+{2oEFr**7-*>yK6&p<8&5A!wndA%WrH)P;_;kb z>%#$4J%yQI6Nn_)TZCcu6D145k5rHsokl$3 zl7+!oK~jVQKt#~u=g`5*lMyh z#dKUHR#**?M%&rBAiH97N})1(b~>lBqduHRG1a^|E(?&eos-g@3!+zo^4a489mjIY z&IXuHK9hb<82v=)6YEV3-^b%?+xtONM#lf*rAsdJL)WtO3Nr|c5NH|foM!uqi4q4_ z-uBQ{)tCKCL^o6ej^xtHB~(ao?pmD*=g+Z&&x6Ji0NUgf#mK4b(pmNE*B`Rqe4TKV zL;2@;jGdN&K`iRi0u}4mLzmYo_YBKD*0gMF-bt(v`Gwjl!LGo?!`W=R^Q8UuQLDrD z{`ljMd;MBySZL^5W^wG#9_yTT zScWDs{?5-IiK5OwL&}VD{ApRTcXzgUE?@p&BIr2Y7^PQq|IOXIcMEK~FUq%megHW_u2tuK*X|byLih&N-hwk1XAMnNrsobvP{)vc1;&-Sj@%C!*m+$hyO# z_k*qG?n#O5c`2|AMK~Cq?t#*|3jNSEG!)dnWDS4$GI3$FMllqn5fBTeAyl;X6+2B& z*Bp?VXOu)Fi%WQQ5<*^aWERfu6ONG~#!oaArsLSdv-DAg&aRH|Y zbq0r@hIqhRI0LZrlLNEy6i5Y^yj5GOQdBd>eh3E?BMBgie(xj|m2)-q6^pIanP->$TsKtNX zt2mH?$IC@UMOpQi6h3_Tu)w^T83N-m9<8tkgPPigDN+G6;eAnCJ8OfAq2Pqh;DVtp z0G44GHV_>qw>OqhOGequ&z~05%vW_NX_*Pfv<(u|Z>4eb^DCp# zKle^`#+$Y8VsixW$KR_QDAQ#Ubq--$nd#M@XpYwgV{UD2of_sKYZKvZDA|q_N>DJ$ z(Y{}#4>aDf*ub^s!ozBZjg%E%zv>ru?Iy;P0I|pU^pfENwNGARJuaY?M$ll|nXz;E zRk7tAyW0eLW^1yQEh|5Nw(?0;t$zwF-yHgEP$b6Z-+RR#&W(*!A)6P7C58O<&l#OP z_36W>KfWYw*L>xbk{3gc2pYWr|3^;vll}vYf82K%Q5B$i&THGSX~f~QNXOivLB`r! zdy?)W?RiRuODNEO@7NuEJziF%Qj@o&FfRQu#;QZ9T#%dl^EVTuzMAVce3ExNH5bG~ zK4fZayhUp%YZE%lpdnhhh zW#!hk#;&cVmr*tw#9o;w_u}Tw7_2#gLT0@8W;)m=7)*M5rgkO}2qAtP&_z1Q6Hj3? 
z_(EIV!N`*vrD5kLbzI{iu(^(86j>%@2Ig(2>N#AnpRAJ3tpSfwqVx~ z=fDMcbzRYzajzz5C4zFlAnI~i)Gut3_PgC;YW~i%CMp?_95g#3`Q^6a!Ww|tLdCNeVTbcA#)&} z(tW6k1iVU4eL-#zsCav!wKfpQbO09h1thr%Ana)h2l~lzpIq#hk6cb~*as`40IJtRmI-p{6b%k$`yy5=R6Xl}R9h{j+y*;`}$4+f-bD$O5J)gG8WLjxFqe33+<3DncC|6O#$h zQ@%d(+*KBY8HsO(RsITca;m^C`Y*su)F=L;)Ue|45!>6I#EFT>n;zvxr`nKt9`j&1 zOl-FMgQXAZJi&8|2>uU>!|kE5vF}8j#zASoRf>4rz+TWQ2{B#}{gl`$CC258V;QD9 zpJ58QSpE0!{12E!9KX!UA={s5(|81Y)E5@(W%gU6K?lu&i&T_Wg{|zuy9k zTM(iG3bO#<0w?=pfQ#%qbRq$uY%fCzSu<6O*!<%62d2xxPu<%XBT+*;?aaXHV@j1qw$1j+y=O zqp&!zH3fP3mIBMRNF+exsFTV{p<)rnGnoAoXFP;P_toekKJV|wn$09O10Reqm*GB?;-%hLQZ<)v~jJiZbbi) z7k|`lDJ0xwkxjTm{{Kjo71V$v6m_HA-1CnE0<0TfUqH>Y|2@DBEs9BTaoJzK+(4tw zbZ1T{C=Y^nzw9TDkM}T9BZ%Pn08`k=ncI~BXIxjW8ca@3Zf-RD7!f!Bk@HB%?GLEF za8RAe_=~WEI7*g+ZZALkr9K{GCxdSv4 zK3QmJs58VMPRJYkTtZ*Hf?QhBh&i~XhKmG|^a2nId;J2 z)#+p`RAX&eMl(;IkhO5hV^6k(({N4SxNcXAZ_iij<(mac>@2rYHN>+)!ptowNC>Ed zw@M`<4E1@uzi$EfLPKP2tz0Q9Kr#zl;=XV}tszpN6LOJEI5j4Ww?QZfBJJ;VFU7P$ z!tMV>20g>G|3rActO`^KT5ktasJ zEd|)K+<2+^X9b$>80K{H6!m8P`ejpWz|WFQh7dS{M$Ha>5JP@x>*(P3b_Vw}@88cx zQD{~aObpaAfv8n%dBrnI1erRShNF%EHVHy07=gws4wViFK!i-~9LG*2W#x$88U6>L z1hJ%1AjryN5b$_7Ie$P-hbDsMJFC5VW$y1=+u9oVe2IYw_A0Y76C#Nlq08;&#Bm;B zVaWjjdOBR&DBaPJh7md3B^(qJc&m!@j;>a^ngoLU5Oz5FB$3(U*M)OzY_+2coE#jo zJX-I5fO1seI-~cOZOcGp1GU7tS$yp$P(R1g{;A%g98^O&WmncRYk?pMzG?{d3aA2r zgb330n~wZf*LKKznGoNm8P{H>PbVTwyO}--(*#t{?l1gYL7Kq*FVc(uO(Xe-gyjG1 ziLlsMUWki8u6md#aPi!{dCO~3X)E`?xovJGw!x3Tg2)qS9?sAp9=3xOhUw|)=qaLD zonDx~n=F#{*}1X3!o=v&T9{d3a|>RCw)gcRN!tscnS$0i2})0EW==&0$i%%&lsFbW zv1t|TX#w?`&pqMMK3CsmG1opP^yvEJah=4KBp?jDfD};c(em^xL*J43`K+Ro^5eQt zwtytU#fBqn>Fy$Fe!f+ZGf8FXyuS`wNGlWoG7G(d1}rWuDW;Yi*-1&04RN43%v8Y~ zkykb(sJO>kzhjM|uRM$P6$zdPxRmo)?Gf?CT2=+%+8{e*uEasTp}2oGG_q#YawrV> z<$94L$EcHR2*ZinhpMp3aaYs3x^yTgDS3p2bpW%m!;k6d7#ZI|K7Mg7>cTz$y!?Dc zBPdP1KO=$|Q_*G)2s~*wq{ZzG(dGKy|2Vu4MCXX;wfMb__>!rN%w^-f6=TqRO$UxL zFl7Arp=vBmK13OvwU;J}Rhhh@)9Yirq#a$&tzHw`#&Dwho4+$M^^nv8jX6NY!=pRj 
zN*{#e>_ej`Vpi{I`n&{lULoIOxrOW=^Q&rZZcS1qt(R;8@I_u;s#RO?N7m1w%t)aQ zy;X@h{rsC->*;3LPYWrcOd`+G$TuyR_5Kr9UVg2lFW4oA!9qSa-`^=N6ABL&Ain56lMB4L5(YlxP23aG@ z49L`Wk$li?s?LXD5cH1yL|WVpxNuK(7coHpL5BY08Kkd)j6~WhBG~2d*m0D+s7 zV3ntp5u3)Vw8h7Q3}CA|_5Qb}CGHVIw;R=*#C zU96zFws+7XWkLnIv0#`&kQ~C)I#)dl&i^`W33CGuCIr?vWX|b>FfhPm+vvUp(6ds3 z`Pb3f;5IA_CxA|22D4V6K=-RVpJ*?vP!0tg3pd^>VHkG>cY5bGHvmIAbv&0Ywav;& zt~ADQP*BLLh-+f816MtqG>LhA9NsRKZ`o;6wn^rTZBZ)mdAL=4!(^cn zv+$_E@>PMuy@lnJrJW|z;^v(VLznf5pwO>sl}s6j>sb&M2ROQJCen&4pbVjPBmi3~ zYU=icYkq=0yPk#*h~e$n%FgNV-We@d*AE8#+GJ@I{72~6G+ny%-haNLYc;aktEi+G z${6|GUO4&fS!4Ex!zkuMxTj{256K0pcX$Bp1rU@2CE*qnGM%AnCcZ+r#c z<_Fu}2wluSu%So?UzVn*rBOLOX?`C4-CBo@7sXC9Pe`g%Um({*!ys{l7-J2>p@M<} zcfKcRj?tIQm7_Mc$M@?mPo0|?GJaL&9f;ugb5R4hA-L~?goHXOGcle3KAZy-(-lL+1i`rrj}Y{_Vl*m8dS&ds7e$LH6t zUoUW()^Y)HZPr3km%bt1RGwB`!ytk&Xhb&iow2be+2d-pZ50fbyz%Rz+iYwBag#%1 z{!1UOmKyXJ_QM1!dfIApegPkWGF5}eY zqz&%IfZnyOoZC-=2;L3DocMeM*%Gqnf;>v_5c(26s?)elbjnJ|YE&!H$JXGnDOz}p zl$7GIEQm7M05z~9bOzAVuPWc-$NkQ;YH9PK^ThuJp>{QUIK;$ zsbGEWQX6DA89&nf30COIXuylO;oUeH0Ja2J9byp%L-oIhp2H9?`k)n^q#s%i_$}Cc zu+f3^LQ$7WMcNXTXl0dy*_*M${#9LFb>ULYA#JSiF{BqJ{K{ zI1gn=dKbN3=gTJ3-%BA_^LUq^m>xW5KX8?cQw}?o-rBY_-W`+49XY`)rd2Zcd(n=c zTq$wlyIYle`pm(yiV?hAQJtW5czoWr*8}xrLzI5;d8ajTK`Ax*^%WKDV%><3&&UDG zHg?O=ZzGe66I(G2P6EdKGlP4hi>otSKFAL_5Q6i5(U1&Iw`WGm1Tm)DRqkkkng1?8 z4*Ahe?QYHGbZmj6_1!Om$V+rAo;p5mA+x|~d@N`M^s|OIm#ugEeh|MgI~pv;zdSeD z+G8TZC|b6AFn%IK!Nrkf^KI=!vDHX}KuYiZr0q}MLCCsk`q>pnM%k)6IZQ z)uD)+T*K^f6ACMB&{jYPu^JS|$iD&&CaCp6xsma=a5m|i%&bgnbJx#?^Q(o91TAZtv`6bvH%ZqhTo0=+XcEADs1+ESD+NbEFP6))?RC3g_0E%7*!UP^2B`a})h(m@=S96BfPpmEIT+6*yux3zRh? 
z8#JdL`@{%kE)t1hyp@OX#h^E!VF(pw06Wm01+)~fNNfm?j?RE=xHO3(u0bWRrbfG> z&zRf_ogdIk2+Ap7ZVG!#n7t&ZpMrj%0#Xmb(5;`ib-t-r;ZtIG&QV6avN#MD3}&`( zL^c(|QkL=F=yG52?#BKTT=q_6$kdCPssyd6l?&(97o_5Rgbf!K$8C3(PU4W$%w&F5 z)k>%5X|t}r9dD`A#H{kGW} zwj6?^YYk56ugWCE^Z&NyGAU5wm1qKz8hI{U_-h740r|*k7t}04o>5HQTL|{~i3-~v z9Y$?#Lhj}X%@24$hRA56+}pk-ZgbhzIprOxH(h zPIcPq9AvtoV+-y!$`2)f_qySbD(}0&v9XNatNR>vG<*ox)zn@FF^gwFrXa!?gzf(T zw5w*cww z5a{}U%X3o{^39tgs)spO(7I9o`wSuoQ*HfjgIT~LGaf9BKYRML1uFkDL7M6J`%A(% z9*8m0oMUGP0WWYr#bIw^)0II2A^nz+)A&s<=-OhqZ1Wnr5uDfO@02EKa$mWk-O|#6 zg=V;!Aaz@aTF{s)pUPp5!|G;Sl`%K0Q(0`J1J2rAOqx6w18V+Hz=YMT6dL~g*tI4DO-Bl`D^^gh=weEdfetb?sLuKve3HLEJWA9hW58$6%WH!e zxw6|oeV(2bH-GA%o1F?w8;-}QN3n{D>8m@uq+)}wtoQ!zV@D?^A!v9h9o69aN1?!K ztU;hs18o?E_95tiGepC8|H}-tDGmzNmVF-`(5*l}-ttstP7hP23`p8x5fQ0?Hj#?~ zSZR_+gX^7@yD6VPUw47l5W+Tes{CAE7`>cw(a%&DGK5y4P6IST!J8G$%u=EIARQ>W zQruPDOsy>S9HVkDymYA8TR?jg$)#wo7D#>24iYF6DCbmcW8mq`wYzL9Q$`B7hky z2ij68dt)hOson_b!WGvVQ^>GtzeuvWFy@1pR-mEBOkoEY 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + t0 = time.time() + for i in range(n_traj): + if len(states_xy[i]) > 1: + + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = 
torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + 
logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', (total_dist/total)) + logging.info('Total avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(total))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/vin_8x8.pth', + help='Path to trained weights') + parser.add_argument( + '--maps', + type=str, + default='resources/testing_maps/16x16', + help='Path to maps') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=8, help='Size of image') + parser.add_argument( + '--k', type=int, default=10, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/test16.py b/src/algorithms/learning/VIN/test16.py new file mode 
100644 index 000000000..5734b5c90 --- /dev/null +++ b/src/algorithms/learning/VIN/test16.py @@ -0,0 +1,339 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=10, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/scrap.log',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + print('Gen started') + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/16x16/' + mp, goal, start = open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace 
too + mp[goal[1]][goal[0]] = 0 #Set the goal position as freespace too + + goal = [goal[1],goal[0]] #swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + for i in range(n_traj): + if len(states_xy[i]) > 1: + t0 = time.time() + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = 
X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in = Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', 
(total_dist/total)) + logging.info('Total avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(correct))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.debug('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) 
+ prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/60k_no_block_att3_vin_16x16.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=16, help='Size of image') + parser.add_argument( + '--k', type=int, default=20, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/test28.py b/src/algorithms/learning/VIN/test28.py new file mode 100644 index 000000000..1be43611b --- /dev/null +++ b/src/algorithms/learning/VIN/test28.py 
@@ -0,0 +1,339 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=3000, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/test28_60k_no_block.log',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/28x28/' + mp, goal, start = open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal position as freespace too + + goal = [goal[1],goal[0]] 
#swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + for i in range(n_traj): + if len(states_xy[i]) > 1: + t0 = time.time() + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in 
= Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy Dijkstra: %s',(100*((total-total_no_soln)/total))) + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', (total_dist/total)) + logging.info('Total 
avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(correct))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/60k_no_block_att3_vin_28x28.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=28, help='Size of image') + parser.add_argument( + '--k', type=int, default=36, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/test64.py b/src/algorithms/learning/VIN/test64.py new file mode 100644 index 000000000..e58306b9b --- /dev/null +++ b/src/algorithms/learning/VIN/test64.py @@ 
-0,0 +1,339 @@ +import sys +import argparse +import json +import matplotlib.pyplot as plt +import random +import numpy as np +import torch +from torch.autograd import Variable + +from dataset.dataset import * +from utility.utils import * +from model import * + +from domains.gridworld import * +from generators.obstacle_gen import * + +import logging +import time +import math + +def main(config, + n_domains=600, + max_obs=30, + max_obs_size=None, + n_traj=1, + n_actions=8,gen = False): + # Correct vs total: + logging.basicConfig(filename='./resources/logs/test_64_30k_64x64_600',format='%(asctime)s-%(levelname)s:%(message)s', level=logging.INFO) + correct, total = 0.0, 0.0 + # Automatic swith of GPU mode if available + use_GPU = torch.cuda.is_available() + # Instantiate a VIN model + vin = VIN(config) + # Load model parameters + vin.load_state_dict(torch.load(config.weights)) + # Use GPU if available + if use_GPU: + vin = vin.cuda() + counter,total_no_soln = 0,0 + global data + data = [] + t_list = [] + total_dev_non_rel, total_dev_rel = 0.0,0.0 + total_dist, total_astar_dist = 0.0,0.0 + metrics = True #this enables displaying the distance left to reach goal upon a failure + dist_remain_avg = 0.0 + for dom in range(n_domains): + if gen: + goal = [ + np.random.randint(config.imsize), + np.random.randint(config.imsize) + ] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + # Add obstacles to map + n_obs = obs.add_n_rand_obs(max_obs) + # Add border to map + border_res = obs.add_border() + # Ensure we have valid map + if n_obs == 0 or not border_res: + continue + start = None + else: + wpn = True + # path = './resources/maps/' + path = './resources/testing_maps/64x64_300/' + mp, goal, start = open_map(dom,path) + # path = './maps/8_data_300' + # mp, goal, start = open_map_list(dom,path) + mp[start[1]][start[0]] = 0 #Set the start position as freespace too + mp[goal[1]][goal[0]] = 0 #Set the goal position as freespace too + + goal = [goal[1],goal[0]] 
#swap them around, for the row col format (x = col not row) + start = [start[1],start[0]] + obs = obstacles([config.imsize, config.imsize], goal, max_obs_size) + obs.dom = mp + + # Get final map + im = obs.get_final() + + + #1 is obstacles. + #set obs.dom as the mp + logging.debug('0 is obstacle ') + logging.debug(' im: %s ', im) + # Generate gridworld from obstacle map + G = gridworld(im, goal[0], goal[1]) + # Get value prior + value_prior = G.get_reward_prior() + # Sample random trajectories to our goal + states_xy, states_one_hot = sample_trajectory(G, n_traj,start,gen) #dijkstra trajectory + # print('states_xy', states_xy[0] , len(states_xy[0])) + if gen and len(states_xy[0]) > 0: + save_image(G.image,(goal[0],goal[1]),states_xy[0][0],states_xy, states_one_hot,counter) #this saves the maps + + counter += 1 + for i in range(n_traj): + if len(states_xy[i]) > 1: + t0 = time.time() + # Get number of steps to goal + L = len(states_xy[i]) * 2 + # Allocate space for predicted steps + pred_traj = np.zeros((L, 2)) + # Set starting position + pred_traj[0, :] = states_xy[i][0, :] + + for j in range(1, L): + # Transform current state data + state_data = pred_traj[j - 1, :] + state_data = state_data.astype(np.int) + # Transform domain to Networks expected input shape + im_data = G.image.astype(np.int) + im_data = 1 - im_data + im_data = im_data.reshape(1, 1, config.imsize, + config.imsize) + # Transfrom value prior to Networks expected input shape + value_data = value_prior.astype(np.int) + value_data = value_data.reshape(1, 1, config.imsize, + config.imsize) + # Get inputs as expected by network + X_in = torch.from_numpy( + np.append(im_data, value_data, axis=1)).float() + S1_in = torch.from_numpy(state_data[0].reshape( + [1, 1])).float() + S2_in = torch.from_numpy(state_data[1].reshape( + [1, 1])).float() + # Send Tensors to GPU if available + if use_GPU: + X_in = X_in.cuda() + S1_in = S1_in.cuda() + S2_in = S2_in.cuda() + # Wrap to autograd.Variable + X_in, S1_in, S2_in 
= Variable(X_in), Variable( + S1_in), Variable(S2_in) + # Forward pass in our neural net + _, predictions = vin(X_in, S1_in, S2_in, config) + _, indices = torch.max(predictions.cpu(), 1, keepdim=True) + a = indices.data.numpy()[0][0] + # Transform prediction to indices + s = G.map_ind_to_state(pred_traj[j - 1, 0], + pred_traj[j - 1, 1]) + ns = G.sample_next_state(s, a) + nr, nc = G.get_coords(ns) + pred_traj[j, 0] = nr + pred_traj[j, 1] = nc + if nr == goal[0] and nc == goal[1]: + # We hit goal so fill remaining steps + pred_traj[j + 1:, 0] = nr + pred_traj[j + 1:, 1] = nc + break + # Plot optimal and predicted path (also start, end) + if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: + logging.debug('#################### - Path Found map %s!\n', dom) + correct += 1 + t1 = time.time() + t_list.append(t1-t0) + dev_rel,dev_non_rel,dist,astar_dist = deviation(states_xy[i],pred_traj,goal,total) + total_dev_rel += dev_rel + total_dev_non_rel += dev_non_rel + total_dist += dist + total_astar_dist += astar_dist + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + elif metrics: + d = dist_left(pred_traj,goal) + dist_remain_avg += d + if config.plot == True: + visualize(G.image.T, states_xy[i], pred_traj) + total += 1 + + + + elif wpn: + total_no_soln += 1 + sys.stdout.write("\r" + str(int( + (float(dom) / n_domains) * 100.0)) + "%") + sys.stdout.flush() + + sys.stdout.write("\n") + if total and correct: + logging.info('Rollout Accuracy Dijkstra: %s',(100*((total-total_no_soln)/total))) + logging.info('Rollout Accuracy: %s',(100 * (correct / total))) + logging.info('Rollout Accuracy Adjusted: %s',(100 * (correct / (total+total_no_soln)))) + logging.info('Total maps with no soln from Dijkstra %s', total_no_soln) + logging.info('Total avg Rel Deviation %s', (total_dev_rel/total)) + logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel/total)) + logging.info('Total avg VIN Distance %s', (total_dist/total)) + logging.info('Total 
avg Dijkstra Distance %s', (total_astar_dist/total)) + logging.info('Avg deviation from Dijkstra: %s', ((((total_astar_dist/total))-((total_dist/total)))/((total_astar_dist/total)))) + logging.info('Total elapsed time %s', (sum(t_list)/(correct))) #TODO: Possibly add total no soln + logging.info('Avg distance left when failed: %s ', (dist_remain_avg/(total-correct)) ) + logging.info('---------------------------------Done ------------------------------------') + + else: + logging.info('No successes either vin or dijkstra') + + +def visualize(dom, states_xy, pred_traj): + fig, ax = plt.subplots() + implot = plt.imshow(dom, cmap="Greys_r") + ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path') + ax.plot( + pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path') + ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start') + ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal') + legend = ax.legend(loc='upper right', shadow=False) + for label in legend.get_texts(): + label.set_fontsize('x-small') # the legend text size + for label in legend.get_lines(): + label.set_linewidth(0.5) # the legend line width + plt.draw() + plt.waitforbuttonpress(0) + plt.close(fig) + + +def save_image(im, goal, start,states_xy,states_one_hot,counter): + ''' + Saves the data made by generator as jsons. 
+ ''' + s = config.imsize + + if len(states_xy[0]) == 0: + + im.tolist()[start_x][start_y] = 1 + start_xy = [0,0] + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': start_xy} + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + else: + mp = { + 'grid': im.tolist(), + 'goal': [goal[0],goal[1]], + # 'start': int(start), + 'agent': states_xy[0][0].tolist() + # 'states_xy': states_xy[0].tolist(), + # 'states_one_hot': states_one_hot[0].tolist() + } + data.append(mp) + with open('./maps/' +str(s) + '_data_300' + '.json', 'w') as outfile: + json.dump(data,outfile) + +def open_map(dom,path): + ''' + Used to open a map json given dom and path, returns grid, goal and agent + ''' + with open(str(path) + str(dom) +'.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data['grid'], data['goal'], data['agent'] + +def open_map_list(dom,path): + with open(str(path) + '.json') as json_file: + data = json.load(json_file) + logging.info('Opening file: ' + str(path) + str(dom) + '.json' ) + return data[dom]['grid'], data[dom]['goal'], data[dom]['agent'] + +def deviation(optimal_path, pred_path,goal, map_num): + optimal_path = np.array(optimal_path) + optimal_path = 1.0 * optimal_path + + optimal_path_x = np.array(optimal_path[:,0]) + optimal_path_y = np.array(optimal_path[:,1]) + + pred_path = np.unique(pred_path, axis=0) #removes duplicates at the end (when it reaches goal) + + #print('Shortened path' , pred_path) + pred_path_x = np.array(pred_path[:,0]) + pred_path_y = np.array(pred_path[:,1]) + dist = 0.0 + astar_dist = 0.0 + prev = pred_path[0,:] + total_diff_gen = 0 + for xy in pred_path[:,:]: + + diff = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_gen += diff + dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + #prev = [0,0] + #print('opt', optimal_path[0,:]) + 
prev = optimal_path[0,:] + total_diff_optim = 0 + for xy in optimal_path[:,:]: + # print('xy', xy) + diff2 = math.sqrt( ((1.0 * xy[0]- 1.0*prev[0])**2)+((1.0*xy[1] - 1.0*prev[1])**2)) + total_diff_optim += diff2 + astar_dist+= ((xy[0]-prev[0])**2 + (xy[1]-prev[1])**2)**0.5 + prev = xy + + dev_non_rel = abs(total_diff_optim-total_diff_gen) + dev_rel = dev_non_rel/total_diff_optim #TODO: Add avg distance of gen trajectory + return(dev_rel,dev_non_rel,dist,astar_dist) + +def dist_left(pred_traj, goal): + ''' + Finds the distance left between the point and the goal + ''' + pred_traj = np.array(pred_traj) #euclidean distance or geometric distance ? use geometric + x1,y1 = pred_traj[-1][0], pred_traj[-1][1] + x2,y2 = goal[0],goal[1] + dist = (((x2-x1)**2 + (y2-y1)**2))**0.5 + return dist + + +if __name__ == '__main__': + # Parsing training parameters + parser = argparse.ArgumentParser() + parser.add_argument( + '--weights', + type=str, + default='trained/30k_no_block_dataset_vin_64x64.pth', + help='Path to trained weights') + parser.add_argument('--plot', action='store_true', default=False) + parser.add_argument('--gen', action='store_true', default=False) + parser.add_argument('--imsize', type=int, default=64, help='Size of image') + parser.add_argument( + '--k', type=int, default=48, help='Number of Value Iterations') + parser.add_argument( + '--l_i', type=int, default=2, help='Number of channels in input layer') + parser.add_argument( + '--l_h', + type=int, + default=150, + help='Number of channels in first hidden layer') + parser.add_argument( + '--l_q', + type=int, + default=10, + help='Number of channels in q layer (~actions) in VI-module') + config = parser.parse_args() + # Compute Paths generated by network and plot + + for i in range(1): + main(config) + # main(config) diff --git a/src/algorithms/learning/VIN/test8.py b/src/algorithms/learning/VIN/test8.py new file mode 100644 index 000000000..4f2946c84 --- /dev/null +++ b/src/algorithms/learning/VIN/test8.py @@ 
# ---- src/algorithms/learning/VIN/test8.py (reviewed content of this patch span) ----
# Evaluates a trained 8x8 VIN by rolling out its greedy policy on gridworld
# maps and comparing each rollout against a Dijkstra reference trajectory.
import sys
import argparse
import json
import logging
import math
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable

from dataset.dataset import *
from utility.utils import *
from model import *

from domains.gridworld import *
from generators.obstacle_gen import *


def main(config,
         n_domains=3000,
         max_obs=30,
         max_obs_size=None,
         n_traj=1,
         n_actions=8, gen=False):
    """Run VIN rollouts over ``n_domains`` maps and log aggregate metrics.

    When ``gen`` is True, random obstacle maps are generated (and saved via
    :func:`save_image`); otherwise maps are loaded from disk.  ``n_actions``
    is unused here but kept for interface compatibility with the sibling
    test scripts.
    """
    logging.basicConfig(
        filename='./resources/logs/test8_rerun_60k_no_block.log',
        format='%(asctime)s-%(levelname)s:%(message)s',
        level=logging.INFO)
    correct, total = 0.0, 0.0
    # Automatic switch to GPU mode if available
    use_GPU = torch.cuda.is_available()
    # Instantiate a VIN model and load the trained parameters
    vin = VIN(config)
    vin.load_state_dict(torch.load(config.weights))
    if use_GPU:
        vin = vin.cuda()
    counter, total_no_soln = 0, 0
    global data
    data = []
    t_list = []
    total_dev_non_rel, total_dev_rel = 0.0, 0.0
    total_dist, total_astar_dist = 0.0, 0.0
    metrics = True  # report the distance left to the goal upon a failure
    dist_remain_avg = 0.0
    # BUG FIX: `wpn` was only assigned in the map-loading branch, so the
    # `elif wpn:` test below raised NameError when gen=True.
    wpn = False
    for dom in range(n_domains):
        if gen:
            # Randomly select a goal and generate a fresh obstacle map.
            goal = [
                np.random.randint(config.imsize),
                np.random.randint(config.imsize)
            ]
            obs = obstacles([config.imsize, config.imsize], goal, max_obs_size)
            n_obs = obs.add_n_rand_obs(max_obs)
            border_res = obs.add_border()
            if n_obs == 0 or not border_res:
                continue  # invalid map, skip this domain
            start = None
        else:
            wpn = True
            # path = './resources/maps/'
            path = './resources/testing_maps/8x8/'
            mp, goal, start = open_map(dom, path)
            # path = './maps/8_data_300'
            # mp, goal, start = open_map_list(dom, path)
            mp[start[1]][start[0]] = 0  # force the start position to free space
            mp[goal[1]][goal[0]] = 0    # force the goal position to free space
            # Swap to row/col order (x is the column, not the row).
            goal = [goal[1], goal[0]]
            start = [start[1], start[0]]
            obs = obstacles([config.imsize, config.imsize], goal, max_obs_size)
            obs.dom = mp

        # Get the final map (1 is an obstacle in `im`).
        im = obs.get_final()
        logging.debug('0 is obstacle ')
        logging.debug(' im: %s ', im)
        # Generate gridworld from the obstacle map and its reward prior.
        G = gridworld(im, goal[0], goal[1])
        value_prior = G.get_reward_prior()
        # Dijkstra reference trajectory to the goal.
        states_xy, states_one_hot = sample_trajectory(G, n_traj, start, gen)
        if gen and len(states_xy[0]) > 0:
            # Persist the generated map as JSON.
            save_image(G.image, (goal[0], goal[1]), states_xy[0][0],
                       states_xy, states_one_hot, counter)
        counter += 1
        for i in range(n_traj):
            if len(states_xy[i]) > 1:
                t0 = time.time()
                # Rollout budget: twice the optimal number of steps.
                L = len(states_xy[i]) * 2
                pred_traj = np.zeros((L, 2))
                pred_traj[0, :] = states_xy[i][0, :]

                for j in range(1, L):
                    # BUG FIX: np.int was deprecated (removed in NumPy 1.24);
                    # the builtin int is the documented replacement.
                    state_data = pred_traj[j - 1, :].astype(int)
                    # Transform domain to the network's expected input shape
                    # (obstacles become 1 after the inversion below).
                    im_data = 1 - G.image.astype(int)
                    im_data = im_data.reshape(1, 1, config.imsize,
                                              config.imsize)
                    # Transform the value prior likewise.
                    value_data = value_prior.astype(int)
                    value_data = value_data.reshape(1, 1, config.imsize,
                                                    config.imsize)
                    # Stack map + prior into the two input channels.
                    X_in = torch.from_numpy(
                        np.append(im_data, value_data, axis=1)).float()
                    S1_in = torch.from_numpy(state_data[0].reshape(
                        [1, 1])).float()
                    S2_in = torch.from_numpy(state_data[1].reshape(
                        [1, 1])).float()
                    if use_GPU:
                        X_in = X_in.cuda()
                        S1_in = S1_in.cuda()
                        S2_in = S2_in.cuda()
                    X_in, S1_in, S2_in = Variable(X_in), Variable(
                        S1_in), Variable(S2_in)
                    # Forward pass; take the argmax action.
                    _, predictions = vin(X_in, S1_in, S2_in, config)
                    _, indices = torch.max(predictions.cpu(), 1, keepdim=True)
                    a = indices.data.numpy()[0][0]
                    s = G.map_ind_to_state(pred_traj[j - 1, 0],
                                           pred_traj[j - 1, 1])
                    ns = G.sample_next_state(s, a)
                    nr, nc = G.get_coords(ns)
                    pred_traj[j, 0] = nr
                    pred_traj[j, 1] = nc
                    if nr == goal[0] and nc == goal[1]:
                        # We hit the goal, so fill the remaining steps.
                        pred_traj[j + 1:, 0] = nr
                        pred_traj[j + 1:, 1] = nc
                        break
                if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]:
                    logging.debug('#################### - Path Found map %s!\n', dom)
                    correct += 1
                    t_list.append(time.time() - t0)
                    dev_rel, dev_non_rel, dist, astar_dist = deviation(
                        states_xy[i], pred_traj, goal, total)
                    total_dev_rel += dev_rel
                    total_dev_non_rel += dev_non_rel
                    total_dist += dist
                    total_astar_dist += astar_dist
                    if config.plot == True:
                        visualize(G.image.T, states_xy[i], pred_traj)
                elif metrics:
                    dist_remain_avg += dist_left(pred_traj, goal)
                    if config.plot == True:
                        visualize(G.image.T, states_xy[i], pred_traj)
                total += 1
            elif wpn:
                # Loaded map for which Dijkstra found no solution.
                total_no_soln += 1
        sys.stdout.write("\r" + str(int(
            (float(dom) / n_domains) * 100.0)) + "%")
        sys.stdout.flush()

    sys.stdout.write("\n")
    if total and correct:
        logging.info('Rollout Accuracy: %s', (100 * (correct / total)))
        logging.info('Rollout Accuracy Adjusted: %s',
                     (100 * (correct / (total + total_no_soln))))
        logging.info('Total maps with no soln from Dijkstra %s', total_no_soln)
        logging.info('Total avg Rel Deviation %s', (total_dev_rel / total))
        logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel / total))
        logging.info('Total avg VIN Distance %s', (total_dist / total))
        logging.info('Total avg Dijkstra Distance %s', (total_astar_dist / total))
        logging.info('Avg deviation from Dijkstra: %s',
                     (((total_astar_dist / total) - (total_dist / total)) /
                      (total_astar_dist / total)))
        logging.info('Total elapsed time %s', (sum(t_list) / (correct)))
        # BUG FIX: guard the failure average; dividing by (total - correct)
        # raised ZeroDivisionError whenever every rollout succeeded.
        if total > correct:
            logging.info('Avg distance left when failed: %s ',
                         (dist_remain_avg / (total - correct)))
        logging.info('---------------------------------Done ------------------------------------')
    else:
        logging.info('No successes either vin or dijkstra')


def visualize(dom, states_xy, pred_traj):
    """Plot the optimal and predicted paths on top of the domain image."""
    fig, ax = plt.subplots()
    implot = plt.imshow(dom, cmap="Greys_r")
    ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path')
    ax.plot(
        pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path')
    ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start')
    ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal')
    legend = ax.legend(loc='upper right', shadow=False)
    for label in legend.get_texts():
        label.set_fontsize('x-small')  # the legend text size
    for label in legend.get_lines():
        label.set_linewidth(0.5)  # the legend line width
    plt.draw()
    plt.waitforbuttonpress(0)
    plt.close(fig)


def save_image(im, goal, start, states_xy, states_one_hot, counter):
    '''
    Saves the data made by the generator as JSON entries in the global `data`.

    BUG FIX: the original indexed undefined names ``start_x``/``start_y`` and
    mutated the throwaway list returned by ``im.tolist()`` (a silent no-op);
    it also read the CLI ``config`` global for the image size.  The grid size
    is now taken from ``im`` itself and the supplied ``start`` is used.
    '''
    s = im.shape[0]
    grid = im.tolist()
    if len(states_xy[0]) == 0:
        # No Dijkstra path: mark the supplied start cell and store a
        # placeholder agent position, matching the original intent.
        grid[int(start[0])][int(start[1])] = 1
        agent = [0, 0]
    else:
        agent = states_xy[0][0].tolist()
    mp = {
        'grid': grid,
        'goal': [goal[0], goal[1]],
        'agent': agent,
    }
    data.append(mp)
    with open('./maps/' + str(s) + '_data_300' + '.json', 'w') as outfile:
        json.dump(data, outfile)


def open_map(dom, path):
    '''
    Opens a map JSON given dom and path; returns grid, goal and agent.
    '''
    with open(str(path) + str(dom) + '.json') as json_file:
        data = json.load(json_file)
        logging.info('Opening file: ' + str(path) + str(dom) + '.json')
        return data['grid'], data['goal'], data['agent']


def open_map_list(dom, path):
    '''Opens a JSON file holding a list of maps and returns entry `dom`.'''
    with open(str(path) + '.json') as json_file:
        data = json.load(json_file)
        # BUG FIX: the log message claimed a per-domain filename that is not
        # the file actually opened.
        logging.info('Opening file: ' + str(path) + '.json')
        return data[dom]['grid'], data[dom]['goal'], data[dom]['agent']


def deviation(optimal_path, pred_path, goal, map_num):
    '''
    Returns (relative deviation, absolute deviation, VIN path length,
    Dijkstra path length) between the predicted and optimal trajectories.
    `goal` and `map_num` are unused but kept for interface compatibility.
    '''
    optimal_path = np.array(optimal_path, dtype=float)
    # Removes duplicates at the end (when the rollout reaches the goal).
    # NOTE(review): np.unique also SORTS the points lexicographically, so the
    # length below is measured over a reordered path — this matches the
    # original behaviour but looks like a metric bug; TODO confirm.
    pred_path = np.unique(pred_path, axis=0)

    def _path_length(path):
        # Sum of Euclidean step lengths along the path.
        prev = path[0, :]
        length = 0.0
        for xy in path:
            length += math.hypot(1.0 * xy[0] - 1.0 * prev[0],
                                 1.0 * xy[1] - 1.0 * prev[1])
            prev = xy
        return length

    dist = _path_length(np.asarray(pred_path, dtype=float))
    astar_dist = _path_length(optimal_path)

    dev_non_rel = abs(astar_dist - dist)
    dev_rel = dev_non_rel / astar_dist
    return (dev_rel, dev_non_rel, dist, astar_dist)


def dist_left(pred_traj, goal):
    '''
    Finds the Euclidean distance left between the last point and the goal.
    '''
    pred_traj = np.array(pred_traj)
    x1, y1 = pred_traj[-1][0], pred_traj[-1][1]
    x2, y2 = goal[0], goal[1]
    return math.hypot(x2 - x1, y2 - y1)


if __name__ == '__main__':
    # Parsing testing parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--weights',
        type=str,
        default='trained/60k_no_block_att3_vin_8x8.pth',
        help='Path to trained weights')
    parser.add_argument('--plot', action='store_true', default=False)
    parser.add_argument('--gen', action='store_true', default=False)
    parser.add_argument('--imsize', type=int, default=8, help='Size of image')
    parser.add_argument(
        '--k', type=int, default=10, help='Number of Value Iterations')
    parser.add_argument(
        '--l_i', type=int, default=2, help='Number of channels in input layer')
    parser.add_argument(
        '--l_h',
        type=int,
        default=150,
        help='Number of channels in first hidden layer')
    parser.add_argument(
        '--l_q',
        type=int,
        default=10,
        help='Number of channels in q layer (~actions) in VI-module')
    config = parser.parse_args()
    # Compute paths generated by the network (and plot if --plot).
    for i in range(1):
        main(config)
# ---- src/algorithms/learning/VIN/test_house_expo.py (reviewed content of this patch span) ----
# Evaluates a trained VIN on HouseExpo floor-plan maps; near-duplicate of
# test8.py with different map source, log file and network defaults.
import sys
import argparse
import json
import logging
import math
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable

from dataset.dataset import *
from utility.utils import *
from model import *

from domains.gridworld import *
from generators.obstacle_gen import *


def main(config,
         n_domains=30,
         max_obs=30,
         max_obs_size=None,
         n_traj=1,
         n_actions=8, gen=False):
    """Run VIN rollouts over ``n_domains`` HouseExpo maps and log metrics.

    When ``gen`` is True, random obstacle maps are generated instead of
    loading HouseExpo maps.  ``n_actions`` is unused but kept for interface
    compatibility with the sibling test scripts.
    """
    logging.basicConfig(
        filename='./resources/logs/house_expo.log',
        format='%(asctime)s-%(levelname)s:%(message)s',
        level=logging.INFO)
    correct, total = 0.0, 0.0
    # Automatic switch to GPU mode if available
    use_GPU = torch.cuda.is_available()
    # Instantiate a VIN model and load the trained parameters
    vin = VIN(config)
    vin.load_state_dict(torch.load(config.weights))
    if use_GPU:
        vin = vin.cuda()
    counter, total_no_soln = 0, 0
    global data
    data = []
    t_list = []
    total_dev_non_rel, total_dev_rel = 0.0, 0.0
    total_dist, total_astar_dist = 0.0, 0.0
    metrics = True  # report the distance left to the goal upon a failure
    dist_remain_avg = 0.0
    # BUG FIX: `wpn` was only assigned in the map-loading branch, so the
    # `elif wpn:` test below raised NameError when gen=True.
    wpn = False
    for dom in range(n_domains):
        if gen:
            goal = [
                np.random.randint(config.imsize),
                np.random.randint(config.imsize)
            ]
            obs = obstacles([config.imsize, config.imsize], goal, max_obs_size)
            n_obs = obs.add_n_rand_obs(max_obs)
            border_res = obs.add_border()
            if n_obs == 0 or not border_res:
                continue  # invalid map, skip this domain
            start = None
        else:
            wpn = True
            path = './resources/house_expo/'
            mp, goal, start = open_map(dom, path)
            mp[start[1]][start[0]] = 0  # force the start position to free space
            mp[goal[1]][goal[0]] = 0    # force the goal position to free space
            # Swap to row/col order (x is the column, not the row).
            goal = [goal[1], goal[0]]
            start = [start[1], start[0]]
            obs = obstacles([config.imsize, config.imsize], goal, max_obs_size)
            obs.dom = mp

        # Get the final map (1 is an obstacle in `im`).
        im = obs.get_final()
        logging.debug('0 is obstacle ')
        logging.debug(' im: %s ', im)
        # BUG FIX: removed leftover debug print ("got here").
        G = gridworld(im, goal[0], goal[1])
        value_prior = G.get_reward_prior()
        # Dijkstra reference trajectory to the goal.
        states_xy, states_one_hot = sample_trajectory(G, n_traj, start, gen)
        if gen and len(states_xy[0]) > 0:
            save_image(G.image, (goal[0], goal[1]), states_xy[0][0],
                       states_xy, states_one_hot, counter)
        counter += 1
        for i in range(n_traj):
            if len(states_xy[i]) > 1:
                t0 = time.time()
                # Rollout budget: twice the optimal number of steps.
                L = len(states_xy[i]) * 2
                pred_traj = np.zeros((L, 2))
                pred_traj[0, :] = states_xy[i][0, :]

                for j in range(1, L):
                    # BUG FIX: np.int was deprecated (removed in NumPy 1.24).
                    state_data = pred_traj[j - 1, :].astype(int)
                    # Transform domain to the network's expected input shape.
                    im_data = 1 - G.image.astype(int)
                    im_data = im_data.reshape(1, 1, config.imsize,
                                              config.imsize)
                    # Transform the value prior likewise.
                    value_data = value_prior.astype(int)
                    value_data = value_data.reshape(1, 1, config.imsize,
                                                    config.imsize)
                    X_in = torch.from_numpy(
                        np.append(im_data, value_data, axis=1)).float()
                    S1_in = torch.from_numpy(state_data[0].reshape(
                        [1, 1])).float()
                    S2_in = torch.from_numpy(state_data[1].reshape(
                        [1, 1])).float()
                    if use_GPU:
                        X_in = X_in.cuda()
                        S1_in = S1_in.cuda()
                        S2_in = S2_in.cuda()
                    X_in, S1_in, S2_in = Variable(X_in), Variable(
                        S1_in), Variable(S2_in)
                    # Forward pass; take the argmax action.
                    _, predictions = vin(X_in, S1_in, S2_in, config)
                    _, indices = torch.max(predictions.cpu(), 1, keepdim=True)
                    a = indices.data.numpy()[0][0]
                    s = G.map_ind_to_state(pred_traj[j - 1, 0],
                                           pred_traj[j - 1, 1])
                    ns = G.sample_next_state(s, a)
                    nr, nc = G.get_coords(ns)
                    pred_traj[j, 0] = nr
                    pred_traj[j, 1] = nc
                    if nr == goal[0] and nc == goal[1]:
                        # We hit the goal, so fill the remaining steps.
                        pred_traj[j + 1:, 0] = nr
                        pred_traj[j + 1:, 1] = nc
                        break
                if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]:
                    logging.debug('#################### - Path Found map %s!\n', dom)
                    correct += 1
                    t_list.append(time.time() - t0)
                    dev_rel, dev_non_rel, dist, astar_dist = deviation(
                        states_xy[i], pred_traj, goal, total)
                    total_dev_rel += dev_rel
                    total_dev_non_rel += dev_non_rel
                    total_dist += dist
                    total_astar_dist += astar_dist
                    if config.plot == True:
                        visualize(G.image.T, states_xy[i], pred_traj)
                elif metrics:
                    dist_remain_avg += dist_left(pred_traj, goal)
                    if config.plot == True:
                        visualize(G.image.T, states_xy[i], pred_traj)
                total += 1
            elif wpn:
                # Loaded map for which Dijkstra found no solution.
                total_no_soln += 1
        sys.stdout.write("\r" + str(int(
            (float(dom) / n_domains) * 100.0)) + "%")
        sys.stdout.flush()

    sys.stdout.write("\n")
    if total and correct:
        logging.info('Rollout Accuracy Dijkstra: %s',
                     (100 * ((total - total_no_soln) / total)))
        logging.info('Rollout Accuracy: %s', (100 * (correct / total)))
        logging.info('Rollout Accuracy Adjusted: %s',
                     (100 * (correct / (total + total_no_soln))))
        logging.info('Total maps with no soln from Dijkstra %s', total_no_soln)
        logging.info('Total avg Rel Deviation %s', (total_dev_rel / total))
        logging.info('Total avg Non-Rel Deviation %s', (total_dev_non_rel / total))
        logging.info('Total avg VIN Distance %s', (total_dist / total))
        logging.info('Total avg Dijkstra Distance %s', (total_astar_dist / total))
        logging.info('Avg deviation from Dijkstra: %s',
                     (((total_astar_dist / total) - (total_dist / total)) /
                      (total_astar_dist / total)))
        logging.info('Total elapsed time %s', (sum(t_list) / (correct)))
        # BUG FIX: guard the failure average; dividing by (total - correct)
        # raised ZeroDivisionError whenever every rollout succeeded.
        if total > correct:
            logging.info('Avg distance left when failed: %s ',
                         (dist_remain_avg / (total - correct)))
        logging.info('---------------------------------Done ------------------------------------')
    else:
        logging.info('No successes either vin or dijkstra')


def visualize(dom, states_xy, pred_traj):
    """Plot the optimal and predicted paths on top of the domain image."""
    fig, ax = plt.subplots()
    implot = plt.imshow(dom, cmap="Greys_r")
    ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path')
    ax.plot(
        pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path')
    ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start')
    ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal')
    legend = ax.legend(loc='upper right', shadow=False)
    for label in legend.get_texts():
        label.set_fontsize('x-small')  # the legend text size
    for label in legend.get_lines():
        label.set_linewidth(0.5)  # the legend line width
    plt.draw()
    plt.waitforbuttonpress(0)
    plt.close(fig)


def save_image(im, goal, start, states_xy, states_one_hot, counter):
    '''
    Saves the data made by the generator as JSON entries in the global `data`.

    BUG FIX: the original indexed undefined names ``start_x``/``start_y`` and
    mutated the throwaway list returned by ``im.tolist()`` (a silent no-op);
    it also read the CLI ``config`` global for the image size.  The grid size
    is now taken from ``im`` itself and the supplied ``start`` is used.
    '''
    s = im.shape[0]
    grid = im.tolist()
    if len(states_xy[0]) == 0:
        # No Dijkstra path: mark the supplied start cell and store a
        # placeholder agent position, matching the original intent.
        grid[int(start[0])][int(start[1])] = 1
        agent = [0, 0]
    else:
        agent = states_xy[0][0].tolist()
    mp = {
        'grid': grid,
        'goal': [goal[0], goal[1]],
        'agent': agent,
    }
    data.append(mp)
    with open('./maps/' + str(s) + '_data_300' + '.json', 'w') as outfile:
        json.dump(data, outfile)


def open_map(dom, path):
    '''
    Opens a map JSON given dom and path; returns grid, goal and agent.
    '''
    with open(str(path) + str(dom) + '.json') as json_file:
        data = json.load(json_file)
        logging.info('Opening file: ' + str(path) + str(dom) + '.json')
        return data['grid'], data['goal'], data['agent']


def open_map_list(dom, path):
    '''Opens a JSON file holding a list of maps and returns entry `dom`.'''
    with open(str(path) + '.json') as json_file:
        data = json.load(json_file)
        # BUG FIX: the log message claimed a per-domain filename that is not
        # the file actually opened.
        logging.info('Opening file: ' + str(path) + '.json')
        return data[dom]['grid'], data[dom]['goal'], data[dom]['agent']


def deviation(optimal_path, pred_path, goal, map_num):
    '''
    Returns (relative deviation, absolute deviation, VIN path length,
    Dijkstra path length) between the predicted and optimal trajectories.
    `goal` and `map_num` are unused but kept for interface compatibility.
    '''
    optimal_path = np.array(optimal_path, dtype=float)
    # Removes duplicates at the end (when the rollout reaches the goal).
    # NOTE(review): np.unique also SORTS the points lexicographically, so the
    # length below is measured over a reordered path — this matches the
    # original behaviour but looks like a metric bug; TODO confirm.
    pred_path = np.unique(pred_path, axis=0)

    def _path_length(path):
        # Sum of Euclidean step lengths along the path.
        prev = path[0, :]
        length = 0.0
        for xy in path:
            length += math.hypot(1.0 * xy[0] - 1.0 * prev[0],
                                 1.0 * xy[1] - 1.0 * prev[1])
            prev = xy
        return length

    dist = _path_length(np.asarray(pred_path, dtype=float))
    astar_dist = _path_length(optimal_path)

    dev_non_rel = abs(astar_dist - dist)
    dev_rel = dev_non_rel / astar_dist
    return (dev_rel, dev_non_rel, dist, astar_dist)


def dist_left(pred_traj, goal):
    '''
    Finds the Euclidean distance left between the last point and the goal.
    '''
    pred_traj = np.array(pred_traj)
    x1, y1 = pred_traj[-1][0], pred_traj[-1][1]
    x2, y2 = goal[0], goal[1]
    return math.hypot(x2 - x1, y2 - y1)


if __name__ == '__main__':
    # Parsing testing parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--weights',
        type=str,
        default='trained/30k_no_block_dataset_vin_64x64.pth',
        help='Path to trained weights')
    parser.add_argument('--plot', action='store_true', default=False)
    parser.add_argument('--gen', action='store_true', default=False)
    parser.add_argument('--imsize', type=int, default=100, help='Size of image')
    parser.add_argument(
        '--k', type=int, default=48, help='Number of Value Iterations')
    parser.add_argument(
        '--l_i', type=int, default=2, help='Number of channels in input layer')
    parser.add_argument(
        '--l_h',
        type=int,
        default=150,
        help='Number of channels in first hidden layer')
    parser.add_argument(
        '--l_q',
        type=int,
        default=10,
        help='Number of channels in q layer (~actions) in VI-module')
    config = parser.parse_args()
    # Compute paths generated by the network (and plot if --plot).
    for i in range(1):
        main(config)
# This span of the patch adds four separate files whose diff bodies are fused
# across the same physical lines; the reviewed content of each follows.

# ---- src/algorithms/learning/VIN/test_nonzeros.py ----
# Scratch script illustrating boolean masking and np.nonzero semantics.
import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a > 3)  # Where is a > 3?

'''
[[False False False]
 [ True  True  True]
 [ True  True  True]]

gives the above.
So in 0th list, none are true. Then you have 1st list, 0th is true. 1st list 1th is true. 1st list 2nd is true
so you have 1-0, 1-1, 1-2, where 1st # is the list # and 2nd # is the index in that list
In the 2nd list (3rd one), 0th is true, 1st etc.
'''
print(np.nonzero(a > 3))
'''
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
gives above. So the first array is the list #, and 2nd array is the index within that list
so list 1 number 0, list 1 number 1, list 1 number 2
list 2 number 0, list 2 number 1, list 2 number 2
'''

# ---- src/algorithms/learning/VIN/test_og.py ----
# Original (upstream) VIN evaluation script on randomly generated gridworlds.
import sys
import argparse

import matplotlib.pyplot as plt

import torch
from torch.autograd import Variable

from dataset.dataset_og import *
from utility.utils import *
from model import *

from domains.gridworld_og import *
from generators.obstacle_gen import *


def main(config,
         n_domains=100,
         max_obs=30,
         max_obs_size=None,
         n_traj=1,
         n_actions=8):
    """Roll out a trained VIN on `n_domains` random maps and print accuracy."""
    correct, total = 0.0, 0.0
    # Automatic switch to GPU mode if available
    use_GPU = torch.cuda.is_available()
    # Instantiate a VIN model and load the trained parameters
    vin = VIN(config)
    vin.load_state_dict(torch.load(config.weights))
    if use_GPU:
        vin = vin.cuda()

    for dom in range(n_domains):
        # Randomly select goal position and generate an obstacle map.
        goal = [
            np.random.randint(config.imsize),
            np.random.randint(config.imsize)
        ]
        obs = obstacles([config.imsize, config.imsize], goal, max_obs_size)
        n_obs = obs.add_n_rand_obs(max_obs)
        border_res = obs.add_border()
        if n_obs == 0 or not border_res:
            continue  # invalid map, skip
        im = obs.get_final()

        # Generate gridworld from obstacle map, and its reward prior.
        G = gridworld(im, goal[0], goal[1])
        value_prior = G.get_reward_prior()
        # Sample random trajectories to our goal.
        states_xy, states_one_hot = sample_trajectory(G, n_traj)

        for i in range(n_traj):
            if len(states_xy[i]) > 1:
                # Rollout budget: twice the optimal number of steps.
                L = len(states_xy[i]) * 2
                pred_traj = np.zeros((L, 2))
                pred_traj[0, :] = states_xy[i][0, :]

                for j in range(1, L):
                    # BUG FIX: np.int was deprecated (removed in NumPy 1.24).
                    state_data = pred_traj[j - 1, :].astype(int)
                    # Transform domain to the network's expected input shape.
                    im_data = 1 - G.image.astype(int)
                    im_data = im_data.reshape(1, 1, config.imsize,
                                              config.imsize)
                    # Transform the value prior likewise.
                    value_data = value_prior.astype(int)
                    value_data = value_data.reshape(1, 1, config.imsize,
                                                    config.imsize)
                    X_in = torch.from_numpy(
                        np.append(im_data, value_data, axis=1)).float()
                    S1_in = torch.from_numpy(state_data[0].reshape(
                        [1, 1])).float()
                    S2_in = torch.from_numpy(state_data[1].reshape(
                        [1, 1])).float()
                    if use_GPU:
                        X_in = X_in.cuda()
                        S1_in = S1_in.cuda()
                        S2_in = S2_in.cuda()
                    X_in, S1_in, S2_in = Variable(X_in), Variable(
                        S1_in), Variable(S2_in)
                    # Forward pass; take the argmax action.
                    _, predictions = vin(X_in, S1_in, S2_in, config)
                    _, indices = torch.max(predictions.cpu(), 1, keepdim=True)
                    a = indices.data.numpy()[0][0]
                    s = G.map_ind_to_state(pred_traj[j - 1, 0],
                                           pred_traj[j - 1, 1])
                    ns = G.sample_next_state(s, a)
                    nr, nc = G.get_coords(ns)
                    pred_traj[j, 0] = nr
                    pred_traj[j, 1] = nc
                    if nr == goal[0] and nc == goal[1]:
                        # We hit the goal, so fill the remaining steps.
                        pred_traj[j + 1:, 0] = nr
                        pred_traj[j + 1:, 1] = nc
                        break
                if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]:
                    correct += 1
                total += 1
                if config.plot == True:
                    visualize(G.image.T, states_xy[i], pred_traj)
        sys.stdout.write("\r" + str(int(
            (float(dom) / n_domains) * 100.0)) + "%")
        sys.stdout.flush()
    sys.stdout.write("\n")
    # BUG FIX: guard against ZeroDivisionError when no valid rollout occurred
    # (e.g. every generated map was rejected).
    if total:
        print('Rollout Accuracy: {:.2f}%'.format(100 * (correct / total)))
    else:
        print('No valid rollouts')


def visualize(dom, states_xy, pred_traj):
    """Plot the optimal and predicted paths on top of the domain image."""
    fig, ax = plt.subplots()
    implot = plt.imshow(dom, cmap="Greys_r")
    ax.plot(states_xy[:, 0], states_xy[:, 1], c='b', label='Optimal Path')
    ax.plot(
        pred_traj[:, 0], pred_traj[:, 1], '-X', c='r', label='Predicted Path')
    ax.plot(states_xy[0, 0], states_xy[0, 1], '-o', label='Start')
    ax.plot(states_xy[-1, 0], states_xy[-1, 1], '-s', label='Goal')
    legend = ax.legend(loc='upper right', shadow=False)
    for label in legend.get_texts():
        label.set_fontsize('x-small')  # the legend text size
    for label in legend.get_lines():
        label.set_linewidth(0.5)  # the legend line width
    plt.draw()
    plt.waitforbuttonpress(0)
    plt.close(fig)


if __name__ == '__main__':
    # Parsing testing parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--weights',
        type=str,
        default='trained/vin_8x8.pth',
        help='Path to trained weights')
    parser.add_argument('--plot', action='store_true', default=False)
    parser.add_argument('--imsize', type=int, default=8, help='Size of image')
    parser.add_argument(
        '--k', type=int, default=10, help='Number of Value Iterations')
    parser.add_argument(
        '--l_i', type=int, default=2, help='Number of channels in input layer')
    parser.add_argument(
        '--l_h',
        type=int,
        default=150,
        help='Number of channels in first hidden layer')
    parser.add_argument(
        '--l_q',
        type=int,
        default=10,
        help='Number of channels in q layer (~actions) in VI-module')
    config = parser.parse_args()
    # Compute paths generated by network and plot
    main(config)

# ---- src/algorithms/learning/VIN/train.py ----
# Trains a VIN on a generated gridworld dataset and reports test accuracy.
import time
import argparse

import torch.nn as nn
import torch.optim as optim

import torchvision.transforms as transforms

from dataset.dataset import *


def train(net, trainloader, config, criterion, optimizer):
    """Train `net` for config.epochs epochs, printing per-epoch statistics."""
    print_header()
    # PERF/BUG FIX: the device query and net.to(device) were re-executed for
    # every batch; select the device once and move the model once.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    for epoch in range(config.epochs):  # Loop over dataset multiple times
        avg_error, avg_loss, num_batches = 0.0, 0.0, 0.0
        start_time = time.time()
        for i, data in enumerate(trainloader):  # Loop over batches of data
            X, S1, S2, labels = data
            if X.size()[0] != config.batch_size:
                continue  # Drop those data, if not enough for a batch
            X = X.to(device)
            S1 = S1.to(device)
            S2 = S2.to(device)
            labels = labels.to(device)
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward pass
            outputs, predictions = net(X, S1, S2, config)
            # Loss
            loss = criterion(outputs, labels)
            # Backward pass and parameter update
            loss.backward()
            optimizer.step()
            # Accumulate loss and error statistics
            loss_batch, error_batch = get_stats(loss, predictions, labels)
            avg_loss += loss_batch
            avg_error += error_batch
            num_batches += 1
        time_duration = time.time() - start_time
        # Print epoch logs
        print_stats(epoch, avg_loss, avg_error, num_batches, time_duration)
    print('\nFinished training. \n')


def test(net, testloader, config):
    """Evaluate `net` on the test loader and print overall accuracy."""
    total, correct = 0.0, 0.0
    # PERF/BUG FIX: hoist device selection/model move out of the batch loop.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    for i, data in enumerate(testloader):
        X, S1, S2, labels = data
        if X.size()[0] != config.batch_size:
            continue  # Drop those data, if not enough for a batch
        X = X.to(device)
        S1 = S1.to(device)
        S2 = S2.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs, predictions = net(X, S1, S2, config)
        # Select actions with max scores (logits)
        _, predicted = torch.max(outputs, dim=1, keepdim=True)
        predicted = predicted.data
        # Compute test accuracy
        correct += (torch.eq(torch.squeeze(predicted), labels)).sum()
        total += labels.size()[0]
    print('Test Accuracy: {:.2f}%'.format(100 * (correct / total)))


if __name__ == '__main__':
    # Parsing training parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--datafile',
        type=str,
        default='dataset/30000_new_gridworld_8x8.npz',
        help='Path to data file')
    parser.add_argument('--imsize', type=int, default=8, help='Size of image')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.005,
        help='Learning rate, [0.01, 0.005, 0.002, 0.001]')
    parser.add_argument(
        '--epochs', type=int, default=30, help='Number of epochs to train')
    parser.add_argument(
        '--k', type=int, default=10, help='Number of Value Iterations')
    parser.add_argument(
        '--l_i', type=int, default=2, help='Number of channels in input layer')
    parser.add_argument(
        '--l_h',
        type=int,
        default=150,
        help='Number of channels in first hidden layer')
    parser.add_argument(
        '--l_q',
        type=int,
        default=10,
        help='Number of channels in q layer (~actions) in VI-module')
    parser.add_argument(
        '--batch_size', type=int, default=128, help='Batch size')
    config = parser.parse_args()
    # Path to save the trained model
    save_path = "trained/60k_local_new_vin_{0}x{0}.pth".format(config.imsize)
    net = VIN(config)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.RMSprop(net.parameters(), lr=config.lr, eps=1e-6)
    transform = None
    trainset = GridworldData(
        config.datafile, imsize=config.imsize, train=True, transform=transform)
    testset = GridworldData(
        config.datafile,
        imsize=config.imsize,
        train=False,
        transform=transform)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=config.batch_size, shuffle=True, num_workers=0)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=config.batch_size, shuffle=False, num_workers=0)
    train(net, trainloader, config, criterion, optimizer)
    test(net, testloader, config)
    # Save the trained model parameters
    torch.save(net.state_dict(), save_path)

# ---- src/algorithms/learning/VIN/train_og.py ----
# Original (upstream) training script; identical to train.py except for the
# dataset module, default data file and save path.
from dataset.dataset_og import *


def train(net, trainloader, config, criterion, optimizer):
    """Train `net` for config.epochs epochs, printing per-epoch statistics."""
    print_header()
    # PERF/BUG FIX: hoist device selection/model move out of the batch loop.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    for epoch in range(config.epochs):  # Loop over dataset multiple times
        avg_error, avg_loss, num_batches = 0.0, 0.0, 0.0
        start_time = time.time()
        for i, data in enumerate(trainloader):  # Loop over batches of data
            X, S1, S2, labels = data
            if X.size()[0] != config.batch_size:
                continue  # Drop those data, if not enough for a batch
            X = X.to(device)
            S1 = S1.to(device)
            S2 = S2.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs, predictions = net(X, S1, S2, config)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss_batch, error_batch = get_stats(loss, predictions, labels)
            avg_loss += loss_batch
            avg_error += error_batch
            num_batches += 1
        time_duration = time.time() - start_time
        print_stats(epoch, avg_loss, avg_error, num_batches, time_duration)
    print('\nFinished training. \n')


def test(net, testloader, config):
    """Evaluate `net` on the test loader and print overall accuracy."""
    total, correct = 0.0, 0.0
    # PERF/BUG FIX: hoist device selection/model move out of the batch loop.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    for i, data in enumerate(testloader):
        X, S1, S2, labels = data
        if X.size()[0] != config.batch_size:
            continue  # Drop those data, if not enough for a batch
        X = X.to(device)
        S1 = S1.to(device)
        S2 = S2.to(device)
        labels = labels.to(device)
        outputs, predictions = net(X, S1, S2, config)
        _, predicted = torch.max(outputs, dim=1, keepdim=True)
        predicted = predicted.data
        correct += (torch.eq(torch.squeeze(predicted), labels)).sum()
        total += labels.size()[0]
    print('Test Accuracy: {:.2f}%'.format(100 * (correct / total)))


if __name__ == '__main__':
    # Parsing training parameters
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--datafile',
        type=str,
        default='dataset/gridworld_8x8.npz',
        help='Path to data file')
    parser.add_argument('--imsize', type=int, default=8, help='Size of image')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.005,
        help='Learning rate, [0.01, 0.005, 0.002, 0.001]')
    parser.add_argument(
        '--epochs', type=int, default=30, help='Number of epochs to train')
    parser.add_argument(
        '--k', type=int, default=10, help='Number of Value Iterations')
    parser.add_argument(
        '--l_i', type=int, default=2, help='Number of channels in input layer')
    parser.add_argument(
        '--l_h',
        type=int,
        default=150,
        help='Number of channels in first hidden layer')
    parser.add_argument(
        '--l_q',
        type=int,
        default=10,
        help='Number of channels in q layer (~actions) in VI-module')
    parser.add_argument(
        '--batch_size', type=int, default=128, help='Batch size')
    config = parser.parse_args()
    # Path to save the trained model
    save_path = "trained/vin_{0}x{0}.pth".format(config.imsize)
    net = VIN(config)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.RMSprop(net.parameters(), lr=config.lr, eps=1e-6)
    transform = None
    trainset = GridworldData(
        config.datafile, imsize=config.imsize, train=True, transform=transform)
    testset = GridworldData(
        config.datafile,
        imsize=config.imsize,
        train=False,
        transform=transform)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=config.batch_size, shuffle=True, num_workers=0)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=config.batch_size, shuffle=False, num_workers=0)
    train(net, trainloader, config, criterion, optimizer)
    test(net, testloader, config)
    # Save the trained model parameters
    torch.save(net.state_dict(), save_path)
-0,0 +1,4 @@ +# Trained Models +To use a pretrained model you have two choices: +1. Download and place the trained .pth model files here +2. Train the VIN on the datasets yourself (the models will save themselves here) \ No newline at end of file diff --git a/src/algorithms/learning/VIN/utility/__init__.py b/src/algorithms/learning/VIN/utility/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/algorithms/learning/VIN/utility/utils.py b/src/algorithms/learning/VIN/utility/utils.py new file mode 100644 index 000000000..de0104400 --- /dev/null +++ b/src/algorithms/learning/VIN/utility/utils.py @@ -0,0 +1,34 @@ +import numpy as np +import torch + + +def fmt_row(width, row): + out = " | ".join(fmt_item(x, width) for x in row) + return out + + +def fmt_item(x, l): + if isinstance(x, np.ndarray): + assert x.ndim == 0 + x = x.item() + if isinstance(x, float): rep = "%g" % x + else: rep = str(x) + return " " * (l - len(rep)) + rep + + +def get_stats(loss, predictions, labels): + cp = np.argmax(predictions.cpu().data.numpy(), 1) + error = np.mean(cp != labels.cpu().data.numpy()) + return loss.item(), error + + +def print_stats(epoch, avg_loss, avg_error, num_batches, time_duration): + print( + fmt_row(10, [ + epoch + 1, avg_loss / num_batches, avg_error / num_batches, + time_duration + ])) + + +def print_header(): + print(fmt_row(10, ["Epoch", "Train Loss", "Train Error", "Epoch Time"])) diff --git a/src/algorithms/learning/__init__.py b/src/algorithms/learning/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/algorithms/lstm/a_star_heuristic_augmentation.py b/src/algorithms/learning/a_star_heuristic_augmentation.py similarity index 98% rename from src/algorithms/lstm/a_star_heuristic_augmentation.py rename to src/algorithms/learning/a_star_heuristic_augmentation.py index 39762f7c8..d958a394d 100644 --- a/src/algorithms/lstm/a_star_heuristic_augmentation.py +++ b/src/algorithms/learning/a_star_heuristic_augmentation.py @@ 
-8,7 +8,7 @@ from algorithms.classic.graph_based.a_star import AStar from algorithms.configuration.entities.goal import Goal from algorithms.configuration.entities.trace import Trace -from algorithms.lstm.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM from simulator.services.algorithm_runner import AlgorithmRunner from simulator.services.services import Services from simulator.views.map.display.gradient_list_map_display import GradientListMapDisplay diff --git a/src/algorithms/lstm/a_star_waypoint.py b/src/algorithms/learning/a_star_waypoint.py similarity index 98% rename from src/algorithms/lstm/a_star_waypoint.py rename to src/algorithms/learning/a_star_waypoint.py index 2ea738daf..57a23460d 100644 --- a/src/algorithms/lstm/a_star_waypoint.py +++ b/src/algorithms/learning/a_star_waypoint.py @@ -7,8 +7,8 @@ from algorithms.classic.testing.combined_online_lstm_testing import CombinedOnlineLSTMTesting from algorithms.configuration.maps.map import Map from algorithms.configuration.maps.ros_map import RosMap -from algorithms.lstm.LSTM_tile_by_tile import OnlineLSTM -from algorithms.lstm.combined_online_LSTM import CombinedOnlineLSTM +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.combined_online_LSTM import CombinedOnlineLSTM from simulator.services.algorithm_runner import AlgorithmRunner from simulator.services.services import Services from simulator.views.map.display.map_display import MapDisplay diff --git a/src/algorithms/lstm/combined_online_LSTM.py b/src/algorithms/learning/combined_online_LSTM.py similarity index 98% rename from src/algorithms/lstm/combined_online_LSTM.py rename to src/algorithms/learning/combined_online_LSTM.py index fa1cc1a45..512dc1f57 100644 --- a/src/algorithms/lstm/combined_online_LSTM.py +++ b/src/algorithms/learning/combined_online_LSTM.py @@ -5,7 +5,7 @@ from algorithms.algorithm import Algorithm from algorithms.basic_testing import BasicTesting 
from algorithms.configuration.maps.map import Map -from algorithms.lstm.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM from simulator.services.algorithm_runner import AlgorithmRunner from simulator.services.services import Services from simulator.views.map.display.entities_map_display import EntitiesMapDisplay diff --git a/src/algorithms/lstm/map_processing.py b/src/algorithms/learning/map_processing.py similarity index 100% rename from src/algorithms/lstm/map_processing.py rename to src/algorithms/learning/map_processing.py diff --git a/src/algorithms/lstm/trainer.py b/src/algorithms/learning/trainer.py similarity index 100% rename from src/algorithms/lstm/trainer.py rename to src/algorithms/learning/trainer.py diff --git a/src/analyzer/analyzer.py b/src/analyzer/analyzer.py index 7e4ca09ac..804895b3c 100644 --- a/src/analyzer/analyzer.py +++ b/src/analyzer/analyzer.py @@ -3,8 +3,8 @@ from simulator.simulator import Simulator from simulator.services.services import Services from simulator.services.debug import DebugLevel, Debug -from algorithms.lstm.a_star_waypoint import WayPointNavigation -from algorithms.lstm.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.a_star_waypoint import WayPointNavigation +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM from algorithms.configuration.maps.map import Map from algorithms.configuration.maps.dense_map import DenseMap from algorithms.configuration.configuration import Configuration diff --git a/src/generator/generator.py b/src/generator/generator.py index 93ed2d726..746a21704 100644 --- a/src/generator/generator.py +++ b/src/generator/generator.py @@ -10,7 +10,7 @@ from matplotlib import pyplot as plt from natsort import natsorted -from algorithms.lstm.LSTM_CAE_tile_by_tile import CAE +from algorithms.learning.LSTM_CAE_tile_by_tile import CAE from algorithms.classic.graph_based.a_star import AStar from algorithms.classic.testing.a_star_testing 
import AStarTesting from algorithms.configuration.configuration import Configuration @@ -18,7 +18,7 @@ from algorithms.configuration.entities.entity import Entity from algorithms.configuration.maps.dense_map import DenseMap from algorithms.configuration.maps.map import Map -from algorithms.lstm.map_processing import MapProcessing +from algorithms.learning.map_processing import MapProcessing from simulator.services.debug import DebugLevel from simulator.services.progress import Progress from simulator.services.resources.atlas import Atlas diff --git a/src/main.py b/src/main.py index 34f12be6c..42469710d 100644 --- a/src/main.py +++ b/src/main.py @@ -1,7 +1,7 @@ from algorithms.configuration.configuration import Configuration from algorithms.algorithm_manager import AlgorithmManager from maps.map_manager import MapManager -from algorithms.lstm.trainer import Trainer +from algorithms.learning.trainer import Trainer from analyzer.analyzer import Analyzer from generator.generator import Generator from simulator.services.debug import DebugLevel diff --git a/src/run_trainer.py b/src/run_trainer.py index e5107abc0..e1e522e17 100644 --- a/src/run_trainer.py +++ b/src/run_trainer.py @@ -5,8 +5,8 @@ from algorithms.basic_testing import BasicTesting from algorithms.configuration.maps.map import Map from maps.map_manager import MapManager -from algorithms.lstm.LSTM_tile_by_tile import BasicLSTMModule, OnlineLSTM -from algorithms.lstm.ML_model import MLModel +from algorithms.learning.LSTM_tile_by_tile import BasicLSTMModule, OnlineLSTM +from algorithms.learning.ML_model import MLModel from simulator.services.debug import DebugLevel from analyzer.analyzer import Analyzer from generator.generator import Generator @@ -27,10 +27,10 @@ from algorithms.classic.sample_based.rrt_connect import RRT_Connect from algorithms.classic.graph_based.wavefront import Wavefront from algorithms.configuration.configuration import Configuration -from algorithms.lstm.LSTM_tile_by_tile import 
OnlineLSTM -from algorithms.lstm.a_star_waypoint import WayPointNavigation -from algorithms.lstm.combined_online_LSTM import CombinedOnlineLSTM -from algorithms.lstm.LSTM_CAE_tile_by_tile import CAE, LSTMCAEModel +from algorithms.learning.LSTM_tile_by_tile import OnlineLSTM +from algorithms.learning.a_star_waypoint import WayPointNavigation +from algorithms.learning.combined_online_LSTM import CombinedOnlineLSTM +from algorithms.learning.LSTM_CAE_tile_by_tile import CAE, LSTMCAEModel # planner testing diff --git a/src/simulator/services/resources/directories.py b/src/simulator/services/resources/directories.py index 5da6bc806..2167873b4 100644 --- a/src/simulator/services/resources/directories.py +++ b/src/simulator/services/resources/directories.py @@ -13,7 +13,7 @@ from simulator.services.services import Services if TYPE_CHECKING: - from algorithms.lstm.ML_model import MLModel + from algorithms.learning.ML_model import MLModel class ModelSubdir(Directory): diff --git a/src/simulator/views/map/display/online_lstm_map_display.py b/src/simulator/views/map/display/online_lstm_map_display.py index 9c90be130..d1cc4de86 100644 --- a/src/simulator/views/map/display/online_lstm_map_display.py +++ b/src/simulator/views/map/display/online_lstm_map_display.py @@ -8,7 +8,7 @@ from algorithms.configuration.entities.agent import Agent from algorithms.configuration.entities.goal import Goal from algorithms.configuration.maps.map import Map -from algorithms.lstm.map_processing import MapProcessing +from algorithms.learning.map_processing import MapProcessing from simulator.services.services import Services from simulator.views.map.display.map_display import MapDisplay from structures import Point, Colour, DynamicColour, RED From fa1e5eee9cb0d13977469a97e03a56009b3f5e6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=5B=20=E5=A4=A7=E9=B3=84=20=5D=20Asew?= Date: Mon, 4 Jan 2021 18:47:58 +0000 Subject: [PATCH 2/2] Fixed rendering issues with VIN MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: [ 大鳄 ] Asew --- src/algorithms/learning/VIN/VIN.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/algorithms/learning/VIN/VIN.py b/src/algorithms/learning/VIN/VIN.py index 3c466cdc8..e15187a3a 100644 --- a/src/algorithms/learning/VIN/VIN.py +++ b/src/algorithms/learning/VIN/VIN.py @@ -116,6 +116,7 @@ def _find_path_internal(self) -> None: pred_traj[j, 0] = nr pred_traj[j, 1] = nc self.move_agent(Point(nr, nc)) + self.key_frame(True) if nr == goal[0] and nc == goal[1]: # We hit goal so fill remaining steps pred_traj[j + 1:, 0] = nr @@ -124,8 +125,9 @@ def _find_path_internal(self) -> None: # Plot optimal and predicted path (also start, end) if pred_traj[-1, 0] == goal[0] and pred_traj[-1, 1] == goal[1]: self.move_agent(self._get_grid().goal.position) + self.key_frame(True) return - self.key_frame() + self.key_frame(True) def load_VIN(self, size): if size in self.cached_models: return self.cached_models[size]