From 937696f57d6fa681cd16d103e6b87d2ca2a04e16 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Tue, 22 Jun 2021 13:55:05 +0300 Subject: [PATCH 01/18] Initial changes --- .gitignore | 1 + README.md | 82 +- config/config.txt | 51 +- results/flood-erlay/README.md | 63 ++ .../flood-erlay/plots/compare_protocols.py | 23 + .../plots/erlay_configurations.png | Bin 0 -> 62539 bytes sim-cluster/Makefile | 35 - sim-cluster/blocking_TCP_socket.py | 39 - sim-cluster/churn.py | 31 - sim-cluster/cluster.sh | 108 --- sim-cluster/colors.py | 12 - sim-cluster/common.py | 8 - sim-cluster/copyright.txt | 23 - sim-cluster/create_cluster.sh | 94 --- sim-cluster/drain2.py | 185 ---- sim-cluster/flash_crowd.sh | 57 -- sim-cluster/gatherer.py | 478 ----------- sim-cluster/get_results.py | 64 -- sim-cluster/getpid.py | 12 - sim-cluster/peer-h.py | 745 ----------------- sim-cluster/peer-x.py | 791 ------------------ sim-cluster/peer.py | 740 ---------------- sim-cluster/run_oggfwd.sh | 72 -- sim-cluster/simulation.sh | 94 --- sim-cluster/simulator.sh | 82 -- sim-cluster/splitter-x.py | 693 --------------- sim-cluster/splitter.py | 730 ---------------- sim-cluster/stop_simulation.sh | 7 - sim-cluster/test_get.py | 153 ---- sim-cluster/test_p2psp.sh | 3 - src/InvObserver.java | 137 +++ src/Neighbor.java | 17 - src/Peer.java | 401 +++++++-- src/PeerInitializer.java | 116 ++- src/PeerObserver.java | 96 --- src/PoisonedChunksObserver.java | 50 -- src/SimpleEvent.java | 22 - src/Source.java | 143 +--- src/SourceInitializer.java | 29 +- src/{ => helpers}/ArrayListMessage.java | 6 +- src/{ => helpers}/IntMessage.java | 6 +- src/helpers/SimpleEvent.java | 21 + src/{ => helpers}/SimpleMessage.java | 16 +- src/{ => helpers}/TupleMessage.java | 16 +- utils/average.py | 26 - utils/expected.py | 46 - 46 files changed, 798 insertions(+), 5826 deletions(-) create mode 100644 .gitignore create mode 100644 results/flood-erlay/README.md create mode 100644 results/flood-erlay/plots/compare_protocols.py 
create mode 100644 results/flood-erlay/plots/erlay_configurations.png delete mode 100755 sim-cluster/Makefile delete mode 100755 sim-cluster/blocking_TCP_socket.py delete mode 100644 sim-cluster/churn.py delete mode 100755 sim-cluster/cluster.sh delete mode 100755 sim-cluster/colors.py delete mode 100644 sim-cluster/common.py delete mode 100755 sim-cluster/copyright.txt delete mode 100755 sim-cluster/create_cluster.sh delete mode 100755 sim-cluster/drain2.py delete mode 100755 sim-cluster/flash_crowd.sh delete mode 100755 sim-cluster/gatherer.py delete mode 100644 sim-cluster/get_results.py delete mode 100755 sim-cluster/getpid.py delete mode 100755 sim-cluster/peer-h.py delete mode 100755 sim-cluster/peer-x.py delete mode 100755 sim-cluster/peer.py delete mode 100755 sim-cluster/run_oggfwd.sh delete mode 100755 sim-cluster/simulation.sh delete mode 100755 sim-cluster/simulator.sh delete mode 100755 sim-cluster/splitter-x.py delete mode 100755 sim-cluster/splitter.py delete mode 100755 sim-cluster/stop_simulation.sh delete mode 100755 sim-cluster/test_get.py delete mode 100755 sim-cluster/test_p2psp.sh create mode 100755 src/InvObserver.java delete mode 100644 src/Neighbor.java delete mode 100755 src/PeerObserver.java delete mode 100644 src/PoisonedChunksObserver.java delete mode 100644 src/SimpleEvent.java rename src/{ => helpers}/ArrayListMessage.java (91%) rename src/{ => helpers}/IntMessage.java (88%) create mode 100644 src/helpers/SimpleEvent.java rename src/{ => helpers}/SimpleMessage.java (52%) rename src/{ => helpers}/TupleMessage.java (56%) delete mode 100644 utils/average.py delete mode 100644 utils/expected.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6b468b6 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*.class diff --git a/README.md b/README.md index 62ccabd..06ddab2 100755 --- a/README.md +++ b/README.md @@ -1,44 +1,88 @@ -# P2PSP Simulation Project +# Bitcoin transaction relay simulator -[![Join the chat at 
https://gitter.im/P2PSP/PeerSim-simulator](https://badges.gitter.im/P2PSP/Qt.svg)](https://gitter.im/P2PSP/PeerSim-simulator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +This project helps to measure different trade-offs in transaction relay protocols for Bitcoin. +It was primarily designed to compare configurations of Erlay-like protocols, but it also helps just +explore how flooding works. -This is the PeerSim simulation branch for P2PSP. Its purpose is to simulate practical conditions with large sets of clients in order to obtain more knowledge about its behaviour. +The simulator assumes knowledge of the existing Bitcoin p2p stack. -To run this simulations you need to download the PeerSim simulator from [PeerSim download page](http://sourceforge.net/projects/peersim/). +It currently omits to send GETDATA/TX messages, because it is not necessary for the current +case, but can be easily expanded for that logic (as well as more advanced peer selection, block +propagation research, etc.). -You can set up your IDE (i.e. Eclipse) to work with PeerSim project as it described [here](http://miromannino.com/blog/integrating-peersim-with-eclipse/). +Beware, research code. -Configuration file is located at `config/config.txt`. +## Organization -## Running simulation +This project consists of several main classes: +1. Peer represents a normal Bitcoin node. +2. Peer initializer spawns and configures Bitcoin nodes. +3. Source represents a special node from which transactions initially propagate to random nodes. +4. Source Initializer spawns and configures source nodes. +5. Helpers contain custom message types to send between nodes. +6. InvObserver is a class to collect results at the end of the experiment. + +## HOWTO + +The configuration file is located at `config/config.txt`. In this file, you can specify network size, +connectivity, and other protocol-specific constants. + +Also, you will need JDK for this. 1. 
Create a directory: ```shell - $ mkdir p2psp-peersim && cd p2psp-peersim + mkdir p2p-simulations && cd p2p-simulations ``` - + 2. Clone a repository: ```shell - $ git clone git@github.com:P2PSP/sim.git + git clone git@github.com:naumenkogs/txrelaysim.git ``` - + 3. Download PeerSim simulator engine (and unzip it): ```shell - $ wget downloads.sourceforge.net/project/peersim/peersim-1.0.5.zip && unzip peersim-1.0.5.zip + wget downloads.sourceforge.net/project/peersim/peersim-1.0.5.zip && unzip peersim-1.0.5.zip ``` - -4. Compile source files of P2PSP protocol: + +4. Compile source files: ```shell - $ javac -cp ./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar ./sim/src/*.java - ``` - + javac -cp ./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar ./txrelaysim/src/*.java ./txrelaysim/src/helpers/*.java + ``` + 5. Run simulation: ```shell - $ java -cp ./:./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar peersim.Simulator ./sim/config/config.txt + java -cp ./:./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar peersim.Simulator ./txrelaysim/config/config.txt ``` - + +## Result interpretation + +We usually receive something like this at the end of the run: +``` +1.7822426377729141 extra inv per tx on average. +2.155010635147142 shortInvs per tx on average. +23.500275013750688 success recons on average. +0.08350417520876044 failed recons on average. +Avg max latency: 7348.884615384615 +``` + +For every transaction, no matter which protocol is used, the cost is always at least `INV + GETDATA + TX`. +This data demonstrates extra cost: `+ 1.78 INV + 2.15 * SHORT_INV`, where `SHORT_INV = INV / 4`. + +`Avg max latency` represents the time it takes for a transaction to reach 95% of nodes. + +## Results + +Some results generated from the output of this simulator you can find in the Results folder. 
+ +## Scalability + +On my 16" MacBook Pro 2019 it takes no more than a couple of minutes to simulate transaction relay across 30,000 nodes. +If you increase connectivity (more than 8) or the number of nodes, you might run out of memory. + +For a more large-scale experiment, I may suggest using a machine with more RAM. +To make it faster, you probably want a faster CPU. diff --git a/config/config.txt b/config/config.txt index 8054b6d..83a7719 100755 --- a/config/config.txt +++ b/config/config.txt @@ -1,35 +1,34 @@ -# network size -SIZE 10 - # parameters of periodic execution -CYCLES 200 -CYCLE SIZE*10 +# CYCLES * CYCLE = how many milliseconds the experiment would take +# Note that the source stops issuing transactions 25 seconds before the end, +# to let everything propagate fully. +CYCLES 600 +CYCLE 100 # milliseconds # parameters of message transfer # delay values here are relative to cycle length, in percentage, # eg 50 means half the cycle length, 200 twice the cycle length, etc. MINDELAY 5 -MAXDELAY 50 +MAXDELAY 100 # drop is a probability, 0<=DROP<=1 DROP 0 random.seed 9098797865656766578567 -network.size SIZE +network.size 20000 simulation.endtime CYCLE*CYCLES -simulation.logtime CYCLE -simulation.experiments 1000 +simulation.logtime CYCLE * 50 +simulation.experiments 1 ################### protocols =========================== protocol.0 peersim.core.IdleProtocol protocol.0.step CYCLE -protocol.1 sim.src.Source +protocol.1 txrelaysim.src.Source protocol.1.linkable 0 -protocol.1.step CYCLE +protocol.1.step 1000 # trigger every seconds to handle tps easier protocol.1.transport tr -protocol.2 sim.src.Peer -protocol.2.buffer_size 32 +protocol.2 txrelaysim.src.Peer protocol.2.linkable 0 protocol.2.step CYCLE protocol.2.transport tr @@ -43,13 +42,21 @@ protocol.tr.transport urt protocol.tr.drop DROP ################### initialization ====================== -init.1 sim.src.SourceInitializer +init.1 txrelaysim.src.SourceInitializer init.1.protocol 1 +init.1.tps 7 
-init.2 sim.src.PeerInitializer +init.2 txrelaysim.src.PeerInitializer init.2.protocol 2 -init.2.malicious_count 1 -init.2.trusted_count 1 +init.2.reachable_count 2000 +init.2.out_peers 8 +init.2.in_flood_delay 2000 +init.2.out_flood_delay 1000 +init.2.all_reconcile true +init.2.reconciliation_interval 500 +init.2.in_flood_peers 2 +init.2.out_flood_peers 2 +init.2.default_q 0.01 init.sch1 CDScheduler init.sch1.protocol 1 @@ -59,13 +66,9 @@ init.sch2 CDScheduler init.sch2.protocol 2 init.sch2.randstart - ################ control ============================== -#control.0 sim.src.PeerObserver -#control.0.protocol 2 -#control.0.step CYCLE*1 +control.0 txrelaysim.src.InvObserver +control.0.protocol 2 +control.0.step CYCLE * 100 -control.1 sim.src.PoisonedChunksObserver -control.1.protocol 2 -control.1.step CYCLE*1 diff --git a/results/flood-erlay/README.md b/results/flood-erlay/README.md new file mode 100644 index 0000000..942f3f0 --- /dev/null +++ b/results/flood-erlay/README.md @@ -0,0 +1,63 @@ +## Pick best Erlay configuration + +These experiments were done to explore all reasonable Erlay configuations, and to pick the best for +the current network and for extended connectivities. + +For these experiments, the network had 2,000 reachable nodes and 18,000 non-reachable nodes. +Increasing it by 2x doesn't change the results much. + +The following Erlay parameters could be tweaked: +1. q choice +2. reconciliation frequency +3. in/out flooding delay +4. in/out number of peers a node floods to + +## Latency +We measure the time it takes for a transaction to reach 95% of the nodes, assuming every node has 8 +tx-relay peers. For this experiment, only 2, 3, 4 matter. 
+ +We label the configurations as following: + +| Name | In/Out flood delay | Reconciliation interval | In/out flood destinations | +| ------------- |:-------------:| -----:| -----:| +| Erlay-a | 2s/5s | 2s | 2/2 | +| Erlay-b | 2s/5s | 2s | 3/3 | +| Erlay-c | 1s/2s | 2s | 2/2 | +| Erlay-d | 1s/2s | 2s | 3/3 | +| Erlay-e | 1s/2s | 1s | 2/2 | +| Erlay-f | 1s/2s | 1s | 3/3 | + +# Optimal bandwidth + +The second experiment considers the first parameter, Q. We try values between 0.01 and 0.2, +and find out that q=0.01 make most sense for all, because it's best at conserving bandwidth. + +It's also possible to see bandwidth breakdown (how many reconciliations failed, etc.), +and try to optimize it from there, but we leave it for further research. + +# Conclusions + + + +For now, we conclude that Erlay-e-0.01 is the most optimal configuration, because it provides +the lowest bandwidth overhead (except for Erlay-a-0.01, which is way slower) +and one of the lowest latencies on par with legacy flooding. + +If broken down, Erlay-e-0.01 requires sending 1.77 txid and 1.47 short-txid per transaction, in addition +to necessary 2 * txid (inv + getdata, as per the latest erlay protocol). + +Since the reconciliation failure rate is very low, the first aspect is not caused by it, but is +rather caused by the natural fanout which happens faster than flooding, and thus it can't be reduced, +unless we use yet another configuration. + +This is, however, problematic: either we increase flood delays (and increase the latency overall), +or make reconciliations more frequent (and make them less efficient). + +The latter aspect can be optimized by better estimating set diffenence (by choosing q better), +but it's just (1.47 * 4) bytes, which is minor compared to the overall INV traffic, +so we don't do that for now. + +# Scaling with connections + +Erlay-e-0.01 also scales well with the number of connections. 
If we increase the connectivity from +8 to 12, we get just 77 bytes of extra overhead. diff --git a/results/flood-erlay/plots/compare_protocols.py b/results/flood-erlay/plots/compare_protocols.py new file mode 100644 index 0000000..c74329b --- /dev/null +++ b/results/flood-erlay/plots/compare_protocols.py @@ -0,0 +1,23 @@ +import matplotlib.pyplot as plt + +latencies = [4.3, 8.1, 7, 6.1, 5.1, 4.3, 3.7] +bandwidth_overhead = [227, 74, 101, 77, 101, 73, 98] + +protocols = ['Legacy flooding', 'Erlay-a-0.01', 'Erlay-b-0.01', 'Erlay-c-0.01', 'Erlay-d-0.01', + 'Erlay-e-0.01', 'Erlay-f-0.01'] + + +fig, ax = plt.subplots() +ax.set_xlabel('Latency (s)') +ax.set_ylabel('Bandwidth overhead (bytes per tx)') +ax.scatter(latencies, bandwidth_overhead) + +for i, txt in enumerate(protocols): + if i == 0: + ax.annotate(txt, (latencies[i], bandwidth_overhead[i] - 10)) + elif i == 1: + ax.annotate(txt, (latencies[i] - 0.3, bandwidth_overhead[i] + 5)) + else: + ax.annotate(txt, (latencies[i], bandwidth_overhead[i] + 5)) + +plt.show() diff --git a/results/flood-erlay/plots/erlay_configurations.png b/results/flood-erlay/plots/erlay_configurations.png new file mode 100644 index 0000000000000000000000000000000000000000..06f7978725872bbb11e607db09ce383aca00f0fb GIT binary patch literal 62539 zcmeFZXH-;K+bvo~%)Z@5RKQRe5CI8FM%z{_0YNg76-08*61r`%wH2^bP$Z}zQG(8$M9 zRohlkD3rCd(?2UyD9iBclEM|s@gKA1YFGS6%;BV_gNpSf2WLY&V~T>IgN=o?gN5mZ zoleGf_NLZWLVUt}g1kE~J2=?bi}Uka{`&)b)^;ZR2d{q8#6?!xoYu0ZP&i+a|1Glm z;3!R@e9ff&d|cHfc&No$r^az%;Y%-@f6UX}XZ?jmOnM5Nqs0<-u&a-#nFaOiSJ4i< z9cSTk|Ni|fS<}7cmG2$J?5+j%aJH||$vhWX*gJJyzg+P`><(YgE6vtNtuog4e)TM< z7^EoombhhY=cGj1V`76bH?q3wvpEsYPtYH3CbL*m=%-;+QmwGdQ^S<`O ze}9lU75_aw|Gg;wd(SZT!vCYKz=!CwKJwc)m*JKi4SLt$;K40hJU1>P7kq0;qtV2@ z=5whrZW9A`&d$y@CvBE0ZxQ{oHP2lw!%9~+kny>~C3}R0Re$;A7hk?5>k5A~U<`&* z`0Bx;71uvcOr+^v_wk7}Dxt4?PC__^VlHmqbIWpBYy9ZzF9y3Pzny5#?$s}$b9eXl z_7<8kK9VFo+2&>QV(C`K|3^s@SbU86Tj8q}6vj;{l+}AEOPPP-OU*N 
zsnD84_lnpHUuLIst1iZ>H%j=$G3KqkJc(_mH(TSVY(-pEMEpY^uLXy$W=F@MRJ8&w zDeK;Mj46G~vDBNwsX232x5j(andivi@9ER8_wF?>oRUZztyYszW4zO2BjoPK1Qk>g z%iBDfI`m`f|1`S&U?EVx>V5uU#)a1XOj*(XOX@`Z>mB?vW$!!e>r#j7pXtOh)7;EX z^8IPUrV@9v9gNQpU8DS~a7pj^*|qV_UVIvFrY#q=bpI&0bMPW#f(+J^afzBFzBwqy z?L0XmclNL&pSZNf8%8iFVOP1{uZ)Y>#qn=jT8dRM!X?fi{G@%^7rN`c%Ojj{xe58wW@iEH! z)Hj>1hwkpfA!Os!8?_H!Kl(BKV&M2(`iApa z3&C_1`6iDo`%c8i1t|HsEsE7hSW)W9nAxdKxbTNEsYQb_L*diwXZr98LjLfroA}MB z+u2_I%dgCJ%FTEf#yV~1T9w(m=kpSBPtE1-{~Mr5&l3&_r@pzVHjOy-_^#gGeMgTT z?XC#Z-*iyD`fG(uqQavi#uBG2ES_&ZbUq?b&y9v?#G{j$G}c|ZkBf_|CQ)C1G=Cx;)gw_`npQ)I)Ale#uuCQ0`gVD9J+sA zeN4N%cN+Ijv8;>J;8%Dkcrqa&p?_q!Aw_oW_9N93gLNr8!VO;j8YJnOEbcrKY@Cyn zB#>rWbxPKcv&s_i=hs;$U!_)Lu*ItginUNgRk+NVh zl?s`KTx$QfFP(0EPp*$;ievsoe;h5?b$E$DMX>H&;Ve)VQvrOTJ2)2&)! z@g1FKa)ANVvQQ~)kLmFen_GQ-m$kpuB^jRZ82>1HKF=*}e)g+kyiS(#lI3gs@7}%p zG(5Z}&)qp*^TqBew*=js$aKr5##aUNb2H|{jcJX^#(zl9em+6o$H$K!2b!}|oJLzi z-1S$qOQ(9yxv~4~DiWx?b}(~FC(lg{vsGsj<`H?~`nrZ!GGxWLC}oLed$XN+PL% zcML+KDpFz7#*Gp6^_pwBq;&@C5-YAHE~eDC9c{aEai;T4^ttfrd=K|cJgTSZs>wz) zHg!gz^L@FdP%_z~LC|ra`hf1Yz}U+lpH2^@R9J-ZYbNc~eYAY-w!Ww^&vyc|ay3s* zusIIZpQCT&(TrykUXk}Q1B7AM5 zc)+hO&8ETF`BTdm7rMXoS-ah$?*2iI^d?cZbvu%DHL&2SY;|;WRP(R6(kfqGWh1ve z8n2ykHqU)ZJuzVy7ntqcGIQvvqkk9ltg=HydsBS-x(^ zgW>vu*|9%{9p8t^$jDAKyD62H+}+1bOSx2*EOlwCPtXy|pzs$~j-UQqnK$uc`2aX%FZ>4ocjD6@{INe>TQxR5uKLU3WvXdrYUC^Io?(K5&LcV^ zmW`^{R&PGwB{FGg+4#Q%!>sD`Hfl#s}gOW3PN48~gm(AFup8 z-e>nq<%}RH_w-+X{na8(JyB10v|u40yOR6H5=ZhDWd&>%PoF(|WUw~g{EQ&?LkXv$ z%y3)mLQ8Q8iAVJJ12u6b^*vvTxsGx_#AG!1)}>o&Yb$*D@m#R5ap9{y2p_@yTiqvz4*#)*Kh$(# zi~g05!G|yU41Yz;irOCTJld*NyR&5c%a_APEgSE34>qKk2Wq8TG*}d8r}^}Tx|iQG z+RUT+_SPSNl-kXXU>dV|$Dp6E3v*IOQPmpU=jpwg1C-*6aRtJfg3wumE=weH~eEG65!#eEdrh@@i zx303SkG1W5lkGW|O+6qgs;Qgpa1Il8FF5!)f-9n@TL0wQ;kv}IpMLtuq5IvYLwdQI z2wG)HZ}5!f4avszh>@0@-fzxl4GbPzp3=iobDSPGb?p0aoa@TxU*gN+3o4RRu6(y^ z>R`vT)cyX$p6aJ3S7&8rUM>wdG~E~O=g{$LS>E@aM?9+0{(5fx&m1mZnHkP-`&xFC zo0gxSzfH<5m4y1PE~9v@lvAB=ZmY=4-eeX>}Wa{BbwUt?Q1g 
zuvztfDO@+(Z6X$97B+sj(Q%|%tT^%DL6h>}k5!5?hxYB;=YiN@RvmpuM^_gad2imr z{0z3-%~qGrwNl@^?qV~WWzQG+tgS+*;HDwc)|e;eMVi&b9uOBlj}>0~=Zy{a@#1VfsP5;{zq*Q&SaA??a{B zTR-Zrx>vHs|CaQ()k{YlorRXz;3Y;L4%gdwCkIB(+w`eewb37aQ5SV9v?M65ljFBN!4K{GQ8e z_Hfk;KAlX1H*elFN-H8BELpr*o^}cIFEl*$y*I2$uLy}q&@%9Tb#KYQl!3YsWy6LI zWyqfo>F;O1e>Kr8zOiB7dpfsV5M3Ve?JNFfIwE`WfGCM!BcWYaztymdwIz z)%e&LLJgqA5nQCIxh=)S&vMcr{(Rbd^SXp*+b?2sn&ywF=;XC4fm6-MDFKq30{e{(`M9I+tQ+2SvFUgY(MzRZca{4v4nk$m{#53 z>wW)WWQ?Np_n+y)4t+{T_c!)c65ESnA>>tu-;EU{d%c9_&|S8!7yII zv%%4!q36JS4fB>JJssD%0t~l4Yy}$0MgrMKI}b3BJp&k88Rj)_>Mn(iRN}zP#T6!8 zAcW05{%SS0qVzq1#rj@zBBjr) zmaW?8Pi^`!|M{PP{#l=3pKK(09{@Ihi(4+>{TI!`xd9-vRqNLWItb0aeHXtlnPJsp zmecy;;`QOj*Eyx!j2a7EzI-xk$)2#RasEDLeC_&m^M(025}ptnqmVKi%n-*<1x?1t z*>jtI``nQ+p+hHg`L?8lcHqDP%!YdQjD+(@>Y+}BwW~P{%YsDcR-Ed0)Ho`nrh;?x z@~Xza_inwmY7>dtLL7TUY|6}Tk-V36=w~aB_*I*DB7j;ixyhvYAV)mQ2|RC(O9<(3 z2FGe7>N#t1V@|v!j#K(}Mk$7=|FQXy1EL$d&y)VDs5oF%%!!}OeDv|F%W2tSdyFRr zYIx44UJme>ot>q&teHxdI4BvoBk^)M!R$0R@oB>H{h!BCR}*n?oKwdX()v>ey?aKF@>G#-niAH#d^maXWXrjN>Sy8f%HM3XuALLh zlCf+QMh21-y7d0^2Ej{v`S|$CdT}5Bh@7C8kHk9E(yC%o%_#8 zpez15m#3(#$954jqny?x!>d?afrdG#OX~wDM=k0o_X0Iqu)P7jfbBO;gk#X05qdoNj%)Htkm`JR)a_6gSY6kw|S=9No+)IRJkF z0IF`W3t7m81@dZLSS`VkH9MuQH5}+taGMOg{pBgjgX^ecYk=dL_D);pO$40s&H=rwHyt=T{O-ER7&skgK9 zVh$g&a;#dMQ`5y}@eLyg{IQxZE<{>Zgvo^U=RFiM;Q`_(FE4MzgCi{5GtBWSH_A~P zm|ToxTaqv&Pq28N+e9T`7E<(6grxmm^RwyQ1;+30ox^q-ahYxQ&#sj!tm?Z$RYI+(|6&8_L_f)Yca%owb+;iSpSt0D!U2H~Xv9Xyc! z)_%2#(`0I-#jGhk{=$U|UEST!aBq{G-b$IQ>}<2X4^QY306PN(3v<@lN3gbQQcbI< zG}JT=BgdER%&VU3pf=18wety;t_o6;4yJjU+;*&ts~U0q!I{@~9~Dy>_Zl#RbNOm+vOIs(gJ?S4z#aiIQe z&>V7{prtdw6Ka0Js+pKw)$#k>7pm~q$MGB{ZUr7Qlj(zBI*Uyj`6NKOr~;gEw@7{? 
zDgF$NcE@n5+x++5QJ3asy7-ch!U|3A|GdSknRE=xO`)nc?4nMV-AOEgcMi8qt0Ef$ z&YK(|#ggUmi>D$L!$SM>a>xGMpm;jlRY~5gKKM>Uib<~;jjRAG_VjA z*lk zKNqcUm@hAwDBxiCP`vuWK7D=tYuB#zm+}`>2OQE?g{pAP$LGu$FPoo{GDQqu zEyW^^#+oKv0k$U<*BfrmQXvEx_??<0!y?|mgV?E4vDz643Hk*IK)5_q>d$|xZ@|W3 zKc4{y_95S6Mo{WoS5|Lm6{#G+mayRLeJqU0_U z6BA1WRiNt^mOc|+3p4sn^S{Sj@$euRaxJH%=AAotVs&%U^}OaBMJ|6h2F!jQ49=U| z+oLcCr2=adLM0oV{v{WS(O>@D{&E@>TpC_ZO^k|cxU8Q#f(aSNwcmcbjLHBh$G0f3 z7CBUVZf2?u04~jOP<`3*<%GD3RgVus)t`3x<8Fmu(Zi)-=g2N`>grqiaazT6H&p&_u`Ycc{xy=9FKseF@du@{YlvPz--O(yfP)LWP6dqlgnH_3K zL0vVPH+;Dl)Gw21Mzu0Ea{PHl=7Yn-A>FrLHrZwfQB*)YRyo+) zmyS<}fm{HXDoYMnFEjh$zm(;Q^76vqAs%#B$bpwCGNAh50J(4f`Y+Uhp+qPRN4 zTK^ED3)_0m_cpgMt9?kX+YVp&4Ta@SumYP7omU04t3sv`x%g(yNm<#Z#;B3)E^&u1 zzPXGwkBaI3sx5rH07&1ze~-ZfAS|l+uq`%@a2jqDzw#|9e|q4ch;^&Z>zkYIqSPDf ztw=<&A4I6AMhFlJnG1|HsR&IUu{Z-tAk*_3n<2U7!!t5_Ggk5el?ajh!Vw(~E4n zdJA9mRGU{KR#GW)H%c&a<<=`-703oa^}P5UdK(IY_QiDd=_)1>m! zF_FJ{t^XZw_FQGz!`p~1PBtPKNa5A1Q0PXMbaZsQvv}nM8U+LZw{E7*@yfDiaZ}m1 zw;x41x1>k>vV=`Mb6b}Wp1lto>BCn2EW2lW6@sWVw}F@_)o7(Bxd+zZg5ILbw&HpV z-z&Tbn}Qe7hp?>i;==2VTed`@M!x*+-hpY%3pef1`BWubn?uL)4U&1xZ@(=aZY$7F z%pJQ)jrsY;k5lja3o<2UCWlo3)W)0bD`JqU7_X+QE8^}x#Q~IKwOHPd!Al^QULLA{ z;mZg4j^gqdW{pCRPe6-qBq`;!fj}>NbX3vDu**0ETCf!6etN($foZ*iPGlHwtdk~N;4Z_CP2@gGp~LA5yiN}{H$wBrtJyPoFp_r zUU*JcT}gjyTf>uMzbqypq1A0L-rXsEx0QZ~xTCOrkNn(ZlQrRq$d`4IdzY==tcry{ zL8v7{pCWj1=h5tP#z)yW#Vv%l)L)j!-wAm@`sW77%XBmf+L~RA+l`9%E}VUHexP@opo9~6iKL4 zgrxv5I>5`zTP9T{Y+2`Hcp1AZy#ipiCfm_`qVMAgq<{aE6#LK`vQzO>f0ui4A<^%y zu9yCRg7!WLoxMY-N5gJzJn(e)$(zwO9j|Ww@kbT7BnOZKh}d%UPoF-eI`k_a(#=+d zFjR?bE#mUo0D^@005@Z`0?WJ-uvRD?5i&G1bOSRu4h&KMlMLj(H0w4!q*cP6s-Y&4 zHyKCCX^K6*YVFz@L~xQ)A4VQ15>QROe2!3pZh(QtTTkibKWF>N$S497E;kKfaqR0? 
zdz+JtXV6>^S*@ja(c_$)9H6B8&_Re$1@iULJoZ(|>+bTsM?xiCmF;`WNr~mq_4Ws_ z2M8QWNMvq&8tXh4{b!RogoQ1(2#Q?3X6s`>05gz?W_1ZI9M`?*8~^pM$46UTP9V8e zqd3B|Z{ps&x97rZ#waMqTC)?UD3VC4X9D>`20OR;JV9=W1>puyg?xe^L(c}ebAQHd z`D3Y91zw~eZwX^4vlL2F;EXth<(cpokz4NFTM<^!nO`tBsm-U(5Ots*25>JaB-yS` z-zzU(8Z^N+NtYyuDdthJ%rSW9nv_rkczDjBlBq-V+XU{*a)uCXXA;@no{GgIVH)L#OPwajxcJ-|ElV1_dKK3t#tD|7ORTP{I~F*PMnxP;*Z5Uq(?x78UO|> zcesVDhgoiVRRBiI40BDu!FY{^ehk=wGpA2~z!Y1VpW4No-w~;R;6*MVK35R!f)F-k z%X6X5$X9{f`aT}EFe?jGom#dg^*WGN49Y{do`)t~LqowM^U%erkw6(jZK`3>auL%? zs%{QGPC^n$7-i(PR4{~ixAh~%Z{G0i-+vnL(jWM29j9b8d4=0e{-RP$KRiA@-Sf!0 zvU&aL)hbwXn>aY)@sU9+S36tVthmINGGDeZqx+EGRGUAq_>#}+^YimWY(tLaMP%y; z!;1%H7zt^zL1gxBgmGyA=H)EAj~AW94UfqNlk|!$yqf-oIsEzGegf{NM}YQ-Sh+wg zsj5=huvZ@3tycjln?=h>-9S?Y%_nTtoar!y;Fn0Yd1aY^&6iKFwelpHGuT|j%SGAC z&D{@TyBZJyi&J@@%Pyc`rxXyfE-kga3! zx{3M<9!?1|k9<`%Ko~-(qjplyX~Gx|-nz7kQ^vD7JWd2}X_Y-?k5c$JnU6KaTe{qvY*4(*XV`ls!U|Fo)hdmU3xJ5&-ol=Ist5z5rne8adYz_p8lRSIY89QIw-w5APaTxHz|i`@sayuH6|3~*C!gVgy2M0+M&J7H3WAmmF0~p2nQ*D? 
z*H>_DSiVH@!H^goED-@O<&=lXcm;I2y1F(bBTGRoo!A+X1|~E%xpGP~I6ZJ{F4`WyUcIW8s3(>8`PFKtSe`tTE(akMVr9epr*Zo9 z??g&8)J6&3PHknrM6acvrcbH%4GsM8MG0~hnA*o8<+#nsGu)uANKKz zITOGegHd(aiLgs}&SnyV3j2Umb|4m1?(S6(wA6omY*CY=Q>m@BR8nVH>6Hbsv+9cs z)H{v#<;*Q(rhWgTyzG>GByD;G*!YHZ^KZ++{}HAD`4-!(ZIV~<|CGYln#>YWDjhlI zt8LOYXV0JS<$JfLZlB|hB7qg-%#n_$1T-)C+~V9;<35@f6&)SDV=Bv`pWf;*rAs_F zlLzH1NtnXqBU8!Kg>g@8oP$lCYO$&>bh*SG_Lrt-v?l6gjp_Pzj*f=)9AnuMn`JQO z);QQQVr%2I2T`xmBOoVef~PD$*OqKtiVdtxw;Hj1a37`689I`CJfZ2!@=o2}){v00 ztrvB=xLw!p5fb{!Yng*B-b0y{IcnAHPsQU`@O2_4#)2nd7CkINIHYsl$FM$NC5Cny zD6KT91p&=MXiD6nZznuC&54>Xo?5E6gYDh4efy&hQ7r0IT{bqh-psSin>1^$pe$5C z_+?16S<+6(L0BYdu3sHK%j1L@V}6xabPEn8(V8?)X0*bazU07vFkcbl;1`OP()J(Q#1%bpbjEJ z`g_*~Bm_CSFg^nV$F8C;?g&AUdCaj2Nz!nb9MYN?ZPO=C4S1&JzlJUBhZubJ?Ad8J zGrB%qxWUC-+1_k)!=T9izTG2;Tq;4&5@PuK!%Fp2E(AqF2uyVvKEG_)vTiev43+Ct zC^vnOlG$73XtZhqvtKK+IOn^MAXET% zC}4+apt2=KFwo;jE|K$X%VCd$7Y{ML0{S z3zQ1Oo9CHZ4|wjv@^+YGEbAgoK}OgKt~mj{`@IAEa|ioNp@7DqSS{QKcUgb_M{-SO zGdOnan2#TPc05(?%#~GmweZJ9?J{8ZM6+3tv^~_|q10dQ-U!+Mml`0lM6)UG^_jNe z)EIb>5te_w8pnJb%8p{X5arPkO9FaK@%DZBmN5rsvmf9XDtjOm;jo42l)}IG8kbO# zcr*+3Q&7rh0P z{Ui68%Sb~!N{_wCB7JeOcmPYpmyc;XrM$KGF%60g|KqJCxSqRI-2zWtB8dT@T5+yZ z#>xf#X3|(Eum)mkFD+z!^4qtkzt7Cb(II|AA?$6x$rwozCsqnwXNRu0)U18FzS$es z;YJbU;6tVPF^0RCgEd$du!e$dFx>Ye^AFzczhZ#wLYMyja!O7dWBx0J7-pHjYChl& zq%5fD=ma~BG{=D3=$|jDTFG!Tp zXYTW)SAdC(gzQ`bk_+m`C#nJK(muwxG-#bzt(3@~@{l+%nyA{UgoP}cfToH4648Dk z6_`RYVI%W9NnChwKnv%v;0XCgB5IT>LZ?-}=5Y zkQq+XB_$t=YtDQjCC6H5WB2eW#P3pJg|df2#Ya;35AHd2>jPAMZiYcHB<`;d`r?D@ zIK-p1QZ4~*SAzO}0{)4Ihv%J>49Y8#Qh@+aVLbT_7_W z4s`%Dx_!E_FBRefyU!hC?J8k{D&qC>&ci|Gs@ie&kf5M4&^A2+r2;ruvCQ3!X?YHD zfh9TOFye9k!r1AD^*ai|e~&Id4{UklZOm z;XZ&m7aOAn)C83VxA3_%vl`W07h5(BW-kM!-tXCxV^zYWT=9UNsj4^IPAwdBd0L|+k7^so$1ZY}~CG=cF&-EnXe=#Z%EVsP*>A`i;+n5)4>k*){ zSkk&x5?_WwV}`AP5-0}J6?tY%$C*e|$d#_Hng^I)Fwn=+ErM%+h6?+gZCG_WXkVuo z7L2u7iX9r`{tc0;H&f9}8!UWI^2CRQQ z`oDv#?RgR%y&uRx{^FZkAMYwk$H0j9PGAkzfwaeTbFJpSv%w!A#KIzJNo>-nepFC} 
zR6jnxgm7v%Z{B?OQS`-&7t`Zaikr4>jRw*8wzO34K95=@X3ha(mxRlG=J+!MGl>P9 zhEmwxI2f-&j|hrz5eF6X>*qg5Brt>DaC)qiAF_;O8tm6qwxpKA_T`Xr6E5q#zMiYL zqv*Pb4RhUwdTLG|Q&i&GQ61f{?4hW*G%xAXY#rB5t`9-$V za4hC#KT&<>V2CAJWu)mDIxZc7XBtEw0MQJG)$1M{zPJ$+Sy*_C6zQ0LCBO%3^ILw> z)2Fe(81%zsP#@{;Shj>(5ym5WK$<7yD#KQ0j`$tyyF+X@jl2~ClbiA&1;s%MTD5AG zG8!m+)^jC5HEw%(kQv~-!?o;|wf#r`Q5ZZ{9^mw|oTG5J(v$)6)4Ucu59wqo5$y)Motp-Rqpa$@ zwFYVwsR3EgHE|l;GMIp4TVwLRMUK<8>jzpTm>nU@4Pcf~e@B>l z#Q|B(7(>m`Sfz55#IQUju3sTGOeHne}>DRNA2Bs7B+>&t^wk!kJe)j9gOYzMYi_# zD$2_ALo4LdK$6jpPR*}gvEsBnzvB@DJ3G79jjq@ATsoFnHm1^m(TKw-4S9laFoYnU zpP#Yz0I5lM5^6t~CS@d&%G-C`1a6`^2D$iN_gCx`b13qp9q`8=f2@ayP(E1Y2JK zOn`~NzGef}G5wQ=ENY)WgvMF6rbvc{oe@Mmfc?`T9fpAnPh+Wyt)t8_9M;TRP8yeR z7lIJGYZ??JVEC(L{{Tz}&#{2z=-^`SCRAW4QE9}bm?Az}P4b)jqy<_Xj~30atU(UA z@0!u#;$k^EwA2)j1nngSnVFdq(VLlf@WsloL0xO-=%|L|4UbeL;vf%r5!67ym}H+- zPH3f>-A9r@O*`Oy(|!ajlLpxB2}lyenK?Z_GfHwhsC=H?yMJoC^7S^!pNM5O@!A@& zxhfv@LG=`IOL4_~6coy)37IL?_h-Ov1#p3y660i*1kMDi!nyvKgh>9}&%c6&u_$$4 z4YUN!^>EAk4C1Z;Cqa(@{nqQ6Cw3pH4;yg+n1kos)87c6iL#@9Waj&KLXwhA53-AR z9VbRwv_Ytz7CJlNc^$4?80RkH>UCmTf>+EBG3UZY5=n?oRMG>WUr}1t!K|YOLo8z@ zDMy^nPDf|JIj}!e<6OyadS6geHUvzpV;7_Nk;dL zM2wj1rvZn&OSVMZx4eFJ2Z$I2iYBBJgoh7Kq>qT*_YVAbAQEWG2ojDvaNzQ?AZ`w%ecwjy?V&q<1G_p)l^2={nKio9%d)07K z30Y)Zc)f}^_UWqG4yMFvY&w#|GQ(N%9h@KaT6ko2i=$v z)hG{y!?pkgK<&zg7a<(+GX^o81km&87BOhe8H6E&u+ow851-z=%x91)1uGur8`9N`>`trNhYbs-QVL;)6Z5Cv@i|TaPNKu)o$bu6d=cPE zs2Vjui4VIiUZeZQ6hc2y6$LFfY~CCRUZxLFhrJbDUXOd;KMaR{(Vf{7eDl_=dj$mr z5s!pK=4NN@=EN8?>hj^s{C4_D6N)FAy?}$1KR!K4G{^e~RQ5JWqbm7;Zi7wRwgn;N zixmk719B7H6#1(L<^Uig+G_|LhPFE20BZivjO0-l94kJaX1XmR)M8OVEM2sjc zE|!G~STRWF5}jSf)BtBX=Z;eU;9N}4uFyl&wrh@pK1+9K3;h=>B^&)oUt|DQIpWbw zd$%gOG^iy-MMZofEM)3y0Gb5)5Iq7et=>!~eTX-dEK6f9IPVkC1+Spq;if^(dwb3u z^%L4R04%Znno@+BpW3MiG9Pts4rB=EbQVH0$%4Q$b6~d_?#rgEzsrp1>bZ(+0?#y6 z>dT)Sf<$dm3qG}&x=W{%=myvxjrJD;A&s~Ch&v5Mqf<{b$&Pg^XQnXtiZekc>*>Bw zm!Ht~LpR*2l`f*7LbMSqrta?U`?8y)>qUK-{mIN(Do0d;t#l7QB~LxA4Wdx z0Ka)1G2|H%(FPWIR%pj70g(xMJMqvu4b>BL&Ao3Qc>|F2NbS}&JPZcrzr2VNlqX%w 
z!1R+>j0A4$z^bztBm>lhQ#560H6Rb{s`p7t>mgszBOpK4#~)!m96u375QPXSX;Q(d z7Ng89>w*44<3VD~?)?w5(Qx9}h=X|%n}OWV%&$rw+fcygyKY zkdL%nL>eWCTM8EIJiBlz^K#$`#@%-mtJFz+I!|UzJhQW#rz6YYHqv=&7(K> zoMEne%E-z9(A}m8I&{I-?JVxz#h)xzG#sfu{dpzp!~>9i!FR(~CtWAy0&HI-tkLh@ z<_dOqF8yUq)`+tpkNPb5!^@#E*r>tfG#oQQes1b8wc1LixmwFVR3L%p&=oZ z1WANYv250S?Nr1lX3WVBD2^`dgX zL+vNNZDOAyH7&6(H)qEr1PL3566QNRmf*5;aiKObFQPIahdz*-!M*qs z3S#u=!s(`ta+`G5k``Q89vmF9m|rc_b!}fWaw+2bmF8SK^E0Q6)~>DCWg0SMyd_q zM0Dm2wGXh?Nz>QDrQX`<79Zij7xtRJLYP;&QBWiyjL>t8J*|lUWuz6L^DKXMOpbI- zGn&dgXDkWgIM>bM)SUmUT+|L_fnZM3t_|#SpW0+mfC7$O1c6))t)NvC6O)s*sO?lE zPX9pGIc{B2)yNu5GlHoNK1Jk1(u9D$sNJW5=2!ACfS4pa*$bx_j0`lBkA_#2szib@FDVfK~p z$q|p(e$C93RO07iUU2>69Q{4+eiA6jeb#ZrqNCs~nz+aypdXWFFBF{S0NtcPD7l5IFJ|w}9a7^B#(|1=1VQ+bo;3hixB8M8qwAXy#r5+Vo6B}QF=()K!i~AMGR#8yj8=l?k#&1 z1;!1~q9*#>35Yl91_lN`mKn@V2pf+sL4?SxQJ@F9Y+p0K>g}OvxAHUGyxcU1RNyTh z39enB9>56%Sf15HuxC(+5xG1p{x?YiGwwAv5s0D!z=I?oOLruJ+niF3wST?hRrt(} z8#m|?vRod*#U9E{t(ms}Ov3^@cOIiVV2SLQy5Z}4w(75MF2*-2B){H5Y6UX0Z#DtR z3NZ&AqyJ>n^gSljh!RAKH>iXumavj9RtOTN!3;qv&6 z|8sQAd-`>6=I1DxH&{yKKhB#O8Kl^tsf2{DmN)v$cWaXIg%x1ka}t*n0`s0nSy6>* z?>Tl@_qzvp+z+H*47ecv_-^ghgh>9pixpnieXvzBS_=xSA20VbG^b8$7C-fQ5&y9MSw-^2b zkBn`-dCLEtqLh4$^1uwilp$@wkj_n|o}OMXG*Gwl_zvH1EjZ?AtCsAN($Z3^v|sg) zG3K~y8PTG6h4Kzz<=gkXKL;8F1{TtO)lW)h7Z;aE4^%cpE`YsW6HdYp7yYCHoR_hl z*tJk_kwGjuJ^=nKaySIFAIlObyc&CwKdmzY31qS(nPTkrDywI{85fheUW% z3Qd&crJ>Ux)OZ0hpa&K?$YA2XAgxGXuT62VgShKh7e@^3>d%Axlz)chiu zeBkk1Ja*8C9-iNqQz{lkUo67&!x2k}Mt>C>rkpbHqOaqUeh%-B87w6vxFpmt)dBC^ zlW0{Wy;Zm+k-(Zp+|fkp8^NbLzDv;8q*Krn83J zFGnpKBM9k1Cys-N)eBazYx+d(Kv(|>#Hjn!7<9U!2jJ)IBWt$_EB*aYjnT|Y%>6C8 zZc)2O4{Xr^R0Nzs^wZ~uABI#A1srmQ4juhBFv1f$kLxhMX2ZL9`$;NsO?WUL!rT!95c}52uD=pFxaF$Xq{&GpGG4cShrq*TX!K$ zBrm%H9#R97On6q(*bX_F;3q7v8mvX$sUeaRhqFmfcDlW-?K!aO+%(ea5VD1Z2qa(c z+BtPc`)UrL0GP&Nr<>NXkVZ+p${5emtc(gzz%IE!sb;t!y-$r8${4p%m7NPdR-j070MV897# zz~2RLO{oQ&d+wJv+XWSb%0y#If7d0XTsf1JYj$DLwQof52po6S}A+B14384YYX}F`~PUm?viY4cIp-2JwcC@&YIj!yG 
z&2@_m(4uPRAzvhbBU?fTJ9~T=n|(9UFRJQ7~4!j-e7B^!#>qNaF8XJt=l zSXkKnx6bvDFiF3I7UxpB_}vQdqs21|tZ}v4>a%%+w=N%>hmMQl*9-1BqPLKu5et=@ zMw&(OPmZuoqsjs@ibm<_dTx><4o8&?i;Ibg5nGb8Ep5g!0l0rNpU!?796R$lX>!y@_|M~9pRWvfO!l$Ss39Ik&+v}jXr zvrvWP9vbwOTGE+gnSpUNO~n5aMkQyhw6?a=BTxc9)7} zPzC6ESVy>N==7+D)+_Yf80})nOku=_j*W2!9{GXFXMcbtAU&cuMMWL&9Vg6ufEH?7 zm&m~!oUh@7hw%%!exTm=A3l5zIOP48R>wz2t<}--iQ`QoGc!eVs%xSYxj?xeEFMmo zt|eQQ98@G|sRD+IRHoPy6%M!lb_CXEPoPT?lky!15q!lWy{fR-TVA>Vb~`<48zZrM zHx(4T#>OpKeg(%Ev%_eQG5W~vO(;&B9&``XI-!bSeyhXwtKtm= zSp;pT_F!V}QxpIPLB^o1&<%tYhbEI4r1=_{H*$~8o+>Fev;F>!d!2`osb1l9%LZVy;XLF@ZdOf2S6x{00j+Pu|3X5ISz zcO^egDbnhNTxE`>4rg;UXOCVPjsS@H(i7^GhxB??RD6m zQ3Ug#0)#10GUf-?{H{i|2?d5qL*`B|1}e6!BMsGcQ9yh22#6=8_IeGe+Q)z~ZH_=+{uV7~&-qHw!Hfn~BSt_h=?E;KUfXBP$MyEW;(G6k=>x~r-p8WRDLE-!KN>JUK{+ZV zAVk@-XAe>H2uY7R2EHU;K9;)$WqSoB7qTU(q)OY`^f9}Pekz>Afx}iL6>`WKD|niL zwt|AIJWQ;BKFwqy3Kn!Mks0775LBp|%rxBE-&h-`p(y1xj{X_K>=A7w@ENOyb-D;2 z8i7$b2H{mvQ8!Li!!kF8A>d)U7r2{h+>us^{S~ZX{MT1Z@D)f#sz@^6vWRzO@I^6a zX?g`QcOY;Qlf@)Ii}!rH2LUn@`4pG}r{Z+)M+*c-a5_87;jdF!3I70SdXKn(c#1t> z{-n~N#=Q6k&!5XUaSCFF*vMUf|DD9}7N%>)8q$uJP!a|#@-eEe%nIy1Zo)(*{o?}9 z5y+^fNI+sELLkyG9Mr{4alFhovaJW4h4V^T z@oJ1%F422GfK)=xH9;$8EVvjfpeBzZ0nmGdM@6_H=kkFHfe*VD)6!q~jukMPyGaM# zOL%&SjD(qpz@W*&j)*+pi@NtMg--+@>q;llB|IzIR>PiE) z2xey|CdXh)z&eTwjK{C2^!yoPvJqQr;p)Zhn5!#B3eKejlmj+XYxLwaacw@o2^(Xq-P&8?=Bi? zTGjLXZd`*dcX9&f-3?8g%mvZ@(hy-8ohWoF>7^(B&aQ!h0hW~&O%O?44w;Z~u?Vo* ze_%-~?Y~?C*C;Z65CP)f8pV!MEG@)jDh*VDqYXE6xp-g1av6fJ2kbWlv~W3A1l{ot zpN1i?CfC)TAOg}76<+$+KlbEHFZ+h_vwXZ2Hlb>G{7CM`@nGtp6G?JLU*aVRK6f_e zQxx)mG)h9xeM9x!<>ul z5EWr7SA%e@h8kQ1XC4BnDv;WdbljVm^zzQ6IPYYR_W3Iq6Hly{q815IX~-%N@6W?Z z<~xn1MyPo10j#yt#g0K%mbLSTn=`-Ie*6MBafr;me7jD7^Tt|JBZr$}|6&ge0zEPG z!L}ODYp|b@#pcT;!$-q$79SV|*(f*w;$=nHggs&>QBN(K)FI=j9XnRYQOBB_;uIrF zeU7uWY@q)WT(Ze>2QwTxFDZo|s;9-D>8mvnQii7AKBXpEqwKWno=^CN-LtU={ z`%G3hISbc1|9hl*g3f_0TecvHk>fa=b@wqRcSq3}gJj2G!N>mDD! 
zx)I0yAX~)3?;tjk@qfYZ{M9BSN{mmWllOD`ONRA^wD=GMWNKbqTwJJY4?EN{V61Mn zht9_E%1)zR>8DSSA65X_-*V>wQ7Wb8*U7@x~mF&C2e2}TdYRvFJ+7$H<~ za_AgtBeC<_nlf!K5^o5^>UURHjgmenOE&u}5Z1>gCi3FvVIHAVAsT=!KxfkbKmfJW z+8gzR%?Pl9mNcR_af;l9?M?cMi|^EHxsd+NKm1@=QEB&@sFye(h%6qEh6Tg($Y6J= zMCENthp9V#5Hr^+O2N5@1;g3Q+4i5|%~~qzIH`sD6J}nKJ~Z^D$th$RBb5h9dt}4O zHjY?TB5(@_Yyt$*?Kb+hZu|E7_3M^NduHS?LbsR2^EC0Z zDwV8qBo26D98qYaD3&8kP5KCI%u*RK7T-<23CC6M^)+ZfDrZi@Q%gC+0a~7KgV<4+m`^rQfF#1LaCe9Ng((oS6<&=*`BW zry$KB8gkPJ4Kz5-oO~E(QLEyaa2k#P)-RlNu}BI*GG6(^^I-0T5#7o8H&}Ib@NK|W zK|n85!NOne_ul@ugUXa~MB1f|M=a_;j5sncy>kkt(#KS?{7Kmg>VhD19O_c&B>EOl zX)%nFmP$ln9CDd{!mKQ&`h3dRhZF1j5yV(=T+pH_)l`Krx!|#gj}Ujf-(mXduO7BG z(+1``wAo4OWvkGovj~irq0OT6*Hf6=PYu%bP@2j@4rbhyP4guoH0&y!x5EHhcG8}@S$GST^!%15y?5AZZ z=SdaLIPnH0agy%!YuBDVwB2{0G;j4NF$MguI-g!yIWnK8Fn^8EzQHqh4wAD`!ESYD z&V^htw@Y3sss>;WH9H)U=#mRL^eITl#Fr50O^!x{4JIuYR-wW}y}f~h6^y4Syy{C@ zF0z+?!vPDvd>#2pIE9e3$Aw)Py8$XY9NaN!X7-aD`<>{i%rM>O#0)m50XFR&uso9Q$kcU1L8}Qd;#+CC}$ibds=}OFd6qX~wOu`%w-s9H-qKtx~ zN5vF3-1l!_2eX3`9j~w^&0K@r6L7%(uV-ht#u&ZIN3DkbU7TOHw{uuDdF-!_pk^b~ zxa>gP2=3%O5oXg4Xnl0jUp>ew1&4~q77JQPSw3icIP<_LBS5f{R=SoBY< zVS6HtY0`1%2ayYi2@BLq90Ise(4`RN%Nr6 zsLpjiwcfq=yWexp`@ZLM{y3k{*`NK{+mPq^4d3tgzVGY4uImOXLI<4c#i)_CCm7wJ z}I-$BSr z*&H%>{wFPu267K}F-$leKYc@N;t&}7Pe1T%qlgQ9`H`J|g%+r?csdi`#XOmf+vI|xUk!M2dtK+ zBM{*PEm#hVuwP;{A(tRV^(4c;z_hcs#y>M4lUSr&!n?;dxc@hq$A3-7VR*zwFKMq8 zuh;0W{%;Hj<0helsGYdL5nw8!=Ofsq+QOF*;78ELzu?Fp`zx3ZBvGz^o|!|yp-}G% z-GIL!;vf`n9e5#7M*6_*L5!FFx_E4p%@?um7kA>!b~#8{@)Ivg8Q&?fvbDi=HY`rpgN*1P_LZCh?p zQPFb&ZqVMl8a`c4*NV^t7*x(0``(r|-~uorZTq`~_aoqiiM+i3AmO#h+xnNf5eFr6 z4?qnGHA#yl)T@H0b$9GG_X_m(%=}W2V3M^5%o{jtYaA9*+kaC^*NJGi@btmd*}Sh0Y?r&#n66Mls|5lJDym;)EgkH?#`GN%gy>K;@r9_yB8YW>BxkkY}MZBze>^v9Bp2@Q}RBnGUuNUe< zfN>;h!GRNOrT$_$vB{8?#DR=XFAFDC?2|=;qZgmc55lJ&xtBOf#f0yip9tv{I@%2>qc&$ls;Tz6)Iog8#mZHY0lszIXw~OCnjY_64NY`GE3c;SA6?q zdDl(17-_}_Ks2ooM?({_{sjZD6DO&f}o@ddlA8vnyB+uwqDHf3}B0+_T@g8_i3Tq7;R_)sneI>}b81^egDNngCPeW!6C{!~o-5qAQxedyA 
zDl69wvoC`ezQV64LM5PK2uY^Xw?bidvQJJl$CIM~*R-xT6(rRM0Bsm)#)&J3ZMTn7_cP~r}N_nKFl#AS(27hMLR4(r?)Z*`0F znL(o=undSw&x*y@vn82Rr%p+*;T#U2T=V=-7>n!~8S3&QOIT2KK!Ug>?fYm2#}Us| zp=b2q@G8jrF!@;>d1eF455&@itV3eXoM7|Hl}KViq$;7#2syQor2*21-nmh4R*^GY zXD4yjSdEDD+9_2i`dQR_*LhIXr~Vi(F-C=FuHoJ>Q-abf@k%`GLX_j?$wuAPZtb^1 zhAz}19Rz>|x5xv?xBV9U(Sk<0U5Iw%q^a|wQ7TDREQ47er>>zP#QIt%zXuxl#=lC8 zy+Nt)2%fpYyyPPmmINKwiRf~P=jrdDTP8$>eg5hPWl#alD^Z-m4nkafYpc?*_d-Nj zJx4VxC+Wy6aB|6uvVM5<6a81L3}vtaZ?hV)G_9W2qY=W125_=pTc^Sbh!v3!B(e`R zB|}6p%W_RdMuyh;-NoTyn3K|_yj0t}7XH>T>0>gn6hGZ%Jj=jpVp$=h!KXo1bD4S{ zk~>XbFC*C)rIc(qjNgQ@cLI=|m}9!T9F}2UojyDkjvS_8Ka+Yg%6Sb!4c$d2XJXAMlx85@6FCFw;|5JJmtgJk)0C{1-`u!y7KJ+DG!QVQ*)84 za0XgTL{64Odie6uJ9k)vkA4chGL0p{zas`TRUYKOiOjX9Qg=oYzlB+M#v~b|S zp)enQGiYc$bndePXK6Mzm^MC~`Zjb+*xa`pKO$WW%K;TJFRiP$S1Fhwc8Vj<7^1V- zHf195jfjtdV{{v)hahS+tXE%^Vu*gmcz>&6p>an)6CPcbl-=U_!688wUi1tIOgW3xrQ1yQc9^vA`}- zZg-pcSR%Duh4iEU8ywcV`nnt6Pqz5PE(WuRf2bcB09n?g+vj)=cZtIE-w1qIU1u6o zLYfDkNKq)nXa)hbx(M;c2N5&^VyF$ia{z`o0?>xxx6MdiH}%?#9oL0>SMa^K=lrWn zHpyW@j8%B(*_~v8L6oljUxVK(*5kBOu&;T^Dk4)okeI2W;Ha6vd4)JWT&DmU1czWg_CW1 zIoouLfq}uzZ}K(o{;Hrp$rws8w_eWQAN@mZY>Nle36AJc-1FC;l;KaVBOA>aH6CP` z@AFh`#=0(y*r5MkUXiv&iF?LHM4&?5F&g^}P$Qw9HEs1D{cI-wULNmtki%Fg4EM*-IUReGuf*A&-Wr$Ln{996uWH>A=WaQh=A{LY*{lY>i z-?8tTAUN%$ZDEmO6*=m5-_g9g^2Ikh&6N?%ani4W&?wtKFz^~AL^||AO)u!OpS;%1 z%?)yx9ZL$-PMK(M@6=o!(KqFt#4~nsYAS)piXey(8lQo)lY9u#GI{sgfBPr$`|;aFh)J~v#Ckqqt#Fdv4V^Za+r*BLwi zyzb)pegW?&%=C{W6x@?%dZ2!}XYpwC^EJ|v^LY0nF+=bzIBkL>gP%22bKYd^$)y!a z@_X6K%)UTQ1Hgx01L;7Hn(90jcAuH;519N6WT^%mB%ElhG?GB!SS4l(0aN6ts2;}U zW;lNHjV?Fei?@FzD%E`qVB_u*&ZT8Mn{+iaywCzXRQR_i@Hs!Z`_yRS!tA^(h0l>3 zg0jyv^1`)lB)VZB)7+i0aP+SHm)gqlgp{7XhVYL+f1|5G8s1BXNB_B(tr?yTiu~86 z$(mXr9r3uslc!DK3UpUs>;pQC%FV}ZKCe53cDCx;Kay}Jl};Pu`X5VZNf6d>HiC;a zV)|`r5kVjU7MoO_I>X43*B72M{!8W0gBLC`kf+LA$i9hYkg@74CU=W%W8{~wUnMSN zF9eoib9Nk4K`#@Wkj(U#wnWSsTeb z3v)O!?jF68>X>X(0&Y^Cm@rJrIy@E{gKTs5gt-iSih=)gSeZXQ+E6#bB@vy&rt;({ 
zVv695B6D`?ISL8M{rJ&BEn0x}LPA2_fO7Ak4zEwg(aar+2(CPMvSli8cUVb}Oxm!FQG-$~Ok|XzuhZ3q#cjL~c@c*NWV3~ZuHg6ttX#zp_9!_mXN#JEIGSO4u^s&+(yfDOBn#~WRRREcwG9HK z6+GW(~n=If5Lnb;5^IVkfSI0vy33VKx#qe}5g

Hzl>ue# zlkFjHHQ^3`q|3Z|l?rFd`aC>5LfgXK#xXYeAGEyy5=6A5qg-ypdkuuA0}3Qbq+#pd z$3#U%^*{-$N4!T^hx8z7*g#lRAq<_a%4?cd0F@AO&HW7nK>JwzJhgCtp(R7f7M}e9 zFdlrYx03Y>j_3D;Un7E0P7?(Fdx}|h)gb&mkt9Ck%YF<$0Ky+os?<9o0#b_$4gXlQ z0xa%)-k80#i@?|-^Gb4V0fCKrYDoFz6@zdn5YNo^ai2YTj^TVSdV7qqHQe)9>SYC# zlCD2mVkzlQhw&dZ(@@)8A@DD@Dj;2oxtF$U_4wr~*jKKQzLTvA?hCjjAa+9E;Qb{K4A7NfRbW^mT-8w@=lg zvfVWEPM`hD-QyYi?;lq$C`6l~1XJu(l$9IsLduY-Q$vcHn`1Racj-<$i4f!9J)ig) zqn(M7JRuRVAkYGtoXVl5=i4%l@pnOP{2qn%)`Nyf z8VQwu*M;?O2xIg6$P57+zdZhw@EPA5SXq$Dn%w-VgJzcjWC27M2SI2D6~dqxU&nfu zJb=$rr3nK#Du^V6F+<^X%IZTMr;MY>r`F0>_G#H3H(g|AMAaj80&$Gg%Fu14G64ZB z6oxVFd_AHj(D=Btw6#T=RswC)KEHJ5Od+8JI`CW~0CYifA8i#u!d-N^P)fjTM5}8q z<1ndhvY;dI0ZQf30#aPJc-WfuO%<7>(`FXM&eKWr!wZw>yB@p;D^oiy=rW7GgL;|K<%2Dt^v#TTn3^iD z{)8&Sis=jNwKEA-1B-iMmA#1a;b#$z&|G!!=4JII?MElIu}xJPX_dU+)+Lc`$ah6P zXL+huzVcDu?wZ5n-(|0t`)Fh^Yed_B=mI#M!bFVYM1vMaTqeP$Yjh~2B5?yFUP5~9 z!D}TiA6$AooAG*b9R&kU<847d1fkqR%xngn&ds4jR)_6PTd$wm?9C^*5cw>5+acNT zDHMgI1svKHl@>2)?x2X2;jG;pl%w)Je=2Q)uc=E5kOI@nKJ7A~fGGzf`pj+}6=Kq4 zWvlpWkQ~DcZF9;u%+KBLC?43?%jy1txc1T3Ve(;Q2~OY;06~&|^Zo<+tBa)5IzX;W z&mOCP_)GvZ{FLz#N1GJsK^b-ciO?a-G_Bl7LVK`jNJAY2x;EOH%b)i@3KapP7Zg{| zla7GTAkWirpg}uloVbyOTy7WpImVS>#C}+2wn~SFaYJr3scJPrmwS8xrL9yK` zV=g+PosYUrs&bjMpKn0eqvT@MXcCcVj9N#2@om`b}^QjO>JR6X)UpzL>WV7pvj?)fds30V&DW!&Ap34 zkD59(6^V=TAiNmMji03P8eXYkjcX{;M_+)oE?6+du1dzSuLgu-(cyMc{K(ZM5W}gl=CbtiRrJ`|UWwgN|Y$>VQT>jrz>>HIaP-6GYpD|+rngFDKm(A?vAP-uAb=@^GDbu^JpzdVTUDbLgP|Ea1(vm?czLlJY z!8n;EC>V>xj#NU(H3HG+Mxg@Zn|+&n@a(2dn}(y(K5l2q_}o?W9Iz@#jZ41oKss00 zT$u%m)o%CyM6#=sO@rjfa1p#vosJ=dFQT%OdJj5>FffK%S~$bLUkMUMnkOeP$$zVD zRmL{N&@0@L?f5Bn+68s9g(}F?--9|v)+uDhMwB23ZQe|$+Hf-`6X$<#(u-7+mi{r`XsD*$iah-&(30yX zC@n%l9FGfglzgLE7plfa1v|^i9N;IvgFii zfa3rec9nU6C0hlGH8P7wE*%GI3k_YTFiW$!AR`NywR8Gy93hjPH{_qAB3x&fhIkRy z2v+$6_|?&GIRuYN#BJmM9F@P8bjBzzb+RWI<)dsUsD(8MPMSl#RV6RojrcGs1SysP z0vfci+8n^qfr+1|`jN~ax?FHETi~i6KVT&k9+t-&!o(v>PQ$NgR zj+7EPNaMH&p?u2RNrutfbSOjWC1&lV&kB~oxmu+o^D3!1ScEcz@Hdi-1Px#%r$P`V 
zlEB{IMO0dt*_PZ`H;L59*W7=EqV$$R6$N=&$;Uqz1}{OgEA)w0u!3OWzaS3oseXZtelDg$)WY%QwlmdI+j}+Bp4+xtkCD} zwB!Z$VtF6|Ica!KXYlLnIoLGt9AH!Pk7HW3n8`i&$w}`V=q6pgX8H&{rXDB4OA=n( zAB}6Pc;?0csiqUI8!RqE98bl_QoAv|!WtzI^`0NR82>%G?{PR~R4;!1V<}`nQ=Cdb zJXldbKM_>f4H(H>@YV>PxexkZIpc)zan@b}(`K3(Ed66@2TG7D=IZDHkgp!0vND79 z^X>(K;S!#cxAdSWc$)y2k`#Icm1@n9mDOiSBi2y(sy(4aWHe&fuxSWn z4Cy~>Fq>`R(K}8<-{Jl0Ket@-7>Ffzp7ll7AV8v3#ASkGe$7tr9C~J9fTP9y!->d& z#E`60DW2nWry~(`QgXn>AsJf#aUs|B5jhPmb~l9X(TA#^NTbdo={>4MbkKD+pJHtS z!5aG~2FE+`_^>r~?IQp!k$+n-i(Ja_dkV%566v(U1K`7OY|9{YAuA3*_(?=s24gVj(rW*^ zV|dH$kISGvp*i0{exI?4uE-ecQ$|gGSV+8RvKlr`8CEt?5a~1_e=e-M_a!)TS5pRE zmF}36kRG`0Of~-GE^&AeR9%tY1HiRIs;~(Ti^ZrR$iHU^k|fG*>_ZRlp#LW#3O)?g zr3_8t!b!E2%)mTHJjghJQe9M=M8h#}R=AZ7f9xnyA7bAkCYM2=^N+v<_N}b6^X%8) zJ#$*Wp{K&W=V(5(zA=hT)61JN)G+^W0h2Pj_w>bBfLzr1Lc6Vz^Z+n0h%TQx^0cpJ z3hrhjry%3IVD@SruWf=}0=yF@2y*z?8_;@x@FI^Ar_dxb3@sCEiUJDh%`$_0~*Yt(e?R2l@zepQ@mske@^1uiz?7!Q^GC^Iv2D%%?o zVg}UI^<}C+3k=NQcxoRuXaiVyLEd3|`xS&T*gghTCgi6ld~~eZ)C&=Z3!0IxpsYZg z%(jK4V1+_Ydt+m0L;HNqO+%k2ShrJ2Lp=Ah~WMhUhv(fIC2BH%>GHVt!tp^qUnPSi>~C4)7gMS7jxNNC_j~ zcGPqA!!<$62o1}&W7oEK2p`pT6@q##?`g5z_U*D;cpSoz{t}6SbXHKs$KkW%;d37c zz<8jyJqZnuCSb{pO*Da$4mK7W(nj|Ex;64BH{;3e1ZvH<*h(}j7_GiL_-YX|13?!tS2hc`ioHW}lS%mebao&x!z2ExzKkx4O6z&3B9J~C|`RK4UZgVdHp zAB0p8ra)2)PBJ4Vo!%PxYYYw|;&ua2yAMY`a^s+u3_xMnfP}E}3(R?u&1^SoG*P@6LE7sN{r>$ro}^mFdjNLxSYIueEf`&`ERkn=AtHJ_sqb-c-^n>a~zJeBD89@ zhrQBnnqLOwTU&?|f3K<1UVSP#lL zvjJLKh@u0r=|Lv2m<4$Uu(vXDh{>x`ig&uz_A&ZtsYuQo-1+J5a)lFLDg>wV@7DNM+gJi|JMCv*j5*j?dyff4uuaFMjWf5Nab7*Ad1 z%h?Qsd{z;#eQv>w*{pc9&W%`y)YNpTE-a*p9%PezAN@`+$zxH!`3(3}>L#FwBN~yM zkevl!-XgVSM)&r{weH@v3$q!+uKTi`FH*Be#L`@bNtQn)5sRFh!rqlATrNKzcdi;5 z3*?nZc2Cje;V)@10|)`gdr4U7vKbJ(>Q8r<#Gm%|Zo($RLQo9&v-4=*01in0rQ=0F z|D(A*W3}=Cfl_!=-4#v!EHbh=uuAytqocX^7Z~^qt4*?8n;PK6qoa({pS z^+o?&5px#t%vBmJ0<{Yml-}WG$h1J}V*7rjT~=`kpalVT95q8slybZsg|empxI^u$ zIVhUgf^qW$B9m)g->>=Z4@EBMt zJAXF1RBo_rldSlLo;POheNo8nf#(XL5bVhLqj|wXHehqbYkK+mmB0eoRy&ss-RZt4 
zptNa2&baRj0W7Z1Qb%^go+-?H7elq>-(%~r+IClQ5*T0GeR(xi!8*~|EsB~81p_TH zLYk^!pN1w}6QCrR2Ex$~%`x24iX`?S=e-EoU@1wnM?7C$W1(1j6%iWD|MbKWXXlkR zb0@-L=yXU9j1Rlk?X$VRBcYM|}@5 zlGYvdhgvs#agQ$|9qO2{NAEu!W^C|?4U#Gr-* zH)J56upFB3G8LXzjzK2Y;2qq>Ms7ko8Fz?Uh6B2XtO0-U5VzVCcni6vufG&=h zgVj%r^;_gVMWo!=gubjq_A4A*&+(%J17SI&zwTAW2W`jE$=`D&5DQ52eAN||bm42< z4%e|=4zHy8WT4JiPAWKjz5hh27jtZ| z93WSr^#&S7M88+6CRPuioBS)a7a(H+Ct{R(O_}#BjP<^02{4H*}m{Q!H%nT%m-zxy2 z1Nt7;a=d1r%Ala4@?DDqU9$oDKo=*4WrN>hEx8!y*&U?jlGgHQs9YuSeK%D`c$q#Q zF)3{K>}Rlq$2#q)pw(=jT%A8dGow*Zpv9! z&GjZzTrR;SqtrB+6~dkn4cdAc=>iQZp^P%YU%&a@sfyi)LLNkS8#uj)npXe<%6<1i^Z7QviKWH`mnr@`51 z%9AsjM0TOIki3PCcTOivJ%A?mkkAkjnzC8++Q@sk2~3JG%zh{6iVa&`+mq3@$D!AL zM=5hdy0V|-VPdudnB1>?h?=7s)fNGhHKhM+yPenS^o#i|xF3+}AU4eglp`2ZMSQJ& zjCL7T7Rf&#ARO~hFiK#3RHZW?2(EJCj$~q0=74Chb=QWU4e3~q z0i#?6^AIeefcWtf%QODB8Om+9&_SAREy)ayvPCR6HiVW!ctDMLdg4Sufa6U*081if zqZ;S|Me7cx?uLrwARHpKG=OTKEF#JA9D{90YeIxW)CcjXl8rmgdl~iqA=>o%>ftjR zHek@io?BB?^FOF`$~iM$t#okbF1ql%TiIwWkaTKSNvByvp=oqQ{5J<$83Yq?XmAkK z`mbm3q@mtzUG28+bNkd1(VV2ktLjHS$F;V~G;NLvoTw0gr+(Y{ZSF3p#|0|lz8c8a(@alUYr6Kc4dO!fy@Lx2#G4&DPD{1OHrs7f7%GxiH81wuS8>->)Y;+^{|VD`EA$1o&>GD`!KAaE*H68HwvW>%I?+n z-SZ^s&(ESIr*a#dcK3?O?Rb%;ekW>9yZKgga8EAbfEA0cI6n0m`VNhNS4to&xQjj_ zcG9g!OYY$`!wOAR^9^jW=R@^|i4W|p>agr3`xyO;?_kx7Mn%BY-QWAG@UM{Kvq5{* z{Qd;$+ozbK3dK&ebYl0-iE9hVWtYO}u|sG@(0tjuvu5la1jG4k($Q#_ppE+0fes(+ zqIPZ>nant-;C-dcyOy<};_%dGfKI43Bzhj$$M2mk?VDQ>#9XAYL%6hI>XexLAxUaO@CTbKfkGu zyu*Q|aJ=s@0)(%yZr37^&d`Q26g}Q?0@`ES*27GAT5roG9rw2Z(Vj`w0iP?vt@k`I zn*I9(=H5hHv{%dC)XC3atKW>yW)jd^)O^5QZvz^@(N02V9Q(yT2a}bygVlh*5s%$d zF}sLN%7~QD~bBIFGzqEC#a(YsSlZ0>thsn4wVcQlknv z{*Id-q`yL*%I55GYo*3eKDSXt8Yjq3i6BfBJe~W~rmY;z`bESZ=daCKrcOn@Z^G^B zMVEed?3>OwD2?7rfFk2L^7H6VoYNdr{u8u#so4@a9n)U$QRI3@W=g;&s361QjYc^E ziX>bOl1cqW7E+J=5S4bIX5&O7B8BB;DWd>+i=$T}yR>%@zM`BKsbxM5FjK;PuvVEs z&U;BzeGk{An%eR&R*Yx#on?n^c&IxoY7Y#wmJ3u(@eGl0=V3(DxtD1QMhcERZ4MD| z5jbdQ6}vPG%slU>!vi+Z1968iLSrFSl^!5Jr5=KMyk{*A*+C-Pk*G)n%tE3dmK7RR 
zK25?<3D9&pPBhXn3DxEm!>?E@hDU60S2}-ULr8*j&U|6BhNh|xnZ_whM<04DjQJd- z>;wJ%c*`_yQg7=Zss(uUuBE{^V7jKLWL#9ifFh8(SZuJRbwKLu2Vx+(qfqc+x6%U- zj5u71Y=TYbi2O`BMr`VD@6%tHS+{L8Kj?|AdX}~-b#gioI63B$_gHz=)zxag=gw(5 zy&1LoacHr92nuF+4pIbs{!o{F>QA}%de9JrDZv0a+V4V?d()-j8`rm%{+PrZJB~&o zlX5|~z2!@Ny@d$;@xyZPhUFhV^W6%;4O`{t_|r(Ix|AO*eO#NBcK!NwonC-V70y=)<{VT3kz?xZN}_Z5vq^0)m_f)oBPr2 zhxgOZ^Y5tpn&@^MOvt?YyKqxrv6qFwhbLOeETu3WulsY~CYS~tFQ5Nb#nK{dlbI zJmT0wKm7@cnPvQOI}}z)5@b(;Be1V*Ef~%_!CPTqi>o>$S zXH2^MZr^2vNm^g?-IFHys|lCgR~+2=5AbYZhbDp_{|3B zilzu zAr{w@F=u$QVY_#;$NKxftg47NSYVmH>RYPwIUI+lX?-jyZ<+a}HkE*SdG?jBFt?uy?hwhtY3MF77l8o0L-|LPK6Wwc8cd_^VVMa6 zhrp7IMoI*Gb<6zzJ4sv!tEsa&TsKN$SOB^zo{K94ZlOs_dAYcao==HC2!E{!t zj|g9IENGk|BxKvRS!mX*K=GA(RfL>&R#~TvEo16Q^X-Xx%pM$K z`Yqm~Y@dS2aUZ{tvG+|##icjo-W z!5QxsH<%~spNaU|6jAhX!RXV7&V?f2vTJHYeqrIVe2YCWKF3)KJ4u3LKBSw8pB zVLkVz%0-C4e_7BgUG40crIvX(9_pvGgW(U>aesti91`#+-QC^yokz6-jE3)i%fC#z z7<18?UZX$gHC3Ev>?g{X|NlQ^nE#UpqA{B|@T1@W@%T}DVhu8Q)z8wh^>T0ZjIooX zXr2gq^HdANLZ}Xf6e`IBNWEO)z8mCA9tZ%~@=<%Dl?UZyFq$?f)dHIykZ=#9#!!n9 z!6X(i8xBDE5jZW2#n|6-=DyT>kYi1qyIccuEX8YX=m5cuf{IR>Hwt8GGd8Q7;wE(R z$PNX{r{&MP2*gJ{_5jYK006j>v{Y@^r!N7O#GyzqUkvw65r`1*`ML5myaM#fr5~K} zPdxeFi4~$*Rsu~KVSC`U%mZ5h`9c)*#58<@hDjieDM@#<1oR8hnc@pGq8VCzPeRv) zra7Vvjzd&$01OL~$XQ~z5MV;Gr+}6h;7620sYYTP;xE&9wHNkC)30)$tXR<^kVilQ zuxVZ1agL@lBY$u>eE2O2+8eO24gW^RC16M!=&Fb#Om-iTMDbnX|Ml1FXnkvOXw()k z%hkr9*n^AMdm(Nvp*97eowRwH;C^b3fAWu&BHoa~0aAdMs6HC)wV z7{H)rqtWmP_&bRZjAAO_$ubt}r4T7~b&n8_A#^_cj*}PeUh#^kDM)t-*CbO>1llA3 zvNTo{MOPV!Hk?1BKcO{SP>YtmO5VybW&^7cJRUsWof$bWzXf1&g+->Z6aiCqgV{h5 zF?KPIMt+&z35?joh1{bi`UNUD{G9w!2o5l~%h251JR{KrgBw8|ODtM5sDsp)Y|zkG zze6Aw%6hV?p%FT;?32n!1MrL&9?oin^b8ccW9QAdgX6G|EcOKNp^t=5H}RPXN7xEd z0B~Ia5s^gj9Ej6yKoo9#Fkep=oR-;~zxd*+@b>6Z7B{2LCHyS3N({Bkc0W0`1s|x_ z3;(q`g%U1ATigK38r4l&JAJR=hWlnJWQ+v<58J4 zicDv`B2JS+h53FFFRc+S+_7nJ81_bW8r;wiJR%+-3b}KFsbsq5Gww$r=WVjtMkG6W zK!P+oj)Mg90sEPdBz$)%K=7!^{E{p!CJmx{)C&ecH{lr*VLPq^ES2TKUuk~go6c+{ 
zxU$IiD}k*?x%~5f0?=S?fpLn~-a!mfXd<6*Afp0t0$=+Ntm|GQXaMTb6QfS5T465t zebDw}{4H@BZbUJ|egI;~HxyF0SVdTaOV)mu%>*PDT+@#Pu^BSxEx;>)A}yU` zlb=B-ZP{!DAz;1zhF7R>Vs1>N2kXB9kJ1pxZ_525ASM0QzyK2>{+aK2)5_Ck7l;$ho zneH_Dh5NW)`28ZD{{hUX5`6m9O@f_+nQ3AsOqg&UEr5Oxf31jQ5h`;t6I;B|_YYaz@IFN)bO ze+4aFZmslQK+iSj{Cue>GaSzEN0pbwcL3~#w$N7m6lBd|7rGa>Ub~-$t;LHFd{$9vcejxZ+U!i*@*ith-uyocy}r_YQDz36ujJ} zuQ+`xzh|vq$*c2Oog49{zQ_GLigFB$$Trf9XskrodM9835DgtN7ndMkqGe(&qXOSf z)?suYSeM@Kz!Xv+ENhBphlLMbT(T3W`vqcu^KJ1Ff$+ZP5SQpbBq}O`8pBGT)G}A z$%1d-U&q0dYaI<<8-^Qh6Vbjl|F%FJKnwxAXgrBM??Nj50Wk+6W__wE^Q!r8l~27} z>^kyWaR@hTs7ZEOs&uzisa80nzP4(TZF@oPCjZny*59f6_+nO-C(QFD*q}UeCyRd4 zvj&YGG)F}twPVXGoDp5>?TOU3debNzQ?cG%Vn4Pg^v|}N4k;aAr01_NJq_jsr>Qox zw>s7Nj!Q9&1-h|1spi7XexQmI1UTyA@7kV;G)AY0<%h}%d0Oa@8+isHg~JQileZ|? zjK?PcIy-PVqLb(S;P+g49ETJJga+$mUzu~6;q?+)H2mEMKfhB|h|!phsXlUkwPm3f zWbt)?S2eeK0*$F3$*7Z!(E;Xk1H-nAOA2&qudY(_%?ecE z5?C?%oI^v|x?WhhqUxs-S-*u3*jE;-61t{PdFJtvW{Y7yCR%S)3Rq-^QFZAdN_tn;R32Sr};2LO_w#X-#*8CxU{=zx#~uuw98grTQ#izZ!=3K}wf zMA}2ZSTxasf~>~iR+yCggaWaYz}fUazg3U5D4GZ{o_Z_teu93kwNFBO`SSb z*SYL-3FL<*I;QYvTAo+ojR*gp8ycwur#N>_lTotc znkCra9DmB^DUYU{8J+pdEZs=83Yq?2b07-``E8}A$?fhzbw#2a&w6~rK>sJGX1a)+ zuptJ1fpI)zxD{ls*O%EyZXW+Dv)A#Cd0`lP;-I{imWkshSmp}(72CTMbVfh zFbwH)@a~CWMtyS_#Hi}JgU_)QxlJ)r6assp9q7+NyPY6ByaOwwxPDFeqdTmOtSrI2@bNv?gvb{T2*cHV7cWv%&+pjd&Bk|2(M0&$6ur zt?N9Qw5>17c2P*Lgr_9-%F-A1<6YR0w}Ae0plz-PDjZ69DaV5_8YfIZm5{VF=oB=M z1m-1#%QfMwa&UGILLV6oGl&Xvc@=-F1zYJ3pi*dYO5n@|XXa0{a*Sc(ABi3z*C##! 
zY0ItVBRLQpfzkc(k$u#UwyJov!wl&aBFzKX6OFI1f{+z??r#|by04Mdve;*S5h#W# z*WY|T;v+JIO<5zuk0~QSmMBrKyLu_=mjYQk;&Ko>vf>cfDHbyigZB*`v}MMtwetjX zuRxJ^5fqcT*v3j|4INZowQ3#`18+RT6?h?L$Qi7fEP+UEz^^VL6B7i>Zv!zskX+2C zK_#VqAdm2|y;qycX6GkWv%p+`^M4Z71VQGDGA5o+y72(SII?PgV8t zEYBF2>Ah=-LgKUmX>Wl#Ty!iJZ(x#ZMmFg-M?v#CdEngO!&TtEI$a7bY47;#G5F@N zv-2irTTmowkv-O5LzIqa3PGW za{lH{0BEzD*T6=#D+5vdw%aFA(!qKImR z`}bwwObV44m6v#~M7yDik6pSHhP&@*mI^*d^4Kt%#~H^uZvyQVN77F$ldaiNJUeF@ z7V)a+>G$Z=$XYdE!VL}p$Q4(SDItHhyE|jpuqB#IiKLaHN*^Z$c!H`aaaEEM z^tl*BcNyWEYET>#xfK=0uoB8aX+Aj>K?0E__hel^HoXAk{A`q9B(A-tZeO!L zqd7^0OEBilhk>vL{xL|NS1w%8&Up3PJR{ZdKHWYSFDhWsn^|OleQ76SobnRp>`BNf zJm-?l*PkGmMqX2}(&zjnUvi6Zv;*cL`Bs9C#!klM=_laSH(U-u`%&?IRS3_yPm)WX zzA0!pR>0^yMjIz)V5u2O-Tu3lkZ;JTAqqbzuya|rZe4(_yj3wvJ`!bFbh=&VK}jzs zGF|O|OlLvqv`HEwS1xap2c;9+JZsfWoT0C<`im9z=(mT0I~L%6&0?;%J$TqrC@ioC zaGv-SZrr#ryi*OP&47vF=PB=t)K#K#nFN}8+TE$=WEU;UU4-boAaCySy&4$Dj_fFX_iiNN1%U`Mt!)gBTp>X$zw0aY$F-V~p-X;2A2!4fM5O#emK7xkx zowWN@GiCDR6*+F;`fe4rL`7t=rH3Bvl2?5+Tn_mwhB*FCeF!jWh)99}ZjCSX`6bzM zy=zmbrd)Gqv>|@+lAU;c1!H~&T3B-|h zI2EOg_AU*gXEmbjqiZPqvahCH1H(X{qg8$F%u)25ocshv+?}2M&Xinc3icz_FcX0r zWA&#iQY7h)MS_70Ir!qB|GaP52P*?k0<+V(zdjm#nEGiB*qqdCO6gX_9f0+Omt=+K zM~-z=MjZKxld%L8RuYwy7zgPckqXeM(u3INEij>NSsw1_&Zxe709?_KOSYW7XxjAY zdiDvAloT*Xx&BDw5g4?B4x@>lTPbdl8g;@qkv2LY@s58-J5Zcy#`zFP<8Ze#2 zIGjSJXqj>USrk}I#Cjy^GBC0a$lv%`RU{$*U)Au7$qx%v+{~Brf(tIH1_QgDQE#u+ z9xNeY^g80QY;&z=VQyQp|0-*yUam8M7wlkm+U}v<%{3-}DTJIaxsyWZ$D%OwI5dc}m)MibX`!q>=HEf@tF;-qfp% zcvT|L<8R43aU!)r37&wqRzmuR_SuA?j>$~uihY0Zl6}VunuDmWT?3<(cW8bB&|Hje z=;G{9N3;WdA{~z&H3U#V1rNDfBV_D5*=27Mq^hD4i#Rj65K2PU;O|+FAG_VGxCCJq zz@9ktm}8zjduCu_I256wWtKf6BRcZ)xOKp>z5RH!^R>KuvqV7A~0i*XD^ z8nPYntA)B2%@)l|Kp*5aRO%uMR|2lPhP2mXO8j7)FbS%r2$Dc{kuu!}; z-3PmYS8*as~^YIE3rBFn}=C$&VkcQW2!KG36@{rA}q%E&H0=lnh$k;lqbd zey(Nww?K*GvqnhrLCSc>As#T8MTQ^NdmN5fpP(O-Qt6?VP|yNl4*-)j3TyHX$~ul| zz;;yGQLI?xtUjiPaJHxTJi$hiS~Q1dduI04T%hiPx2djG0t9!VIgQ5F;gMheVB5n! 
zX8Crl${JZ(C5046pj-O3hv4Z*_lMu!3nclzvX-V#bXhQ8>s4uIeg^EHc&twz%H1s1Yw6s z(b4~!SS*gz3H|V5;DLd_CNg12N~Z%7T_CowN=9o~#btwj%0GfH?S8k)2j1g=-~^_B z&7!6>)C?kO$F4(aLvz|cBCH}Yxea7Wz7$&{dY+9nN<%Y|oDqFhDAdJYj*MGu>9WO;GJ;CG6kJ# znC}sb5Z*dw=MMrZzDrtGDnq(5=bg4(#=HlieUatp$4y!*dK>IH3zk%pH|75=G z>HXovVVbpq2+mHPpCj?X&^r0H!gd;Ndr?qaIxR3HGcyyR>jcg9>F?7k)aFrFvh^0ni(Mr0_2*X-CMCrWZv=J z0Gmxh{PQ#>eL{~;@tz{qp;MPIV=MRhg4Tx-NGC%kKfCYSbKuhDK-?kq#;}A7)KCN+ zO=&I8SP%ijR#GScHziyg4S@vU4Ki#Jbhfm}J$)v(Y&(!Z^8n9AA4Lh&P?Ymhp$8(t zVmUdv3fcQmF-v57h#bJtn301it@}?tMFb8oe@x#V&T~6PW@hnOiD&7X@h%abZ=>Xs z>_0IA_?=6cLR&#{RZfDyvw@VjNH$0%4B^Qiv9XJsI&J{E@XPjq0H3wWtq1fU^&7!s_Qe zem34ZU>=_LL(?ZFUj1269d6eCh!(Ci5$v4kev%hD`CYj@Pkbg28iujmGW2G7K>Ui; ze@AHA^1DzHk+Otis&qfU#XHnKc@2zMVL`6xv<7oNadOprM~2dP34mzY9C(RluJ8GS zoP}VO725t4fhgQuoy<(Rwt(y(mP0Ua(&U6@9V-DFRROPpt&bq2$ykU7G2i6ylO+I! zB&6ScH$hfPgG9)~33`|$)X7#Y<;i4hPU8yMes-n5h&r5T2?%crowa1Uj=pXG3H0*z zFX|61O~zZnNd@SB-QVK{7(mO z23B!QERKCQ)M#7C@D1?iVrfT{pd1V$3$FGYafkn91NI}?TJ%635QlsYkvW8hN#K;f zy`(ho5yk;xGm`%haZF+&UA2hLjO9rvPa~2qvUOr*A6nn^_3M>ClPM=g7D=`RfHQ(1 z1G))Fk~p_z_ zWo5%w{o5%IyQDU_m^{oJ%q1d>_3bjh$Vk_$|9hp z@`p)@6#9;8Zy`4Do2Xo-_FUobxX6+Q?`X%-lJBGPF%i4!Z* zuvY840Q+vhl}}}Fz&X0LB?@H$ehM(&1{`!jPrQ{;C|exI{}_K%@)yRkwis4d1x&-& zvjtXY>o_a8uoGrFr15^k3)&wUP8D*Q-+ZM)Tk$+2T;KUxG?GhfcS2jYjze7~g{LWj zfadaB+>j$lr$LyV&G?N*AMGz?Mo$bu0FQD)0n*% zYJy%OJvT&vf8d~?@e#G)?@$gCeS;7w%2KH7MD1SdqwufE9Bg}Bf9)}_+%H4G0u>=8 zLV80JZP7vAc+x8W?|jaaSRI9tb9n3YOXS(2aNwboijqGp*at8ILn~0c;s?Ok*Coer zf_(Re>Lq$f|D11Y&F_1AdTw6gtz=l`a3)oH72~y(`qTfYd@VjZ&nvxvR2hJ1p%0wV z2O0<0=?SnN4V*S{@3|6rtY)&Xy2mbkTlaW?-%<@P@SjhwD#ui2Aal#$6r+5W2%PCI z&yb<<+g_U{W|^zoqr)N}Kl)xWZUfna_5oP2UV^=V*Y9GsUM|L-zxPq`ah3GC`>(WN zYYf!Urvn`d5n_^KSbX5D!RnRq+w&Q}3)x?;>LqqzkH&$m4LLkJc{dy*dccXH9;#0v zNrUM`^LQ%)=nl*4TCA`}0M!z0GDN%Fd(@OEO@;vk8T@ggp|MMj=F#wd@J;93z|!*I zMvtKBHbh9+Cz>?!+1Wd z08jlEhZ^iAYZOe>6awdR2?Fqyr8~C+6?qT5r3XZw{o3EBYZR_u%?~%O=^iIym0T}^ zL}*J>c?fF5vGFagPW0EmY=a$wkPMPjBc~gUj3yw)M0@kU@Al{Po|jVL0>T0P5AXeO 
zo-I7wU<_t){z;Inb3vdel{?W#>_O5Jh8w$qmE?|Ph@W_)1c zZ9rn2HUXYmAGL_ zH0*Qrfd>4D%)HUwJF&^H){wv8w}%gj+pW*#c7tenA=@-t^F$pQiRfx47C+Hi;S#v` zj#qrI{HMR@ms6s~G37V0pBh>P&IV+DTmL^39B2w_dl9K39^FFH(*0t7c%45{D@)^f zQWY3R`lCxz2IK(Y^|BG;5~~f9T-+XXmGs#9@@cA%^23h&Man-q^PX2!CfJ$hDg+s? zSs?9iBqiVX$StaJ%a@{AZjKihZ8ROcKQ~&z;it3+}wbjwMa9FvtlEFjQ)2^`gTK`N{rlka$}nj+oc38( zRPKM{+X+7FmBn=tP-BInLpe z3{z?Fue9FDD`z?VvpS+e)AIPk9X6EgwxEy{fb#)8splH9#>K(so9@D|P_#q;lm#3{yFVt3E_{)KVdi?7s)I!vWcl^Ojo z=KDDMox<4fY=!ByyNuo!Q!ambIVx{GEr~c#6+@%4-aC*h!KrT>3Ad)w!b8oaV6p-o%t;_kIDAj%xqq z$9-Z~0U3#-A8d1$r*7U*I8U*!r!4)$iJI4*4BL6{&)PeAYyiXF7hEbTUF*t=Tw)!5 z^#rwr3ZH-EVRHMa6a}AXrz#)gbtx2_SaQpH`}Qqw(WFU} zmcR)Ilq-_J(ww`UQW|2xWLl5p9=_pww*gFs8ZZ^nKIlt7Xk3737NPZqF#L|DQo>;e zf(;{S{V$zho{IfGCW0oWEect3Gbo)lIrs8kbmodKbnE?A%oF=ISmZezMXsm;EO@_B z)82?NK1RB)qXkt=Ji_yK4PlzYwGqcWbmB-K{5OrU6XUaqVM_c?w1NqHvN_9J zrwJZ5nF->~{g>e1Wa|tQk?XU2?^WWnTz>;4PZUX|(8(+P+l<^9#E!8J2Z~g!24C2{ z&feqej!`cyz!RP4GNjxnf9)bY$pj4f969TOqI`n}(9@?(n-;xc?_i&nfJO@1#Ctt7 z!a>0m5*EHIGy_at^2dO?Ig1Sn-do@}{1PKIwbkQUL|CF}eiAfFrUaCVy)GYdC#jsRgsI38SE~OO!ySVg^ zJuhFWhf zh%YKC8WRQuD~JNNh?9tb1?*sp1uUqjQEahrfBTDB_x^v^x@%qjaV7A)>T_?J!Wq`SCuf@q{?WfgH-b?=68!95}w0c|Ex_W#z$!#N-KWoom1 z8bd8&lNJ}1gQhpoU%O4IVI9nfjZEj9W3_F9RF`7VZJ6*fMf~Orkh((JHU7^Oyuz$i zt8aI9fH>kiLW{j(B=8bZ!b}ol2)+k%Hu5^-cC(Ro$yl1MGun}p4s?^$os#F@6I>tK zZ)#nwt@@A41BNdh<*1PZbuE=PmU5ULn&Fgq6Pv<7jpL%Kk+h+dE0zTb*x-^6y`8_C zm$21hFgi|48SG=l51|9Nf()R3YrnhlRhDLJQp=TL8=c-W&nw0rH%}NwJ|;+C&xdv9 zX(+B7Z^W{9skD^6!LV_V`K%kB*@Nf(HLdmvn`&(%!R_UAh@bZl)b2w*XzU!|-ST<; zWN1Gc#eOs}igxZS-~LrP$KgUZ=ff9}M!jF>GTLUvSPZ1l z=|BVpc0@92MtPlUhzPGEnXTP|t|Gjv6WYTphr`s^M86_hh+Cc99~2YnoZY6behH>* z$#Hc<2cxyr2qCO>%AnOufWBTno*`#TZr@&grBHmz%yPn9vzp%(=d{GOBJb?3=!-XK z`Ndn9j)@Cgd7WMT)}(+!_ck4H&Vd@CUi2IL{R$9Q3`EBZB1SGrdHshJP#~LSbTzDB&!KURPDQAct76Y)~Wz^eAKXAihm@&5M7tqm+i9MpH zbmAJ&t%zx3k+oQUCgx6x5>qf#-Op$kM(%>5~z7lYDbuOd}_J30J+6G@& zrXm{&El)>6MqhvJl7x5O_L7P8^rH_)T&diPszO$mMi^(>g7J;ohlTt-x7@EzaTk_| 
zP3*C2p<&*^Pnta32LP0yeruo1OT06t{xvl?PKIXYzExTSUTwR8vC9A#S=2jq3wD)9 z5>rxA>@Rq7-sRu13F=w(uhEvV@EYB1o8|O+xwyMLc*Mu}Wmy?WH1Uc`aHot+3Zv>4qlfV}@tgK$~ z_;LFsbad8~Z?Dq1igIE1&p9C7_V3=ueZo-skr!#4WKh?WT_07OxP|s*!j2V^Y0?w4 zgiYe5i0ENjKrMbkj=H3MPv_(X*)OMQ#KWTMdDGv(UIyTCeBHaQO)hT@%Bp}7yE^LL z$@ND@&##l#VWHjAGc9eBg6TRc&f#ICiAL!YyrP$@ozJt9zm9kh`cJ6U0r2wW+0Ajg z%EwV=MeM5q$h-!}G)&tMG9(CnPvnbC0#$oLunb!Gm@I&;U8tHheN3Iv#QMO_!3Tba zUALa_rp-nd_zY?A7IL@}c-j7P522$#pI|U!E}XTb2{n?B(VyK)^A40O;x+qvXK3Dm z(Uh-VtEnyOvIEVk)v65`U-a)@bs|X)70I5|e5vc0o@7hVH<_%TrTR)wu)6)C6nx z?;u(!gP_36MPCg+Fro?N{l0FT%pUu*OCmch=pyhy2Lf$F>R-=YZ`iZ2S#10)Y^6*K zbil{=FJC@cLDtC~7Rs_)&>?;*@9j4~ z%(`A?(PSKPrdGUCiX4pJQ_T51T!RtZhoW8o^Y+$t*MxZ5~e_h zvZOw?&L-=RmdPS2(~Y^7h&pZBq*_vcg6Qn|>`FBiMdX(d$NXuu@BrE<;mJQV6H>&E z`y}k%<{PT~CQX3h$DnYvM+1c?S8~~yG^p`tGc;jYV>ZaH@l@PKdTce!BnAM6*B{RC znf`E81`KQ6h{}angwefPR$~{q>}M2`PDS%H$h!RnsK(>y>5I9h!AUfWNP?7KYKV`- zK#;qe&x{;eDT8b}y?aK3IOWx@eHL}6%>f7I4+KKXp8tIvm_8I$n$01#JVnnynm0Q_ zj#j>Yn%&TI)>nzc#&Aw)DxOkb3cWMz$Lm*OA70omPdj3S_k)W>EjZdk;UV_sw@S)l z7UAVNmI6v5m&8ebicZO6apJgianw292eIWWwGpj`v|w z@M9&M=q?xTQTV9GjU(;%SDtf1mvL^1giG^o1L0*X37_%At|%KT6w%_ti_<3OzRLoj zc}%e#@-_5mZ4srG5`p6Qcet&s#JQyCpKcHSmLe$`Ucwz1s$c{iQc(kLXYHK%@#C7J z90}%Tdege{K=#ojfZ}AkwQ-#I7 z^vWJ7FJSsHdi5+x=m1}v*7%m21L>0~9n8=4Dd47D3@8HwAO@P_J3>}QkvH;yjl8v5 zGV$_mdHED*b!c_RK>=}#xlWzZ^pgd*c>Y`_t?e~Oqm-C1V z?7*>UYaE((IWj&Zfk)ENTtrQ`zPN~teKECSBXbc_5=V|k-}Fo5ZeMu2^NB?iRIZ)K z7dtqk3JRMpZtI zvbGS7;nNUBG{YEiuBC*Z_D$~-Cf5XScM6A9As4>+_S;n@kl$T%UQ3B~?KzriM{^OG zbZCp=M71qFg|bDr>PJt?zA$TCQNfy@GFfT9i`(Nli@2~C+xHZDi@0IGzM$#7S!x6- zVb+aVp6a{GU7}tHMTwK{?|T$8y>Z95){FSc55W;g_6U_X(HtASE(Zkrw86Ro; zI#@-J>Hp#Bk}J45b(?W|mXb5Gg2Z;rru(#>@ggIG38^>1EM;shnU{~hd1PA2J|cDg z6Z9mtkbw0wC$ki*-vp2ecP-NM&-77$Ya{ut88V^A!<1duv&ks@@n1f5Cfgzw15I`_ z34WUEg>6H&2W#krC6WVJP0go6BCgf_2EyYz9dz)dIas4}Um?Mr*J#ES)kIgG-X zt>)1Lp-Zg&-uJzG(mK>`Gn2@VT+YAu?1TndHFcYsb)_^Z)6E!qtySx#4mW{dZbYE2 zbbmOpw4|h&0lRT>VuQKp!Zd*qo8W)$wy(T#Y|2ZX`*ZNHN+-&SlG%rKP3WE|~;t&GwidGWzc6H~(VaBKBIw)T!IQ 
zXh6ffUe_icE)C@EPR_L$q{#tf`Yx|3&&`goCiFy%!6dHkr=wmT8|?pLH)Gzj0G&2l z+|Ui6zw1vLtcE&ioplTe6^p-<$Lmg3MUZW$Lek~Gb!bo zTtWfJ7Q*ETk6AWAWqjH8Yt66XjXRmvYrAgp*z;Yo)`>$!P639AP;P9&%6EZE`TZ^i ziU;L&bANAx14m%9_cIQLLp-a1TOyPCh904dfNGRU(RY*yC%Tom@4oA`;K;@%PRUSK z8Z2l8GW$=LVedEOQpY-E?PN4;BFUx4xL4g;W>g>e>A}m8ytHbE5>uH-cj#jp3N_H?2LLA*m5I+Lkb zdu8e`j7eR4CHa)ClI|PBn{@P}xE8(*kOa?MKJa3+JcBhZwzxRRX zVf--JEKBW!CLLd`(FKpbf_Bv4UNS}9jZl|}Bhe)*Dp>~o3%fq@bxmAGY0})_VDWyM zv2EKny+mT{({1=nIw@D`?GJ~lt7Jj$INw~rAz|_NyI!Z23!nvmeP^{E2nDf7}-?@3Dqqp=>i_c+Hl|C#uIeL)zL%Y}2F5F=zT1;}*)hRH&A9LqJhy&n zYcV+73%xb$KN0Jsv#?56PM$M*J_cNIV;U=2_7*2_rNx|+PhjvT( z5E5tc&&kBbk?aWqApiza;ra2P3SZW_X(vQd*8xB{jwq$EDRO93(kDMe{s2_*WUyE3 z5bI76!R9MyOeuZWd49^l4hG!R?a9p`kUrnsX9(~%zkvp%E0bwx*d(~@Sbg*aI+Spf z<2?NJJ``XTq^9FIpv@#r^*VPhGQy+2ICvxmp-|Nq0CIM|!ho9YZSaUikjbE?{9NL)Ra1AVum(ctxzBvUCi7z)LEG#~E zPF|wE4}OO@{Y62rjE7`zdiOC&f{BKCxEyIf_hrU4B7A)4Lugs7-FV-*lY&j!E>?KY{O6i4wEz9o}fUS8=yJe#QwB!cjH zaUZl&>jbHo=Fs@OSP|UOJ&-I?X6O~MMou+%6y2DRJ$*6tg4xw)W!vtU@#oKJp||f~efuN-25i`F+9xrm*89|FCl>U! zcIbcia*HdgjaXN$ V_OQMgCAtOU6TcqYJ{mdazW~2qlS}{r literal 0 HcmV?d00001 diff --git a/sim-cluster/Makefile b/sim-cluster/Makefile deleted file mode 100755 index df9ac68..0000000 --- a/sim-cluster/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -BIN = ../bin - -default: all - -M4 := $(shell which m4) - -ifeq ($(M4),) -$(warning m4 not found!) 
-false -endif - -$(BIN)/%.py: %.py - (echo "undefine(\`format')changequote({{,}})dnl"; cat $*.py) | sed 's/#ifdef/ifdef/' | sed 's/#)/)/' | sed 's/#,/,/' | m4 -D $(DEFS) > $@; chmod +x $@ -# cpp -D $(DEFS) < $*.py > $@; chmod +x $@ - -PYs := $(wildcard *.py) -EXEs += $(PYs:%.py=$(BIN)/%.py) - -$(BIN)/%.sh: %.sh - cp $*.sh $@; chmod +x $@ - -SHs := $(wildcard *.sh) -EXEs += $(SHs:%.sh=$(BIN)/%.sh) - -DEFS = _DEBUG_ -#DEFS = _RELEASE_ -#DEFS = _SIMULATION_ - -all: $(EXEs) - -info: - @echo $(EXEs) - -clean: - rm -f $(EXEs) diff --git a/sim-cluster/blocking_TCP_socket.py b/sim-cluster/blocking_TCP_socket.py deleted file mode 100755 index 5a62341..0000000 --- a/sim-cluster/blocking_TCP_socket.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: iso-8859-15 -*- - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is part of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -# }}} - -import socket - -class blocking_TCP_socket(socket.socket): - - def __init__(self, *p): - super(blocking_TCP_socket, self).__init__(*p) - - def brecv(self, size): - data = super(blocking_TCP_socket, self).recv(size) - while len(data) < size: - data += super(blocking_TCP_socket, self).recv(size - len(data)) - return data - - def baccept(self): - return super(blocking_TCP_socket, self).accept() diff --git a/sim-cluster/churn.py b/sim-cluster/churn.py deleted file mode 100644 index eb25529..0000000 --- a/sim-cluster/churn.py +++ /dev/null @@ -1,31 +0,0 @@ -import random -import time -import datetime -import sys - -#maximum possible time -NEVER = sys.float_info.max - -def weibull_random(shape, scale): - #random.weibullvariate(alpha,beta), where alpha is the scale and beta the shape - return random.weibullvariate(scale, shape) - -#returns a death time in the future (drawn from the weibull distribution with shape 0.4 and the provided scale) -#returns the maximum available time if scale == 0. This means that the peer will never die. -#the return type is a float (number of seconds after the initial time, the epoch) -def new_death_time(scale): - if scale == 0: - return NEVER #maximum float - else: - return time.mktime(time.localtime()) + weibull_random(0.4,scale) - -#returns true if the present moment in time is beyond death_time -def time_to_die(death_time): - return (death_time-time.mktime(time.localtime())<=0) - -''' -i=0 -while i<100: - print(weibull_random(0.4,30)) - i += 1 -''' \ No newline at end of file diff --git a/sim-cluster/cluster.sh b/sim-cluster/cluster.sh deleted file mode 100755 index 29662ad..0000000 --- a/sim-cluster/cluster.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -set -x - -# Runs a real cluster (a video is transmitted and played). Flash-crowd churn. 
- -block_size=1024 -buffer_size=32 # blocks -source_channel=\"134.ogg\" -source_hostname=\"localhost\" -source_port=4551 -splitter_hostname=\"localhost\" -splitter_port=4552 -gatherer_port=$[splitter_port+1] -number_of_peers=2 - -usage() { - echo $0 - echo " [-b (block size, \"$block_size\" by default)]" - echo " [-u (buffer size, \"$buffer_size\" by default)]" - echo " [-c (source channel, \"$source_channel\" by default)]" - echo " [-a (source hostname, $source_hostname by default)]" - echo " [-p (source port, $source_port by default)]" - echo " [-n (number of peers, $number_of_peers by default)]" - echo " [-l (splitter port, $splitter_port by default)]" - echo " [-s (splitter hostname, $spltter_hostname by default)]" - echo " [-v (video filename, \"$video\" by default)]" - echo " [-? (help)]" -} - -echo $0: parsing: $@ - -while getopts "b:u:c:w:a:p:n:l:s:v:?" opt; do - case ${opt} in - b) - block_size="${OPTARG}" - ;; - u) - buffer_size="${OPTARG}" - ;; - c) - source_channel="${OPTARG}" - ;; - a) - source_hostname="${OPTARG}" - ;; - p) - source_port="${OPTARG}" - ;; - n) - number_of_peers="${OPTARG}" - ;; - l) - splitter_port="${OPTARG}" - ;; - s) - splitter_hostname="${OPTARG}" - ;; - v) - video="${OPTARG}" - ;; - ?) - usage - exit 0 - ;; - \?) - echo "Invalid option: -${OPTARG}" >&2 - usage - exit 1 - ;; - :) - echo "Option -${OPTARG} requires an argument." 
>&2 - usage - exit 1 - ;; - esac -done - -#xterm -e "./splitter.py --block_size=$block_size --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --listening_port=$splitter_port" & - -xterm -e "./splitter.py --source_hostname=localhost" & - -sleep 1 - -#xterm -e "./gatherer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --splitter_hostname=$splitter_hostname --splitter_port=$splitter_port" & - -xterm -e "./gatherer.py --splitter_hostname=localhost" & - -sleep 1 - -vlc http://localhost:9999 & - -echo -n "Number of peers" = $number_of_peers - -echo -n "Hit enter to continue" - -read - -COUNTER=0 -while [ $COUNTER -lt $number_of_peers ]; -do - #./peer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel="$source_channel" --source_hostname="$source_hostname" --source_port=$source_port --splitter_hostname="$splitter_hostname" --splitter_port=$splitter_port --no_player -number_of_blocks=100 & - ./peer.py --splitter_hostname=localhost --no_player --number_of_blocks=100 & - let COUNTER=COUNTER+1 - -done - -set +x \ No newline at end of file diff --git a/sim-cluster/colors.py b/sim-cluster/colors.py deleted file mode 100755 index 0d600fc..0000000 --- a/sim-cluster/colors.py +++ /dev/null @@ -1,12 +0,0 @@ -# Colorized printing - -class Color: - - none = '\033[0m' - red = '\033[91m' - green = '\033[92m' - yellow = '\033[93m' - blue = '\033[94m' - purple = '\033[95m' - cyan = '\033[96m' - white = '\033[97m' diff --git a/sim-cluster/common.py b/sim-cluster/common.py deleted file mode 100644 index 5a1849c..0000000 --- a/sim-cluster/common.py +++ /dev/null @@ -1,8 +0,0 @@ -# Constants common for the splitter, the peer and the gatherer, such as the block size. 
- -class Common: - - buffer_size = 256 - block_size = 1024 - header_size = 1024*20 #long enough for the video header - \ No newline at end of file diff --git a/sim-cluster/copyright.txt b/sim-cluster/copyright.txt deleted file mode 100755 index f1c9326..0000000 --- a/sim-cluster/copyright.txt +++ /dev/null @@ -1,23 +0,0 @@ -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Cristobal Medina López, Juan Pablo García Ortiz, -# Juan Alvaro Muñoz Naranjo, Leocadio González Casado and Vicente -# González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} diff --git a/sim-cluster/create_cluster.sh b/sim-cluster/create_cluster.sh deleted file mode 100755 index 708180f..0000000 --- a/sim-cluster/create_cluster.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -# Lauch a splitter, a gatherer and a player. - -block_size=1024 -buffer_size=32 # blocks -source_channel=134.ogg -source_hostname=localhost -source_port=4551 -splitter_hostname=localhost -splitter_port=4552 -gatherer_port=9999 - -usage() { - echo $0 - echo "Launches a splitter, a gatherer and a player." 
- echo "Parameters:" - echo " [-b (block size, \"$block_size\" by default)]" - echo " [-u (buffer size, \"$buffer_size\" by default)]" - echo " [-c (source channel, \"$source_channel\" by default)]" - echo " [-a (source hostname, $source_hostname by default)]" - echo " [-p (source port, $source_port by default)]" - echo " [-n (number of peers, $number_of_peers by default)]" - echo " [-l (splitter port, $splitter_port by default)]" - echo " [-s (splitter hostname, $spltter_hostname by default)]" - echo " [-v (video filename, \"$video\" by default)]" - echo " [-? (help)]" -} - -echo $0: parsing: $@ - -while getopts "b:u:c:w:a:p:l:s:v:?" opt; do - case ${opt} in - b) - block_size="${OPTARG}" - ;; - u) - buffer_size="${OPTARG}" - ;; - c) - source_channel="${OPTARG}" - ;; - a) - source_hostname="${OPTARG}" - ;; - p) - source_port="${OPTARG}" - ;; - l) - splitter_port="${OPTARG}" - ;; - s) - splitter_hostname="${OPTARG}" - ;; - v) - video="${OPTARG}" - ;; - ?) - usage - exit 0 - ;; - \?) - echo "Invalid option: -${OPTARG}" >&2 - usage - exit 1 - ;; - :) - echo "Option -${OPTARG} requires an argument." 
>&2 - usage - exit 1 - ;; - esac -done - -#clear previous output files -rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/* - -#xterm -e "./splitter.py --block_size=$block_size --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --listening_port=$splitter_port" & - -#start the splitter -xterm -l -lf ./output/salida_splitter.txt -e "./splitter.py --source_hostname=localhost" & - -sleep 1 - -#start the gatherer -#xterm -e "./gatherer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --splitter_hostname=$splitter_hostname --splitter_port=$splitter_port" & -xterm -l -lf ./output/salida_gatherer.txt -e "./gatherer.py --splitter_hostname=localhost --source_hostname=localhost" & - -sleep 1 - -#start the player -vlc http://localhost:9999 & - -#start all peers diff --git a/sim-cluster/drain2.py b/sim-cluster/drain2.py deleted file mode 100755 index 71c91c7..0000000 --- a/sim-cluster/drain2.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python -# -*- coding: iso-8859-15 -*- -# -# drain.py -# - -# {{{ Imports - -import sys -import socket -from colors import Color -import struct -from time import gmtime, strftime -import argparse -import threading -from threading import Lock -import blocking_socket - -# }}} - -IP_ADDR = 0 -PORT = 1 - -buffer_size = 32 -#cluster_port = 0 # OS default behavior will be used for port binding -clients_port = 9999 -server_IP = '150.214.150.68' -server_port = 4551 -channel = '134.ogg' # Lo indica el source -source_IP = '150.214.150.68' -source_port = 4552 -header_size = 1024*20*10 -block_size = 1024 # <- Ojo, valor recibido desde la fuente - -# {{{ Args handing - -parser = argparse.ArgumentParser(description='This is the drain node of a P2PSP cluster.') -parser.add_argument('--buffer_size', help='size of the video buffer in blocks'.format(buffer_size)) 
-#parser.add_argument('--cluster_port', help='port used to communicate with the cluster. (Default = {})'.format(cluster_port)) -parser.add_argument('--clients_port', help='Port used to communicate with the player. (Default = {})'.format(clients_port)) -parser.add_argument('--channel', help='Name of the channel served by the streaming server. (Default = {})'.format(channel)) -parser.add_argument('--server_IP', help='IP address of the streaming server. (Default = {})'.format(server_IP)) -parser.add_argument('--server_port', help='Listening port of the streaming server. (Default = {})'.format(server_port)) -parser.add_argument('--source_IP', help='IP address of the source. (Default = {})'.format(source_IP)) -parser.add_argument('--source_port', help='Listening port of the source. (Default = {})'.format(source_port)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.clients_port: - clients_port = int(args.player_port) -#if args.cluster_port: -# cluster_port = int(args.cluster_port) -if args.channel: - channel = args.channel -if args.server_IP: - server_IP = args.server_IP -if args.server_port: - server_port = int(args.server_port) -if args.source_IP: - source_IP = args.source_IP -if args.source_port: - source_port = args.source_port - -# }}} -server = (server_IP, server_port) -source = (source_IP, source_port) -# {{{ Connect with the source - -source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -source_sock.connect(source) -if __debug__: - print strftime("[%Y-%m-%d %H:%M:%S]", gmtime()), \ - source_sock.getsockname(), "connected to the source" - -# }}} -# {{{ Transform the peer-source TCP socket into a UDP socket - -cluster_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) -cluster_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) -print source_sock.getsockname()[PORT] -cluster_sock.bind(('',source_sock.getsockname()[PORT])) -print cluster_sock.getsockname() -source_sock.close() - -# 
}}} -# {{{ Receive blocks from the source/peers - -block_buffer = [None]*buffer_size -block_numbers = [0]*buffer_size -block_number = 0 # Last received block - -#cluster_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) -#cluster_sock.bind(('', cluster_port)) - -lock = Lock() - -class Receive_blocks(threading.Thread): - - def __init__(self, cluster_sock): - threading.Thread.__init__(self) - self.sock = cluster_sock - - def run(self): - while True: - lock.acquire() - message, sender = self.sock.recvfrom(struct.calcsize("H1024s")) - number, block = struct.unpack("H1024s", message) - block_number = socket.ntohs(number) - if __debug__: - print strftime("[%Y-%m-%d %H:%M:%S]", gmtime()), \ - sender, block_number, Color.green + "->" + Color.none, \ - self.sock.getsockname() - block_buffer[block_number % buffer_size] = block - block_numbers[block_number % buffer_size] = block_number - lock.release() - -Receive_blocks(cluster_sock).start() - -# }}} -# {{{ Serve the clients - -class Client_handler(threading.Thread): - - def __init__(self, client): - threading.Thread.__init__(self) - self.client_sock, self.client_addr = client - - def run(self): - global block_buffer - global block_number - # {{{ Create a TCP socket to Icecast - - #server_sock = blocking_socket(socket.AF_INET, socket.SOCK_STREAM) - server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - print server - server_sock.connect(server) - server_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - print "Client_hander:", "!" 
- - # }}} - # {{{ Receive the video header from Icecast and send it to the client - - data = server_sock.recv(header_size) - total_received = len(data) - self.client_sock.sendall(data) - while total_received < header_size: - data = server_sock.recv(header_size - len(data)) - self.client_sock.sendall(data) - total_received += len(data) - - print "Client_hander:", "header" - - # }}} - # {{{ Close the TCP socket with the streaming server - - server_sock.close() - - # }}} - # {{{ Now, send buffer's blocks to the client, forever - block_to_play = block_number - buffer_size/2 - while True: - lock.acquire() - print len(block_buffer[block_to_play % buffer_size]) - #print block_buffer[block_to_play % buffer_size] - self.client_sock.sendall(block_buffer[block_to_play % buffer_size]) - block_to_play = (block_to_play + 1) % 65536 - lock.release() - - # }}} - -sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) -sock.bind(('', clients_port)) -sock.listen(0) -if __debug__: - print "Waiting for clients " - -while True: # Serve forever. - client = sock.accept() - if __debug__: - print "\bc", - Client_handler(client).start() - -# }}} diff --git a/sim-cluster/flash_crowd.sh b/sim-cluster/flash_crowd.sh deleted file mode 100755 index fba8fca..0000000 --- a/sim-cluster/flash_crowd.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -set -x - -# Simulates flash-crowd peer churn. - -#number_of_blocks=100 -number_of_peers=2 - -usage() { - echo $0 - echo "Simulates flash-crowd peer churn." - echo "Parameters:" -# echo " [-b (number of blocks, $number_of_blocks by default)]" - echo " [-n (number of peers, $number_of_peers by default)]" - echo " [-? (help)]" -} - -echo $0: parsing: $@ - -while getopts "b:n:?" opt; do - case ${opt} in - b) - number_of_blocks="${OPTARG}" - ;; - n) - number_of_peers="${OPTARG}" - ;; - ?) - usage - exit 0 - ;; - \?) 
- echo "Invalid option: -${OPTARG}" >&2 - usage - exit 1 - ;; - :) - echo "Option -${OPTARG} requires an argument." >&2 - usage - exit 1 - ;; - esac -done - -COUNTER=0 -while [ $COUNTER -lt $number_of_peers ]; -do - #./peer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel="$source_channel" --source_hostname="$source_hostname" --source_port=$source_port --splitter_hostname="$splitter_hostname" --splitter_port=$splitter_port --no_player -number_of_blocks=100 & - #./peer.py --splitter_hostname=localhost --no_player --number_of_blocks=$number_of_blocks & - #./peer.py --splitter_hostname=localhost --no_player --logging_level=DEBUG > ./output/peer-${COUNTER} & - ./peer.py --splitter_hostname=localhost --no_player --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=0 & - let COUNTER=COUNTER+1 - -done - -set +x diff --git a/sim-cluster/gatherer.py b/sim-cluster/gatherer.py deleted file mode 100755 index 5b4a689..0000000 --- a/sim-cluster/gatherer.py +++ /dev/null @@ -1,478 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the gatherer node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -# }}} - -# Try running me as: -# -# ./splitter.py --source_hostname="localhost" -# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9999 & - -# {{{ Imports - -import logging -import os -from colors import Color -from common import Common -import sys -import socket -import struct -import argparse -import time - -# }}} - -IP_ADDR = 0 -PORT = 1 - -# Number of blocks of the buffer -#buffer_size = 32 -buffer_size = Common.buffer_size - -#cluster_port = 0 # OS default behavior will be used for port binding - -# Port to communicate with the player -listening_port = 9999 - -# Splitter endpoint -#splitter_hostname = '150.214.150.68' -splitter_hostname = 'localhost' -splitter_port = 4552 - -# Estas cuatro variables las debería indicar el splitter -channel = '134.ogg' -#block_size = 1024 -block_size = Common.block_size - -# Source's end-point -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 - -# Number of bytes of the stream's header -#header_size = 1024*20*10 -#header_size = 1024*20 -header_size = Common.header_size - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) -logging_level = logging.INFO - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is the gatherer node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. (Default = {})'.format(channel)) - -parser.add_argument('--listening_port', - help='Port used to communicate with the player. 
(Default = {})'.format(listening_port)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming source. (Default = {})'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming source. (Default = {})'.format(source_port)) - -parser.add_argument('--splitter_hostname', - help='Hostname of the splitter. (Default = {})'.format(splitter_hostname)) - -parser.add_argument('--splitter_port', - help='Listening port of the splitter. (Default = {})'.format(splitter_port)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.listening_port: - listening_port = int(args.listening_port) -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.source_hostname: - source_hostname = args.source_hostname -if args.source_port: - source_port = int(args.source_port) -if args.splitter_hostname: - splitter_hostname = args.splitter_hostname -if args.splitter_port: - splitter_port = args.splitter_port - -# }}} - -print 'This is a P2PSP gatherer node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - -# {{{ Debugging initialization - -# create logger -logger = logging.getLogger('gatherer (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -ch = logging.StreamHandler() -ch.setLevel(logging_level) -# create formatter 
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -# add formatter to ch -ch.setFormatter(formatter) -# add ch to logger -logger.addHandler(ch) - -''' -#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D -fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/gatherer') -fh_timing.setLevel(logging.CRITICAL) -logger.addHandler(fh_timing) -''' -# }}} - -source = (source_hostname, source_port) -splitter = (splitter_hostname, splitter_port) - -block_format_string = "H"+str(block_size)+"s" - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -def get_player_socket(): - # {{{ - - #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - sock.listen(0) - - logger.info(Color.cyan + '{}'.format(sock.getsockname()) + - ' waiting for the player on port ' + - str(listening_port) + Color.none) - # }}} - - #sock, player = sock.baccept() - sock, player = sock.accept() - sock.setblocking(0) - return sock - - # }}} -player_sock = get_player_socket() # The gatherer is blocked until the - # player establish a connection. 
-# {{{ debug - -if __debug__: - logger.debug(Color.cyan + '{}'.format(player_sock.getsockname()) + - ' The player ' + - '{}'.format(player_sock.getpeername()) + - ' has establised a connection' + Color.none) - -def communicate_the_header(): - # {{{ - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - - # {{{ Receive the video header from the source and send it to the player - - # Nota: este proceso puede fallar si durante la recepción de los - # bloques el stream se acaba. Habría que realizar de nuevo la - # petición HTTP (como hace el servidor). - - logger.info(Color.cyan + - str(source_sock.getsockname()) + - ' retrieving the header ...' + - Color.none) - - data = source_sock.recv(header_size) - total_received = len(data) - player_sock.sendall(data) - while total_received < header_size: - if __debug__: - logger.debug(str(total_received)) - data = source_sock.recv(header_size - len(data)) - player_sock.sendall(data) - total_received += len(data) - - # }}} - - logger.info(Color.cyan + - str(source_sock.getsockname()) + - ' done' + Color.none) - - source_sock.close() - # }}} - - -communicate_the_header() # Retrieve the header of the stream from the - # source and send it to the player. - -# {{{ debug -if __debug__: - logger.debug(" Trying to connect to the splitter at" + str(splitter)) -# }}} - -def connect_to_the_splitter(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect(splitter) - return sock - - # }}} -splitter_sock = connect_to_the_splitter() # Connect to the splitter in - # order to tell it who the - # gatherer is. 
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1" - -logger.info(Color.cyan + - '{}'.format(splitter_sock.getsockname()) + - ' connected to the splitter' + - Color.none) - -# {{{ The gatherer is always the first node to connect to the splitter -# and therefore, in this momment the cluster is formed only by the -# splitter and the gatherer. So, it is time to create a new socket to -# receive blocks (now from the splitter and after, when at least one -# peer be in the cluster, from the peer(s) of the cluster), but that -# uses the UDP. This is called "cluster_sock". We also close the TCP -# socket that the gatherer has used to connect to the splitter. }}} - -def create_cluster_sock(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('',splitter_sock.getsockname()[PORT])) - return sock - - # }}} -cluster_sock = create_cluster_sock() -cluster_sock.settimeout(1) -splitter_sock.close() - -# {{{ We define the buffer structure. Three components are needed: (1) -# the buffer that stores the received blocks, (2) the -# buffer that stores the number of the blocks and (3) the -# buffer that stores if a block has been received or not. 
-# }}} -blocks = [None]*buffer_size -received = [False]*buffer_size -#numbers = [0]*buffer_size - -def receive(): - # {{{ - - global splitter - - try: - #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s")) - message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string)) - #number, block = struct.unpack("H1024s", message) - number, block = struct.unpack(block_format_string, message) - block_number = socket.ntohs(number) - blocks[block_number % buffer_size] = block - received[block_number % buffer_size] = True - - # {{{ debug - if __debug__: - if sender == splitter: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender) + " (splitter)") - else: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender) + " (peer)") - # }}} - - return block_number - except socket.timeout: - logger.warning(Color.blue + "cluster timeout!" + Color.none) - return -1 - - # }}} - -# {{{ Now, here the gatherer's life begins (receive incomming blocks and -# send them to the player). But in order to handle the jitter, we must -# to prefetch some blocks before to start to send them. (Week 4/5) -# }}} - -# Lets buffer data in order to handle the jitter. By default, we -# prefetch up to the half of the buffer. This should handle a jitter -# smaller or equal than the half of the buffer (measured in time). - -# {{{ debug -if __debug__: - logger.debug(str(cluster_sock.getsockname()) + ' buffering ...') -# }}} - -logger.info(Color.cyan + - str(cluster_sock.getsockname()) + - ' receiving data ...' + - Color.none) - -''' -x = block_to_play = receive_a_block() -while not received[(x+buffer_size/2) % buffer_size]: - x = receive_a_block() -''' -''' -block_to_play = receive_a_block() % buffer_size -while not received[(receive_a_block() + buffer_size/2) % buffer_size]: - pass -''' - -''' -#Fill half the buffer -''' -#WARNING!!! 
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out) -#time.time() measures wall time, this means execution time plus waiting time - -#start_buffering_time = time.clock() -start_buffering_time = time.time() - -block_number = receive() -while block_number<=0: - block_number = receive() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - while receive()<=0: - pass - -#end_buffering_time = time.clock() -end_buffering_time = time.time() -buffering_time = end_buffering_time - start_buffering_time - - -logger.info(str(cluster_sock.getsockname()) + ' buffering done') - -#timing info -#logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS -#logger.critical('BUF_LEN '+str(buffer_size)+' bytes') - -''' -#End buffering -''' - - -def send_a_block_to_the_player(): - # {{{ - - global block_to_play - ''' - while not received[(block_to_play % buffer_size)]: - message = struct.pack("!H", block_to_play) - cluster_sock.sendto(message, splitter) - ''' - if not received[block_to_play]: - message = struct.pack("!H", block_to_play) - cluster_sock.sendto(message, splitter) - - logger.info(Color.cyan + - str(cluster_sock.getsockname()) + - ' complaining about lost block ' + - str(block_to_play) + - Color.none) - - # La mayoría de los players se sincronizan antes si en lugar - # de no enviar nada se envía un bloque vacío. Esto habría que - # probarlo. - - try: - player_sock.sendall(blocks[block_to_play]) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' ' + - str(block_to_play) + - ' -> (player) ' + - '{}'.format(player_sock.getpeername())) - - # }}} - - except socket.error: - logger.error(Color.red + 'player disconnected!' 
+ Color.none) - #quit() - except Exception as detail: - logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none) - - received[block_to_play] = False - - # }}} - -while True: - block_number = receive() - send_a_block_to_the_player() - block_to_play = (block_to_play + 1) % buffer_size - -''' -while True: - send_a_block_to_the_player() - block_to_play = (block_to_play + 1) % buffer_size - receive() -''' diff --git a/sim-cluster/get_results.py b/sim-cluster/get_results.py deleted file mode 100644 index 59cc157..0000000 --- a/sim-cluster/get_results.py +++ /dev/null @@ -1,64 +0,0 @@ -''' -Created on 22/04/2013 - -@author: jalvaro -Read timing info from files. For measuring the performance of simulations. -Execution: python get_results.py ruta tam_buf -''' -import sys -import glob - -sum = float(0) -count = 0 -fail_count = 0 -avg = float(0) -sum_errors_buf = int(0) -count_errors_buf = int(0) -avg = float(0) -#files_path = glob.glob('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/*') -tam_buf = 0 - -try: - if(sys.argv[1].endswith('/')): - files_path = glob.glob(sys.argv[1]+'*') - else: - files_path = glob.glob(sys.argv[1]+'/*') - tam_buf = int(sys.argv[2]) -except: - print('Incorrect arguments. 
Usage: python get_timing.py ruta tam_buf') - sys.exit() - -#print(files_path) -for file_path in files_path: - fr = open(file_path,'r') - try: - #read avg time - line = fr.readline() - time = float(line.split()[1]) - #print('Time '+str(time)) - sum += time - count += 1 - fr.readline() - line = fr.readline() - num_errors_buf = float(line.split()[1]) - #print('Num errors buf '+str(num_errors_buf)) - sum_errors_buf += num_errors_buf - count_errors_buf += 1 - except: - fail_count += 1 - -avg = sum / count -avg_errors_buf = sum_errors_buf / count_errors_buf -avg_perc_errors_buf = avg_errors_buf *100 / (tam_buf/2) -print('') -print('Avg time: ' + str(avg) + ' secs') -print('Avg num errors buf ' + str(avg_errors_buf)) -print('Avg % errors buf '+str(avg_perc_errors_buf)) -print('') -print(str(count) + ' valid values read') -print(str(fail_count) + ' invalid values read') - - - - - diff --git a/sim-cluster/getpid.py b/sim-cluster/getpid.py deleted file mode 100755 index 39892bd..0000000 --- a/sim-cluster/getpid.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -import os -import argparse -import daemon - -parser = argparse.ArgumentParser(description='Testing getpid.') -parser.add_argument('--showpeers', help='Show the list of peers.') -args = parser.parse_known_args()[0] -if args.showpeers: - print os.getpid() diff --git a/sim-cluster/peer-h.py b/sim-cluster/peer-h.py deleted file mode 100755 index 2ec5613..0000000 --- a/sim-cluster/peer-h.py +++ /dev/null @@ -1,745 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. 
-# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} - -# Try running me as: -# -# ./splitter.py --source_hostname="localhost" -# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9999 & -# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9998 & - -# {{{ Imports - -''' -# VERSIÓN 2 DEL PEER, el peer manda mensajes de hola para presentarse ante los demás peers del cluster. 
-# Usar con splitter.py y gatherer.py -''' - -import os -import logging -from colors import Color -from common import Common -import sys -import socket -import struct -import time -import argparse -import churn - -# }}} - -IP_ADDR = 0 -PORT = 1 - -# Number of blocks of the buffer -#buffer_size = 32 -#buffer_size = 256 -buffer_size = Common.buffer_size - -#cluster_port = 0 # OS default behavior will be used for port binding - -# Port to communicate with the player -listening_port = 9998 - -# Splitter endpoint -#splitter_hostname = '150.214.150.68' -splitter_hostname = 'localhost' -splitter_port = 4552 - -# Number of bytes of the stream's header -#header_size = 1024*20*10 -#header_size = 1024*20 -header_size = Common.header_size - -# Estas cuatro variables las debería indicar el splitter -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 -channel = '134.ogg' -#block_size = 1024 -block_size = Common.block_size - -# Controls if the stream is sent to a player -_PLAYER_ = True - -# Maximun number of blocks to receive from the splitter -number_of_blocks = 999999999 - - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) - -logging_level = logging.INFO - -logging_filename = '' - -console_logging = True -file_logging = True - -weibull_scale = 0 #for churn. 0 means no churn. - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is a peer node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks. (Default = {})'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. (Default = {})'.format(channel)) - -parser.add_argument('--listening_port', - help='Port used to communicate with the player. 
(Default = {})'.format(listening_port)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--logging_filename', - help='Name of the logging output file. (Default = "{})'.format(logging_filename)) - -parser.add_argument('--number_of_blocks', - help='Maximun number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming source. (Default = {})'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming source. (Default = {})'.format(source_port)) - -parser.add_argument('--splitter_hostname', - help='Hostname of the splitter. (Default = {})'.format(splitter_hostname)) - -parser.add_argument('--splitter_port', - help='Listening port of the splitter. (Default = {})'.format(splitter_port)) - -parser.add_argument('--no_player', help='Do no send the stream to a player.', action="store_true") - -parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. 
(Default = {})'.format(weibull_scale)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.listening_port: - listening_port = int(args.listening_port) -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.logging_filename: - logging_filename = args.logging_filename -if args.number_of_blocks: - number_of_blocks = int(args.number_of_blocks) -if args.source_hostname: - source_hostname = args.source_hostname -if args.source_port: - source_port = int(args.source_port) -if args.splitter_hostname: - splitter_hostname = args.splitter_hostname -if args.splitter_port: - splitter_port = args.splitter_port -if args.no_player: - _PLAYER_ = False -if args.churn: - weibull_scale = int(args.churn) - -# }}} - -print 'This is a P2PSP peer node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - - -# {{{ Logging initialization - -# create logger -logger = logging.getLogger('peer (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -if console_logging == True: - ch = logging.StreamHandler() - ch.setLevel(logging_level) - # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - # add formatter to ch - ch.setFormatter(formatter) - # add ch to logger - logger.addHandler(ch) - -#jalvaro -# create file handler and set the level -if args.logging_filename and file_logging == False: - fh = logging.FileHandler('./output/peer-'+str(os.getpid())) - fh.setLevel(logging_level) - #add 
fh to logger - logger.addHandler(fh) -#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D -fh_timing = logging.FileHandler('./timing/peer-'+str(os.getpid())) -fh_timing.setLevel(logging.CRITICAL) -logger.addHandler(fh_timing) - -# }}} - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -source = (source_hostname, source_port) -splitter = (splitter_hostname, splitter_port) - -block_format_string = "H"+str(block_size)+"s" - -def get_player_socket(): - # {{{ - - #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - sock.listen(0) - - if __debug__: - logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none) - print("Waiting for the player") - # }}} - - #sock, player = sock.baccept() - sock, player = sock.accept() - sock.setblocking(0) - return sock - - # }}} - -if _PLAYER_: - player_sock = get_player_socket() # The peer is blocked until the - # player establish a connection. 
- - # {{{ debug - - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' The player ' + - '{}'.format(player_sock.getpeername()) + - ' has establised a connection') - print("Player connected") - # }}} - -def communicate_the_header(): - # {{{ - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - - # {{{ Receive the video header from the source and send it to the player - - # Nota: este proceso puede fallar si durante la recepción de los - # bloques el stream se acaba. Habría que realizar de nuevo la - # petición HTTP (como hace el servidor). - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none) - - data = source_sock.recv(header_size) - total_received = len(data) - player_sock.sendall(data) - while total_received < header_size: - if __debug__: - logger.debug(str(total_received)) - data = source_sock.recv(header_size - len(data)) - player_sock.sendall(data) - total_received += len(data) - - # }}} - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none) - - source_sock.close() - # }}} - -if _PLAYER_: - communicate_the_header() # Retrieve the header of the stream from the - # source and send it to the player. -print("Got the header") - -# {{{ debug -if __debug__: - logger.debug(" Trying to connect to the splitter at" + str(splitter)) -# }}} - -def connect_to_the_splitter(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect(splitter) - return sock - - # }}} - -# COMIENZO DE BUFFERING TIME -start_buffering_time = time.time() - -splitter_sock = connect_to_the_splitter() # Connect to the splitter in - # order to tell it who the - # gatherer is. 
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1" - -if __debug__: - logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none) - -def create_cluster_sock(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('',splitter_sock.getsockname()[PORT])) - return sock - - # }}} -cluster_sock = create_cluster_sock() -cluster_sock.settimeout(1) - -# {{{ This is the list of peers of the cluster. Each peer uses this -# structure to resend the blocks received from the splitter to these -# nodes. }}} -peer_list = [] - -# {{{ This store the insolidarity of the peers of the cluster. When -# the solidarity exceed a threshold, the peer is deleted from the list -# of peers. }}} -peer_insolidarity = {} - -#Commented due to gatherer removal -#gatherer = None - -def retrieve_the_list_of_peers(): - # {{{ - - #Commented due to gatherer removal - #global gatherer - number_of_peers = socket.ntohs( - struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0]) - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - '{}'.format(splitter_sock.getpeername()) + - ' Cluster size = ' + - str(number_of_peers)) - - # }}} - - #Commented due to gatherer removal - #message = splitter_sock.recv(struct.calcsize("4sH")) - #IP_addr, port = struct.unpack("4sH", message) - #IP_addr = socket.inet_ntoa(IP_addr) - #port = socket.ntohs(port) - #gatherer = (IP_addr, port) - while number_of_peers > 0: - message = splitter_sock.recv(struct.calcsize("4sH")) - IP_addr, port = struct.unpack("4sH", message) - IP_addr = socket.inet_ntoa(IP_addr) - port = socket.ntohs(port) - peer = (IP_addr, port) - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - 
'{}'.format(splitter_sock.getpeername()) + - ' Peer ' + - str(peer)) - - # }}} - - peer_list.append(peer) - peer_insolidarity[peer] = 0 - - #say hello to other peer - cluster_sock.sendto('', peer) # Send a empty block (this - # should be fast) - - number_of_peers -= 1 - - # }}} - -retrieve_the_list_of_peers() - -splitter_sock.close() - -# {{{ In this momment, most of the rest of peers of the cluster are -# sending blocks to the new peer. -# }}} - -# {{{ We define the buffer structure. Two components are needed: (1) -# the blocks buffer that stores the received blocks (2) the received -# buffer that stores if a block has been received or not. -# }}} -blocks = [None]*buffer_size -received = [False]*buffer_size - -# True if the peer has received "number_of_blocks" blocks. -blocks_exhausted = False - -# This variable holds the last block received from the splitter. It is -# used below to send the "last" block in the congestion avoiding mode. -last = '' - -# Number of times that the last block has been sent to the cluster (we -# send the block each time we receive a block). 
-counter = 0 - -def receive_and_feed(): - global last - global counter - global blocks_exhausted - global number_of_blocks - - try: - # {{{ Receive and send - #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s")) - message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string)) - #if len(message) == struct.calcsize("H1024s"): - if len(message) == struct.calcsize(block_format_string): - # {{{ Received a video block - #number, block = struct.unpack("H1024s", message) - number, block = struct.unpack(block_format_string, message) - block_number = socket.ntohs(number) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender)) - - # }}} - blocks[block_number % buffer_size] = block - received[block_number % buffer_size] = True - - if sender == splitter: - # {{{ Send the previously received block in burst mode. - - ''' - #Commented due to gatherer removal - cluster_sock.sendto(message, gatherer) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (gatherer) ' + - '{}'.format(gatherer)) - - # }}} - ''' - - while( (counter < len(peer_list)) & (counter > 0)): - peer = peer_list[counter] - cluster_sock.sendto(last, peer) - # if not is_player_working: - # cluster_sock.sendto('', peer) - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! 
- del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none) - - counter += 1 - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter = 0 - last = message - ''' - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True -''' - # }}} - else: - # {{{ Check if the peer is new - - if sender not in peer_list: - # The peer is new - peer_list.append(sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none) - peer_insolidarity[sender] = 0 - - # }}} - - if counter < len(peer_list): - # {{{ Send the last block in congestion avoiding mode - - peer = peer_list[counter] - cluster_sock.sendto(last, peer) - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! 
- del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter += 1 - - # }}} - - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True - - - return block_number - # }}} - elif message=='': - # {{{ Received a control block: "hi" - - if sender not in peer_list: - peer_list.append(sender) - peer_insolidarity[sender] = 0 - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none) - elif message=='bye': - peer_list.remove(sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none) - return -1 - # }}} - # }}} - except socket.timeout: - # {{{ - if __debug__: - logger.warning(Color.red + "cluster timeout!" + Color.none) - return -2 - # }}} - -# {{{ debug -if __debug__: - logger.debug(str(cluster_sock.getsockname()) + ' buffering ...') -# }}} - -if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none) - -''' -#Fill half the buffer -''' -#WARNING!!! 
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out) -#time.time() measures wall time, this means execution time plus waiting time - -last_block_number = 0 -error_counter = 0 - -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - while receive_and_feed()<=0: - pass -#go through the buffer -num_errors_buf = 0 -for x in range(block_to_play, block_to_play+(buffer_size/2)): - if received[x%buffer_size] == False: - num_errors_buf += 1 - - -''' -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - last_block_number = receive_and_feed() - if last_block_number <= 0: - error_counter += 1 -''' - -# FIN DE BUFFERING TIME -end_buffering_time = time.time() -buffering_time = end_buffering_time - start_buffering_time - -logger.info(str(cluster_sock.getsockname()) + ' buffering done') -logger.info('NUM_PEERS '+str(len(peer_list))) - -logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS -logger.critical('BUF_LEN '+str(buffer_size)+' bytes') -logger.critical('NUM_ERRORS_BUF '+str(error_counter)) -percentage_errors_buf = float(error_counter*100)/float(buffer_size/2) -logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf)) -#logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf)) -logger.critical('NUM_PEERS '+str(len(peer_list))) -# }}} - - -''' -#End buffering -''' - -player_connected = True - -def send_a_block_to_the_player(): - # {{{ - - global block_to_play - global player_sock - global player_connected - - if not received[block_to_play]: - message = struct.pack("!H", block_to_play) - cluster_sock.sendto(message, splitter) - - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) 
+ ' complaining about lost block ' + str(block_to_play) + Color.none) - - # La mayoría de los players se sincronizan antes si en lugar - # de no enviar nada se envía un bloque vacío. Esto habría que - # probarlo. - - try: - player_sock.sendall(blocks[block_to_play]) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' ' + - str(block_to_play) + - ' -> (player) ' + - '{}'.format(player_sock.getpeername())) - - # }}} - - except socket.error: - if __debug__: - logger.error(Color.red + 'player disconected!' + Color.none) - player_connected = False - return - except Exception as detail: - if __debug__: - logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none) - return - - received[block_to_play] = False - - # }}} - -#get a death time -#death_time = churn.new_death_time(20) -death_time = churn.new_death_time(weibull_scale) - -''' -#Once the buffer is half-filled, then start operating normally -''' -#while player_connected and not blocks_exhausted: -while player_connected and not churn.time_to_die(death_time): - - if __debug__ and death_time != churn.NEVER: - current_time = time.localtime() - logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none) - logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none) - - block_number = receive_and_feed() - if block_number>=0: - if (block_number % 256) == 0: - for i in peer_insolidarity: - peer_insolidarity[i] /= 2 - if _PLAYER_: - send_a_block_to_the_player() - block_to_play = (block_to_play + 1) % buffer_size - #elif block_number == -2: #this stops the peer after only one cluster timeout - # break - if __debug__: - logger.debug('NUM PEERS '+str(len(peer_list))) - -if __debug__: - logger.info(Color.cyan + 
'Goodbye!' + Color.none) -goodbye = 'bye' -cluster_sock.sendto(goodbye, splitter) -for x in xrange(3): - receive_and_feed() -for peer in peer_list: - cluster_sock.sendto(goodbye, peer) - diff --git a/sim-cluster/peer-x.py b/sim-cluster/peer-x.py deleted file mode 100755 index 7aaf998..0000000 --- a/sim-cluster/peer-x.py +++ /dev/null @@ -1,791 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} - -# Try running me as: -# -# ./splitter.py --source_hostname="localhost" -# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9999 & -# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9998 & - -''' -# VERSIÓN bloque-exclusivo DEL PEER, no hay mensajes de hola por parte de los peers. -# El peer recibe un bloque de stream exclusivo a su llegada, y lo reenvía a todos a modo de "hola". -# Con esto prentendemos acelerar el proceso de buffering y saturar menos la red. 
-# Usar con splitter-x.py y gatherer.py -''' - -# {{{ Imports - -import os -import logging -from colors import Color -from common import Common -import sys -import socket -import struct -import time -import argparse -import churn - -# }}} - -IP_ADDR = 0 -PORT = 1 - -# Number of blocks of the buffer -#buffer_size = 32 -#buffer_size = 256 -buffer_size = Common.buffer_size - -#cluster_port = 0 # OS default behavior will be used for port binding - -# Port to communicate with the player -listening_port = 9998 - -# Splitter endpoint -#splitter_hostname = '150.214.150.68' -splitter_hostname = 'localhost' -splitter_port = 4552 - -# Estas cuatro variables las debería indicar el splitter -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 -channel = '134.ogg' -#block_size = 1024 -block_size = Common.block_size - -# Number of bytes of the stream's header -#header_size = 1024*20*10 -#header_size = 1024*20 -header_size = Common.header_size - -# Controls if the stream is sent to a player -_PLAYER_ = True - -# Maximun number of blocks to receive from the splitter -number_of_blocks = 999999999 - -# The buffer of stream blocks -blocks = [None]*buffer_size -received = [False]*buffer_size - -# This variable holds the last block received from the splitter. It is -# used below to send the "last" block in the congestion avoiding mode. -last = '' - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) - -logging_level = logging.INFO - -logging_filename = '' - -console_logging = False -file_logging = False - -weibull_scale = 0 #for churn. 0 means no churn. - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is a peer node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks. 
(Default = {})'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. (Default = {})'.format(channel)) - -parser.add_argument('--listening_port', - help='Port used to communicate with the player. (Default = {})'.format(listening_port)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--logging_filename', - help='Name of the logging output file. (Default = "{})'.format(logging_filename)) - -parser.add_argument('--number_of_blocks', - help='Maximun number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming source. (Default = {})'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming source. (Default = {})'.format(source_port)) - -parser.add_argument('--splitter_hostname', - help='Hostname of the splitter. (Default = {})'.format(splitter_hostname)) - -parser.add_argument('--splitter_port', - help='Listening port of the splitter. (Default = {})'.format(splitter_port)) - -parser.add_argument('--no_player', help='Do no send the stream to a player.', action="store_true") - -parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. 
(Default = {})'.format(weibull_scale)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.listening_port: - listening_port = int(args.listening_port) -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.logging_filename: - logging_filename = args.logging_filename -if args.number_of_blocks: - number_of_blocks = int(args.number_of_blocks) -if args.source_hostname: - source_hostname = args.source_hostname -if args.source_port: - source_port = int(args.source_port) -if args.splitter_hostname: - splitter_hostname = args.splitter_hostname -if args.splitter_port: - splitter_port = args.splitter_port -if args.no_player: - _PLAYER_ = False -if args.churn: - weibull_scale = int(args.churn) - -# }}} - -print 'This is a P2PSP peer node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - - -# {{{ Logging initialization - -# create logger -logger = logging.getLogger('peer (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -if console_logging == True: - ch = logging.StreamHandler() - ch.setLevel(logging_level) - # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - # add formatter to ch - ch.setFormatter(formatter) - # add ch to logger - logger.addHandler(ch) - -#jalvaro -# create file handler and set the level -if args.logging_filename and file_logging == True: - fh = 
logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/peer-'+str(os.getpid())) - fh.setLevel(logging_level) - #add fh to logger - logger.addHandler(fh) -#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D -fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/peer-'+str(os.getpid())) -fh_timing.setLevel(logging.CRITICAL) -logger.addHandler(fh_timing) - -# }}} - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -source = (source_hostname, source_port) -splitter = (splitter_hostname, splitter_port) - -block_format_string = "H"+str(block_size)+"s" - -def get_player_socket(): - # {{{ - - #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - sock.listen(0) - - if __debug__: - logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none) - # }}} - - #sock, player = sock.baccept() - sock, player = sock.accept() - sock.setblocking(0) - return sock - - # }}} - -if _PLAYER_: - player_sock = get_player_socket() # The peer is blocked until the - # player establish a connection. 
- - # {{{ debug - - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' The player ' + - '{}'.format(player_sock.getpeername()) + - ' has establised a connection') - - # }}} - -def communicate_the_header(): - # {{{ - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - - # {{{ Receive the video header from the source and send it to the player - - # Nota: este proceso puede fallar si durante la recepción de los - # bloques el stream se acaba. Habría que realizar de nuevo la - # petición HTTP (como hace el servidor). - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none) - - data = source_sock.recv(header_size) - total_received = len(data) - player_sock.sendall(data) - while total_received < header_size: - if __debug__: - logger.debug(str(total_received)) - data = source_sock.recv(header_size - len(data)) - player_sock.sendall(data) - total_received += len(data) - - # }}} - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none) - - source_sock.close() - # }}} - -if _PLAYER_: - communicate_the_header() # Retrieve the header of the stream from the - # source and send it to the player. - -# {{{ debug -if __debug__: - logger.debug(" Trying to connect to the splitter at" + str(splitter)) -# }}} - -def connect_to_the_splitter(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect(splitter) - return sock - - # }}} - -# COMIENZO DE BUFFERING TIME -start_buffering_time = time.time() - -splitter_sock = connect_to_the_splitter() # Connect to the splitter in - # order to tell it who the - # gatherer is. 
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1" - -if __debug__: - logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none) - -def create_cluster_sock(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('',splitter_sock.getsockname()[PORT])) - return sock - - # }}} -cluster_sock = create_cluster_sock() -cluster_sock.settimeout(1) - -# {{{ This is the list of peers of the cluster. Each peer uses this -# structure to resend the blocks received from the splitter to these -# nodes. }}} -peer_list = [] - -# {{{ This store the insolidarity of the peers of the cluster. When -# the solidarity exceed a threshold, the peer is deleted from the list -# of peers. }}} -peer_insolidarity = {} - -gatherer = None - -def retrieve_first_block(): - global block_number - global buffer_size - global blocks - - #message = splitter_sock.recv(struct.calcsize("H1024s")) - message = splitter_sock.recv(struct.calcsize(block_format_string)) - print("First block received from splitter. 
"+str(len(message))+" bytes") - - #number, block = struct.unpack("H1024s", message) - number, block = struct.unpack(block_format_string, message) - block_number = socket.ntohs(number) - if __debug__: - logger.debug("First block number: "+str(block_number)) - logger.debug("First block in buffer position: "+str(block_number%buffer_size)) - # {{{ debug - ''' - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender)) - ''' - # }}} - blocks[block_number % buffer_size] = block - received[block_number % buffer_size] = True - return message - -#first_payload contains (block_number,block) -first_payload = retrieve_first_block() -last = first_payload - -def retrieve_the_list_of_peers(): - # {{{ - global gatherer - global first_payload - - number_of_peers = socket.ntohs( - struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0]) - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - '{}'.format(splitter_sock.getpeername()) + - ' Cluster size = ' + - str(number_of_peers)) - - # }}} - message = splitter_sock.recv(struct.calcsize("4sH")) - IP_addr, port = struct.unpack("4sH", message) - IP_addr = socket.inet_ntoa(IP_addr) - port = socket.ntohs(port) - gatherer = (IP_addr, port) - #send the first block to the gatherer, very important! 
- cluster_sock.sendto(first_payload, gatherer) - while number_of_peers > 0: - message = splitter_sock.recv(struct.calcsize("4sH")) - IP_addr, port = struct.unpack("4sH", message) - IP_addr = socket.inet_ntoa(IP_addr) - port = socket.ntohs(port) - peer = (IP_addr, port) - # {{{ debug - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - '{}'.format(splitter_sock.getpeername()) + - ' Peer ' + - str(peer)) - # }}} - peer_list.append(peer) - peer_insolidarity[peer] = 0 -# cluster_sock.sendto('', peer) # Send a empty block (this - # should be fast) - #send the block - cluster_sock.sendto(first_payload, peer) - number_of_peers -= 1 - # }}} - -retrieve_the_list_of_peers() - -splitter_sock.close() - -# {{{ In this momment, most of the rest of peers of the cluster are -# sending blocks to the new peer. -# }}} - -# {{{ We define the buffer structure. Two components are needed: (1) -# the blocks buffer that stores the received blocks (2) the received -# buffer that stores if a block has been received or not. -# }}} -#blocks = [None]*buffer_size -#received = [False]*buffer_size - -# True if the peer has received "number_of_blocks" blocks. -blocks_exhausted = False - - - -# Number of times that the last block has been sent to the cluster (we -# send the block each time we receive a block). 
-counter = 0 - -def receive_and_feed(): - global last - global counter - global blocks_exhausted - global number_of_blocks - - try: - # {{{ Receive and send - #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s")) - message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string)) - #if len(message) == struct.calcsize("H1024s"): - if len(message) == struct.calcsize(block_format_string): - # {{{ Received a video block - #number, block = struct.unpack("H1024s", message) - number, block = struct.unpack(block_format_string, message) - block_number = socket.ntohs(number) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender)) - - # }}} - blocks[block_number % buffer_size] = block - received[block_number % buffer_size] = True - - if sender == splitter: - # {{{ Send the previously received block in burst mode. - - cluster_sock.sendto(message, gatherer) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (gatherer) ' + - '{}'.format(gatherer)) - - # }}} - - if __debug__: - logger.debug("Sending block "+str(block_number)+" in burst mode") - logger.debug("Counter value: "+str(counter)) - - # finish sending the last block to all peers in "burst mode" before sending the new one - while( (counter < len(peer_list)) & (counter > 0)): - peer = peer_list[counter] - - if __debug__: - logger.debug("Counter: "+str(counter)+", Peer"+str(peer)) - - cluster_sock.sendto(last, peer) - # if not is_player_working: - # cluster_sock.sendto('', peer) - - if __debug__ and last=='': - logger.debug("I'M SENDING A '' MESSAGE") - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! 
- del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none) - - counter += 1 - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter = 0 - last = message - ''' - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True -''' - # }}} - else: - # the sender is not the splitter, hence it's a peer - # {{{ Check if the peer is new - - if sender not in peer_list: - # The peer is new - #peer_list.append(sender) - #peer_list.insert(0,sender) - peer_list.insert(counter,sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none) - peer_insolidarity[sender] = 0 - - # }}} - - if counter < len(peer_list): - # {{{ Send the last block in congestion avoiding mode - - peer = peer_list[counter] - cluster_sock.sendto(last, peer) - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! 
- del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter += 1 - - # }}} - - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True - - - return block_number - # }}} - elif message=='bye': - # {{{ Received a goodbye control block - - #if sender not in peer_list: - # peer_list.append(sender) - # peer_insolidarity[sender] = 0 - # if __debug__: - # logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none) - #else: - try: - peer_list.remove(sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none) - except: - pass - return -1 - # }}} - # }}} - except socket.timeout: - # {{{ - if __debug__: - logger.warning(Color.red + "cluster timeout!" + Color.none) - return -2 - # }}} - -# {{{ debug -if __debug__: - logger.debug(str(cluster_sock.getsockname()) + ' buffering ...') -# }}} - -if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none) - -''' -#Fill half the buffer -''' -#WARNING!!! 
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out) -#time.time() measures wall time, this means execution time plus waiting time - -last_block_number = 0 -error_counter = 0 -#start_buffering_time = time.time() - -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - while receive_and_feed()<=0: - pass -#go through the buffer -num_errors_buf = 0 -for x in range(block_to_play, block_to_play+(buffer_size/2)): - if received[x%buffer_size] == False: - num_errors_buf += 1 - -''' -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - last_block_number = receive_and_feed() - if last_block_number <= 0: - error_counter += 1 -''' -#FIN DE BUFFERING TIME -end_buffering_time = time.time() -buffering_time = end_buffering_time - start_buffering_time - -if __debug__: - logger.info(str(cluster_sock.getsockname()) + ' buffering done') - logger.info('NUM_PEERS '+str(len(peer_list))) - logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS - logger.critical('BUF_LEN '+str(buffer_size)+' bytes') - logger.critical('NUM_ERRORS_BUF '+str(error_counter)) - percentage_errors_buf = float(error_counter*100)/float(buffer_size/2) - logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf)) - #logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf)) - logger.critical('NUM_PEERS '+str(len(peer_list))) - # }}} - -''' -#End buffering -''' - -player_connected = True - -def send_a_block_to_the_player(): - # {{{ - - global block_to_play - global player_sock - global player_connected - - if not received[block_to_play]: - message = struct.pack("!H", block_to_play) - cluster_sock.sendto(message, splitter) - - if __debug__: - 
logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' complaining about lost block ' + str(block_to_play) + Color.none) - - # La mayoría de los players se sincronizan antes si en lugar - # de no enviar nada se envía un bloque vacío. Esto habría que - # probarlo. - - try: - player_sock.sendall(blocks[block_to_play]) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' ' + - str(block_to_play) + - ' -> (player) ' + - '{}'.format(player_sock.getpeername())) - - # }}} - - except socket.error: - if __debug__: - logger.error(Color.red + 'player disconected!' + Color.none) - player_connected = False - return - except Exception as detail: - if __debug__: - logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none) - return - - received[block_to_play] = False - - # }}} - -#get a death time -#death_time = churn.new_death_time(20) -death_time = churn.new_death_time(weibull_scale) - -''' -#Once the buffer is half-filled, then start operating normally -''' -#while player_connected and not blocks_exhausted: -while player_connected and not churn.time_to_die(death_time): - - if __debug__ and death_time != churn.NEVER: - current_time = time.localtime() - logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none) - logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none) - - block_number = receive_and_feed() - if block_number>=0: - if (block_number % 256) == 0: - for i in peer_insolidarity: - peer_insolidarity[i] /= 2 - if not received[block_to_play]: - print(str(cluster_sock.getsockname())+"Block "+str(block_to_play)+" missed") - if _PLAYER_: - send_a_block_to_the_player() - block_to_play = (block_to_play + 1) % buffer_size - #elif block_number == -2: #this 
stops the peer after only one cluster timeout - # break - if __debug__: - logger.debug('NUM PEERS '+str(len(peer_list))) - -if __debug__: - logger.info(Color.cyan + 'Goodbye!' + Color.none) -goodbye = 'bye' -cluster_sock.sendto(goodbye, splitter) -for x in xrange(3): - receive_and_feed() -for peer in peer_list: - cluster_sock.sendto(goodbye, peer) - diff --git a/sim-cluster/peer.py b/sim-cluster/peer.py deleted file mode 100755 index 7162d05..0000000 --- a/sim-cluster/peer.py +++ /dev/null @@ -1,740 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} - -# Try running me as: -# -# ./splitter.py --source_hostname="localhost" -# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9999 & -# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost" -# vlc http://localhost:9998 & - -# {{{ Imports - -''' -# VERSIÓN BÁSICA DEL PEER, no hay mensajes de hola por parte de los peers. 
-# Usar con splitter.py y gatherer.py (sin números) -''' - -import os -import logging -from colors import Color -from common import Common -import sys -import socket -import struct -import time -import argparse -import churn - -# }}} - -IP_ADDR = 0 -PORT = 1 - -# Number of blocks of the buffer -#buffer_size = 32 -#buffer_size = 256 -buffer_size = Common.buffer_size - -#cluster_port = 0 # OS default behavior will be used for port binding - -# Port to communicate with the player -listening_port = 9998 - -# Splitter endpoint -#splitter_hostname = '150.214.150.68' -splitter_hostname = 'localhost' -splitter_port = 4552 - -# Number of bytes of the stream's header -#header_size = 1024*20*10 -#header_size = 1024*20 -header_size = Common.header_size - -# Estas cuatro variables las debería indicar el splitter -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 -channel = '134.ogg' -#block_size = 1024 -block_size = Common.block_size - -# Controls if the stream is sent to a player -_PLAYER_ = True - -# Maximun number of blocks to receive from the splitter -number_of_blocks = 999999999 - - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) - -logging_level = logging.INFO - -logging_filename = '' - -console_logging = False -file_logging = True - -weibull_scale = 0 #for churn. 0 means no churn. - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is a peer node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks. (Default = {})'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. 
(Default = {})'.format(channel)) - -parser.add_argument('--listening_port', - help='Port used to communicate with the player. (Default = {})'.format(listening_port)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--logging_filename', - help='Name of the logging output file. (Default = "{})'.format(logging_filename)) - -parser.add_argument('--number_of_blocks', - help='Maximun number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming source. (Default = {})'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming source. (Default = {})'.format(source_port)) - -parser.add_argument('--splitter_hostname', - help='Hostname of the splitter. (Default = {})'.format(splitter_hostname)) - -parser.add_argument('--splitter_port', - help='Listening port of the splitter. (Default = {})'.format(splitter_port)) - -parser.add_argument('--no_player', help='Do no send the stream to a player.', action="store_true") - -parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. 
(Default = {})'.format(weibull_scale)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.listening_port: - listening_port = int(args.listening_port) -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.logging_filename: - logging_filename = args.logging_filename -if args.number_of_blocks: - number_of_blocks = int(args.number_of_blocks) -if args.source_hostname: - source_hostname = args.source_hostname -if args.source_port: - source_port = int(args.source_port) -if args.splitter_hostname: - splitter_hostname = args.splitter_hostname -if args.splitter_port: - splitter_port = args.splitter_port -if args.no_player: - _PLAYER_ = False -if args.churn: - weibull_scale = int(args.churn) - -# }}} - -print 'This is a P2PSP peer node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - - -# {{{ Logging initialization - -# create logger -logger = logging.getLogger('peer (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -if console_logging == True: - ch = logging.StreamHandler() - ch.setLevel(logging_level) - # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - # add formatter to ch - ch.setFormatter(formatter) - # add ch to logger - logger.addHandler(ch) - -#jalvaro -# create file handler and set the level -if args.logging_filename and file_logging == True: - fh = 
logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/peer-'+str(os.getpid())) - fh.setLevel(logging_level) - #add fh to logger - logger.addHandler(fh) -#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D -fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/peer-'+str(os.getpid())) -fh_timing.setLevel(logging.CRITICAL) -logger.addHandler(fh_timing) - -# }}} - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -source = (source_hostname, source_port) -splitter = (splitter_hostname, splitter_port) - -block_format_string = "H"+str(block_size)+"s" - -def get_player_socket(): - # {{{ - - #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - sock.listen(0) - - if __debug__: - logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none) - # }}} - - #sock, player = sock.baccept() - sock, player = sock.accept() - sock.setblocking(0) - return sock - - # }}} - -if _PLAYER_: - player_sock = get_player_socket() # The peer is blocked until the - # player establish a connection. 
- - # {{{ debug - - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' The player ' + - '{}'.format(player_sock.getpeername()) + - ' has establised a connection') - - # }}} - -def communicate_the_header(): - # {{{ - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - - # {{{ Receive the video header from the source and send it to the player - - # Nota: este proceso puede fallar si durante la recepción de los - # bloques el stream se acaba. Habría que realizar de nuevo la - # petición HTTP (como hace el servidor). - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none) - - data = source_sock.recv(header_size) - total_received = len(data) - player_sock.sendall(data) - while total_received < header_size: - if __debug__: - logger.debug(str(total_received)) - data = source_sock.recv(header_size - len(data)) - player_sock.sendall(data) - total_received += len(data) - - # }}} - - if __debug__: - logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none) - - source_sock.close() - # }}} - -if _PLAYER_: - communicate_the_header() # Retrieve the header of the stream from the - # source and send it to the player. - -# {{{ debug -if __debug__: - logger.debug(" Trying to connect to the splitter at" + str(splitter)) -# }}} - -def connect_to_the_splitter(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect(splitter) - return sock - - # }}} - -# COMIENZO DE BUFFERING TIME -start_buffering_time = time.time() - -splitter_sock = connect_to_the_splitter() # Connect to the splitter in - # order to tell it who the - # gatherer is. 
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1" - -if __debug__: - logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none) - -def create_cluster_sock(): - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('',splitter_sock.getsockname()[PORT])) - return sock - - # }}} -cluster_sock = create_cluster_sock() -cluster_sock.settimeout(1) - -# {{{ This is the list of peers of the cluster. Each peer uses this -# structure to resend the blocks received from the splitter to these -# nodes. }}} -peer_list = [] - -# {{{ This store the insolidarity of the peers of the cluster. When -# the solidarity exceed a threshold, the peer is deleted from the list -# of peers. }}} -peer_insolidarity = {} - -gatherer = None - -def retrieve_the_list_of_peers(): - # {{{ - global gatherer - number_of_peers = socket.ntohs( - struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0]) - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - '{}'.format(splitter_sock.getpeername()) + - ' Cluster size = ' + - str(number_of_peers)) - - # }}} - message = splitter_sock.recv(struct.calcsize("4sH")) - IP_addr, port = struct.unpack("4sH", message) - IP_addr = socket.inet_ntoa(IP_addr) - port = socket.ntohs(port) - gatherer = (IP_addr, port) - while number_of_peers > 0: - message = splitter_sock.recv(struct.calcsize("4sH")) - IP_addr, port = struct.unpack("4sH", message) - IP_addr = socket.inet_ntoa(IP_addr) - port = socket.ntohs(port) - peer = (IP_addr, port) - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(splitter_sock.getsockname()) + - ' <- ' + - '{}'.format(splitter_sock.getpeername()) + - ' Peer ' + - str(peer)) - - # }}} - - peer_list.append(peer) - peer_insolidarity[peer] = 0 - - 
cluster_sock.sendto('', peer) # Send a empty block (this - # should be fast) - - number_of_peers -= 1 - - # }}} - -retrieve_the_list_of_peers() - -splitter_sock.close() - -# {{{ In this momment, most of the rest of peers of the cluster are -# sending blocks to the new peer. -# }}} - -# {{{ We define the buffer structure. Two components are needed: (1) -# the blocks buffer that stores the received blocks (2) the received -# buffer that stores if a block has been received or not. -# }}} -blocks = [None]*buffer_size -received = [False]*buffer_size - -# True if the peer has received "number_of_blocks" blocks. -blocks_exhausted = False - -# This variable holds the last block received from the splitter. It is -# used below to send the "last" block in the congestion avoiding mode. -last = '' - -# Number of times that the last block has been sent to the cluster (we -# send the block each time we receive a block). -counter = 0 - -def receive_and_feed(): - global last - global counter - global blocks_exhausted - global number_of_blocks - - try: - # {{{ Receive and send - #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s")) - message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string)) - #if len(message) == struct.calcsize("H1024s"): - if len(message) == struct.calcsize(block_format_string): - # {{{ Received a video block - #number, block = struct.unpack("H1024s", message) - number, block = struct.unpack(block_format_string, message) - block_number = socket.ntohs(number) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - " <- " + - '{}'.format(block_number) + - ' ' + - '{}'.format(sender)) - - # }}} - blocks[block_number % buffer_size] = block - received[block_number % buffer_size] = True - - if sender == splitter: - # {{{ Send the previously received block in burst mode. 
- - cluster_sock.sendto(message, gatherer) - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (gatherer) ' + - '{}'.format(gatherer)) - - # }}} - - if __debug__: - logger.debug("Sending block "+str(block_number)+" in burst mode") - logger.debug("Counter value: "+str(counter)) - - while( (counter < len(peer_list)) & (counter > 0)): - peer = peer_list[counter] - - if __debug__: - logger.debug("Counter: "+str(counter)+", Peer"+str(peer)) - - cluster_sock.sendto(last, peer) - # if not is_player_working: - # cluster_sock.sendto('', peer) - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! - del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none) - - counter += 1 - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter = 0 - last = message - ''' - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True -''' - # }}} - else: - # {{{ Check if the peer is new - - if sender not in peer_list: - # The peer is new - peer_list.append(sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none) - peer_insolidarity[sender] = 0 - - # }}} - - if counter < len(peer_list): - # {{{ Send the last block in congestion avoiding mode - - peer = peer_list[counter] - cluster_sock.sendto(last, peer) - - peer_insolidarity[peer] += 1 - if peer_insolidarity[peer] > 64: # <- Important parameter!! 
- del peer_insolidarity[peer] - peer_list.remove(peer) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' ' + - str(block_number) + - ' -> (peer) ' + - '{}'.format(peer)) - - # }}} - - counter += 1 - - # }}} - - if args.number_of_blocks: - number_of_blocks -= 1 - if number_of_blocks <= 0: - blocks_exhausted = True - - - return block_number - # }}} - elif message=='': - # {{{ Received a control block - - if sender not in peer_list: - peer_list.append(sender) - peer_insolidarity[sender] = 0 - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none) - else: - peer_list.remove(sender) - if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none) - return -1 - # }}} - # }}} - except socket.timeout: - # {{{ - if __debug__: - logger.warning(Color.red + "cluster timeout!" + Color.none) - return -2 - # }}} - -# {{{ debug -if __debug__: - logger.debug(str(cluster_sock.getsockname()) + ' buffering ...') -# }}} - -if __debug__: - logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none) - -''' -#Fill half the buffer -''' -#WARNING!!! 
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out) -#time.time() measures wall time, this means execution time plus waiting time - -last_block_number = 0 -error_counter = 0 - -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - while receive_and_feed()<=0: - pass -#go through the buffer -num_errors_buf = 0 -for x in range(block_to_play, block_to_play+(buffer_size/2)): - if received[x%buffer_size] == False: - num_errors_buf += 1 - -''' -block_number = receive_and_feed() -while block_number<=0: - block_number = receive_and_feed() -block_to_play = block_number % buffer_size -for x in xrange(buffer_size/2): - last_block_number = receive_and_feed() - if last_block_number <= 0: - error_counter += 1 -''' - -# FIN DE BUFFERING TIME -end_buffering_time = time.time() -buffering_time = end_buffering_time - start_buffering_time - -if __debug__: - logger.info(str(cluster_sock.getsockname()) + ' buffering done') - logger.info('NUM_PEERS '+str(len(peer_list))) - logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS - logger.critical('BUF_LEN '+str(buffer_size)+' bytes') - logger.critical('NUM_ERRORS_BUF '+str(error_counter)) - percentage_errors_buf = float(error_counter*100)/float(buffer_size/2) - logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf)) - #logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf)) - logger.critical('NUM_PEERS '+str(len(peer_list))) - # }}} - -''' -#End buffering -''' - -player_connected = True - -def send_a_block_to_the_player(): - # {{{ - - global block_to_play - global player_sock - global player_connected - - if not received[block_to_play]: - message = struct.pack("!H", block_to_play) - cluster_sock.sendto(message, splitter) - - if __debug__: - logger.info(Color.cyan + 
str(cluster_sock.getsockname()) + ' complaining about lost block ' + str(block_to_play) + Color.none) - - # La mayoría de los players se sincronizan antes si en lugar - # de no enviar nada se envía un bloque vacío. Esto habría que - # probarlo. - - try: - player_sock.sendall(blocks[block_to_play]) - - # {{{ debug - if __debug__: - logger.debug('{}'.format(player_sock.getsockname()) + - ' ' + - str(block_to_play) + - ' -> (player) ' + - '{}'.format(player_sock.getpeername())) - - # }}} - - except socket.error: - if __debug__: - logger.error(Color.red + 'player disconected!' + Color.none) - player_connected = False - return - except Exception as detail: - if __debug__: - logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none) - return - - received[block_to_play] = False - - # }}} - -#get a death time -#death_time = churn.new_death_time(20) -death_time = churn.new_death_time(weibull_scale) - -''' -#Once the buffer is half-filled, then start operating normally -''' -#while player_connected and not blocks_exhausted: -while player_connected and not churn.time_to_die(death_time): - - if __debug__ and death_time != churn.NEVER: - current_time = time.localtime() - logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none) - logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none) - - block_number = receive_and_feed() - if block_number>=0: - if (block_number % 256) == 0: - for i in peer_insolidarity: - peer_insolidarity[i] /= 2 - if _PLAYER_: - send_a_block_to_the_player() - block_to_play = (block_to_play + 1) % buffer_size - #elif block_number == -2: #this stops the peer after only one cluster timeout - # break - if __debug__: - logger.debug('NUM PEERS '+str(len(peer_list))) - -if __debug__: 
- logger.info(Color.cyan + 'Goodbye!' + Color.none) -goodbye = '' -cluster_sock.sendto(goodbye, splitter) -for x in xrange(3): - receive_and_feed() -for peer in peer_list: - cluster_sock.sendto(goodbye, peer) - diff --git a/sim-cluster/run_oggfwd.sh b/sim-cluster/run_oggfwd.sh deleted file mode 100755 index 3b1013d..0000000 --- a/sim-cluster/run_oggfwd.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -icecast_name="localhost" -icecast_port=4551 -video=/home/jalvaro/workspace/sim/gnagl.ogg -#video=/home/jalvaro/workspaces-eclipse/P2PSP/Big_Buck_Bunny_small.ogv -#video=/home/jalvaro/workspaces-eclipse/P2PSP/sample48.ogg -password=1qaz -channel=134.ogg - -usage() { - echo $0 - echo " [-c (icecast mount-point, \"$channel\" by default)]" - echo " [-w (icecast password, \"$password\" by default)]" - echo " [-a (icecast hostname, $icecast_name by default)]" - echo " [-p (icecast port, $icecast_port by default)]" - echo " [-v (video file-name, \"$video\" by default)]" - echo " [-? (help)]" -} - -echo $0: parsing: $@ - -while getopts "c:w:a:p:v:?" opt; do - case ${opt} in - c) - channel="${OPTARG}" - ;; - w) - password="${OPTARG}" - ;; - a) - icecast_name="${OPTARG}" - ;; - p) - icecast_port="${OPTARG}" - ;; - v) - video="${OPTARG}" - ;; - ?) - usage - exit 0 - ;; - \?) - echo "Invalid option: -${OPTARG}" >&2 - usage - exit 1 - ;; - :) - echo "Option -${OPTARG} requires an argument." >&2 - usage - exit 1 - ;; - esac -done - -#old_IFS=$IFS -#IFS=":" -#icecast_host=${icecast[0]} -#icecast_port=${icecast[1]} -#IFS=$old_IFS - -echo "Feeding http://$icecast_name:$icecast_port/$channel with \"$video\" forever ..." - -set -x - -while true -do - oggfwd $icecast_name $icecast_port $password $channel < $video -done - -set +x diff --git a/sim-cluster/simulation.sh b/sim-cluster/simulation.sh deleted file mode 100755 index 857667d..0000000 --- a/sim-cluster/simulation.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -set -x - -# Simulates flash-crowd peer churn. 
- -#number_of_blocks=100 -number_of_peers=2 -churn_scale=0 -buffer_size=32 -block_size=1024 - -usage() { - echo $0 - echo "Simulates flash-crowd peer churn." - echo "Parameters:" -# echo " [-b (number of blocks, $number_of_blocks by default)]" - echo " [-n (number of peers, $number_of_peers by default)]" - echo " [-c (churn scale, $churn_scale by default, meaning no churn)]" - echo " [-s (buffer size in blocks, $buffer_size by default)]" - echo " [-l (block size in bytes, $block_size by default)]" - echo " [-? (help)]" -} - -echo $0: parsing: $@ - -while getopts "b:n:c:s:l:?" opt; do - case ${opt} in - b) - number_of_blocks="${OPTARG}" - ;; - n) - number_of_peers="${OPTARG}" - ;; - c) - churn_scale="${OPTARG}" - ;; - s) - buffer_size="${OPTARG}" - ;; - l) - block_size="${OPTARG}" - ;; - ?) - usage - exit 0 - ;; - \?) - echo "Invalid option: -${OPTARG}" >&2 - usage - exit 1 - ;; - :) - echo "Option -${OPTARG} requires an argument." >&2 - usage - exit 1 - ;; - esac -done - -echo "block_size is ${block_size}" - -#clear previous output files -rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/* -rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/* - -#start the splitter -xterm -l -lf ./output/salida_splitter.txt -e "./splitter.py --source_hostname=localhost --logging_level=INFO --buffer_size=$buffer_size --block_size=$block_size"& - -#sleep 1 - -#start the gatherer -#xterm -l -lf ./output/salida_gatherer.txt -e "./gatherer.py --splitter_hostname=localhost --source_hostname=localhost --logging_level=INFO --buffer_size=$buffer_size --block_size=$block_size" & - -sleep 1s - -#start the player -#vlc http://localhost:9999 & - -sleep 5s -#x(){ -COUNTER=0 -while [ $COUNTER -lt $((number_of_peers-1)) ]; -do - ./peer-h.py --splitter_hostname=localhost --source_hostname=localhost --no_player --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=${churn_scale} --buffer_size=${buffer_size} 
--block_size=$block_size& - let COUNTER=COUNTER+1 -done -#} -rm ./output/salida_peer_player.txt -xterm -l -lf ./output/salida_peer_player.txt -e "./peer-h.py --splitter_hostname=localhost --source_hostname=localhost --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=${churn_scale} --buffer_size=${buffer_size} --block_size=$block_size"& - -sleep 1s -vlc http://localhost:9998 & -set +x diff --git a/sim-cluster/simulator.sh b/sim-cluster/simulator.sh deleted file mode 100755 index 31cc881..0000000 --- a/sim-cluster/simulator.sh +++ /dev/null @@ -1,82 +0,0 @@ -set -x -home="/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster" -strategy="bloque-exclusivo-nuevos-siguiente-block768" -sleep_time=100s -sleep_time_oggfwd=30s -num_peers_array=(10 50 100 300) -#upper_limit=400 - -cd ${home}/timing/ -rm -rf ./* -cd .. - -pkill oggfwd - -for num_peers in ${num_peers_array[*]} -do - -##32 BITS - echo "Experiment 1 for ${num_peers} peers, buffer 32" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/1/${strategy}/buffer-32bits/${num_peers} - mv ${home}/timing/peer* ${home}/timing/1/${strategy}/buffer-32bits/${num_peers} - pkill oggfwd - - echo "Experiment 2 for ${num_peers} peers, buffer 32" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/2/${strategy}/buffer-32bits/${num_peers} - mv ${home}/timing/peer* ${home}/timing/2/${strategy}/buffer-32bits/${num_peers} - pkill oggfwd - - echo "Experiment 3 for ${num_peers} peers, buffer 32" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/3/${strategy}/buffer-32bits/${num_peers} - mv ${home}/timing/peer* 
${home}/timing/3/${strategy}/buffer-32bits/${num_peers} - pkill oggfwd - -#256 BITS - echo "Experiment 1 for ${num_peers} peers, buffer 256" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/1/${strategy}/buffer-256bits/${num_peers} - mv ${home}/timing/peer* ${home}/timing/1/${strategy}/buffer-256bits/${num_peers} - pkill oggfwd - - echo "Experiment 2 for ${num_peers} peers, buffer 256" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/2/${strategy}/buffer-256bits/${num_peers} - mv ${home}/timing/peer* ${home}/timing/2/${strategy}/buffer-256bits/${num_peers} - pkill oggfwd - - echo "Experiment 3 for ${num_peers} peers, buffer 256" - ./run_oggfwd.sh & - sleep ${sleep_time_oggfwd} - ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 & - sleep ${sleep_time} - ${home}/stop_simulation.sh - mkdir -p ${home}/timing/3/${strategy}/buffer-256bits/${num_peers} - mv ${home}/timing/peer* ${home}/timing/3/${strategy}/buffer-256bits/${num_peers} - pkill oggfwd - -done -set +x - diff --git a/sim-cluster/splitter-x.py b/sim-cluster/splitter-x.py deleted file mode 100755 index 69a39dd..0000000 --- a/sim-cluster/splitter-x.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} - -# Try running me as: -# -# xterm -e "./splitter.py" & -# xterm -e './gatherer.py --splitter_hostname="localhost"' & -# vlc http://localhost:9999 & - -''' -# VERSIÓN bloque-exclusivo DEL SPLITTER. -# El splitter envía un bloque de stream exclusivo a cada peer entrante. El peer reenvía dicho bloque a todos a modo de "hola". -# Con esto prentendemos acelerar el proceso de buffering y saturar menos la red. -# Usar con peer-x.py y gatherer.py -''' - -# {{{ imports - -import logging -from colors import Color -from common import Common -import socket -from blocking_TCP_socket import blocking_TCP_socket -import sys -import struct -import time -#import thread -from threading import Thread -from threading import Lock -from threading import RLock -from time import gmtime, strftime -import os -import argparse - -# }}} - -total_blocks = 1 #starts in 1 to avoid div-by-zero issues when calculating the percentage -total_blocks = long(total_blocks) #to declare it long. 
Alternatively: total_blocks = 0L -loss_percentage = 0 -loss_percentage = float(loss_percentage) #the same with the percentage of loss - -IP_ADDR = 0 -PORT = 1 - - -#buffer_size = 32 # Buffer size in the peers and the gatherer -#block_size = 1024 -buffer_size = Common.buffer_size -block_size = Common.block_size - -channel = '134.ogg' -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 -listening_port = 4552 - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) -logging_level = logging.INFO - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is the splitter node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming server. (Default = "{}")'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming server. (Default = {})'.format(source_port)) - -parser.add_argument('--listening_port', - help='Port to talk with the gatherer and peers. 
(Default = {})'.format(listening_port)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.source_hostname: - source_hostname = str(args.source_hostname) -if args.source_port: - source_port = int(args.source_port) -if args.listening_port: - listening_port = int(args.listening_port) - -# }}} - -print 'This is a P2PSP splitter node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - -# {{{ Logging initialization - -# Echar un vistazo a logging.config. 
- -# create logger -logger = logging.getLogger('splitter (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -ch = logging.StreamHandler() -ch.setLevel(logging_level) - -# create formatter -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -#formatter = logging.Formatter("%(asctime)s [%(funcName)s: %(filename)s,%(lineno)d] %(message)s") - -# add formatter to ch -ch.setFormatter(formatter) - -# add ch to logger -logger.addHandler(ch) - -#logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S', -# level=logging.DEBUG) -# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S') -# else: -# print 'Running in release mode' -# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S', -# level=logging.CRITICAL) - -# }}} - -source = (source_hostname, source_port) - -# }}} - -# The list of peers (included the gatherer) -peer_list = [] - -# The number of the last received block from the streaming server -block_number = 0 - -# Used to find the peer to which a block has been sent -destination_of_block = [('0.0.0.0',0) for i in xrange(buffer_size)] - -# Unreliability rate of a peer -unreliability = {} - -# Complaining rate of a peer -complains = {} - -# The peer_list iterator -peer_index = 0 - -# A lock to perform mutual exclusion for accesing to the list of peers -peer_list_lock = Lock() -# A lock for source_sock -source_sock_lock = Lock() - -gatherer = None - -block_format_string = "H"+str(block_size)+"s" - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -# {{{ Handle one telnet client - -class get_the_state(Thread): - # {{{ - - global peer_list - - def __init__(self): - Thread.__init__(self) - - def run(self): - sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - # This does not work in Windows systems. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port+1)) - - logger.info(Color.cyan + - '{}'.format(sock.getsockname()) + - ' listening (telnet) on port ' + - str(listening_port+1) + - Color.none) - - sock.listen(0) - try: - while True: - connection = sock.accept()[0] - message = 'a' - while message[0] != 'q': - connection.sendall('Gatherer = ' + str(gatherer) + '\n') - connection.sendall('Number of peers = ' + str(len(peer_list)) + '\n') - counter = 0 - for p in peer_list: - loss_percentage = float(unreliability[p]*100)/float(total_blocks) - connection.sendall(str(counter) + - '\t' + str(p) + - '\t' + 'unreliability=' + str(unreliability[p]) +' ({:.2}%)'.format(loss_percentage)+ - '\t' + 'complains=' + str(complains[p]) + - '\n') - counter += 1 - connection.sendall('\n Total blocks sent = '+str(total_blocks)) - connection.sendall(Color.cyan + '\nEnter a line that beggings with "q" to exit or any other key to continue\n' + Color.none) - message = connection.recv(2) - - connection.close() - - except: - pass - -get_the_state().setDaemon(True) -get_the_state().daemon=True -get_the_state().start() - -# }}} - -# Return the connection socket used to establish the connections of the -# peers (and the gatherer) (Week 3) - -def get_peer_connection_socket(): - #sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - # This does not work in Windows systems. 
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - - sock.bind( ('', listening_port) ) - sock.listen(100) - - return sock - -peer_connection_sock = get_peer_connection_socket() - -logger.info(Color.cyan + - '{}'.format(peer_connection_sock.getsockname()) + - ' waiting for the gatherer on port ' + - str(listening_port) + - Color.none) - -gatherer = peer_connection_sock.accept()[1] - -logger.info(Color.cyan + - '{}'.format(peer_connection_sock.getsockname()) + - ' the gatherer is ' + - str(gatherer) + - Color.none) - -# {{{ Handle peer arrivals. - -class handle_arrivals(Thread): - # {{{ - - def __init__(self): - Thread.__init__(self) - - def run(self): - - global block_number - global total_blocks - global destination_of_block - global unreliability - global complains - - while True: - # {{{ Wait for the connection from the peer /PS0/ - - peer_serve_socket, peer = peer_connection_sock.accept() - - # {{{ debug - if __debug__: - logger.debug('{}'.format(peer_serve_socket.getsockname()) + - ' Accepted connection from peer ' + - str(peer)) - # }}} - - # }}} - - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(peer_serve_socket.getsockname()) + - ' Sending the list of peers') - # }}} - - try: - #get a new stream block for the incoming peer - block = receive_next_block() - - # {{{ debug - if __debug__: - logger.debug('{}'.format(source_sock.getsockname()) + - Color.green + ' <- ' + Color.none + - '{}'.format(source_sock.getpeername()) + " (source)" + - ' ' + - '{}'.format(block_number)) - # }}} - - - try: - peer_list_lock.acquire() #get the lock - temp_peer_list = list(peer_list) #for later use outside the critical section. 
http://henry.precheur.org/python/copy_list - - peer_list.append(peer) - temp_block_number = block_number #for later use outside the critical secion - total_blocks += 1 - destination_of_block[block_number % buffer_size] = peer - block_number = (block_number + 1) % 65536 - except Exception: - print("Exception adding the peer to the peer list in handle arrivals") - finally: - peer_list_lock.release() #release the lock - - if __debug__: - logger.debug("First block sent to peer "+str(peer)+" : "+str(temp_block_number)) - logger.debug("First block sent to peer "+str(peer)+" in buffer position : "+str((temp_block_number)%buffer_size)) - - unreliability[peer] = 0 - complains[peer] = 0 - - #send the block - #message = struct.pack("H1024s", socket.htons(temp_block_number), block) - message = struct.pack(block_format_string, socket.htons(temp_block_number), block) - peer_serve_socket.sendall(message) - - #send the list of peers - message = struct.pack("H", socket.htons(len(temp_peer_list))) - peer_serve_socket.sendall(message) - message = struct.pack("4sH", socket.inet_aton(gatherer[IP_ADDR]),socket.htons(gatherer[PORT])) - peer_serve_socket.sendall(message) - for p in temp_peer_list: - message = struct.pack("4sH", socket.inet_aton(p[IP_ADDR]),socket.htons(p[PORT])) - peer_serve_socket.sendall(message) - - # {{{ debug - - if __debug__: - logger.debug(str(len(temp_peer_list)) + ' peers sent (plus gatherer)') - - # }}} - - # }}} - - # {{{ Close the TCP socket with the peer/gatherer - - peer_serve_socket.close() - - # }}} - - logger.info(Color.cyan + - str(peer) + - ' has joined the cluster' + - Color.none) - except: - print("Exception in handle_arrivals") - - - # }}} - -handle_arrivals().setDaemon(True) -handle_arrivals().daemon=True -handle_arrivals().start() - -# }}} - -# {{{ Create the socket to send the blocks of stream to the peers/gatherer - -def create_cluster_sock(listening_port): - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # This does not work in 
Windows systems. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - #peer_socket.bind(('',peer_connection_sock.getsockname()[PORT])) - - return sock - -cluster_sock = create_cluster_sock(listening_port) - -# }}} - -# {{{ Handle peer/gatherer complains and goodbye messages (Week 10) - -class listen_to_the_cluster(Thread): - # {{{ - - def __init__(self): - Thread.__init__(self) - - def run(self): - - global peer_index - - while True: - # {{{ debug - if __debug__: - logger.debug('waiting for messages from the cluster') - # }}} - message, sender = cluster_sock.recvfrom(struct.calcsize("H")) - - #if len(message) == 0: - if message == 'bye': - try: - peer_list_lock.acquire() #get the lock - peer_list.remove(sender) - logger.info(Color.cyan + str(sender) + ' has left the cluster' + Color.none) - except: - logger.warning(Color.blue + 'Received a googbye message from ' + str(sender) + ' which is not in the list of peers' + Color.none) - pass - finally: - peer_list_lock.release() #release the lock - else: - # The sender of the packet complains and the packet - # comes with the index of a lost block - try: - peer_list_lock.acquire() #get the lock - lost_block = struct.unpack("!H",message)[0] - destination = destination_of_block[lost_block] - - logger.debug(Color.cyan + str(sender) + ' complains about lost block ' + str(lost_block) + ' sent to ' + str(destination) + Color.none) - unreliability[destination] += 1 - finally: - peer_list_lock.release() #release the lock - -'''jalvaro: i'm commenting this so peers are not expeled -#if the sender of the complaint is the gatherer then the splitter removes the infractor inmediately - if sender == gatherer: - try: - peer_list.remove(destination) - del unreliability[destination] - del complains[destination] - - logger.info(Color.cyan + - str(destination) + - ' has been removed' + - Color.none) - except: - pass - - else: - try: - unreliability[destination] += 1 - if 
unreliability[destination] > len(peer_list): - # Too many complains about an unsuportive peer - peer_list.remove(destination) - del unreliability[destination] - del complains[destination] - - logger.info(Color.cyan + - str(destination) + - ' has been removed' + - Color.none) - - except: - pass -''' - - # }}} - -listen_to_the_cluster().setDaemon(True) -listen_to_the_cluster().daemon=True -listen_to_the_cluster().start() - -# }}} - -# {{{ Connect to the streaming server and request the channel (week 2) - -source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) -source_sock.connect(source) - -# {{{ debug - -if __debug__: - logger.debug('{}'.format(source_sock.getsockname()) + - ' connected to the video source ' + - '{}'.format(source_sock.getpeername())) - -# }}} - -# {{{ Request the video to the source - -GET_message = 'GET /' + channel + " HTTP/1.1\r\n\r\n" -source_sock.sendall(GET_message) - -# }}} - -# {{{ debug - -if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' sending the rest of the stream ...') - -# }}} - -# {{{ Feed the peers - -while True: - - # (Week 2) - def receive_next_block(): - # {{{ - - global source_sock - - source_sock_lock.acquire() #get the lock - try: - block = source_sock.recv(block_size) - tries = 0 - while len(block) < block_size: - tries += 1 - if tries > 3: - - # {{{ debug - if __debug__: - logger.debug('GET') - # }}} - - time.sleep(1) - source_sock.close() - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall(GET_message) - - #block += source_sock.recv(1024-len(block)) - block += source_sock.recv(block_size - len(block)) - finally: - source_sock_lock.release() #release the lock - return block - - # }}} - - block = receive_next_block() - #block = source_sock.brecv(block_size) - - # {{{ debug - if __debug__: - - logger.debug('{}'.format(source_sock.getsockname()) + - Color.green + ' <- ' + Color.none + - 
'{}'.format(source_sock.getpeername()) + " (source)" + - ' ' + - '{}'.format(block_number)) - # }}} - - ''' - Nuevo código - ''' - peer_list_lock.acquire() #get peer_list_lock - try: - len_peer_list = len(peer_list) - try: - peer = peer_list[peer_index] - except: - try: - peer = peer_list[0] - except: - peer = gatherer - len_peer_list = 1 - destination_of_block[block_number % buffer_size] = peer - peer_index = (peer_index + 1) % len_peer_list - temp_block_number = block_number #for later use outside the critical section - block_number = (block_number + 1) % 65536 - total_blocks += 1 - finally: - peer_list_lock.release() # release peer_list_lock - - #message = struct.pack("H1024s", socket.htons(temp_block_number), block) - message = struct.pack(block_format_string, socket.htons(temp_block_number), block) - cluster_sock.sendto(message, peer) - - if peer == gatherer: - logger.debug('{}'.format(cluster_sock.getsockname())+Color.green+' -> '+Color.none+ str(peer)+' (gatherer) '+str(temp_block_number)) - else: - logger.debug('{}'.format(cluster_sock.getsockname())+Color.green+' -> '+Color.none+ str(peer)+' (peer) '+str(temp_block_number)) - - logger.debug("NUM_PEERS "+str(len(peer_list))) - - ''' - Fin del nuevo código - ''' - - ''' - #Código antiguo - #with peer_list_lock: - #peer_list_lock.acquire() - len_peer_list = len(peer_list) - #if peer_index < len_peer_list: - try: - peer = peer_list[peer_index] - except: - try: - peer = peer_list[0] - except: - peer = gatherer - len_peer_list = 1 - #peer_list_lock.release() - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - Color.green + ' -> ' + Color.none + - '{}'.format(peer) + - ' ' + - '{}'.format(block_number)) - # }}} - - message = struct.pack("H1024s", socket.htons(block_number), block) - #if not (block_number%2)==0: - cluster_sock.sendto(message, peer) - # Ojo, a veces peta diciendo: "IndexError: list index out of range" - destination_of_block[block_number % buffer_size] = peer - 
- peer_index = (peer_index + 1) % len_peer_list - - block_number = (block_number + 1) % 65536 - - total_blocks += 1 - #Fin del código antiguo - ''' - - ''' - #decrement unreliability and complaints after every 256 packets - if (block_number % 256) == 0: - for i in unreliability: - unreliability[i] /= 2 - for i in complains: - complains[i] /= 2 - ''' - -# }}} diff --git a/sim-cluster/splitter.py b/sim-cluster/splitter.py deleted file mode 100755 index ba2f8ad..0000000 --- a/sim-cluster/splitter.py +++ /dev/null @@ -1,730 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# Note: if you run the python interpreter in the optimzed mode (-O), -# debug messages will be disabled. - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Vicente González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -# }}} - -# Try running me as: -# -# xterm -e "./splitter.py" & -# xterm -e './gatherer.py --splitter_hostname="localhost"' & -# vlc http://localhost:9999 & - -# {{{ imports - -import logging -from colors import Color -from common import Common -import socket -from blocking_TCP_socket import blocking_TCP_socket -import sys -import struct -import time -#import thread -from threading import Thread -from threading import Lock -from threading import RLock -from time import gmtime, strftime -import os -import argparse - -# }}} - -total_blocks = 1 #starts in 1 to avoid div-by-zero issues when calculating the percentage -total_blocks = long(total_blocks) #to declare it long. Alternatively: total_blocks = 0L -loss_percentage = 0 -loss_percentage = float(loss_percentage) #the same with the percentage of loss - -IP_ADDR = 0 -PORT = 1 - -# Buffer size in the peers and the gatherer -#buffer_size = 32 -#block_size = 1024 -buffer_size = Common.buffer_size -block_size = Common.block_size - -channel = '134.ogg' -#source_hostname = '150.214.150.68' -source_hostname = 'localhost' -source_port = 4551 -listening_port = 4552 - -logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan), - # 'WARNING' (blue), 'ERROR' (red), - # 'CRITICAL' (yellow) -logging_level = logging.INFO - -# {{{ Args handing - -print 'Argument List:', str(sys.argv) - -parser = argparse.ArgumentParser( - description='This is the splitter node of a P2PSP network.') - -parser.add_argument('--buffer_size', - help='size of the video buffer in blocks'.format(buffer_size)) - -parser.add_argument('--block_size', - help='Block size in bytes. (Default = {})'.format(block_size)) - -parser.add_argument('--channel', - help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel)) - -parser.add_argument('--logging_levelname', - help='Name of the channel served by the streaming source. 
(Default = "{}")'.format(logging_levelname)) - -parser.add_argument('--source_hostname', - help='Hostname of the streaming server. (Default = "{}")'.format(source_hostname)) - -parser.add_argument('--source_port', - help='Listening port of the streaming server. (Default = {})'.format(source_port)) - -parser.add_argument('--listening_port', - help='Port to talk with the gatherer and peers. (Default = {})'.format(listening_port)) - -args = parser.parse_known_args()[0] -if args.buffer_size: - buffer_size = int(args.buffer_size) -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.logging_levelname == 'DEBUG': - logging_level = logging.DEBUG -if args.logging_levelname == 'INFO': - logging_level = logging.INFO -if args.logging_levelname == 'WARNING': - logging_level = logging.WARNING -if args.logging_levelname == 'ERROR': - logging_level = logging.ERROR -if args.logging_levelname == 'CRITICAL': - logging_level = logging.CRITICAL -if args.source_hostname: - source_hostname = str(args.source_hostname) -if args.source_port: - source_port = int(args.source_port) -if args.listening_port: - listening_port = int(args.listening_port) - -# }}} - -print 'This is a P2PSP splitter node ...', -if __debug__: - print 'running in debug mode' -else: - print 'running in release mode' - -# {{{ Logging initialization - -# Echar un vistazo a logging.config. 
- -# create logger -logger = logging.getLogger('splitter (' + str(os.getpid()) + ')') -logger.setLevel(logging_level) - -# create console handler and set the level -ch = logging.StreamHandler() -ch.setLevel(logging_level) - -# create formatter -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -#formatter = logging.Formatter("%(asctime)s [%(funcName)s: %(filename)s,%(lineno)d] %(message)s") - -# add formatter to ch -ch.setFormatter(formatter) - -# add ch to logger -logger.addHandler(ch) - -#logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S', -# level=logging.DEBUG) -# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S') -# else: -# print 'Running in release mode' -# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', -# datefmt='%H:%M:%S', -# level=logging.CRITICAL) - -# }}} - -source = (source_hostname, source_port) - -# }}} - -# The list of peers (included the gatherer) -peer_list = [] - -# The number of the last received block from the streaming server -block_number = 0 - - - -# Used to find the peer to which a block has been sent -destination_of_block = [('0.0.0.0',0) for i in xrange(buffer_size)] - -# Unreliability rate of a peer -unreliability = {} - -# Complaining rate of a peer -complains = {} - -# The peer_list iterator -peer_index = 0 - -# A lock to perform mutual exclusion for accesing to the list of peers -peer_list_lock = Lock() - -#gatherer = None - -block_format_string = "H"+str(block_size)+"s" - -print("Buffer size: "+str(buffer_size)+" blocks") -print("Block size: "+str(block_size)+" bytes") -logger.info("Buffer size: "+str(buffer_size)+" blocks") -logger.info("Block size: "+str(block_size)+" bytes") - -# {{{ Handle one telnet client - -class get_the_state(Thread): - # {{{ - - global peer_list - - def __init__(self): - Thread.__init__(self) - - def run(self): - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - # This 
does not work in Windows systems. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port+1)) - - logger.info(Color.cyan + - '{}'.format(sock.getsockname()) + - ' listening (telnet) on port ' + - str(listening_port+1) + - Color.none) - - sock.listen(0) - try: - while True: - connection = sock.accept()[0] - message = 'a' - while message[0] != 'q': - #Commented due to gatherer removal - #connection.sendall('Gatherer = ' + str(gatherer) + '\n') - connection.sendall('Number of peers = ' + str(len(peer_list)) + '\n') - counter = 0 - for p in peer_list: - loss_percentage = float(unreliability[p]*100)/float(total_blocks) - connection.sendall(str(counter) + - '\t' + str(p) + - '\t' + 'unreliability=' + str(unreliability[p]) +' ({:.2}%)'.format(loss_percentage)+ - '\t' + 'complains=' + str(complains[p]) + - '\n') - counter += 1 - connection.sendall('\n Total blocks sent = '+str(total_blocks)) - connection.sendall(Color.cyan + '\nEnter a line that beggings with "q" to exit or any other key to continue\n' + Color.none) - message = connection.recv(2) - - connection.close() - - except: - pass - -get_the_state().setDaemon(True) -get_the_state().daemon=True -get_the_state().start() - -# }}} - -# Return the connection socket used to establish the connections of the -# peers (and the gatherer) (Week 3) - -def get_peer_connection_socket(): - #sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - # This does not work in Windows systems. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - - sock.bind( ('', listening_port) ) - #sock.listen(5) - sock.listen(socket.SOMAXCONN) #set the connection queue to the max! 
- - return sock - -peer_connection_sock = get_peer_connection_socket() - -''' -#Commented due to gatherer removal -logger.info(Color.cyan + - '{}'.format(peer_connection_sock.getsockname()) + - ' waiting for the gatherer on port ' + - str(listening_port) + - Color.none) - -gatherer = peer_connection_sock.accept()[1] - -logger.info(Color.cyan + - '{}'.format(peer_connection_sock.getsockname()) + - ' the gatherer is ' + - str(gatherer) + - Color.none) -''' - -# {{{ Handle the arrival of a peer. This class is called y handle_arrivals -class handle_one_arrival(Thread): - peer_serve_socket = "" - peer = "" - - def __init__(self, peer_serve_socket, peer): - Thread.__init__(self) - self.peer_serve_socket = peer_serve_socket - self.peer = peer - - def run(self): - - global peer_list - global unreliability - global complains - global logger - - # {{{ debug - if __debug__: - logger.debug('{}'.format(self.peer_serve_socket.getsockname()) + - ' Accepted connection from peer ' + - str(self.peer)) - # }}} - - # {{{ Send the list of peers to the peer /PS4/ - # {{{ debug - - if __debug__: - logger.debug('{}'.format(self.peer_serve_socket.getsockname()) + - ' Sending the list of peers') - # }}} - - #get a copy of peer_list to prevent race conditions! 
- #list slicing ([:]) is faster than the list() method according to http://stackoverflow.com/questions/2612802/how-to-clone-a-list-in-python - #peer_list_copy = peer_list[:] - - #message = struct.pack("H", socket.htons(len(peer_list_copy))) - message = struct.pack("H", socket.htons(len(peer_list))) - self.peer_serve_socket.sendall(message) - - #Commented due to gatherer removal - #message = struct.pack("4sH", socket.inet_aton(gatherer[IP_ADDR]),socket.htons(gatherer[PORT])) - #self.peer_serve_socket.sendall(message) - - #for p in peer_list_copy: - for p in peer_list: - message = struct.pack( - "4sH", socket.inet_aton(p[IP_ADDR]), - socket.htons(p[PORT])) - self.peer_serve_socket.sendall(message) - - # {{{ debug - - if __debug__: - logger.debug(str(len(peer_list)) + ' peers sent') - - # }}} - - # }}} - - # {{{ Close the TCP socket with the peer/gatherer - - self.peer_serve_socket.close() - - # }}} - - #add peer to the REAL peer_list - peer_list.append(self.peer) - unreliability[self.peer] = 0 - complains[self.peer] = 0 - - logger.info(Color.cyan + - str(self.peer) + - ' has joined the cluster' + - Color.none) - # }}} - -# {{{ Main handler peer arrivals. 
-class handle_arrivals(Thread): - # {{{ - - def __init__(self): - Thread.__init__(self) - - def run(self): - #peer_connection_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) - #peer_connection_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #peer_connection_sock.bind(("", listening_port)) # Listen to any interface - #peer_connection_sock.listen(5) - #global peer_connection_sock - while True: - # {{{ Wait for the connection from the peer /PS0/ - - peer_serve_socket, peer = peer_connection_sock.accept() - handle_one_arrival(peer_serve_socket, peer).start() - - - #aquí comienza el thread - ''' - - # {{{ debug - if __debug__: - logger.debug('{}'.format(peer_serve_socket.getsockname()) + - ' Accepted connection from peer ' + - str(peer)) - # }}} - - # }}} - - # {{{ Send the last block to the peer /PS3/ - - # Solicitar un nuevo bloque a Icecast y enviárselo al peer - #block = block_buffer[last_block % buffer_size] - #payload = struct.pack("H1024s", socket.htons(last_block), block) - #peer_serve_socket.sendall(payload) - - # }}} - - # {{{ Send the list of peers to the peer /PS4/ - - # {{{ debug - - if __debug__: - logger.debug('{}'.format(peer_serve_socket.getsockname()) + - ' Sending the list of peers') - # }}} - - message = struct.pack("H", socket.htons(len(peer_list))) - peer_serve_socket.sendall(message) - message = struct.pack( - "4sH", socket.inet_aton(gatherer[IP_ADDR]), - socket.htons(gatherer[PORT])) - peer_serve_socket.sendall(message) - for p in peer_list: - message = struct.pack( - "4sH", socket.inet_aton(p[IP_ADDR]), - socket.htons(p[PORT])) - peer_serve_socket.sendall(message) - - # {{{ debug - - if __debug__: - logger.debug(str(len(peer_list)) + ' peers sent') - - # }}} - - # }}} - - # {{{ Close the TCP socket with the peer/gatherer - - peer_serve_socket.close() - - # }}} - - # Then the first peer arrival, the first entry of the list - # of peers is replaced by the peer. 
- #if peer_list[0] == gatherer: - # peer_list[0] = peer - #else: - #with peer_list_lock: - #peer_list_lock.acquire() - peer_list.append(peer) - #peer_list_lock.release() - unreliability[peer] = 0 - complains[peer] = 0 - - logger.info(Color.cyan + - str(peer) + - ' has joined the cluster' + - Color.none) - ''' - #fin del thread - # }}} - -print("Peer list length: "+str(len(peer_list))) - -handle_arrivals().setDaemon(True) #setting the thread as daemon makes it die when the main process ends. Otherwise, it'd never stop since it runs a while(true). -handle_arrivals().daemon=True -handle_arrivals().start() - -# }}} - -# {{{ Create the socket to send the blocks of stream to the peers/gatherer - -def create_cluster_sock(listening_port): - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # This does not work in Windows systems. - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - #peer_socket.bind(('',peer_connection_sock.getsockname()[PORT])) - - return sock - -cluster_sock = create_cluster_sock(listening_port) - -# }}} - -# {{{ Handle peer/gatherer complains and goodbye messages (Week 10) - -class listen_to_the_cluster(Thread): - # {{{ - - def __init__(self): - Thread.__init__(self) - - def run(self): - - global peer_index - - while True: - # {{{ debug - if __debug__: - logger.debug('waiting for messages from the cluster') - # }}} - message, sender = cluster_sock.recvfrom(struct.calcsize("H")) - - #if len(message) == 0: - if message == 'bye': - try: - peer_list.remove(sender) - logger.info(Color.cyan + - str(sender) + - ' has left the cluster' + - Color.none) - except: - logger.warning(Color.blue + - 'Received a googbye message from ' + - str(sender) + - ' which is not in the list of peers' + - Color.none) - pass - else: - # The sender of the packet complains and the packet - # comes with the index of a lost block - lost_block = struct.unpack("!H",message)[0] - destination = 
destination_of_block[lost_block] - - logger.info(Color.cyan + - str(sender) + - ' complains about lost block ' + - str(lost_block) + - ' sent to ' + - str(destination) + - Color.none) - unreliability[destination] += 1 -'''jalvaro: i'm commenting this so peers are not expeled -#if the sender of the complaint is the gatherer then the splitter removes the infractor inmediately - if sender == gatherer: - try: - peer_list.remove(destination) - del unreliability[destination] - del complains[destination] - - logger.info(Color.cyan + - str(destination) + - ' has been removed' + - Color.none) - except: - pass - - else: - try: - unreliability[destination] += 1 - if unreliability[destination] > len(peer_list): - # Too many complains about an unsuportive peer - peer_list.remove(destination) - del unreliability[destination] - del complains[destination] - - logger.info(Color.cyan + - str(destination) + - ' has been removed' + - Color.none) - - except: - pass -''' - - # }}} - -listen_to_the_cluster().setDaemon(True) -listen_to_the_cluster().daemon=True -listen_to_the_cluster().start() - -# }}} - -# {{{ Connect to the streaming server and request the channel (week 2) - -source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) -source_sock.connect(source) - -# {{{ debug - -if __debug__: - logger.debug('{}'.format(source_sock.getsockname()) + - ' connected to the video source ' + - '{}'.format(source_sock.getpeername())) - -# }}} - -# {{{ Request the video to the source - -GET_message = 'GET /' + channel + " HTTP/1.1\r\n\r\n" -source_sock.sendall(GET_message) - -# }}} - -# {{{ debug - -if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - ' sending the rest of the stream ...') - -# }}} - -# {{{ Feed the peers - -while True: - - # (Week 2) - def receive_next_block(): - # {{{ - - global source_sock - - block = source_sock.recv(block_size) - tries = 0 - while len(block) < block_size: - tries += 1 - if tries > 3: - - # {{{ debug - if __debug__: - 
logger.debug('GET') - # }}} - - time.sleep(1) - source_sock.close() - source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - source_sock.connect(source) - source_sock.sendall(GET_message) - - #block += source_sock.recv(1024-len(block)) - block += source_sock.recv(block_size - len(block)) - return block - - # }}} - - block = receive_next_block() - #block = source_sock.brecv(block_size) - - # {{{ debug - if __debug__: - - logger.debug('{}'.format(source_sock.getsockname()) + - Color.green + ' <- ' + Color.none + - '{}'.format(source_sock.getpeername()) + - ' ' + - '{}'.format(block_number)) - # }}} - print("Block "+str(block_number)+" received") - - #with peer_list_lock: - #peer_list_lock.acquire() - len_peer_list = len(peer_list) - print("Length of peer_list: "+str(len_peer_list)) - #if peer_index < len_peer_list: - try: - peer = peer_list[peer_index] - print("Destinatario peer: "+str(peer)) - except: - try: - peer = peer_list[0] - print("Destinatario peer2: "+str(peer)) - except: - #Commented due to gatherer removal - #peer = gatherer - #len_peer_list = 1 - peer = None - len_peer_list = 1 #should be zero but that would raise a modulo by zero exception - print("No hay peers") - - #peer_list_lock.release() - - # {{{ debug - if __debug__: - logger.debug('{}'.format(cluster_sock.getsockname()) + - Color.green + ' -> ' + Color.none + - '{}'.format(peer) + - ' ' + - '{}'.format(block_number)) - # }}} - - print("peer != None: "+str(peer!=None)) - if peer != None: - print("Sending block "+str(block_number)) - #message = struct.pack("H1024s", socket.htons(block_number), block) - message = struct.pack(block_format_string, socket.htons(block_number), block) - #if not (block_number%2)==0: - cluster_sock.sendto(message, peer) - #print("Block "+str(block_number)+"sent to "+str(peer)) - - # Ojo, a veces peta diciendo: "IndexError: list index out of range" - destination_of_block[block_number % buffer_size] = peer - - peer_index = (peer_index + 1) % len_peer_list - - 
block_number = (block_number + 1) % 65536 - - total_blocks += 1 - - ''' - #decrement unreliability and complaints after every 256 packets - if (block_number % 256) == 0: - for i in unreliability: - unreliability[i] /= 2 - for i in complains: - complains[i] /= 2 - ''' - -# }}} diff --git a/sim-cluster/stop_simulation.sh b/sim-cluster/stop_simulation.sh deleted file mode 100755 index 727da19..0000000 --- a/sim-cluster/stop_simulation.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -#Kill every related process. Don't kill oggfw! -pkill splitter -pkill gatherer -pkill vlc -pkill peer diff --git a/sim-cluster/test_get.py b/sim-cluster/test_get.py deleted file mode 100755 index 22cb149..0000000 --- a/sim-cluster/test_get.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/python -# -*- coding: iso-8859-15 -*- - -# {{{ GNU GENERAL PUBLIC LICENSE - -# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol) -# . -# -# Copyright (C) 2013 Cristobal Medina López, Juan Pablo García Ortiz, -# Juan Alvaro Muñoz Naranjo, Leocadio González Casado and Vicente -# González Ruiz. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# }}} - -# Test the connection with the streaming server. 
- -# {{{ imports - -import logging -import socket -from blocking_TCP_socket import blocking_TCP_socket -import sys -import struct -import time -from threading import Thread -from threading import Lock -from colors import Color -import signal -from time import gmtime, strftime -import os -import argparse - -# }}} - -IP_ADDR = 0 -PORT = 1 - -block_size = 1024 -channel = '134.ogg' -source_name = '150.214.150.68' -source_port = 4551 -listening_port = 4552 - -# {{{ Args handing - -parser = argparse.ArgumentParser(description='This a test_get of a P2PSP cluster.') -parser.add_argument('--block_size', help='Block size in bytes. (Default = {})'.format(block_size)) -parser.add_argument('--channel', help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel)) -parser.add_argument('--source_name', help='Name of the streaming server. (Default = "{}")'.format(source_name)) -parser.add_argument('--source_port', help='Listening port of the streaming server. (Default = {})'.format(source_port)) -parser.add_argument('--listening_port', help='Port to talk with the drain and peers. (Default = {})'.format(listening_port)) - -args = parser.parse_known_args()[0] -if args.block_size: - block_size = int(args.block_size) -if args.channel: - channel = args.channel -if args.source_name: - source_name = args.source_name -if args.source_port: - source_port = int(args.source_port) -if args.listening_port: - listening_port = int(args.listening_port) - -source = (source_name, source_port) - -# {{{ debug -if __debug__: - print 'Running in debug mode' - logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', - datefmt='%H:%M:%S', - level=logging.WARNING) -else: - print 'Running in release mode' - logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s', - datefmt='%H:%M:%S', - level=logging.CRITICAL) -# }}} - -# {{{ The drain is blocked until a player establish a connection. 
(Week 4/6) - -def get_player_socket(): - - # {{{ - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - # In Windows systems this call doesn't work! - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except: - pass - sock.bind(('', listening_port)) - sock.listen(0) - - # {{{ debug - if __debug__: - logging.warning('{}'.format(sock.getsockname()) - + ' Waiting for the player connection ...') - # }}} - - sock, player = sock.accept() - #sock.setblocking(0) - return sock - - # }}} - -player_sock = get_player_socket() - -# }}} - -source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM) -source_sock.connect(source) -source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n") - -header_size = 1000000 - -''' -data = source_sock.recv(header_size) -total_received = len(data) -player_sock.sendall(data) -while total_received < header_size: - if __debug__: - logging.warning('h') - data = source_sock.recv(header_size - len(data)) - player_sock.sendall(data) - total_received += len(data) -''' - - -block = source_sock.brecv(block_size) -total_received = len(block) -player_sock.sendall(block) -print total_received -while total_received < header_size: - if __debug__: - logging.warning(str(len(block))) - block = source_sock.brecv(block_size) - player_sock.sendall(block) - total_received += block_size - diff --git a/sim-cluster/test_p2psp.sh b/sim-cluster/test_p2psp.sh deleted file mode 100755 index 3baa5c5..0000000 --- a/sim-cluster/test_p2psp.sh +++ /dev/null @@ -1,3 +0,0 @@ -xterm -e "./splitter.py" & -xterm -e './gatherer.py --splitter_name="localhost"' & -vlc http://localhost:9999 & diff --git a/src/InvObserver.java b/src/InvObserver.java new file mode 100755 index 0000000..41d1a02 --- /dev/null +++ b/src/InvObserver.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2003-2005 The BISON Project + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License version 2 as 
+ * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +package txrelaysim.src; + +import txrelaysim.src.helpers.*; + +import peersim.config.*; +import peersim.core.*; +import peersim.util.*; + +import java.util.Map; +import java.util.Iterator; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.HashMap; +import java.util.*; + + +public class InvObserver implements Control +{ + /** + * The protocol to operate on. + * @config + */ + private static final String PAR_PROT = "protocol"; + + /** The name of this observer in the configuration */ + private final String name; + + /** Protocol identifier */ + private final int pid; + + /** + * Standard constructor that reads the configuration parameters. + * Invoked by the simulation engine. + * @param name the configuration prefix for this class + */ + public InvObserver(String name) { + this.name = name; + pid = Configuration.getPid(name + "." + PAR_PROT); + } + + public boolean execute() { + // Track how many invs were sent. + ArrayList extraInvs = new ArrayList<>(); + ArrayList shortInvs = new ArrayList<>(); + // Track reconciliation results across experiments. + ArrayList successRecons = new ArrayList<>(); + ArrayList failedRecons = new ArrayList<>(); + // Track how soon transactions were propagating across the network. 
+ HashMap> txArrivalTimes = new HashMap>(); + for(int i = 1; i < Network.size(); i++) { + Peer peer = (Peer) Network.get(i).getProtocol(pid); + extraInvs.add(peer.extraInvs); + shortInvs.add(peer.shortInvs); + + successRecons.add(peer.successRecons); + failedRecons.add(peer.failedRecons); + + Iterator it = peer.txArrivalTimes.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry pair = (Map.Entry)it.next(); + Integer txId = (Integer)pair.getKey(); + Long arrivalTime = (Long)pair.getValue(); + if (txArrivalTimes.get(txId) == null) { + txArrivalTimes.put(txId, new ArrayList<>()); + } + txArrivalTimes.get(txId).add(arrivalTime); + } + } + + // Measure the delays it took to reach the majority of the nodes (based on receipt time). + ArrayList avgTxArrivalDelay = new ArrayList<>(); + Iterator it = txArrivalTimes.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry pair = (Map.Entry)it.next(); + // A workaround to avoid unchecked cast. + ArrayList ar = (ArrayList) pair.getValue(); + ArrayList arrivalTimes = new ArrayList<>(); + for (Object x : ar) { + arrivalTimes.add((Long) x); + } + + if (arrivalTimes.size() < Network.size() - 1) { + // Don't bother printing results if relay is in progress (some nodes didn't receive + // all transactions yet). + System.err.println("Transactions are still propagating"); + return false; + } + Collections.sort(arrivalTimes); + int percentile95Index = (int)(arrivalTimes.size() * 0.95); + Long percentile95delay = (arrivalTimes.get(percentile95Index) - arrivalTimes.get(0)); + avgTxArrivalDelay.add(percentile95delay); + } + + + // Print results. 
+ int allTxs = txArrivalTimes.size(); + + if (allTxs == 0) { + return false; + } + + double avgMaxDelay = avgTxArrivalDelay.stream().mapToLong(val -> val).average().orElse(0.0); + System.out.println("Avg max latency: " + avgMaxDelay); + + double avgExtraInvs = extraInvs.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgExtraInvs / allTxs + " extra inv per tx on average."); + + double avgShortInvs = shortInvs.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgShortInvs / allTxs + " shortInvs per tx on average."); + + double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgSuccessRecons + " successful recons on average."); + + double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgFailedRecons + " failed recons on average."); + + return false; + } +} \ No newline at end of file diff --git a/src/Neighbor.java b/src/Neighbor.java deleted file mode 100644 index 51930e5..0000000 --- a/src/Neighbor.java +++ /dev/null @@ -1,17 +0,0 @@ -package sim.src; - -import peersim.core.Node; - -public class Neighbor { - - private Node node; - - public Neighbor(Node node) { - this.node = node; - } - - public Node getNode() { - return this.node; - } - -} diff --git a/src/Peer.java b/src/Peer.java index c5f8a6d..20fce4b 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -1,148 +1,363 @@ -package sim.src; +package txrelaysim.src; + +import txrelaysim.src.helpers.*; import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; +import java.util.HashMap; +import java.util.Queue; +import java.util.Map; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Random; import peersim.cdsim.CDProtocol; import peersim.config.Configuration; import peersim.config.FastConfig; import peersim.core.Network; import peersim.core.Node; +import peersim.core.CommonState; import peersim.edsim.*; import 
peersim.transport.Transport; public class Peer implements CDProtocol, EDProtocol { + /* System */ public static int pidPeer; - public boolean isPeer = false; - private int bufferSize; - public IntMessage[] buffer; - public ArrayList peerList; - public ArrayList badPeerList; - public boolean isMalicious = false; - public boolean isTrusted = false; + + /* Constants and delays. Reconciliation only! */ + public int inFloodLimit; + public int outFloodLimit; + public int reconciliationInterval; + public int inFloodDelay; + public int outFloodDelay; + public double defaultQ; + + /* State */ + public boolean isReachable = false; + public ArrayList outboundPeers; + public ArrayList inboundPeers; + public ArrayList inboundFloodDestinations; + public ArrayList outboundFloodDestinations; + public HashMap txArrivalTimes; + public HashMap> peerKnowsTxs; + public long nextFloodInbound = 0; + + /* Reconciliation state */ + public boolean reconcile = false; + public Queue reconciliationQueue; + public long nextRecon = 0; + public long nextReconResponse = 0; + private HashMap> reconSets; + + /* Stats */ + public int extraInvs; + public int shortInvs; + public int successRecons; + public int extSuccessRecons; + public int failedRecons; public Peer(String prefix) { - bufferSize = Configuration.getInt(prefix+".buffer_size", 32); - buffer = new IntMessage[bufferSize]; - peerList = new ArrayList(); - badPeerList = new ArrayList(); + inboundPeers = new ArrayList<>(); + outboundPeers = new ArrayList<>(); + inboundFloodDestinations = new ArrayList<>(); + outboundFloodDestinations = new ArrayList<>(); + reconciliationQueue = new LinkedList<>(); + reconSets = new HashMap<>(); + peerKnowsTxs = new HashMap<>(); + txArrivalTimes = new HashMap<>(); + } + + public Object clone() { + return new Peer(""); } - + + @Override + public void nextCycle(Node node, int pid) { + if (reconcile) { + // If reconciliation is enabled on this node, it should periodically request reconciliations + // with a queue 
of its reconciling peers. + long curTime = CommonState.getTime(); + if (reconciliationQueue.peek() != null && curTime > nextRecon) { + Node recipient = reconciliationQueue.poll(); + + SimpleMessage request = new SimpleMessage(SimpleEvent.RECON_REQUEST, node); + ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, request, Peer.pidPeer); + + // Move this node to the end of the queue, schedule the next reconciliation. + reconciliationQueue.offer(recipient); + nextRecon = curTime + reconciliationInterval; + } + } + } + @Override - public void nextCycle(Node node, int pid) {} - - /** - * The last packet FROM THE SOURCE from anyone is resent to everyone - * @Override - */ public void processEvent(Node node, int pid, Object event) { SimpleEvent castedEvent = (SimpleEvent)event; switch (castedEvent.getType()) { - case SimpleEvent.CHUNK: - processChunkMessage(node, pid, (IntMessage)castedEvent); + case SimpleEvent.INV: + // INV received from a peer. + handleInvMessage(node, pid, (IntMessage)castedEvent); break; - case SimpleEvent.PEERLIST: - processPeerlistMessage(node, pid, (ArrayListMessage)castedEvent); + case SimpleEvent.RECON_REQUEST: + // Reconciliation request from a peer. + handleReconRequest(node, pid, (SimpleMessage)castedEvent); break; - case SimpleEvent.HELLO: - processHelloMessage(node, pid, (SimpleMessage)castedEvent); + case SimpleEvent.SKETCH: + // Sketch from a peer in response to reconciliation request. + ArrayListMessage ar = (ArrayListMessage) castedEvent; + ArrayList remoteSet = new ArrayList(); + for (Object x : ar.getArrayList()) { + remoteSet.add((Integer) x); + } + handleSketchMessage(node, pid, ar.getSender(), remoteSet); + break; + case SimpleEvent.SCHEDULED_INV: + // Self-scheduled INV to be sent to a peer. 
+ executeScheduledInv(node, pid, (TupleMessage)castedEvent); break; - case SimpleEvent.GOODBYE: - processGoodbyeMessage(node, pid, (SimpleMessage)castedEvent); - break; - case SimpleEvent.BAD_PEER: - processBadPeerMessage(node, pid, (IntMessage)castedEvent); + case SimpleEvent.SCHEDULED_SKETCH: + // Self-scheduled SKETCH to be sent to a peer. + executeScheduledSketch(node, pid, (SimpleMessage)castedEvent); break; } } - - private void processChunkMessage(Node node, int pid, IntMessage message) { - storeInBuffer(node, message); - if(message.getSender().getIndex() == SourceInitializer.sourceIndex) { //the sender is the source - int latencySum = 0; - for (Neighbor peer : peerList) { - IntMessage chunkMessage = new IntMessage(SimpleEvent.CHUNK, node, message.getInteger() * (this.isMalicious ? -1 : 1)); - latencySum += chunkMessage.getLatency(peer.getNode(), pid); - EDSimulator.add(latencySum, chunkMessage, peer.getNode(), pid); + + // Handle a transaction announcement (INV) from a peer. Remember when the transaction was + // announced, and set it for further relay to other peers. + private void handleInvMessage(Node node, int pid, IntMessage message) { + int txId = message.getInteger(); + Node sender = message.getSender(); + + if (sender.getID() != 0) { + // Came not from source. 
+ peerKnowsTxs.get(sender).add(txId); + if (reconcile) { + removeFromReconSet(node, txId, sender); } + } + + if (!txArrivalTimes.keySet().contains(txId)) { + txArrivalTimes.put(txId, CommonState.getTime()); + relayTx(node, pid, txId, sender); } else { - if (this.isTrusted) { - TupleMessage chunkCheckMessage = new TupleMessage(SimpleEvent.CHUNK_CHECK, node, message.getSender().getIndex(), message.getInteger()); - long latency = chunkCheckMessage.getLatency(Network.get(0), pid); - EDSimulator.add(latency, chunkCheckMessage, Network.get(0), Source.pidSource); + ++extraInvs; + } + } + + private void handleReconRequest(Node node, int pid, SimpleMessage message) { + Node sender = message.getSender(); + + long curTime = CommonState.getTime(); + long delay; + if (nextReconResponse < curTime) { + delay = 0; + // Switch to sketch fanout batching delay. + nextReconResponse = curTime + generateRandomDelay(0); + } else { + delay = nextReconResponse - curTime; + } + SimpleMessage scheduledSketch = new SimpleMessage(SimpleEvent.SCHEDULED_SKETCH, sender); + EDSimulator.add(delay, scheduledSketch, node, Peer.pidPeer); // send to self. + } + + // Handle a sketch a peer sent us in response to our request. All sketch extension logic and + // txId exchange is done here implicitly without actually sending messages, because it can be + // easily modeled and accounted at this node locally. + private void handleSketchMessage(Node node, int pid, Node sender, ArrayList remoteSet) { + Set localSet = reconSets.get(sender); + + // Although diff estimation should happen at the sketch sender side, we do it here because + // it works in our simplified model, to save extra messages. + // To make it more detailed, we could remember the set size at request time here. + int localSetSize = localSet.size(); + int remoteSetSize = remoteSet.size(); + // TODO: Q could be dynamically updated after each reconciliation. 
+ int capacity = Math.abs(localSetSize - remoteSetSize) + (int)(defaultQ * (localSetSize + remoteSetSize)) + 1; + + int shared = 0, usMiss = 0, theyMiss = 0; + // Handle transactions the local (sketch receiving) node doesn't have. + for (Integer txId : remoteSet) { + peerKnowsTxs.get(sender).add(txId); + if (localSet.contains(txId)) { + ++shared; + } else { + ++usMiss; + if (!txArrivalTimes.keySet().contains(txId)) { + // This rarely happens. + txArrivalTimes.put(txId, CommonState.getTime()); + relayTx(node, pid, txId, sender); + } else { + ++extraInvs; + } } - if (!isInBadPeerList(message.getSender().getIndex())) { - addNewNeighbor(message.getSender()); + } + + // Handle transactions which the remote (sketch sending) node doesn't have. + for (Integer txId : localSet) { + if (!remoteSet.contains(txId)) { + scheduleInv(node, 0, sender, txId); + theyMiss++; } } + + // Compute the cost of this sketch exchange. + int diff = usMiss + theyMiss; + if (capacity > diff) { + // Reconciliation succeeded right away. + shortInvs += capacity; // account for sketch + successRecons++; + } else if (capacity * 2 > diff) { + // Reconciliation succeeded after extension. + shortInvs += capacity * 2; // account for sketch and extension + extSuccessRecons++; + } else { + // Reconciliation failed. + shortInvs += capacity * 2; // account for sketch and extension + // Above, we already sent them invs they miss. + // Here, we just account for all the remaining full invs: what we miss, and shared txs. + extraInvs += usMiss + shared; + failedRecons++; + } + localSet.clear(); } - - private void storeInBuffer(Node node, IntMessage message) { - if (!isInBadPeerList(message.getSender().getIndex())) { - buffer[Math.abs(message.getInteger()) % buffer.length] = message; + + // A node previously scheduled a transaction announcement to the peer. Execute it here when + // this function is called by the scheduler. 
+ private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) { + Node recipient = scheduledInv.getX(); + int txId = scheduledInv.getY(); + if (!peerKnowsTxs.get(recipient).contains(txId)) { + peerKnowsTxs.get(recipient).add(txId); + IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId); + ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, inv, Peer.pidPeer); + if (reconcile) { + removeFromReconSet(node, txId, recipient); + } } } - - private boolean isInBadPeerList(int index) { - boolean isInBadPeerList = false; - for (int peer : badPeerList) { - if (peer == index) { - isInBadPeerList = true; - break; + + // A node previously scheduled a sketch transmission to the peer. Execute it here when + // this function is called by the scheduler. + private void executeScheduledSketch(Node node, int pid, SimpleMessage scheduledSketch) { + Node recipient = scheduledSketch.getSender(); + HashSet reconSet = reconSets.get(recipient); + ArrayListMessage sketch = new ArrayListMessage(SimpleEvent.SKETCH, node, new ArrayList(reconSet)); + ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, sketch, Peer.pidPeer); + for (Integer txId: reconSet) { + peerKnowsTxs.get(recipient).add(txId); + } + reconSet.clear(); + } + + private void relayTx(Node node, int pid, int txId, Node sender) { + if (reconcile) { + addToReconSets(node, pid, txId, sender); + } + flood(node, pid, sender, txId); + } + + private void flood(Node node, int pid, Node sender, int txId) { + // Send to inbounds. + for (Node peer : inboundFloodDestinations) { + long curTime = CommonState.getTime(); + // To preserve privacy against inbound observers with multiple connections, + // they share the timer (as in the Bitcoin peer-to-peer layer). 
+ if (nextFloodInbound < curTime) { + scheduleInv(node, 0, peer, txId); + nextFloodInbound = curTime + generateRandomDelay(this.inFloodDelay); + } else { + scheduleInv(node, nextFloodInbound - curTime, peer, txId); } } - return isInBadPeerList; + + // Send to outbounds. + for (Node peer : outboundFloodDestinations) { + long delay = generateRandomDelay(this.outFloodDelay); + scheduleInv(node, delay, peer, txId); + } } - - private void processPeerlistMessage(Node node, int pid, ArrayListMessage message) { - peerList.clear(); - for (Neighbor peer : message.getArrayList()) { - peerList.add(peer); - SimpleMessage helloMessage = new SimpleMessage(SimpleEvent.HELLO, node); - long latency = helloMessage.getLatency(peer.getNode(), pid); - EDSimulator.add(latency, helloMessage, peer.getNode(), pid); + + private void addToReconSets(Node node, int pid, int txId, Node sender) { + for (Node n: reconSets.keySet()) { + if (n != sender) { + reconSets.get(n).add(txId); + } + } + } + + private void removeFromReconSet(Node node, int txId, Node target) { + if (reconSets.get(target).contains(txId)) { + reconSets.get(target).remove(txId); } } - private void processHelloMessage(Node node, int pid, SimpleMessage message) { - addNewNeighbor(message.getSender()); + // We don't announce transactions right away, because usually the delay takes place to make it + // more private. + private void scheduleInv(Node node, long delay, Node recipient, int txId) { + if (recipient.getID() == 0) { + // Don't send to source. + return; + } + + if (peerKnowsTxs.get(recipient).contains(txId)) { + return; + } + TupleMessage scheduledInv = new TupleMessage(SimpleEvent.SCHEDULED_INV, node, recipient, txId); + EDSimulator.add(delay, scheduledInv, node, Peer.pidPeer); // send to self. } - private void processGoodbyeMessage(Node node, int pid, SimpleMessage message) { - // remove neighbor from peerList + // A helper for scheduling events which happen after a random delay. 
+ private long generateRandomDelay(long avgDelay) { + return CommonState.r.nextLong(avgDelay * 2 + 1); + } - - private void addNewNeighbor(Node node) { - boolean isExist = false; - for (Neighbor peer : peerList) { - if (peer.getNode().getID() == node.getID()) { - isExist = true; + + // The following methods are used for setting up the topology. + + public void addInboundPeer(Node inboundPeer) { + boolean alreadyConnected = false; + for (Node existingPeer : inboundPeers) { + if (existingPeer.getID() == inboundPeer.getID()) { + alreadyConnected = true; break; } } - if (!isExist) { - peerList.add(new Neighbor(node)); + if (!alreadyConnected) { + inboundPeers.add(inboundPeer); + if (reconcile) { + reconSets.put(inboundPeer, new HashSet<>()); + if (inboundFloodDestinations.size() < inFloodLimit) { + inboundFloodDestinations.add(inboundPeer); + } + } else { + inboundFloodDestinations.add(inboundPeer); + } + peerKnowsTxs.put(inboundPeer, new HashSet<>()); } } - - private void processBadPeerMessage(Node node, int pid, IntMessage message) { - badPeerList.add(message.getInteger()); - removeNeighbor(message.getInteger()); - } - - private void removeNeighbor(int index) { - Neighbor toRemove = null; - for (Neighbor peer : peerList) { - if (peer.getNode().getIndex() == index) { - toRemove = peer; + + public void addOutboundPeer(Node outboundPeer) { + boolean alreadyConnected = false; + for (Node existingPeer : outboundPeers) { + if (existingPeer.getID() == outboundPeer.getID()) { + alreadyConnected = true; break; } } - peerList.remove(toRemove); + if (!alreadyConnected) { + outboundPeers.add(outboundPeer); + if (reconcile) { + reconciliationQueue.offer(outboundPeer); + reconSets.put(outboundPeer, new HashSet<>()); + if (outboundFloodDestinations.size() < outFloodLimit) { + outboundFloodDestinations.add(outboundPeer); + } + } else { + outboundFloodDestinations.add(outboundPeer); + } + peerKnowsTxs.put(outboundPeer, new HashSet<>()); + } } - - public Object clone() { - return new 
Peer(""); - } } \ No newline at end of file diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index 5e9f088..6402cfe 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -1,4 +1,9 @@ -package sim.src; +package txrelaysim.src; + +import txrelaysim.src.helpers.*; + +import java.util.HashSet; +import java.util.HashMap; import peersim.config.*; import peersim.core.*; @@ -8,50 +13,93 @@ public class PeerInitializer implements Control { private int pid; - private int maliciousCount; - private int trustedCount; - - private static final String PAR_PROT = "protocol"; - private static final String PAR_MALICIOUS_COUNT = "malicious_count"; - private static final String PAR_TRUSTED_COUNT = "trusted_count"; - + private int reachableCount; + private int outPeers; + private int inFloodDelay; + private int outFloodDelay; + + private boolean allReconcile; + // Reconciliation params + private int outFloodPeers; + private int inFloodPeers; + private double defaultQ; + private int reconciliationInterval; + public PeerInitializer(String prefix) { - pid = Configuration.getPid(prefix + "." + PAR_PROT); - maliciousCount = Configuration.getInt(prefix + "." + PAR_MALICIOUS_COUNT); - trustedCount = Configuration.getInt(prefix + "." + PAR_TRUSTED_COUNT); - } - + pid = Configuration.getPid(prefix + "." + "protocol"); + reachableCount = Configuration.getInt(prefix + "." + "reachable_count"); + outPeers = Configuration.getInt(prefix + "." + "out_peers"); + inFloodDelay = Configuration.getInt(prefix + "." + "in_flood_delay"); + outFloodDelay = Configuration.getInt(prefix + "." + "out_flood_delay"); + + allReconcile = Configuration.getBoolean(prefix + "." + "all_reconcile"); + if (allReconcile) { + reconciliationInterval = Configuration.getInt(prefix + "." + "reconciliation_interval"); + outFloodPeers = Configuration.getInt(prefix + "." + "out_flood_peers", outPeers); + inFloodPeers = Configuration.getInt(prefix + "." 
+ "in_flood_peers"); + defaultQ = Configuration.getDouble(prefix + "." + "default_q"); + } + } + @Override public boolean execute() { Peer.pidPeer = pid; - - //set source as not peer - ((Peer)Network.get(SourceInitializer.sourceIndex).getProtocol(pid)).isPeer = false; - - Node source = Network.get(0); - while (maliciousCount > 0) { + + // Set a subset of nodes to be reachable by other nodes. + while (reachableCount > 0) { int r = CommonState.r.nextInt(Network.size() - 1) + 1; - if (!((Peer)Network.get(r).getProtocol(pid)).isMalicious && !((Peer)Network.get(r).getProtocol(pid)).isTrusted) { - ((Peer)Network.get(r).getProtocol(pid)).isMalicious = true; - maliciousCount--; + if (!((Peer)Network.get(r).getProtocol(pid)).isReachable) { + ((Peer)Network.get(r).getProtocol(pid)).isReachable = true; + reachableCount--; } } - while (trustedCount > 0) { - int r = CommonState.r.nextInt(Network.size() - 1) + 1; - if (!((Peer)Network.get(r).getProtocol(pid)).isMalicious && !((Peer)Network.get(r).getProtocol(pid)).isTrusted) { - ((Peer)Network.get(r).getProtocol(pid)).isTrusted = true; - trustedCount--; + + // A list storing who is already connected to who, so that we don't make duplicate conns. + HashMap> peers = new HashMap<>(); + for (int i = 1; i < Network.size(); i++) { + peers.put(i, new HashSet<>()); + // Initial parameters setting for all nodes. + ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelay; + ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelay; + if (allReconcile) { + ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; + ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; + ((Peer)Network.get(i).getProtocol(pid)).inFloodLimit = inFloodPeers; + ((Peer)Network.get(i).getProtocol(pid)).outFloodLimit = outFloodPeers; + ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ; } } + + // Connect all nodes to a limited number of reachable nodes. 
for(int i = 1; i < Network.size(); i++) { - Node node = Network.get(i); - ((Peer)node.getProtocol(pid)).isPeer = true; - SimpleMessage message = new SimpleMessage(SimpleEvent.HELLO, Network.get(i)); - long latency = CommonState.r.nextInt(Network.size()); - EDSimulator.add(latency, message, source, Source.pidSource); + Node curNode = Network.get(i); + int conns = 0; + while (conns < outPeers) { + int randomNodeIndex = CommonState.r.nextInt(Network.size() - 1) + 1; + if (randomNodeIndex == i) { + continue; + } + + Node randomNode = Network.get(randomNodeIndex); + + if (!((Peer)randomNode.getProtocol(pid)).isReachable) { + continue; + } + if (peers.get(i).contains(randomNodeIndex) || peers.get(randomNodeIndex).contains(i)) { + continue; + } + + peers.get(i).add(randomNodeIndex); + peers.get(randomNodeIndex).add(i); + + // Actual connecting. + ((Peer)curNode.getProtocol(pid)).addOutboundPeer(randomNode); + ((Peer)randomNode.getProtocol(pid)).addInboundPeer(curNode); + ++conns; + } } - - + + System.err.println("Initialized peers"); return true; } } \ No newline at end of file diff --git a/src/PeerObserver.java b/src/PeerObserver.java deleted file mode 100755 index 8beafc4..0000000 --- a/src/PeerObserver.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2003-2005 The BISON Project - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - */ - -package sim.src; - -import peersim.config.*; -import peersim.core.*; -import peersim.util.*; - -/** -* Print statistics over a vector. The vector is defined by a protocol, -* specified by {@value #PAR_PROT}, that has to implement -* {@link SingleValue}. -* Statistics printed are: min, max, number of samples, average, variance, -* number of minimal instances, number of maximal instances (using -* {@link IncrementalStats#toString}). -* @see IncrementalStats -*/ -public class PeerObserver implements Control -{ - /** - * The protocol to operate on. - * @config - */ - private static final String PAR_PROT = "protocol"; - - /** The name of this observer in the configuration */ - private final String name; - - /** Protocol identifier */ - private final int pid; - - private int cycle_length; - - /** - * Standard constructor that reads the configuration parameters. - * Invoked by the simulation engine. - * @param name the configuration prefix for this class - */ - public PeerObserver(String name) { - this.name = name; - pid = Configuration.getPid(name + "." + PAR_PROT); - cycle_length = Configuration.getInt("CYCLE"); - } - - public boolean execute() { - Peer peer; - - System.out.println("---------------------------------------------------------------------------------"); - System.out.println("This is PeerObserver. 
Buffers..."); - for(int i = 1; i < Network.size(); i++) { - peer = (Peer) Network.get(i).getProtocol(pid); - if (peer.isMalicious) { - System.out.print("(A)"); - } else if (peer.isTrusted) { - System.out.print("(T)"); - } - System.out.print("Node "+i+" buffer: "); - for(int j = 0; j < peer.buffer.length; j++) { - if(peer.buffer[j] == null) - System.out.print(" | "); - else - System.out.print(peer.buffer[j].getInteger() + " | "); - } - System.out.println(); - System.out.print("Node "+i+" neighbors: "); - for (Neighbor neighbor : peer.peerList) { - if (((Peer)neighbor.getNode().getProtocol(pid)).isMalicious) { - System.out.print("(A)"); - } else if (((Peer)neighbor.getNode().getProtocol(pid)).isTrusted) { - System.out.print("(T)"); - } - System.out.print (neighbor.getNode().getIndex() + ", "); - } - System.out.println(); - } - System.out.println("---------------------------------------------------------------------------------"); - - return false; - } -} \ No newline at end of file diff --git a/src/PoisonedChunksObserver.java b/src/PoisonedChunksObserver.java deleted file mode 100644 index 28a057d..0000000 --- a/src/PoisonedChunksObserver.java +++ /dev/null @@ -1,50 +0,0 @@ -package sim.src; - -import peersim.config.Configuration; -import peersim.core.CommonState; -import peersim.core.Control; -import peersim.core.Network; - -public class PoisonedChunksObserver implements Control { - - private static final String PAR_PROT = "protocol"; - - private String name; - private int pid; - private int cycleLength; - private int poisonedChunks; - - public PoisonedChunksObserver(String name) { - this.name = name; - pid = Configuration.getPid(name + "." 
+ PAR_PROT); - cycleLength = Configuration.getInt("CYCLE"); - poisonedChunks = 0; - } - - @Override - public boolean execute() { - Peer peer; - int currentPoisonedChunks = 0; - for (int i = 1; i < Network.size(); i++) { - peer = (Peer) Network.get(i).getProtocol(pid); - for(int j = 0; j < peer.buffer.length; j++) { - if(peer.buffer[j] == null) { - - } else { - if (peer.buffer[j].getInteger() < 0) { - currentPoisonedChunks++; - } - } - } - } - if (currentPoisonedChunks >= poisonedChunks && CommonState.getEndTime() - CommonState.getTime() > 2 * cycleLength) { - poisonedChunks = currentPoisonedChunks; - } else { - System.out.println("== " + poisonedChunks + " poisoned chunks" + " " + CommonState.getTime() ); - return true; - } - - return false; - } - -} diff --git a/src/SimpleEvent.java b/src/SimpleEvent.java deleted file mode 100644 index 8e4aab3..0000000 --- a/src/SimpleEvent.java +++ /dev/null @@ -1,22 +0,0 @@ -package sim.src; - -public class SimpleEvent { - - public static final int HELLO = 1; - public static final int GOODBYE = 2; - public static final int CHUNK = 3; - public static final int PEERLIST = 4; - public static final int CHUNK_CHECK = 5; - public static final int BAD_PEER = 6; - - private int type; - - public SimpleEvent(int type) { - this.type = type; - } - - public int getType() { - return this.type; - } - -} diff --git a/src/Source.java b/src/Source.java index d80c024..970b96b 100755 --- a/src/Source.java +++ b/src/Source.java @@ -1,11 +1,15 @@ -package sim.src; +package txrelaysim.src; + +import txrelaysim.src.helpers.*; import java.util.ArrayList; import peersim.cdsim.CDProtocol; import peersim.config.FastConfig; +import peersim.config.Configuration; import peersim.core.Network; import peersim.core.Node; +import peersim.core.CommonState; import peersim.edsim.EDProtocol; import peersim.edsim.EDSimulator; import peersim.transport.Transport; @@ -13,129 +17,52 @@ public class Source implements CDProtocol, EDProtocol { - public static int pidSource; - 
+ public static int pidSource; + public static int tps; + public boolean isSource = false; - private int packetIndex = 1; - private int recipientIndex = 1; - private int cycle = 1; - private ArrayList peerList; - + public int txId = 0; + private ArrayList peerList; + public Source(String prefix) { - this.peerList = new ArrayList(); + this.peerList = new ArrayList<>(); } - + @Override public void nextCycle(Node node, int pid) { Node recipient; int nextNodeIndex; - - if(isSource == false) + + if (isSource == false) + return; + + if (CommonState.getEndTime() < CommonState.getTime() + 40 * 1000) { + // if the experiment is over soon, stop issuing transactions and let existing propagate. return; - - if (peerList.size() > 0) { - if (recipientIndex >= peerList.size()) { - recipientIndex = 0; - } - recipient = peerList.get(recipientIndex).getNode(); - //next node in the list - nextNodeIndex = (recipientIndex+1) % peerList.size(); - - //send packet to this node, with nextNodeIndex in the resendTo field - IntMessage chunkMessage = new IntMessage(SimpleEvent.CHUNK, node, packetIndex); - ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, chunkMessage, Peer.pidPeer); - - //for next cycle - packetIndex++; - recipientIndex = nextNodeIndex; } - cycle++; - } - /* - * Returns the regular peer with absolute index "index" - */ - public Peer getPeer(int index) { - Node node = Network.get(index); - //look for the Peer protocol - for(int p = 0; p < node.protocolSize(); p++) - { - if(node.getProtocol(p) instanceof Peer) - return (Peer)node.getProtocol(p); + int randomNumberOfTxs = CommonState.r.nextInt(this.tps * 2); // anything from 0 to tps * 2. 
+ + for (int i = 0; i < randomNumberOfTxs; ++i) { + txId++; + int randomRecipientIndex = CommonState.r.nextInt(peerList.size() - 1) + 1; + recipient = peerList.get(randomRecipientIndex); + IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId); + ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, inv, Peer.pidPeer); } - - return null; - } - - public Object clone() { - return new Source(""); } @Override public void processEvent(Node node, int pid, Object event) { - SimpleEvent castedEvent = (SimpleEvent)event; - switch (castedEvent.getType()) { - case SimpleEvent.HELLO: - processHelloMessage(node, pid, (SimpleMessage)castedEvent); - break; - case SimpleEvent.GOODBYE: - processGoodbyeMessage(node, pid, (SimpleMessage)castedEvent); - break; - case SimpleEvent.CHUNK_CHECK: - processChunkCheckMessage(node, pid, (TupleMessage)castedEvent); - } + return; } - - private void processHelloMessage(Node node, int pid, SimpleMessage receivedMessage) { - ArrayList clone = new ArrayList(); - synchronized (this.peerList) { - for (Neighbor peer : this.peerList) { - clone.add(peer); - } - } - ArrayListMessage message = new ArrayListMessage(SimpleEvent.PEERLIST, node, clone); - Node sender = receivedMessage.getSender(); - - long latency = message.getLatency(sender, pid); - EDSimulator.add(latency, message, sender, Peer.pidPeer); - - peerList.add(new Neighbor(receivedMessage.getSender())); - } - - private void processGoodbyeMessage(Node node, int pid, SimpleMessage receivedMessage) { - Neighbor peerToRemove = null; - for (Neighbor peer : peerList) { - if (peer.getNode().getID() == receivedMessage.getSender().getID()) { - peerToRemove = peer; - break; - } - } - if (peerToRemove != null) { - peerList.remove(peerToRemove); - } - } - - private void processChunkCheckMessage(Node node, int pid, TupleMessage receivedMessage) { - int chunkNum = receivedMessage.getY(); - if (chunkNum < 0) { // poisoned chunk - removeNeighbor(receivedMessage.getX()); - 
IntMessage badPeerMessage = new IntMessage(SimpleEvent.BAD_PEER, node, receivedMessage.getX()); - for (Neighbor peer : peerList) { - long latency = badPeerMessage.getLatency(peer.getNode(), pid); - EDSimulator.add(latency, badPeerMessage, peer.getNode(), Peer.pidPeer); - } - } + + public Object clone() { + return new Source(""); } - - private void removeNeighbor(int index) { - Neighbor toRemove = null; - for (Neighbor peer : peerList) { - if (peer.getNode().getIndex() == index) { - toRemove = peer; - break; - } - } - peerList.remove(toRemove); + + public void addPeer(Node peer) { + peerList.add(peer); } - + } \ No newline at end of file diff --git a/src/SourceInitializer.java b/src/SourceInitializer.java index ebc76c6..fbd7f3f 100755 --- a/src/SourceInitializer.java +++ b/src/SourceInitializer.java @@ -1,4 +1,4 @@ -package sim.src; +package txrelaysim.src; import peersim.config.*; import peersim.core.*; @@ -6,26 +6,39 @@ public class SourceInitializer implements Control { public static final int sourceIndex = 0; - + private static final String PAR_PROT = "protocol"; private final int pid; - + private int tps; + public SourceInitializer(String prefix) { pid = Configuration.getPid(prefix + "." + PAR_PROT); + tps = Configuration.getInt(prefix + ".tps"); } @Override public boolean execute() { - //set the Source pid + // Set the Source pid. Source.pidSource = pid; - - //set node 0 as source + + // Set node 0 as source. ((Source) Network.get(sourceIndex).getProtocol(pid)).isSource = true; - - //set other nodes as not source + ((Source) Network.get(sourceIndex).getProtocol(pid)).tps = tps; + + //set other nodes as not source. for(int i = 1; i < Network.size()-1; i++) ((Source) Network.get(i).getProtocol(pid)).isSource = false; + // Source connects to some nodes. 
+ Node source = Network.get(0); + int sourceConns = 0; + while (sourceConns < 20) { + int randomNodeIndex = CommonState.r.nextInt(Network.size() - 1) + 1; + Node node = Network.get(randomNodeIndex); + ((Source)source.getProtocol(pid)).addPeer(node); + ++sourceConns; + } + return true; } } diff --git a/src/ArrayListMessage.java b/src/helpers/ArrayListMessage.java similarity index 91% rename from src/ArrayListMessage.java rename to src/helpers/ArrayListMessage.java index ace5c53..e08497d 100644 --- a/src/ArrayListMessage.java +++ b/src/helpers/ArrayListMessage.java @@ -1,4 +1,4 @@ -package sim.src; +package txrelaysim.src.helpers; import java.util.ArrayList; @@ -7,12 +7,12 @@ public class ArrayListMessage extends SimpleMessage { private ArrayList arrayList; - + public ArrayListMessage(int type, Node sender, ArrayList arrayList) { super(type, sender); this.arrayList = arrayList; } - + public ArrayList getArrayList() { return this.arrayList; } diff --git a/src/IntMessage.java b/src/helpers/IntMessage.java similarity index 88% rename from src/IntMessage.java rename to src/helpers/IntMessage.java index ff9ca4b..6228a48 100644 --- a/src/IntMessage.java +++ b/src/helpers/IntMessage.java @@ -1,16 +1,16 @@ -package sim.src; +package txrelaysim.src.helpers; import peersim.core.Node; public class IntMessage extends SimpleMessage { - + private int integer; public IntMessage(int type, Node sender, int integer) { super(type, sender); this.integer = integer; } - + public int getInteger() { return this.integer; } diff --git a/src/helpers/SimpleEvent.java b/src/helpers/SimpleEvent.java new file mode 100644 index 0000000..1e7692d --- /dev/null +++ b/src/helpers/SimpleEvent.java @@ -0,0 +1,21 @@ +package txrelaysim.src.helpers; + +public class SimpleEvent { + + public static final int INV = 1; + public static final int RECON_REQUEST = 2; + public static final int SKETCH = 3; + public static final int SCHEDULED_INV = 4; + public static final int SCHEDULED_SKETCH = 5; + + private int 
type; + + public SimpleEvent(int type) { + this.type = type; + } + + public int getType() { + return this.type; + } + +} diff --git a/src/SimpleMessage.java b/src/helpers/SimpleMessage.java similarity index 52% rename from src/SimpleMessage.java rename to src/helpers/SimpleMessage.java index 1897c24..3613488 100644 --- a/src/SimpleMessage.java +++ b/src/helpers/SimpleMessage.java @@ -1,4 +1,4 @@ -package sim.src; +package txrelaysim.src.helpers; import peersim.config.FastConfig; import peersim.core.Node; @@ -7,23 +7,13 @@ public class SimpleMessage extends SimpleEvent { private Node sender; - + public SimpleMessage(int type, Node sender) { super(type); this.sender = sender; } - + public Node getSender() { return this.sender; } - - public long getLatency(Node dest, int pid) { - Node src = this.getSender(); - long latency = ((Transport)src.getProtocol(FastConfig.getTransport(pid))).getLatency(src, dest); - if (this.getType() != SimpleEvent.CHUNK) { - latency = 1; - } - return latency; - } - } \ No newline at end of file diff --git a/src/TupleMessage.java b/src/helpers/TupleMessage.java similarity index 56% rename from src/TupleMessage.java rename to src/helpers/TupleMessage.java index 146ea57..9ebbe07 100644 --- a/src/TupleMessage.java +++ b/src/helpers/TupleMessage.java @@ -1,22 +1,22 @@ -package sim.src; +package txrelaysim.src.helpers; import peersim.core.Node; public class TupleMessage extends SimpleMessage { - - private int x; + + private Node x; private int y; - - public TupleMessage(int type, Node sender, int x, int y) { + + public TupleMessage(int type, Node sender, Node x, int y) { super(type, sender); this.x = x; this.y = y; } - - public int getX() { + + public Node getX() { return this.x; } - + public int getY() { return this.y; } diff --git a/utils/average.py b/utils/average.py deleted file mode 100644 index a1e7e3d..0000000 --- a/utils/average.py +++ /dev/null @@ -1,26 +0,0 @@ -import getopt, sys, re - -def main(argv): - try: - opts, args = 
getopt.getopt(argv, "i:", []) - except getopt.GetoptError as err: - print str(err) - sys.exit(2) - if len(opts) == 0: - sys.exit(2) - - logFile = opts[0][1] - poisonedChunksSum = 0 - experiments = 0 - p = re.compile("==\s(\d+)") - with open(logFile) as f: - for line in f: - m = p.search(line) - if m: - experiments += 1 - poisonedChunksSum += int(m.group(1)) - - print float(poisonedChunksSum) / experiments - -if __name__ == "__main__": - main(sys.argv[1:]) \ No newline at end of file diff --git a/utils/expected.py b/utils/expected.py deleted file mode 100644 index f71e29f..0000000 --- a/utils/expected.py +++ /dev/null @@ -1,46 +0,0 @@ -import sys, getopt - -def C(n, k): - if 0 <= k <= n: - ntok = 1 - ktok = 1 - for t in xrange(1, min(k, n - k) + 1): - ntok *= n - ktok *= t - n -= 1 - return ntok // ktok - else: - return 0 - -def f(n, T): - re = 0.0 - for i in range(1, n-T+1): - re += i * (float(C(n-i-1, T-1)) / C(n-1, T)) - return re - -def g(n, T, A): - re = 0.0 - for i in range(1, A+1): - re += f(n-i+1, T) - return re - -def main(argv): - try: - opts, args = getopt.getopt(argv, "t:a:n:", []) - except getopt.GetoptError as err: - print str(err) - sys.exit(2) - T = 1 - A = 1 - n = 9 - for o, a in opts: - if o == "-t": - T = int(a) - elif o == "-a": - A = int(a) - elif o == "-n": - n = int(a) - print g(n, T, A) - -if __name__ == "__main__": - main(sys.argv[1:]) \ No newline at end of file From 91b54116681e1013dc59faad81d3b7ea4da74250 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Wed, 23 Jun 2021 09:45:08 +0300 Subject: [PATCH 02/18] Rotate flooding destination peers after each reconciliation --- src/Peer.java | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/Peer.java b/src/Peer.java index 20fce4b..7517d67 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -273,11 +273,37 @@ private void flood(Node node, int pid, Node sender, int txId) { } } + // Rotate inbound flood destinations. 
+ if (inFloodLimit != inboundPeers.size() && inboundPeers.size() != 0) { + inboundFloodDestinations.clear(); + for (int i = 0; i < inFloodLimit;) { + int randomIndex = CommonState.r.nextInt(inboundPeers.size()); + Node randomInboundPeer = inboundPeers.get(randomIndex); + if (!inboundFloodDestinations.contains(randomInboundPeer)) { + inboundFloodDestinations.add(randomInboundPeer); + ++i; + } + } + } + // Send to outbounds. for (Node peer : outboundFloodDestinations) { long delay = generateRandomDelay(this.outFloodDelay); scheduleInv(node, delay, peer, txId); } + + // Rotate outbound flood destinations. + if (outFloodLimit != outboundPeers.size() && outboundPeers.size() != 0) { + outboundFloodDestinations.clear(); + for (int i = 0; i < outFloodLimit;) { + int randomIndex = CommonState.r.nextInt(outboundPeers.size()); + Node randomInboundPeer = outboundPeers.get(randomIndex); + if (!outboundFloodDestinations.contains(randomInboundPeer)) { + outboundFloodDestinations.add(randomInboundPeer); + ++i; + } + } + } } private void addToReconSets(Node node, int pid, int txId, Node sender) { From a19bf074a6abac0a9289bce675651ebbd54ace8e Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 19 Jul 2021 08:54:15 +0300 Subject: [PATCH 03/18] fix flooding --- src/InvObserver.java | 3 ++- src/Peer.java | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/InvObserver.java b/src/InvObserver.java index 41d1a02..7093c6c 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -100,7 +100,8 @@ public boolean execute() { if (arrivalTimes.size() < Network.size() - 1) { // Don't bother printing results if relay is in progress (some nodes didn't receive // all transactions yet). 
- System.err.println("Transactions are still propagating"); + System.err.println("Transactions are still propagating: " + + arrivalTimes.size() + " < " + (Network.size() - 1)); return false; } Collections.sort(arrivalTimes); diff --git a/src/Peer.java b/src/Peer.java index 7517d67..8dfbd2a 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -274,7 +274,7 @@ private void flood(Node node, int pid, Node sender, int txId) { } // Rotate inbound flood destinations. - if (inFloodLimit != inboundPeers.size() && inboundPeers.size() != 0) { + if (reconcile && inFloodLimit != inboundPeers.size() && inboundPeers.size() != 0) { inboundFloodDestinations.clear(); for (int i = 0; i < inFloodLimit;) { int randomIndex = CommonState.r.nextInt(inboundPeers.size()); @@ -293,7 +293,7 @@ private void flood(Node node, int pid, Node sender, int txId) { } // Rotate outbound flood destinations. - if (outFloodLimit != outboundPeers.size() && outboundPeers.size() != 0) { + if (reconcile && outFloodLimit != outboundPeers.size() && outboundPeers.size() != 0) { outboundFloodDestinations.clear(); for (int i = 0; i < outFloodLimit;) { int randomIndex = CommonState.r.nextInt(outboundPeers.size()); From 45150c4f53097e096065d4d2bec557c295176e58 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 19 Jul 2021 10:49:29 +0300 Subject: [PATCH 04/18] add private black holes to measure latency impact --- config/config.txt | 1 + src/InvObserver.java | 57 +++++++++++++++++++++++++--------------- src/Peer.java | 9 +++++++ src/PeerInitializer.java | 15 ++++++++++- 4 files changed, 60 insertions(+), 22 deletions(-) diff --git a/config/config.txt b/config/config.txt index 83a7719..a956a18 100755 --- a/config/config.txt +++ b/config/config.txt @@ -57,6 +57,7 @@ init.2.reconciliation_interval 500 init.2.in_flood_peers 2 init.2.out_flood_peers 2 init.2.default_q 0.01 +init.2.private_black_holes_percent = 0 init.sch1 CDScheduler init.sch1.protocol 1 diff --git a/src/InvObserver.java b/src/InvObserver.java index 
7093c6c..788215a 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -65,13 +65,9 @@ public boolean execute() { ArrayList failedRecons = new ArrayList<>(); // Track how soon transactions were propagating across the network. HashMap> txArrivalTimes = new HashMap>(); + int blackHoles = 0; for(int i = 1; i < Network.size(); i++) { Peer peer = (Peer) Network.get(i).getProtocol(pid); - extraInvs.add(peer.extraInvs); - shortInvs.add(peer.shortInvs); - - successRecons.add(peer.successRecons); - failedRecons.add(peer.failedRecons); Iterator it = peer.txArrivalTimes.entrySet().iterator(); while (it.hasNext()) { @@ -83,6 +79,16 @@ public boolean execute() { } txArrivalTimes.get(txId).add(arrivalTime); } + + if (peer.isBlackHole) { + ++blackHoles; + continue; + } + extraInvs.add(peer.extraInvs); + shortInvs.add(peer.shortInvs); + + successRecons.add(peer.successRecons); + failedRecons.add(peer.failedRecons); } // Measure the delays it took to reach majority of the nodes (based on receival time). @@ -93,24 +99,23 @@ public boolean execute() { // A workaround to avoid unchecked cast. ArrayList ar = (ArrayList) pair.getValue(); ArrayList arrivalTimes = new ArrayList<>(); + + if (ar.size() < (Network.size() - 1) * 0.99) { + // Don't bother printing results if relay is in progress (some nodes didn't receive + // the transactions yet). + continue; + } + for (Object x : ar) { arrivalTimes.add((Long) x); } - if (arrivalTimes.size() < Network.size() - 1) { - // Don't bother printing results if relay is in progress (some nodes didn't receive - // all transactions yet). - System.err.println("Transactions are still propagating: " + - arrivalTimes.size() + " < " + (Network.size() - 1)); - return false; - } Collections.sort(arrivalTimes); int percentile95Index = (int)(arrivalTimes.size() * 0.95); Long percentile95delay = (arrivalTimes.get(percentile95Index) - arrivalTimes.get(0)); avgTxArrivalDelay.add(percentile95delay); } - // Print results. 
int allTxs = txArrivalTimes.size(); @@ -118,20 +123,30 @@ public boolean execute() { return false; } + System.err.println("Relayed txs: " + allTxs); + double avgMaxDelay = avgTxArrivalDelay.stream().mapToLong(val -> val).average().orElse(0.0); System.out.println("Avg max latency: " + avgMaxDelay); - double avgExtraInvs = extraInvs.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgExtraInvs / allTxs + " extra inv per tx on average."); + if (blackHoles == 0) { + // Note that black holes are only useful to measure latency + // impact, measuring/comparing bandwidth is currently not supported because it depends + // on how exactly black holes operate (do they reconcile with empty sketches? or drop + // sketches/requests on the floor?). + double avgExtraInvs = extraInvs.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgExtraInvs / allTxs + " extra inv per tx on average."); - double avgShortInvs = shortInvs.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgShortInvs / allTxs + " shortInvs per tx on average."); + double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); + if (avgSuccessRecons > 0) { + System.out.println(avgSuccessRecons + " successful recons on average."); - double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgSuccessRecons + " successful recons on average."); + double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgFailedRecons + " failed recons on average."); - double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgFailedRecons + " failed recons on average."); + double avgShortInvs = shortInvs.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgShortInvs / allTxs + " shortInvs per tx on average."); + } + } return false; } diff --git 
a/src/Peer.java b/src/Peer.java index 8dfbd2a..78dbb83 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -37,6 +37,7 @@ public class Peer implements CDProtocol, EDProtocol /* State */ public boolean isReachable = false; + public boolean isBlackHole = false; public ArrayList outboundPeers; public ArrayList inboundPeers; public ArrayList inboundFloodDestinations; @@ -253,6 +254,14 @@ private void executeScheduledSketch(Node node, int pid, SimpleMessage scheduledS } private void relayTx(Node node, int pid, int txId, Node sender) { + if (isBlackHole) { + // Black holes don't relay. Note that black holes are only useful to measure latency + // impact, measuring/comparing bandwidth is currently not supported because it depends + // on how exactly black holes operate (do they reconcile with empty sketches? or drop + // sketches/requests on the floor?). + return; + } + if (reconcile) { addToReconSets(node, pid, txId, sender); } diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index 6402cfe..aebc983 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -14,6 +14,7 @@ public class PeerInitializer implements Control { private int pid; private int reachableCount; + private int privateBlackHolesPercent; private int outPeers; private int inFloodDelay; private int outFloodDelay; @@ -32,6 +33,7 @@ public PeerInitializer(String prefix) { inFloodDelay = Configuration.getInt(prefix + "." + "in_flood_delay"); outFloodDelay = Configuration.getInt(prefix + "." + "out_flood_delay"); + privateBlackHolesPercent = Configuration.getInt(prefix + "." + "private_black_holes_percent", 0); allReconcile = Configuration.getBoolean(prefix + "." + "all_reconcile"); if (allReconcile) { reconciliationInterval = Configuration.getInt(prefix + "." 
+ "reconciliation_interval"); @@ -45,15 +47,26 @@ public PeerInitializer(String prefix) { public boolean execute() { Peer.pidPeer = pid; + int privateBlackHolesCount = (Network.size() - reachableCount) * privateBlackHolesPercent / 100; // Set a subset of nodes to be reachable by other nodes. while (reachableCount > 0) { int r = CommonState.r.nextInt(Network.size() - 1) + 1; if (!((Peer)Network.get(r).getProtocol(pid)).isReachable) { ((Peer)Network.get(r).getProtocol(pid)).isReachable = true; - reachableCount--; + --reachableCount; } } + System.err.println("Black holes: " + privateBlackHolesCount); + while (privateBlackHolesCount > 0) { + int r = CommonState.r.nextInt(Network.size() - 1) + 1; + if (!((Peer)Network.get(r).getProtocol(pid)).isReachable) { + ((Peer)Network.get(r).getProtocol(pid)).isBlackHole = true; + --privateBlackHolesCount; + } + } + System.err.println("Black holes: " + privateBlackHolesCount); + // A list storing who is already connected to who, so that we don't make duplicate conns. HashMap> peers = new HashMap<>(); for (int i = 1; i < Network.size(); i++) { From b6f6275584a25619c3b8c9a51913675c8e12dae4 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 19 Jul 2021 11:03:30 +0300 Subject: [PATCH 05/18] respond to sketch requests at the same time --- src/Peer.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Peer.java b/src/Peer.java index 78dbb83..9fadf43 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -154,12 +154,13 @@ private void handleReconRequest(Node node, int pid, SimpleMessage message) { long curTime = CommonState.getTime(); long delay; if (nextReconResponse < curTime) { - delay = 0; - // Switch to sketch fanout batching delay. - nextReconResponse = curTime + generateRandomDelay(0); + delay = generateRandomDelay(inFloodDelay / 2); + nextReconResponse = curTime + delay; } else { delay = nextReconResponse - curTime; } + // TODO: it would be more efficient to batch them, and use a loop to check it. 
+ // A loop would help to not store scheduled messages in memory. SimpleMessage scheduledSketch = new SimpleMessage(SimpleEvent.SCHEDULED_SKETCH, sender); EDSimulator.add(delay, scheduledSketch, node, Peer.pidPeer); // send to self. } From 58c11b6837b7c735624343831f2f3f087c5acc43 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Tue, 20 Jul 2021 12:06:59 +0300 Subject: [PATCH 06/18] better bandwidth accounting --- config/config.txt | 2 +- src/InvObserver.java | 16 ++++---- src/Peer.java | 72 ++++++++++++++++++++++++++---------- src/helpers/SimpleEvent.java | 5 ++- 4 files changed, 64 insertions(+), 31 deletions(-) diff --git a/config/config.txt b/config/config.txt index a956a18..124b8fb 100755 --- a/config/config.txt +++ b/config/config.txt @@ -56,7 +56,7 @@ init.2.all_reconcile true init.2.reconciliation_interval 500 init.2.in_flood_peers 2 init.2.out_flood_peers 2 -init.2.default_q 0.01 +init.2.default_q 0.05 init.2.private_black_holes_percent = 0 init.sch1 CDScheduler diff --git a/src/InvObserver.java b/src/InvObserver.java index 788215a..0d6cebe 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -58,8 +58,8 @@ public InvObserver(String name) { public boolean execute() { // Track how many invs were sent. - ArrayList extraInvs = new ArrayList<>(); - ArrayList shortInvs = new ArrayList<>(); + ArrayList invsSent = new ArrayList<>(); + ArrayList shortInvsSent = new ArrayList<>(); // Track reconciliation results across experiments. 
ArrayList successRecons = new ArrayList<>(); ArrayList failedRecons = new ArrayList<>(); @@ -84,8 +84,8 @@ public boolean execute() { ++blackHoles; continue; } - extraInvs.add(peer.extraInvs); - shortInvs.add(peer.shortInvs); + invsSent.add(peer.invsSent); + shortInvsSent.add(peer.shortInvsSent); successRecons.add(peer.successRecons); failedRecons.add(peer.failedRecons); @@ -133,8 +133,8 @@ public boolean execute() { // impact, measuring/comparing bandwidth is currently not supported because it depends // on how exactly black holes operate (do they reconcile with empty sketches? or drop // sketches/requests on the floor?). - double avgExtraInvs = extraInvs.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgExtraInvs / allTxs + " extra inv per tx on average."); + double avgInvsSent = invsSent.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgInvsSent / allTxs + " invs per tx on average."); double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); if (avgSuccessRecons > 0) { @@ -143,8 +143,8 @@ public boolean execute() { double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0); System.out.println(avgFailedRecons + " failed recons on average."); - double avgShortInvs = shortInvs.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgShortInvs / allTxs + " shortInvs per tx on average."); + double avgShortInvsSent = shortInvsSent.stream().mapToInt(val -> val).average().orElse(0.0); + System.out.println(avgShortInvsSent / allTxs + " shortInvs per tx on average."); } } diff --git a/src/Peer.java b/src/Peer.java index 9fadf43..28378b8 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -54,8 +54,9 @@ public class Peer implements CDProtocol, EDProtocol private HashMap> reconSets; /* Stats */ - public int extraInvs; - public int shortInvs; + public int invsSent; + public int shortInvsSent; + public int successRecons; public int 
extSuccessRecons; public int failedRecons; @@ -123,6 +124,10 @@ public void processEvent(Node node, int pid, Object event) { // Self-scheduled SKETCH to be sent to a peer. executeScheduledSketch(node, pid, (SimpleMessage)castedEvent); break; + case SimpleEvent.RECON_FINALIZATION: + // We use this to track how many inv/shortinvs messages were sent for statas. + handleReconFinalization(node, pid, (ArrayListMessage)castedEvent); + break; } } @@ -143,8 +148,6 @@ private void handleInvMessage(Node node, int pid, IntMessage message) { if (!txArrivalTimes.keySet().contains(txId)) { txArrivalTimes.put(txId, CommonState.getTime()); relayTx(node, pid, txId, sender); - } else { - ++extraInvs; } } @@ -170,15 +173,6 @@ private void handleReconRequest(Node node, int pid, SimpleMessage message) { // easily modeled and accounted at this node locally. private void handleSketchMessage(Node node, int pid, Node sender, ArrayList remoteSet) { Set localSet = reconSets.get(sender); - - // Although diff estimation should happen at the sketch sender side, we do it here because - // it works in our simplified model, to save extra messages. - // To make it more detailed, we could remember the set size at request time here. - int localSetSize = localSet.size(); - int remoteSetSize = remoteSet.size(); - // TODO: Q could be dynamicly updated after each reconciliation. - int capacity = Math.abs(localSetSize - remoteSetSize) + (int)(defaultQ * (localSetSize + remoteSetSize)) + 1; - int shared = 0, usMiss = 0, theyMiss = 0; // Handle transactions the local (sketch receiving) node doesn't have. for (Integer txId : remoteSet) { @@ -188,11 +182,13 @@ private void handleSketchMessage(Node node, int pid, Node sender, ArrayList diff) { // Reconciliation succeeded right away. 
- shortInvs += capacity; // account for sketch successRecons++; + theySentShortInvs = capacity; // account for sketch + shortInvsSent += usMiss; + theySentInvs += usMiss; } else if (capacity * 2 > diff) { // Reconciliation succeeded after extension. - shortInvs += capacity * 2; // account for sketch and extension extSuccessRecons++; + theySentShortInvs = capacity * 2; // account for sketch and extension + shortInvsSent += usMiss; + theySentInvs += usMiss; } else { // Reconciliation failed. - shortInvs += capacity * 2; // account for sketch and extension + failedRecons++; + theySentShortInvs = capacity * 2; // account for sketch and extension // Above, we already sent them invs they miss. // Here, we just account for all the remaining full invs: what we miss, and shared txs. - extraInvs += usMiss + shared; - failedRecons++; + // I think ideally the "inefficient" overlap between our set and their set should + // be sent by us, hence the accounting below. + invsSent += shared; + theySentInvs = usMiss; } + + ArrayList finalizationData = new ArrayList(); + finalizationData.add(theySentInvs); + finalizationData.add(theySentShortInvs); + + // System.err.println(theySentShortInvs); + + ArrayListMessage reconFinalization = new ArrayListMessage( + SimpleEvent.RECON_FINALIZATION, node, finalizationData); + ((Transport)sender.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send( + node, sender, reconFinalization, Peer.pidPeer); + localSet.clear(); } + private void handleReconFinalization(Node node, int pid, ArrayListMessage message) { + invsSent += (Integer)message.getArrayList().get(0); + shortInvsSent += (Integer)message.getArrayList().get(1); + } + // A node previously scheduled a transaction announcement to the peer. Execute it here when // this function is called by the scheduler. 
private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) { @@ -235,6 +266,7 @@ private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) peerKnowsTxs.get(recipient).add(txId); IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId); ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, inv, Peer.pidPeer); + ++invsSent; if (reconcile) { removeFromReconSet(node, txId, recipient); } diff --git a/src/helpers/SimpleEvent.java b/src/helpers/SimpleEvent.java index 1e7692d..ea0f68d 100644 --- a/src/helpers/SimpleEvent.java +++ b/src/helpers/SimpleEvent.java @@ -5,8 +5,9 @@ public class SimpleEvent { public static final int INV = 1; public static final int RECON_REQUEST = 2; public static final int SKETCH = 3; - public static final int SCHEDULED_INV = 4; - public static final int SCHEDULED_SKETCH = 5; + public static final int RECON_FINALIZATION = 4; + public static final int SCHEDULED_INV = 5; + public static final int SCHEDULED_SKETCH = 6; private int type; From df0cff6ce3545f2c8cb7d993a420d663cef6f596 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Tue, 20 Jul 2021 12:23:16 +0300 Subject: [PATCH 07/18] Bandwidth accounting for tx traffic It will be later used to see how much bandwidth specific nodes use. --- src/InvObserver.java | 5 +++++ src/Peer.java | 10 ++++++++++ src/helpers/SimpleEvent.java | 11 ++++++----- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/InvObserver.java b/src/InvObserver.java index 0d6cebe..ebb9fd1 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -60,6 +60,7 @@ public boolean execute() { // Track how many invs were sent. ArrayList invsSent = new ArrayList<>(); ArrayList shortInvsSent = new ArrayList<>(); + ArrayList txSent = new ArrayList<>(); // Track reconciliation results across experiments. 
ArrayList successRecons = new ArrayList<>(); ArrayList failedRecons = new ArrayList<>(); @@ -86,6 +87,7 @@ public boolean execute() { } invsSent.add(peer.invsSent); shortInvsSent.add(peer.shortInvsSent); + txSent.add(peer.txSent); successRecons.add(peer.successRecons); failedRecons.add(peer.failedRecons); @@ -136,6 +138,9 @@ public boolean execute() { double avgInvsSent = invsSent.stream().mapToInt(val -> val).average().orElse(0.0); System.out.println(avgInvsSent / allTxs + " invs per tx on average."); + double avgTxSent = txSent.stream().mapToInt(val -> val).average().orElse(0.0); + assert(0.99 < avgTxSent && avgTxSent < 1.01); + double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); if (avgSuccessRecons > 0) { System.out.println(avgSuccessRecons + " successful recons on average."); diff --git a/src/Peer.java b/src/Peer.java index 28378b8..883ef5f 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -56,6 +56,7 @@ public class Peer implements CDProtocol, EDProtocol /* Stats */ public int invsSent; public int shortInvsSent; + public int txSent; public int successRecons; public int extSuccessRecons; @@ -128,6 +129,11 @@ public void processEvent(Node node, int pid, Object event) { // We use this to track how many inv/shortinvs messages were sent for statas. handleReconFinalization(node, pid, (ArrayListMessage)castedEvent); break; + case SimpleEvent.GETDATA: + // We use this just for bandwidth accounting, the actual txId (what we need) was already + // commnunicated so nothing to do here. 
+ ++txSent; + break; } } @@ -146,6 +152,8 @@ private void handleInvMessage(Node node, int pid, IntMessage message) { } if (!txArrivalTimes.keySet().contains(txId)) { + SimpleMessage getdata = new SimpleMessage(SimpleEvent.GETDATA, node); + ((Transport)sender.getProtocol(FastConfig.getTransport(pid))).send(node, sender, getdata, Peer.pidPeer); txArrivalTimes.put(txId, CommonState.getTime()); relayTx(node, pid, txId, sender); } @@ -182,6 +190,8 @@ private void handleSketchMessage(Node node, int pid, Node sender, ArrayList Date: Tue, 20 Jul 2021 14:15:57 +0300 Subject: [PATCH 08/18] Bandwidth accounting across types of nodes --- src/InvObserver.java | 49 +++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/src/InvObserver.java b/src/InvObserver.java index ebb9fd1..14f5fbb 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -57,16 +57,17 @@ public InvObserver(String name) { } public boolean execute() { - // Track how many invs were sent. - ArrayList invsSent = new ArrayList<>(); - ArrayList shortInvsSent = new ArrayList<>(); - ArrayList txSent = new ArrayList<>(); + // Track how many invs were sent. Reachable nodes are tracked by [0], private are tracked + // by [1]. + int[] invsSent = new int[2]; + int[] shortInvsSent = new int[2]; + int[] txSent = new int[2]; // Track reconciliation results across experiments. ArrayList successRecons = new ArrayList<>(); ArrayList failedRecons = new ArrayList<>(); // Track how soon transactions were propagating across the network. 
HashMap> txArrivalTimes = new HashMap>(); - int blackHoles = 0; + int blackHoles = 0, reachableNodes = 0; for(int i = 1; i < Network.size(); i++) { Peer peer = (Peer) Network.get(i).getProtocol(pid); @@ -85,9 +86,17 @@ public boolean execute() { ++blackHoles; continue; } - invsSent.add(peer.invsSent); - shortInvsSent.add(peer.shortInvsSent); - txSent.add(peer.txSent); + + if (peer.isReachable) { + invsSent[0] += peer.invsSent; + shortInvsSent[0] += peer.shortInvsSent; + txSent[0] += peer.txSent; + ++reachableNodes; + } else { + invsSent[1] += peer.invsSent; + shortInvsSent[1] += peer.shortInvsSent; + txSent[1] += peer.txSent; + } successRecons.add(peer.successRecons); failedRecons.add(peer.failedRecons); @@ -135,11 +144,20 @@ public boolean execute() { // impact, measuring/comparing bandwidth is currently not supported because it depends // on how exactly black holes operate (do they reconcile with empty sketches? or drop // sketches/requests on the floor?). - double avgInvsSent = invsSent.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgInvsSent / allTxs + " invs per tx on average."); - double avgTxSent = txSent.stream().mapToInt(val -> val).average().orElse(0.0); - assert(0.99 < avgTxSent && avgTxSent < 1.01); + System.out.println("Total bandwidth per tx:"); + System.out.println("INV items: " + (invsSent[0] + invsSent[1] + + (shortInvsSent[0] + shortInvsSent[1]) * 0.25) / allTxs / (Network.size() - 1)); + System.out.println("TX items: " + (txSent[0] + txSent[1]) * 1.0 / allTxs / (Network.size() - 1)); + + System.out.println("An average reachable node spends the following bandwidth per tx:"); + System.out.println("INV items: " + (invsSent[0] + shortInvsSent[0] * 0.25) / allTxs / reachableNodes); + System.out.println("TX items: " + txSent[0] * 1.0 / allTxs / reachableNodes); + + int privateNodes = Network.size() - 1 - reachableNodes; + System.out.println("An average private node spends the following bandwidth per tx:"); + 
System.out.println("INV items: " + (invsSent[1] + shortInvsSent[1] * 0.25) / allTxs / privateNodes); + System.out.println("TX items: " + txSent[1] * 1.0 / allTxs / privateNodes); double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); if (avgSuccessRecons > 0) { @@ -147,12 +165,9 @@ public boolean execute() { double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0); System.out.println(avgFailedRecons + " failed recons on average."); - - double avgShortInvsSent = shortInvsSent.stream().mapToInt(val -> val).average().orElse(0.0); - System.out.println(avgShortInvsSent / allTxs + " shortInvs per tx on average."); } } - + System.err.println(""); return false; } -} \ No newline at end of file +} From c2c2b10e0464cc843ac51ee5cf0b1b22b4cbfd7f Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Thu, 29 Jul 2021 09:34:43 +0300 Subject: [PATCH 09/18] Various fixes --- config/config.txt | 12 +-- src/InvObserver.java | 13 ++-- src/Peer.java | 158 ++++++++++++++------------------------- src/PeerInitializer.java | 12 +-- 4 files changed, 77 insertions(+), 118 deletions(-) diff --git a/config/config.txt b/config/config.txt index 124b8fb..3fb2301 100755 --- a/config/config.txt +++ b/config/config.txt @@ -14,7 +14,7 @@ MAXDELAY 100 DROP 0 random.seed 9098797865656766578567 -network.size 20000 +network.size 15000 simulation.endtime CYCLE*CYCLES simulation.logtime CYCLE * 50 simulation.experiments 1 @@ -48,15 +48,15 @@ init.1.tps 7 init.2 txrelaysim.src.PeerInitializer init.2.protocol 2 -init.2.reachable_count 2000 init.2.out_peers 8 +init.2.reachable_count 1500 init.2.in_flood_delay 2000 init.2.out_flood_delay 1000 init.2.all_reconcile true -init.2.reconciliation_interval 500 -init.2.in_flood_peers 2 -init.2.out_flood_peers 2 -init.2.default_q 0.05 +init.2.reconciliation_interval 1000 +init.2.in_flood_peers_percent 10 +init.2.out_flood_peers_percent 10 +init.2.default_q 0.25 init.2.private_black_holes_percent = 0 
init.sch1 CDScheduler diff --git a/src/InvObserver.java b/src/InvObserver.java index 14f5fbb..ad120d5 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -148,16 +148,17 @@ public boolean execute() { System.out.println("Total bandwidth per tx:"); System.out.println("INV items: " + (invsSent[0] + invsSent[1] + (shortInvsSent[0] + shortInvsSent[1]) * 0.25) / allTxs / (Network.size() - 1)); + System.out.println("Of them short invs: " + ((shortInvsSent[0] + shortInvsSent[1]) * 0.25) / allTxs / (Network.size() - 1)); System.out.println("TX items: " + (txSent[0] + txSent[1]) * 1.0 / allTxs / (Network.size() - 1)); - System.out.println("An average reachable node spends the following bandwidth per tx:"); - System.out.println("INV items: " + (invsSent[0] + shortInvsSent[0] * 0.25) / allTxs / reachableNodes); - System.out.println("TX items: " + txSent[0] * 1.0 / allTxs / reachableNodes); + System.out.println("An average reachable node spends the following bandwidth:"); + System.out.println("INV items: " + (invsSent[0] + shortInvsSent[0] * 0.25)); + System.out.println("TX items: " + txSent[0]); int privateNodes = Network.size() - 1 - reachableNodes; - System.out.println("An average private node spends the following bandwidth per tx:"); - System.out.println("INV items: " + (invsSent[1] + shortInvsSent[1] * 0.25) / allTxs / privateNodes); - System.out.println("TX items: " + txSent[1] * 1.0 / allTxs / privateNodes); + System.out.println("An average private node spends the following bandwidth:"); + System.out.println("INV items: " + (invsSent[1] + shortInvsSent[1] * 0.25)); + System.out.println("TX items: " + txSent[1]); double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); if (avgSuccessRecons > 0) { diff --git a/src/Peer.java b/src/Peer.java index 883ef5f..f8547a6 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -28,8 +28,8 @@ public class Peer implements CDProtocol, EDProtocol public static int pidPeer; /* Constants and 
delays. Reconciliation only! */ - public int inFloodLimit; - public int outFloodLimit; + public double inFloodLimitPercent; + public double outFloodLimitPercent; public int reconciliationInterval; public int inFloodDelay; public int outFloodDelay; @@ -40,8 +40,6 @@ public class Peer implements CDProtocol, EDProtocol public boolean isBlackHole = false; public ArrayList outboundPeers; public ArrayList inboundPeers; - public ArrayList inboundFloodDestinations; - public ArrayList outboundFloodDestinations; public HashMap txArrivalTimes; public HashMap> peerKnowsTxs; public long nextFloodInbound = 0; @@ -50,7 +48,7 @@ public class Peer implements CDProtocol, EDProtocol public boolean reconcile = false; public Queue reconciliationQueue; public long nextRecon = 0; - public long nextReconResponse = 0; + private HashMap nextReconResponse; private HashMap> reconSets; /* Stats */ @@ -65,12 +63,11 @@ public class Peer implements CDProtocol, EDProtocol public Peer(String prefix) { inboundPeers = new ArrayList<>(); outboundPeers = new ArrayList<>(); - inboundFloodDestinations = new ArrayList<>(); - outboundFloodDestinations = new ArrayList<>(); reconciliationQueue = new LinkedList<>(); reconSets = new HashMap<>(); peerKnowsTxs = new HashMap<>(); txArrivalTimes = new HashMap<>(); + nextReconResponse = new HashMap<>(); } public Object clone() { @@ -121,10 +118,6 @@ public void processEvent(Node node, int pid, Object event) { // Self-scheduled INV to be sent to a peer. executeScheduledInv(node, pid, (TupleMessage)castedEvent); break; - case SimpleEvent.SCHEDULED_SKETCH: - // Self-scheduled SKETCH to be sent to a peer. - executeScheduledSketch(node, pid, (SimpleMessage)castedEvent); - break; case SimpleEvent.RECON_FINALIZATION: // We use this to track how many inv/shortinvs messages were sent for statas. 
handleReconFinalization(node, pid, (ArrayListMessage)castedEvent); @@ -163,17 +156,13 @@ private void handleReconRequest(Node node, int pid, SimpleMessage message) { Node sender = message.getSender(); long curTime = CommonState.getTime(); - long delay; - if (nextReconResponse < curTime) { - delay = generateRandomDelay(inFloodDelay / 2); - nextReconResponse = curTime + delay; - } else { - delay = nextReconResponse - curTime; + HashSet reconSet = reconSets.get(sender); + ArrayListMessage sketch = new ArrayListMessage(SimpleEvent.SKETCH, node, new ArrayList(reconSet)); + ((Transport)sender.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, sender, sketch, Peer.pidPeer); + for (Integer txId: reconSet) { + peerKnowsTxs.get(sender).add(txId); } - // TODO: it would be more efficient to batch them, and use a loop to check it. - // A loop would help to not store scheduled messages in memory. - SimpleMessage scheduledSketch = new SimpleMessage(SimpleEvent.SCHEDULED_SKETCH, sender); - EDSimulator.add(delay, scheduledSketch, node, Peer.pidPeer); // send to self. + reconSet.clear(); } // Handle a sketch a peer sent us in response to our request. 
All sketch extension logic and @@ -206,8 +195,10 @@ private void handleSketchMessage(Node node, int pid, Node sender, ArrayList reconSet = reconSets.get(recipient); - ArrayListMessage sketch = new ArrayListMessage(SimpleEvent.SKETCH, node, new ArrayList(reconSet)); - ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, sketch, Peer.pidPeer); - for (Integer txId: reconSet) { - peerKnowsTxs.get(recipient).add(txId); + if (flood) { + IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId); + ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, inv, Peer.pidPeer); + ++invsSent; + } } - reconSet.clear(); } private void relayTx(Node node, int pid, int txId, Node sender) { @@ -305,65 +309,27 @@ private void relayTx(Node node, int pid, int txId, Node sender) { return; } - if (reconcile) { - addToReconSets(node, pid, txId, sender); - } - flood(node, pid, sender, txId); - } - - private void flood(Node node, int pid, Node sender, int txId) { - // Send to inbounds. - for (Node peer : inboundFloodDestinations) { - long curTime = CommonState.getTime(); - // To preserve privacy against inbound observers with multiple connections, - // they share the timer (as in the Bitcoin peer-to-peer layer). - if (nextFloodInbound < curTime) { - scheduleInv(node, 0, peer, txId); - nextFloodInbound = curTime + generateRandomDelay(this.inFloodDelay); - } else { - scheduleInv(node, nextFloodInbound - curTime, peer, txId); - } + // Send to inbounds (flood or recon). + // To preserve privacy against inbound observers with multiple connections, + // they share the timer (as in the Bitcoin peer-to-peer layer). + long delay; + long curTime = CommonState.getTime(); + if (nextFloodInbound < curTime) { + nextFloodInbound = curTime + generateRandomDelay(this.inFloodDelay); + delay = 0; + } else { + delay = nextFloodInbound - curTime; } - // Rotate inbound flood destinations. 
- if (reconcile && inFloodLimit != inboundPeers.size() && inboundPeers.size() != 0) { - inboundFloodDestinations.clear(); - for (int i = 0; i < inFloodLimit;) { - int randomIndex = CommonState.r.nextInt(inboundPeers.size()); - Node randomInboundPeer = inboundPeers.get(randomIndex); - if (!inboundFloodDestinations.contains(randomInboundPeer)) { - inboundFloodDestinations.add(randomInboundPeer); - ++i; - } - } + for (Node peer : inboundPeers) { + scheduleInv(node, delay, peer, txId); } // Send to outbounds. - for (Node peer : outboundFloodDestinations) { - long delay = generateRandomDelay(this.outFloodDelay); + for (Node peer : outboundPeers) { + delay = generateRandomDelay(this.outFloodDelay); scheduleInv(node, delay, peer, txId); } - - // Rotate outbound flood destinations. - if (reconcile && outFloodLimit != outboundPeers.size() && outboundPeers.size() != 0) { - outboundFloodDestinations.clear(); - for (int i = 0; i < outFloodLimit;) { - int randomIndex = CommonState.r.nextInt(outboundPeers.size()); - Node randomInboundPeer = outboundPeers.get(randomIndex); - if (!outboundFloodDestinations.contains(randomInboundPeer)) { - outboundFloodDestinations.add(randomInboundPeer); - ++i; - } - } - } - } - - private void addToReconSets(Node node, int pid, int txId, Node sender) { - for (Node n: reconSets.keySet()) { - if (n != sender) { - reconSets.get(n).add(txId); - } - } } private void removeFromReconSet(Node node, int txId, Node target) { @@ -406,11 +372,7 @@ public void addInboundPeer(Node inboundPeer) { inboundPeers.add(inboundPeer); if (reconcile) { reconSets.put(inboundPeer, new HashSet<>()); - if (inboundFloodDestinations.size() < inFloodLimit) { - inboundFloodDestinations.add(inboundPeer); - } - } else { - inboundFloodDestinations.add(inboundPeer); + nextReconResponse.put(inboundPeer, Long.valueOf(0)); } peerKnowsTxs.put(inboundPeer, new HashSet<>()); } @@ -429,11 +391,7 @@ public void addOutboundPeer(Node outboundPeer) { if (reconcile) { 
reconciliationQueue.offer(outboundPeer); reconSets.put(outboundPeer, new HashSet<>()); - if (outboundFloodDestinations.size() < outFloodLimit) { - outboundFloodDestinations.add(outboundPeer); - } - } else { - outboundFloodDestinations.add(outboundPeer); + nextReconResponse.put(outboundPeer, Long.valueOf(0)); } peerKnowsTxs.put(outboundPeer, new HashSet<>()); } diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index aebc983..ca1e43f 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -21,8 +21,8 @@ public class PeerInitializer implements Control private boolean allReconcile; // Reconciliation params - private int outFloodPeers; - private int inFloodPeers; + private double outFloodPeersPercent; + private double inFloodPeersPercent; private double defaultQ; private int reconciliationInterval; @@ -37,8 +37,8 @@ public PeerInitializer(String prefix) { allReconcile = Configuration.getBoolean(prefix + "." + "all_reconcile"); if (allReconcile) { reconciliationInterval = Configuration.getInt(prefix + "." + "reconciliation_interval"); - outFloodPeers = Configuration.getInt(prefix + "." + "out_flood_peers", outPeers); - inFloodPeers = Configuration.getInt(prefix + "." + "in_flood_peers"); + outFloodPeersPercent = Configuration.getDouble(prefix + "." + "out_flood_peers_percent"); + inFloodPeersPercent = Configuration.getDouble(prefix + "." + "in_flood_peers_percent"); defaultQ = Configuration.getDouble(prefix + "." 
+ "default_q"); } } @@ -77,8 +77,8 @@ public boolean execute() { if (allReconcile) { ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; - ((Peer)Network.get(i).getProtocol(pid)).inFloodLimit = inFloodPeers; - ((Peer)Network.get(i).getProtocol(pid)).outFloodLimit = outFloodPeers; + ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = inFloodPeersPercent; + ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = outFloodPeersPercent; ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ; } } From 613620e5c6061059c71ada0c1e04d47f3204e7ee Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Thu, 13 Jan 2022 16:00:27 +0200 Subject: [PATCH 10/18] Rework the way we pick flood directions Also some refactoring and comments. --- src/Peer.java | 96 +++++++++++++---------------------- src/PeerInitializer.java | 14 ++--- src/helpers/TupleMessage.java | 7 ++- 3 files changed, 49 insertions(+), 68 deletions(-) diff --git a/src/Peer.java b/src/Peer.java index f8547a6..8e67215 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -11,6 +11,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.Random; +import java.util.Collections; import peersim.cdsim.CDProtocol; import peersim.config.Configuration; @@ -48,7 +49,6 @@ public class Peer implements CDProtocol, EDProtocol public boolean reconcile = false; public Queue reconciliationQueue; public long nextRecon = 0; - private HashMap nextReconResponse; private HashMap> reconSets; /* Stats */ @@ -67,7 +67,6 @@ public Peer(String prefix) { reconSets = new HashMap<>(); peerKnowsTxs = new HashMap<>(); txArrivalTimes = new HashMap<>(); - nextReconResponse = new HashMap<>(); } public Object clone() { @@ -263,36 +262,19 @@ private void handleReconFinalization(Node node, int pid, ArrayListMessage messag private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) { Node recipient = 
scheduledInv.getX(); int txId = scheduledInv.getY(); + boolean shouldFlood = scheduledInv.getZ(); if (!peerKnowsTxs.get(recipient).contains(txId)) { peerKnowsTxs.get(recipient).add(txId); - Boolean flood = false; if (reconcile) { - int indexAmongOutbounds = outboundPeers.indexOf(recipient); - int indexAmongInbounds = inboundPeers.indexOf(recipient); - if (indexAmongOutbounds != -1) { - if (outFloodLimitPercent != 0) { - int reverseFloodProbability = (int)(100.0 / outFloodLimitPercent); - if ((indexAmongOutbounds % reverseFloodProbability) == (txId % reverseFloodProbability)) flood = true; - } - } else if (indexAmongInbounds != -1) { - if (inFloodLimitPercent != 0) { - int reverseFloodProbability = (int)(100.0 / inFloodLimitPercent); - if ((indexAmongInbounds % reverseFloodProbability) == (txId % reverseFloodProbability)) flood = true; - } - } - - if (flood) { + if (shouldFlood) { removeFromReconSet(node, txId, recipient); } else { reconSets.get(recipient).add(txId); } - // System.err.println(flood); - } else { - flood = true; } - if (flood) { + if (shouldFlood) { IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId); ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, inv, Peer.pidPeer); ++invsSent; @@ -321,14 +303,29 @@ private void relayTx(Node node, int pid, int txId, Node sender) { delay = nextFloodInbound - curTime; } + // Send to inbounds. + int inboundFloodTargets = (int)(inboundPeers.size() * inFloodLimitPercent / 100); + Collections.shuffle(inboundPeers); for (Node peer : inboundPeers) { - scheduleInv(node, delay, peer, txId); + boolean shouldFlood = false; + if (inboundFloodTargets > 0) { + shouldFlood = true; + inboundFloodTargets--; + } + scheduleInv(node, delay, peer, txId, shouldFlood); } // Send to outbounds. 
+ int outboundFloodTargets = (int)(outboundPeers.size() * outFloodLimitPercent / 100); + Collections.shuffle(outboundPeers); for (Node peer : outboundPeers) { delay = generateRandomDelay(this.outFloodDelay); - scheduleInv(node, delay, peer, txId); + boolean shouldFlood = false; + if (outboundFloodTargets > 0) { + shouldFlood = true; + outboundFloodTargets--; + } + scheduleInv(node, delay, peer, txId, shouldFlood); } } @@ -340,7 +337,7 @@ private void removeFromReconSet(Node node, int txId, Node target) { // We don't announce transactions right away, because usually the delay takes place to make it // more private. - private void scheduleInv(Node node, long delay, Node recipient, int txId) { + private void scheduleInv(Node node, long delay, Node recipient, int txId, boolean shouldFlood) { if (recipient.getID() == 0) { // Don't send to source. return; @@ -349,7 +346,7 @@ private void scheduleInv(Node node, long delay, Node recipient, int txId) { if (peerKnowsTxs.get(recipient).contains(txId)) { return; } - TupleMessage scheduledInv = new TupleMessage(SimpleEvent.SCHEDULED_INV, node, recipient, txId); + TupleMessage scheduledInv = new TupleMessage(SimpleEvent.SCHEDULED_INV, node, recipient, txId, shouldFlood); EDSimulator.add(delay, scheduledInv, node, Peer.pidPeer); // send to self. } @@ -358,42 +355,19 @@ private long generateRandomDelay(long avgDelay) { return CommonState.r.nextLong(avgDelay * 2 + 1); } - // The following methods used for setting up the topology. 
- - public void addInboundPeer(Node inboundPeer) { - boolean alreadyConnected = false; - for (Node existingPeer : inboundPeers) { - if (existingPeer.getID() == inboundPeer.getID()) { - alreadyConnected = true; - break; - } - } - if (!alreadyConnected) { - inboundPeers.add(inboundPeer); - if (reconcile) { - reconSets.put(inboundPeer, new HashSet<>()); - nextReconResponse.put(inboundPeer, Long.valueOf(0)); - } - peerKnowsTxs.put(inboundPeer, new HashSet<>()); - } - } - - public void addOutboundPeer(Node outboundPeer) { - boolean alreadyConnected = false; - for (Node existingPeer : outboundPeers) { - if (existingPeer.getID() == outboundPeer.getID()) { - alreadyConnected = true; - break; - } + // Used for setting up the topology. + public void addPeer(Node peer, boolean outbound) { + if (outbound) { + if (outboundPeers.contains(peer)) return; + outboundPeers.add(peer); + } else { + if (inboundPeers.contains(peer)) return; + inboundPeers.add(peer); } - if (!alreadyConnected) { - outboundPeers.add(outboundPeer); - if (reconcile) { - reconciliationQueue.offer(outboundPeer); - reconSets.put(outboundPeer, new HashSet<>()); - nextReconResponse.put(outboundPeer, Long.valueOf(0)); - } - peerKnowsTxs.put(outboundPeer, new HashSet<>()); + peerKnowsTxs.put(peer, new HashSet<>()); + if (reconcile) { + if (outbound) { reconciliationQueue.offer(peer); } + reconSets.put(peer, new HashSet<>()); } } } \ No newline at end of file diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index ca1e43f..52579d1 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -32,14 +32,13 @@ public PeerInitializer(String prefix) { outPeers = Configuration.getInt(prefix + "." + "out_peers"); inFloodDelay = Configuration.getInt(prefix + "." + "in_flood_delay"); outFloodDelay = Configuration.getInt(prefix + "." + "out_flood_delay"); - privateBlackHolesPercent = Configuration.getInt(prefix + "." 
+ "private_black_holes_percent", 0); allReconcile = Configuration.getBoolean(prefix + "." + "all_reconcile"); if (allReconcile) { reconciliationInterval = Configuration.getInt(prefix + "." + "reconciliation_interval"); + defaultQ = Configuration.getDouble(prefix + "." + "default_q"); outFloodPeersPercent = Configuration.getDouble(prefix + "." + "out_flood_peers_percent"); inFloodPeersPercent = Configuration.getDouble(prefix + "." + "in_flood_peers_percent"); - defaultQ = Configuration.getDouble(prefix + "." + "default_q"); } } @@ -75,11 +74,14 @@ public boolean execute() { ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelay; ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelay; if (allReconcile) { - ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; - ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = inFloodPeersPercent; ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = outFloodPeersPercent; + ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; + ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ; + } else { + ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = 100; + ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = 100; } } @@ -106,8 +108,8 @@ public boolean execute() { peers.get(randomNodeIndex).add(i); // Actual connecting. 
- ((Peer)curNode.getProtocol(pid)).addOutboundPeer(randomNode); - ((Peer)randomNode.getProtocol(pid)).addInboundPeer(curNode); + ((Peer)curNode.getProtocol(pid)).addPeer(randomNode, true); + ((Peer)randomNode.getProtocol(pid)).addPeer(curNode, false); ++conns; } } diff --git a/src/helpers/TupleMessage.java b/src/helpers/TupleMessage.java index 9ebbe07..a8d7c31 100644 --- a/src/helpers/TupleMessage.java +++ b/src/helpers/TupleMessage.java @@ -6,11 +6,13 @@ public class TupleMessage extends SimpleMessage { private Node x; private int y; + private boolean z; - public TupleMessage(int type, Node sender, Node x, int y) { + public TupleMessage(int type, Node sender, Node x, int y, boolean z) { super(type, sender); this.x = x; this.y = y; + this.z = z; } public Node getX() { @@ -21,4 +23,7 @@ public int getY() { return this.y; } + public boolean getZ() { + return this.z; + } } From db2e7182590119c5a72ae213143d324b027a62f6 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Thu, 13 Jan 2022 16:44:45 +0200 Subject: [PATCH 11/18] Add assert --- src/Peer.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Peer.java b/src/Peer.java index 8e67215..395f97b 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -358,10 +358,10 @@ private long generateRandomDelay(long avgDelay) { // Used for setting up the topology. 
public void addPeer(Node peer, boolean outbound) { if (outbound) { - if (outboundPeers.contains(peer)) return; + assert(!outboundPeers.contains(peer)); outboundPeers.add(peer); } else { - if (inboundPeers.contains(peer)) return; + assert(!inboundPeers.contains(peer)); inboundPeers.add(peer); } peerKnowsTxs.put(peer, new HashSet<>()); From 1c176f168fdc91e877a83e064e53c4a88f943361 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Thu, 13 Jan 2022 16:45:33 +0200 Subject: [PATCH 12/18] Allow a subset (%) of peers to support reconciliation Co-authored-by: glozow --- config/config.txt | 16 ++++++++++------ src/Peer.java | 40 +++++++++++++++++++++++++++++++++++---- src/PeerInitializer.java | 41 ++++++++++++++++++++++++++-------------- 3 files changed, 73 insertions(+), 24 deletions(-) diff --git a/config/config.txt b/config/config.txt index 3fb2301..4a03788 100755 --- a/config/config.txt +++ b/config/config.txt @@ -50,12 +50,16 @@ init.2 txrelaysim.src.PeerInitializer init.2.protocol 2 init.2.out_peers 8 init.2.reachable_count 1500 -init.2.in_flood_delay 2000 -init.2.out_flood_delay 1000 -init.2.all_reconcile true -init.2.reconciliation_interval 1000 -init.2.in_flood_peers_percent 10 -init.2.out_flood_peers_percent 10 +# Delays applied by legacy nodes when relaying transactions (to inbounds and outbounds) +init.2.in_flood_delay_legacy_peer 5000 +init.2.out_flood_delay_legacy_peer 2000 +# Delays applied by reconciling nodes when relaying transactions (to inbounds and outbounds) +init.2.in_flood_delay_recon_peer 5000 +init.2.out_flood_delay_recon_peer 2000 +init.2.reconcile_percent 0 +init.2.reconciliation_interval 8000 +init.2.in_flood_peers_percent 100 +init.2.out_flood_peers_percent 100 init.2.default_q 0.25 init.2.private_black_holes_percent = 0 diff --git a/src/Peer.java b/src/Peer.java index 395f97b..8383faf 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -49,6 +49,7 @@ public class Peer implements CDProtocol, EDProtocol public boolean reconcile = false; public 
Queue reconciliationQueue; public long nextRecon = 0; + // This variable is used to check if a peer supports reconciliations. private HashMap> reconSets; /* Stats */ @@ -138,7 +139,7 @@ private void handleInvMessage(Node node, int pid, IntMessage message) { if (sender.getID() != 0) { // Came not from source. peerKnowsTxs.get(sender).add(txId); - if (reconcile) { + if (reconcile && reconSets.containsKey(sender)) { removeFromReconSet(node, txId, sender); } } @@ -266,7 +267,7 @@ private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) if (!peerKnowsTxs.get(recipient).contains(txId)) { peerKnowsTxs.get(recipient).add(txId); - if (reconcile) { + if (reconcile && reconSets.containsKey(recipient)) { if (shouldFlood) { removeFromReconSet(node, txId, recipient); } else { @@ -304,9 +305,24 @@ private void relayTx(Node node, int pid, int txId, Node sender) { } // Send to inbounds. + // First flood to all non-reconciling peers. + // Then flood to a random subset of remaining reconciling peers, according to a defined + // fraction. For the rest, reconcile. int inboundFloodTargets = (int)(inboundPeers.size() * inFloodLimitPercent / 100); + for (Node peer : inboundPeers) { + if (!reconSets.containsKey(peer)) { // check for non-reconciling + scheduleInv(node, delay, peer, txId, true); + if (inboundFloodTargets > 0) inboundFloodTargets--; + } + } + + // Now flood to a random subset of remaining (reconciling) peers, according to a defined + // fraction. For the rest, reconcile. Collections.shuffle(inboundPeers); for (Node peer : inboundPeers) { + // Skip non-reconciling peers. + if (!reconSets.containsKey(peer)) continue; + boolean shouldFlood = false; if (inboundFloodTargets > 0) { shouldFlood = true; @@ -316,9 +332,25 @@ private void relayTx(Node node, int pid, int txId, Node sender) { } // Send to outbounds. + // First flood to all non-reconciling peers. + // Then flood to a random subset of remaining reconciling peers, according to a defined + // fraction. 
For the rest, reconcile. int outboundFloodTargets = (int)(outboundPeers.size() * outFloodLimitPercent / 100); + for (Node peer : outboundPeers) { + if (!reconSets.containsKey(peer)) { // check for non-reconciling + delay = generateRandomDelay(this.outFloodDelay); + scheduleInv(node, delay, peer, txId, true); + if (outboundFloodTargets > 0) outboundFloodTargets--; + } + } + + // Now flood to a random subset of remaining (reconciling) peers, according to a defined + // fraction. For the rest, reconcile. Collections.shuffle(outboundPeers); for (Node peer : outboundPeers) { + // Skip non-reconciling peers. + if (!reconSets.containsKey(peer)) continue; + delay = generateRandomDelay(this.outFloodDelay); boolean shouldFlood = false; if (outboundFloodTargets > 0) { @@ -356,7 +388,7 @@ private long generateRandomDelay(long avgDelay) { } // Used for setting up the topology. - public void addPeer(Node peer, boolean outbound) { + public void addPeer(Node peer, boolean outbound, boolean supportsRecon) { if (outbound) { assert(!outboundPeers.contains(peer)); outboundPeers.add(peer); @@ -365,7 +397,7 @@ public void addPeer(Node peer, boolean outbound) { inboundPeers.add(peer); } peerKnowsTxs.put(peer, new HashSet<>()); - if (reconcile) { + if (reconcile && supportsRecon) { if (outbound) { reconciliationQueue.offer(peer); } reconSets.put(peer, new HashSet<>()); } diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index 52579d1..9bec31d 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -16,11 +16,13 @@ public class PeerInitializer implements Control private int reachableCount; private int privateBlackHolesPercent; private int outPeers; - private int inFloodDelay; - private int outFloodDelay; + private int inFloodDelayReconPeer; + private int outFloodDelayReconPeer; + private int inFloodDelayLegacyPeer; + private int outFloodDelayLegacyPeer; - private boolean allReconcile; // Reconciliation params + private int reconcilePercent; private double 
outFloodPeersPercent; private double inFloodPeersPercent; private double defaultQ; @@ -30,11 +32,13 @@ public PeerInitializer(String prefix) { pid = Configuration.getPid(prefix + "." + "protocol"); reachableCount = Configuration.getInt(prefix + "." + "reachable_count"); outPeers = Configuration.getInt(prefix + "." + "out_peers"); - inFloodDelay = Configuration.getInt(prefix + "." + "in_flood_delay"); - outFloodDelay = Configuration.getInt(prefix + "." + "out_flood_delay"); + inFloodDelayReconPeer = Configuration.getInt(prefix + "." + "in_flood_delay_recon_peer"); + outFloodDelayReconPeer = Configuration.getInt(prefix + "." + "out_flood_delay_recon_peer"); + inFloodDelayLegacyPeer = Configuration.getInt(prefix + "." + "in_flood_delay_legacy_peer"); + outFloodDelayLegacyPeer = Configuration.getInt(prefix + "." + "out_flood_delay_legacy_peer"); privateBlackHolesPercent = Configuration.getInt(prefix + "." + "private_black_holes_percent", 0); - allReconcile = Configuration.getBoolean(prefix + "." + "all_reconcile"); - if (allReconcile) { + reconcilePercent = Configuration.getInt(prefix + "." + "reconcile_percent"); + if (reconcilePercent > 0) { reconciliationInterval = Configuration.getInt(prefix + "." + "reconciliation_interval"); defaultQ = Configuration.getDouble(prefix + "." + "default_q"); outFloodPeersPercent = Configuration.getDouble(prefix + "." + "out_flood_peers_percent"); @@ -66,22 +70,29 @@ public boolean execute() { } System.err.println("Black holes: " + privateBlackHolesCount); + int reconcilingNodes = Network.size() * reconcilePercent / 100; // A list storing who is already connected to who, so that we don't make duplicate conns. HashMap> peers = new HashMap<>(); for (int i = 1; i < Network.size(); i++) { peers.put(i, new HashSet<>()); // Initial parameters setting for all nodes. 
- ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelay; - ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelay; - if (allReconcile) { + + if (reconcilingNodes > 0) { + reconcilingNodes--; + ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; + ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = inFloodPeersPercent; ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = outFloodPeersPercent; - ((Peer)Network.get(i).getProtocol(pid)).reconcile = true; ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ; + ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelayReconPeer; + ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelayReconPeer; } else { + ((Peer)Network.get(i).getProtocol(pid)).reconcile = false; ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = 100; ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = 100; + ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelayLegacyPeer; + ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelayLegacyPeer; } } @@ -96,8 +107,9 @@ public boolean execute() { } Node randomNode = Network.get(randomNodeIndex); + Peer randomNodeState = ((Peer)Network.get(randomNodeIndex).getProtocol(pid)); - if (!((Peer)randomNode.getProtocol(pid)).isReachable) { + if (!randomNodeState.isReachable) { continue; } if (peers.get(i).contains(randomNodeIndex) || peers.get(randomNodeIndex).contains(i)) { @@ -108,8 +120,9 @@ public boolean execute() { peers.get(randomNodeIndex).add(i); // Actual connecting. 
- ((Peer)curNode.getProtocol(pid)).addPeer(randomNode, true); - ((Peer)randomNode.getProtocol(pid)).addPeer(curNode, false); + boolean curNodeSupportsRecon = ((Peer)Network.get(i).getProtocol(pid)).reconcile; + ((Peer)curNode.getProtocol(pid)).addPeer(randomNode, true, randomNodeState.reconcile); + ((Peer)randomNode.getProtocol(pid)).addPeer(curNode, false, curNodeSupportsRecon); ++conns; } } From 4ca6b7e53a72b43d950e4b0df877fae0cc169767 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 17 Jan 2022 16:46:12 +0200 Subject: [PATCH 13/18] Specify reconciliation intervals per-peer --- src/Peer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Peer.java b/src/Peer.java index 8383faf..b0e3347 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -88,7 +88,7 @@ public void nextCycle(Node node, int pid) { // Move this node to the end of the queue, schedule the next reconciliation. reconciliationQueue.offer(recipient); - nextRecon = curTime + reconciliationInterval; + nextRecon = curTime + (reconciliationInterval / reconciliationQueue.size()); } } } From f1f3d8805f2a60dbe61746074fa8dab0f2c5f7ac Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 17 Jan 2022 16:47:05 +0200 Subject: [PATCH 14/18] Improve results printing --- config/config.txt | 10 ++-- src/InvObserver.java | 123 ++++++++++++++++++++++++++++++------------- 2 files changed, 90 insertions(+), 43 deletions(-) diff --git a/config/config.txt b/config/config.txt index 4a03788..7cea718 100755 --- a/config/config.txt +++ b/config/config.txt @@ -54,12 +54,12 @@ init.2.reachable_count 1500 init.2.in_flood_delay_legacy_peer 5000 init.2.out_flood_delay_legacy_peer 2000 # Delays applied by reconciling nodes when relaying transactions (to inbounds and outbounds) -init.2.in_flood_delay_recon_peer 5000 -init.2.out_flood_delay_recon_peer 2000 -init.2.reconcile_percent 0 +init.2.in_flood_delay_recon_peer 4000 +init.2.out_flood_delay_recon_peer 1500 +init.2.reconcile_percent 50 
init.2.reconciliation_interval 8000 -init.2.in_flood_peers_percent 100 -init.2.out_flood_peers_percent 100 +init.2.in_flood_peers_percent 10 +init.2.out_flood_peers_percent 10 init.2.default_q 0.25 init.2.private_black_holes_percent = 0 diff --git a/src/InvObserver.java b/src/InvObserver.java index ad120d5..46ce109 100755 --- a/src/InvObserver.java +++ b/src/InvObserver.java @@ -56,21 +56,35 @@ public InvObserver(String name) { pid = Configuration.getPid(name + "." + PAR_PROT); } + public enum Protocol { + ERLAY, + LEGACY, + } + public enum NodeType { + REACHABLE, + PRIVATE, + } + public boolean execute() { - // Track how many invs were sent. Reachable nodes are tracked by [0], private are tracked - // by [1]. - int[] invsSent = new int[2]; - int[] shortInvsSent = new int[2]; - int[] txSent = new int[2]; + // Track how many invs and txs were sent. + HashMap invsByProtocol = new HashMap<>(); + HashMap txsByProtocol = new HashMap<>(); + HashMap invsByNodeType = new HashMap<>(); + HashMap txsByNodeType = new HashMap<>(); + HashMap shortInvsByNodeType = new HashMap<>(); + // Track reconciliation results across experiments. ArrayList successRecons = new ArrayList<>(); ArrayList failedRecons = new ArrayList<>(); // Track how soon transactions were propagating across the network. HashMap> txArrivalTimes = new HashMap>(); - int blackHoles = 0, reachableNodes = 0; + int blackHoles = 0, reconcilingNodes = 0, reachableNodes = 0; + for(int i = 1; i < Network.size(); i++) { Peer peer = (Peer) Network.get(i).getProtocol(pid); + // Store all arrival times (at every node) for all transactions. We will later use this + // to calculate latency. Iterator it = peer.txArrivalTimes.entrySet().iterator(); while (it.hasNext()) { Map.Entry pair = (Map.Entry)it.next(); @@ -82,24 +96,44 @@ public boolean execute() { txArrivalTimes.get(txId).add(arrivalTime); } + // See how many black holes there were. 
if (peer.isBlackHole) { ++blackHoles; continue; } if (peer.isReachable) { - invsSent[0] += peer.invsSent; - shortInvsSent[0] += peer.shortInvsSent; - txSent[0] += peer.txSent; - ++reachableNodes; + invsByNodeType.put(NodeType.REACHABLE, invsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.invsSent); + txsByNodeType.put(NodeType.REACHABLE, txsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.txSent); + reachableNodes++; + } else { + invsByNodeType.put(NodeType.PRIVATE, invsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.invsSent); + txsByNodeType.put(NodeType.PRIVATE, txsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.txSent); + } + + // See how many inv/shortinv/tx messages every node sent. + if (peer.reconcile) { + invsByProtocol.put(Protocol.ERLAY, invsByProtocol.getOrDefault(Protocol.ERLAY, 0) + peer.invsSent); + txsByProtocol.put(Protocol.ERLAY, txsByProtocol.getOrDefault(Protocol.ERLAY, 0) + peer.txSent); + reconcilingNodes++; + successRecons.add(peer.successRecons); + failedRecons.add(peer.failedRecons); + + if (peer.isReachable) { + shortInvsByNodeType.put(NodeType.REACHABLE, shortInvsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.shortInvsSent); + } else { + shortInvsByNodeType.put(NodeType.PRIVATE, shortInvsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.shortInvsSent); + } } else { - invsSent[1] += peer.invsSent; - shortInvsSent[1] += peer.shortInvsSent; - txSent[1] += peer.txSent; + invsByProtocol.put(Protocol.LEGACY, invsByProtocol.getOrDefault(Protocol.LEGACY, 0) + peer.invsSent); + txsByProtocol.put(Protocol.LEGACY, txsByProtocol.getOrDefault(Protocol.LEGACY, 0) + peer.txSent); } + } + + int allTxs = txArrivalTimes.size(); - successRecons.add(peer.successRecons); - failedRecons.add(peer.failedRecons); + if (allTxs == 0) { + return false; } // Measure the delays it took to reach majority of the nodes (based on receival time). 
@@ -127,38 +161,40 @@ public boolean execute() { avgTxArrivalDelay.add(percentile95delay); } - // Print results. - int allTxs = txArrivalTimes.size(); - - if (allTxs == 0) { - return false; - } - + System.err.println(""); + System.err.println(""); + System.err.println(""); + System.err.println("-----------RESULTS--------"); System.err.println("Relayed txs: " + allTxs); double avgMaxDelay = avgTxArrivalDelay.stream().mapToLong(val -> val).average().orElse(0.0); System.out.println("Avg max latency: " + avgMaxDelay); - if (blackHoles == 0) { + if (blackHoles != 0) { // Note that black holes are only useful to measure latency // impact, measuring/comparing bandwidth is currently not supported because it depends // on how exactly black holes operate (do they reconcile with empty sketches? or drop // sketches/requests on the floor?). - - System.out.println("Total bandwidth per tx:"); - System.out.println("INV items: " + (invsSent[0] + invsSent[1] + - (shortInvsSent[0] + shortInvsSent[1]) * 0.25) / allTxs / (Network.size() - 1)); - System.out.println("Of them short invs: " + ((shortInvsSent[0] + shortInvsSent[1]) * 0.25) / allTxs / (Network.size() - 1)); - System.out.println("TX items: " + (txSent[0] + txSent[1]) * 1.0 / allTxs / (Network.size() - 1)); - - System.out.println("An average reachable node spends the following bandwidth:"); - System.out.println("INV items: " + (invsSent[0] + shortInvsSent[0] * 0.25)); - System.out.println("TX items: " + txSent[0]); - - int privateNodes = Network.size() - 1 - reachableNodes; - System.out.println("An average private node spends the following bandwidth:"); - System.out.println("INV items: " + (invsSent[1] + shortInvsSent[1] * 0.25)); - System.out.println("TX items: " + txSent[1]); + return false; + } + System.out.println("Total bandwidth per tx"); + int shortInvsTotal = shortInvsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + shortInvsByNodeType.getOrDefault(NodeType.PRIVATE, 0); + int invsTotal = 
invsByNodeType.get(NodeType.REACHABLE) + invsByNodeType.get(NodeType.PRIVATE); + System.out.println("INV items: " + (invsTotal + shortInvsTotal * 0.25) / allTxs / (Network.size() - 1)); + + System.out.println(""); + System.out.println("Total bandwidth per tx based on the protocol"); + int legacyNodes = Network.size() - reconcilingNodes - 1; + if (legacyNodes > 0) { + System.out.println("Legacy:"); + System.out.println("INV items: " + (invsByProtocol.get(Protocol.LEGACY) * 1.0 / allTxs / legacyNodes)); + System.out.println("TX items: " + (txsByProtocol.get(Protocol.LEGACY) * 1.0 / allTxs / legacyNodes)); + } + if (reconcilingNodes > 0) { + System.out.println("Reconciling:"); + System.out.println("INV items: " + (invsByProtocol.get(Protocol.ERLAY) + shortInvsTotal * 0.25) / allTxs / reconcilingNodes); + System.out.println("Of them short invs: " + (shortInvsTotal * 0.25 / allTxs / reconcilingNodes)); + System.out.println("TX items: " + (txsByProtocol.get(Protocol.ERLAY) * 1.0 / allTxs / reconcilingNodes)); double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0); if (avgSuccessRecons > 0) { @@ -168,6 +204,17 @@ public boolean execute() { System.out.println(avgFailedRecons + " failed recons on average."); } } + + System.out.println(""); + System.out.println("Total bandwidth per tx based on reachability"); + int privateNodes = Network.size() - reachableNodes - 1; + System.out.println("Reachable:"); + System.out.println("INV items: " + (invsByNodeType.get(NodeType.REACHABLE) * 1.0 / allTxs / reachableNodes)); + System.out.println("TX items: " + (txsByNodeType.get(NodeType.REACHABLE) * 1.0 / allTxs / reachableNodes)); + + System.out.println("Private:"); + System.out.println("INV items: " + (invsByNodeType.get(NodeType.PRIVATE) * 1.0 / allTxs / privateNodes)); + System.out.println("TX items: " + (txsByNodeType.get(NodeType.PRIVATE) * 1.0 / allTxs / privateNodes)); System.err.println(""); return false; } From 
38c6d21fd8d4c0461797baa0e95f0c6c17bfd494 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 24 Jan 2022 15:07:58 +0200 Subject: [PATCH 15/18] Allow changing connectivity for recon nodes only --- config/config.txt | 3 ++- src/PeerInitializer.java | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/config/config.txt b/config/config.txt index 7cea718..ce22368 100755 --- a/config/config.txt +++ b/config/config.txt @@ -48,7 +48,8 @@ init.1.tps 7 init.2 txrelaysim.src.PeerInitializer init.2.protocol 2 -init.2.out_peers 8 +init.2.out_peers_legacy 8 +init.2.out_peers_recon 8 init.2.reachable_count 1500 # Delays applied by legacy nodes when relaying transactions (to inbounds and outbounds) init.2.in_flood_delay_legacy_peer 5000 diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index 9bec31d..c9dd1d0 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -15,7 +15,8 @@ public class PeerInitializer implements Control private int pid; private int reachableCount; private int privateBlackHolesPercent; - private int outPeers; + private int outPeersLegacy; + private int outPeersRecon; private int inFloodDelayReconPeer; private int outFloodDelayReconPeer; private int inFloodDelayLegacyPeer; @@ -31,7 +32,8 @@ public class PeerInitializer implements Control public PeerInitializer(String prefix) { pid = Configuration.getPid(prefix + "." + "protocol"); reachableCount = Configuration.getInt(prefix + "." + "reachable_count"); - outPeers = Configuration.getInt(prefix + "." + "out_peers"); + outPeersLegacy = Configuration.getInt(prefix + "." + "out_peers_legacy"); + outPeersRecon = Configuration.getInt(prefix + "." + "out_peers_recon"); inFloodDelayReconPeer = Configuration.getInt(prefix + "." + "in_flood_delay_recon_peer"); outFloodDelayReconPeer = Configuration.getInt(prefix + "." + "out_flood_delay_recon_peer"); inFloodDelayLegacyPeer = Configuration.getInt(prefix + "." 
+ "in_flood_delay_legacy_peer"); @@ -99,8 +101,13 @@ public boolean execute() { // Connect all nodes to a limited number of reachable nodes. for(int i = 1; i < Network.size(); i++) { Node curNode = Network.get(i); - int conns = 0; - while (conns < outPeers) { + int connsTarget; + if (((Peer)curNode.getProtocol(pid)).reconcile) { + connsTarget = outPeersRecon; + } else { + connsTarget = outPeersLegacy; + } + while (connsTarget > 0) { int randomNodeIndex = CommonState.r.nextInt(Network.size() - 1) + 1; if (randomNodeIndex == i) { continue; @@ -123,7 +130,7 @@ public boolean execute() { boolean curNodeSupportsRecon = ((Peer)Network.get(i).getProtocol(pid)).reconcile; ((Peer)curNode.getProtocol(pid)).addPeer(randomNode, true, randomNodeState.reconcile); ((Peer)randomNode.getProtocol(pid)).addPeer(curNode, false, curNodeSupportsRecon); - ++conns; + --connsTarget; } } From 8a0e671d993883986bbab68670791bbe1a453e54 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Mon, 24 Jan 2022 15:26:54 +0200 Subject: [PATCH 16/18] Comment config values --- config/config.txt | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/config/config.txt b/config/config.txt index ce22368..a002923 100755 --- a/config/config.txt +++ b/config/config.txt @@ -1,16 +1,11 @@ -# parameters of periodic execution -# CYCLES * CYCLE = how many milliseconds the experiment would take -# Note that the source stops issuing transactions 25 seconds before the end, -# to let everything propagate fully. +#### All the Erlay-specific configurations are in the "ERLAY" section above. +#### Everything else is irrelevant framework-related stuff (apart from the network.size parameter, +#### which might be interesting to adjust). + CYCLES 600 CYCLE 100 # milliseconds - -# parameters of message transfer -# delay values here are relative to cycle length, in percentage, -# eg 50 means half the cycle length, 200 twice the cycle length, etc. 
MINDELAY 5 MAXDELAY 100 -# drop is a probability, 0<=DROP<=1 DROP 0 random.seed 9098797865656766578567 @@ -46,22 +41,34 @@ init.1 txrelaysim.src.SourceInitializer init.1.protocol 1 init.1.tps 7 +############### ERLAY ##################### init.2 txrelaysim.src.PeerInitializer init.2.protocol 2 +# How many outbound connections legacy (flooding) nodes make init.2.out_peers_legacy 8 +# How many outbound connections erlay (reconciling) nodes make init.2.out_peers_recon 8 +# How many reachable nodes we have in the network (total nodes is above called `network.size`) init.2.reachable_count 1500 -# Delays applied by legacy nodes when relaying transactions (to inbounds and outbounds) +# Poisson delays applied by legacy nodes when relaying transactions (to inbounds and outbounds) init.2.in_flood_delay_legacy_peer 5000 init.2.out_flood_delay_legacy_peer 2000 -# Delays applied by reconciling nodes when relaying transactions (to inbounds and outbounds) +# Poisson delays applied by erlay nodes when relaying transactions (to inbounds and outbounds) init.2.in_flood_delay_recon_peer 4000 init.2.out_flood_delay_recon_peer 1500 -init.2.reconcile_percent 50 +# Fraction of all nodes that support reconciliation +init.2.reconcile_percent 25 +# Intervals between reconciliations with a given peer (Alice reconciles with Bob every 8 seconds) init.2.reconciliation_interval 8000 -init.2.in_flood_peers_percent 10 -init.2.out_flood_peers_percent 10 +# For erlay nodes, to how many in/out peers they have they will flood. +# First, they flood to all legacy peers. Then, if the specified % is not reached, they pick random +# peers among the remaining (erlay) peers, and flood to them. To the rest, they reconcile. This +# choice is made per-transaction. 
+init.2.in_flood_peers_percent 15 +init.2.out_flood_peers_percent 20 +# A coefficient for set difference estimation (used in Erlay) init.2.default_q 0.25 +# How many of the nodes are black holes init.2.private_black_holes_percent = 0 init.sch1 CDScheduler From 4fdec3c1d3fe3ea9b1cd6700c9b8714646d5f909 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Tue, 25 Jan 2022 10:33:47 +0200 Subject: [PATCH 17/18] Use correct formula for q estimation Co-authored by: dergoegge --- config/config.txt | 2 +- src/Peer.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config.txt b/config/config.txt index a002923..76a4551 100755 --- a/config/config.txt +++ b/config/config.txt @@ -67,7 +67,7 @@ init.2.reconciliation_interval 8000 init.2.in_flood_peers_percent 15 init.2.out_flood_peers_percent 20 # A coefficient for set difference estimation (used in Erlay) -init.2.default_q 0.25 +init.2.default_q 0.1 # How many of the nodes are black holes init.2.private_black_holes_percent = 0 diff --git a/src/Peer.java b/src/Peer.java index b0e3347..e8ed5b6 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -214,7 +214,7 @@ private void handleSketchMessage(Node node, int pid, Node sender, ArrayList diff) { // Reconciliation succeeded right away. 
successRecons++; From 944e73c584ccab882d4499cb371cac3017f929a4 Mon Sep 17 00:00:00 2001 From: Gleb Naumenko Date: Tue, 25 Jan 2022 14:41:50 +0200 Subject: [PATCH 18/18] More smooth flood target picking Also rename params --- config/config.txt | 18 +++++++------- src/Peer.java | 52 ++++++++++++++++++++++------------------ src/PeerInitializer.java | 24 +++++++++---------- 3 files changed, 50 insertions(+), 44 deletions(-) diff --git a/config/config.txt b/config/config.txt index 76a4551..0ca3153 100755 --- a/config/config.txt +++ b/config/config.txt @@ -51,23 +51,23 @@ init.2.out_peers_recon 8 # How many reachable nodes we have in the network (total nodes is above called `network.size`) init.2.reachable_count 1500 # Poisson delays applied by legacy nodes when relaying transactions (to inbounds and outbounds) -init.2.in_flood_delay_legacy_peer 5000 -init.2.out_flood_delay_legacy_peer 2000 +init.2.in_relay_delay_legacy_peer 5000 +init.2.out_relay_delay_legacy_peer 2000 # Poisson delays applied by erlay nodes when relaying transactions (to inbounds and outbounds) -init.2.in_flood_delay_recon_peer 4000 -init.2.out_flood_delay_recon_peer 1500 +init.2.in_relay_delay_recon_peer 0 +init.2.out_relay_delay_recon_peer 0 # Fraction of all nodes that support reconciliation -init.2.reconcile_percent 25 +init.2.reconcile_percent 100 # Intervals between reconciliations with a given peer (Alice reconciles with Bob every 8 seconds) -init.2.reconciliation_interval 8000 +init.2.reconciliation_interval 16000 # For erlay nodes, to how many in/out peers they have they will flood. # First, they flood to all legacy peers. Then, if the specified % is not reached, they pick random # peers among the remaining (erlay) peers, and flood to them. To the rest, they reconcile. This # choice is made per-transaction. 
-init.2.in_flood_peers_percent 15 -init.2.out_flood_peers_percent 20 +init.2.in_flood_peers_percent 0 +init.2.out_flood_peers_percent 0 # A coefficient for set difference estimation (used in Erlay) -init.2.default_q 0.1 +init.2.default_q 0.25 # How many of the nodes are black holes init.2.private_black_holes_percent = 0 diff --git a/src/Peer.java b/src/Peer.java index e8ed5b6..78ec726 100755 --- a/src/Peer.java +++ b/src/Peer.java @@ -32,8 +32,8 @@ public class Peer implements CDProtocol, EDProtocol public double inFloodLimitPercent; public double outFloodLimitPercent; public int reconciliationInterval; - public int inFloodDelay; - public int outFloodDelay; + public int inRelayDelay; + public int outRelayDelay; public double defaultQ; /* State */ @@ -298,7 +298,7 @@ private void relayTx(Node node, int pid, int txId, Node sender) { long delay; long curTime = CommonState.getTime(); if (nextFloodInbound < curTime) { - nextFloodInbound = curTime + generateRandomDelay(this.inFloodDelay); + nextFloodInbound = curTime + generateRandomDelay(this.inRelayDelay); delay = 0; } else { delay = nextFloodInbound - curTime; @@ -308,54 +308,60 @@ private void relayTx(Node node, int pid, int txId, Node sender) { // First flood to all non-reconciling peers. // Then flood to a random subset of remaining reconciling peers, according to a defined // fraction. For the rest, reconcile. - int inboundFloodTargets = (int)(inboundPeers.size() * inFloodLimitPercent / 100); + int flooded = 0; for (Node peer : inboundPeers) { if (!reconSets.containsKey(peer)) { // check for non-reconciling scheduleInv(node, delay, peer, txId, true); - if (inboundFloodTargets > 0) inboundFloodTargets--; + flooded++; } } + double alreadyFloodedPercent, remainsToFloodPercent; + Random randomNum = new Random(); // Now flood to a random subset of remaining (reconciling) peers, according to a defined // fraction. For the rest, reconcile. 
- Collections.shuffle(inboundPeers); - for (Node peer : inboundPeers) { - // Skip non-reconciling peers. - if (!reconSets.containsKey(peer)) continue; - - boolean shouldFlood = false; - if (inboundFloodTargets > 0) { - shouldFlood = true; - inboundFloodTargets--; + if (inboundPeers.size() > 0) { + alreadyFloodedPercent = flooded * 100.0 / inboundPeers.size(); + // We will flip a coin for the sake of randomness -> privacy every time. + remainsToFloodPercent = inFloodLimitPercent - alreadyFloodedPercent; + Collections.shuffle(inboundPeers); + for (Node peer : inboundPeers) { + // Skip non-reconciling peers. + if (!reconSets.containsKey(peer)) continue; + + boolean shouldFlood = false; + if (randomNum.nextInt(100) < remainsToFloodPercent) { + shouldFlood = true; + } + scheduleInv(node, delay, peer, txId, shouldFlood); } - scheduleInv(node, delay, peer, txId, shouldFlood); } // Send to outbounds. // First flood to all non-reconciling peers. - // Then flood to a random subset of remaining reconciling peers, according to a defined - // fraction. For the rest, reconcile. - int outboundFloodTargets = (int)(outboundPeers.size() * outFloodLimitPercent / 100); + flooded = 0; for (Node peer : outboundPeers) { if (!reconSets.containsKey(peer)) { // check for non-reconciling - delay = generateRandomDelay(this.outFloodDelay); + delay = generateRandomDelay(this.outRelayDelay); scheduleInv(node, delay, peer, txId, true); - if (outboundFloodTargets > 0) outboundFloodTargets--; + flooded++; } } // Now flood to a random subset of remaining (reconciling) peers, according to a defined // fraction. For the rest, reconcile. + alreadyFloodedPercent = flooded * 100.0 / outboundPeers.size(); + // We will flip a coin for the sake of randomness -> privacy every time. + remainsToFloodPercent = outFloodLimitPercent - alreadyFloodedPercent; Collections.shuffle(outboundPeers); for (Node peer : outboundPeers) { // Skip non-reconciling peers. 
if (!reconSets.containsKey(peer)) continue; - delay = generateRandomDelay(this.outFloodDelay); + delay = generateRandomDelay(this.outRelayDelay); boolean shouldFlood = false; - if (outboundFloodTargets > 0) { + if (randomNum.nextInt(100) < remainsToFloodPercent) { shouldFlood = true; - outboundFloodTargets--; } scheduleInv(node, delay, peer, txId, shouldFlood); } diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java index c9dd1d0..eb800ce 100755 --- a/src/PeerInitializer.java +++ b/src/PeerInitializer.java @@ -17,10 +17,10 @@ public class PeerInitializer implements Control private int privateBlackHolesPercent; private int outPeersLegacy; private int outPeersRecon; - private int inFloodDelayReconPeer; - private int outFloodDelayReconPeer; - private int inFloodDelayLegacyPeer; - private int outFloodDelayLegacyPeer; + private int inRelayDelayReconPeer; + private int outRelayDelayReconPeer; + private int inRelayDelayLegacyPeer; + private int outRelayDelayLegacyPeer; // Reconciliation params private int reconcilePercent; @@ -34,10 +34,10 @@ public PeerInitializer(String prefix) { reachableCount = Configuration.getInt(prefix + "." + "reachable_count"); outPeersLegacy = Configuration.getInt(prefix + "." + "out_peers_legacy"); outPeersRecon = Configuration.getInt(prefix + "." + "out_peers_recon"); - inFloodDelayReconPeer = Configuration.getInt(prefix + "." + "in_flood_delay_recon_peer"); - outFloodDelayReconPeer = Configuration.getInt(prefix + "." + "out_flood_delay_recon_peer"); - inFloodDelayLegacyPeer = Configuration.getInt(prefix + "." + "in_flood_delay_legacy_peer"); - outFloodDelayLegacyPeer = Configuration.getInt(prefix + "." + "out_flood_delay_legacy_peer"); + inRelayDelayReconPeer = Configuration.getInt(prefix + "." + "in_relay_delay_recon_peer"); + outRelayDelayReconPeer = Configuration.getInt(prefix + "." + "out_relay_delay_recon_peer"); + inRelayDelayLegacyPeer = Configuration.getInt(prefix + "." 
+ "in_relay_delay_legacy_peer"); + outRelayDelayLegacyPeer = Configuration.getInt(prefix + "." + "out_relay_delay_legacy_peer"); privateBlackHolesPercent = Configuration.getInt(prefix + "." + "private_black_holes_percent", 0); reconcilePercent = Configuration.getInt(prefix + "." + "reconcile_percent"); if (reconcilePercent > 0) { @@ -87,14 +87,14 @@ public boolean execute() { ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = outFloodPeersPercent; ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval; ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ; - ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelayReconPeer; - ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelayReconPeer; + ((Peer)Network.get(i).getProtocol(pid)).inRelayDelay = inRelayDelayReconPeer; + ((Peer)Network.get(i).getProtocol(pid)).outRelayDelay = outRelayDelayReconPeer; } else { ((Peer)Network.get(i).getProtocol(pid)).reconcile = false; ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = 100; ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = 100; - ((Peer)Network.get(i).getProtocol(pid)).inFloodDelay = inFloodDelayLegacyPeer; - ((Peer)Network.get(i).getProtocol(pid)).outFloodDelay = outFloodDelayLegacyPeer; + ((Peer)Network.get(i).getProtocol(pid)).inRelayDelay = inRelayDelayLegacyPeer; + ((Peer)Network.get(i).getProtocol(pid)).outRelayDelay = outRelayDelayLegacyPeer; } }