diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6b468b6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.class
diff --git a/README.md b/README.md
index 62ccabd..06ddab2 100755
--- a/README.md
+++ b/README.md
@@ -1,44 +1,88 @@
-# P2PSP Simulation Project
+# Bitcoin transaction relay simulator
-[](https://gitter.im/P2PSP/PeerSim-simulator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+This project helps measure different trade-offs in transaction relay protocols for Bitcoin.
+It was primarily designed to compare configurations of Erlay-like protocols, but it can also be
+used to simply explore how flooding works.
-This is the PeerSim simulation branch for P2PSP. Its purpose is to simulate practical conditions with large sets of clients in order to obtain more knowledge about its behaviour.
+The simulator assumes familiarity with the existing Bitcoin p2p stack.
-To run this simulations you need to download the PeerSim simulator from [PeerSim download page](http://sourceforge.net/projects/peersim/).
+It currently omits sending GETDATA/TX messages, because they are not necessary for the current
+use case, but it can easily be extended with that logic (as well as more advanced peer selection,
+block propagation research, etc.).
-You can set up your IDE (i.e. Eclipse) to work with PeerSim project as it described [here](http://miromannino.com/blog/integrating-peersim-with-eclipse/).
+Beware, research code.
-Configuration file is located at `config/config.txt`.
+## Organization
-## Running simulation
+This project consists of several main classes:
+1. `Peer` represents a normal Bitcoin node.
+2. `PeerInitializer` spawns and configures Bitcoin nodes.
+3. `Source` represents a special node from which transactions initially propagate to random nodes.
+4. `SourceInitializer` spawns and configures source nodes.
+5. `helpers` contains the custom message types sent between nodes.
+6. `InvObserver` collects the results at the end of the experiment.
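+
+To give a feel for how these pieces fit together, below is a minimal, hypothetical sketch of an
+event-driven PeerSim protocol in the same spirit (the real `Peer`, the message helpers and
+`InvObserver` in `txrelaysim/src` are more involved):
+
+```java
+// Hypothetical sketch only; class and message names here are illustrative.
+import peersim.cdsim.CDProtocol;
+import peersim.core.Node;
+import peersim.edsim.EDProtocol;
+
+import java.util.HashSet;
+import java.util.Set;
+
+public class SketchPeer implements CDProtocol, EDProtocol {
+    // Stand-in for the message types kept in txrelaysim/src/helpers.
+    public static class InvMessage {
+        public final long txId;
+        public InvMessage(long txId) { this.txId = txId; }
+    }
+
+    private final Set<Long> knownTxs = new HashSet<>();
+
+    public SketchPeer(String prefix) {
+        // Protocol parameters (relay delays, flood percentages, ...) would be read from the config here.
+    }
+
+    // Called once per CYCLE by the cycle-driven scheduler: announce queued transactions to peers.
+    public void nextCycle(Node node, int protocolID) { }
+
+    // Called when a message scheduled through the transport arrives at this node.
+    public void processEvent(Node node, int pid, Object event) {
+        if (event instanceof InvMessage) {
+            knownTxs.add(((InvMessage) event).txId); // remember the tx, then relay it further
+        }
+    }
+
+    public Object clone() {
+        return new SketchPeer(null); // PeerSim clones a prototype instance for every node
+    }
+}
+```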
+
+## HOWTO
+
+The configuration file is located at `config/config.txt`. In this file, you can specify network size,
+connectivity, and other protocol-specific constants.
+
+You will also need a JDK installed.
1. Create a directory:
```shell
- $ mkdir p2psp-peersim && cd p2psp-peersim
+ mkdir p2p-simulations && cd p2p-simulations
```
-
+
2. Clone a repository:
```shell
- $ git clone git@github.com:P2PSP/sim.git
+ git clone git@github.com:naumenkogs/txrelaysim.git
```
-
+
3. Download PeerSim simulator engine (and unzip it):
```shell
- $ wget downloads.sourceforge.net/project/peersim/peersim-1.0.5.zip && unzip peersim-1.0.5.zip
+ wget downloads.sourceforge.net/project/peersim/peersim-1.0.5.zip && unzip peersim-1.0.5.zip
```
-
-4. Compile source files of P2PSP protocol:
+
+4. Compile source files:
```shell
- $ javac -cp ./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar ./sim/src/*.java
- ```
-
+ javac -cp ./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar ./txrelaysim/src/*.java ./txrelaysim/src/helpers/*.java
+ ```
+
5. Run simulation:
```shell
- $ java -cp ./:./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar peersim.Simulator ./sim/config/config.txt
+ java -cp ./:./peersim-1.0.5/peersim-1.0.5.jar:./peersim-1.0.5/jep-2.3.0.jar:./peersim-1.0.5/djep-1.0.0.jar peersim.Simulator ./txrelaysim/config/config.txt
```
-
+
+## Result interpretation
+
+At the end of a run, you usually get output like this:
+```
+1.7822426377729141 extra inv per tx on average.
+2.155010635147142 shortInvs per tx on average.
+23.500275013750688 success recons on average.
+0.08350417520876044 failed recons on average.
+Avg max latency: 7348.884615384615
+```
+
+For every transaction, no matter which protocol is used, the cost is always at least `INV + GETDATA + TX`.
+The output above shows the extra cost on top of that: `1.78 * INV + 2.15 * SHORT_INV` per transaction,
+where `SHORT_INV = INV / 4`.
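+
+For the run above, this works out to roughly `1.78 + 2.15 / 4 ≈ 2.3` INV-equivalents of extra
+announcement traffic per transaction.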
+
+`Avg max latency` represents the time it takes for a transaction to reach 95% of nodes.
+
+## Results
+
+Some results generated from the output of this simulator can be found in the `results` folder.
+
+## Scalability
+
+On my 16" MacBook Pro 2019 it takes no more than a couple of minutes to simulate transaction relay across 30,000 nodes.
+If you increase connectivity (more than 8) or the number of nodes, you might run out of memory.
+
+For larger-scale experiments, I suggest using a machine with more RAM.
+To make the runs faster, you probably want a faster CPU.
diff --git a/config/config.txt b/config/config.txt
index 8054b6d..0ca3153 100755
--- a/config/config.txt
+++ b/config/config.txt
@@ -1,35 +1,29 @@
-# network size
-SIZE 10
+#### All the Erlay-specific configuration is in the "ERLAY" section below.
+#### Everything else is framework-related (apart from the network.size parameter,
+#### which might be interesting to adjust).
-# parameters of periodic execution
-CYCLES 200
-CYCLE SIZE*10
-
-# parameters of message transfer
-# delay values here are relative to cycle length, in percentage,
-# eg 50 means half the cycle length, 200 twice the cycle length, etc.
+CYCLES 600
+CYCLE 100 # milliseconds
MINDELAY 5
-MAXDELAY 50
-# drop is a probability, 0<=DROP<=1
+MAXDELAY 100
DROP 0
random.seed 9098797865656766578567
-network.size SIZE
+network.size 15000
simulation.endtime CYCLE*CYCLES
-simulation.logtime CYCLE
-simulation.experiments 1000
+simulation.logtime CYCLE * 50
+simulation.experiments 1
################### protocols ===========================
protocol.0 peersim.core.IdleProtocol
protocol.0.step CYCLE
-protocol.1 sim.src.Source
+protocol.1 txrelaysim.src.Source
protocol.1.linkable 0
-protocol.1.step CYCLE
+protocol.1.step 1000 # trigger every second to make handling tps easier
protocol.1.transport tr
-protocol.2 sim.src.Peer
-protocol.2.buffer_size 32
+protocol.2 txrelaysim.src.Peer
protocol.2.linkable 0
protocol.2.step CYCLE
protocol.2.transport tr
@@ -43,13 +37,39 @@ protocol.tr.transport urt
protocol.tr.drop DROP
################### initialization ======================
-init.1 sim.src.SourceInitializer
+init.1 txrelaysim.src.SourceInitializer
init.1.protocol 1
+# How many transactions per second the source generates
+init.1.tps 7
-init.2 sim.src.PeerInitializer
+############### ERLAY #####################
+init.2 txrelaysim.src.PeerInitializer
init.2.protocol 2
-init.2.malicious_count 1
-init.2.trusted_count 1
+# How many outbound connections legacy (flooding) nodes make
+init.2.out_peers_legacy 8
+# How many outbound connections erlay (reconciling) nodes make
+init.2.out_peers_recon 8
+# How many reachable nodes the network has (the total number of nodes is network.size above)
+init.2.reachable_count 1500
+# Poisson delays applied by legacy nodes when relaying transactions (to inbounds and outbounds)
+init.2.in_relay_delay_legacy_peer 5000
+init.2.out_relay_delay_legacy_peer 2000
+# Poisson delays applied by erlay nodes when relaying transactions (to inbounds and outbounds)
+init.2.in_relay_delay_recon_peer 0
+init.2.out_relay_delay_recon_peer 0
+# Fraction of all nodes that support reconciliation
+init.2.reconcile_percent 100
+# Interval (in ms) between reconciliations with a given peer
+# (e.g. 16000 means Alice reconciles with Bob every 16 seconds)
+init.2.reconciliation_interval 16000
+# To what percentage of their in/out peers Erlay nodes flood transactions.
+# First, they flood to all legacy peers. Then, if the specified % is not reached, they pick random
+# peers among the remaining (Erlay) peers and flood to them. With the rest, they reconcile. This
+# choice is made per-transaction.
+init.2.in_flood_peers_percent 0
+init.2.out_flood_peers_percent 0
+# A coefficient for set difference estimation (used in Erlay)
+init.2.default_q 0.25
+# What percentage of the nodes are black holes
+init.2.private_black_holes_percent 0
init.sch1 CDScheduler
init.sch1.protocol 1
@@ -59,13 +79,9 @@ init.sch2 CDScheduler
init.sch2.protocol 2
init.sch2.randstart
-
################ control ==============================
-#control.0 sim.src.PeerObserver
-#control.0.protocol 2
-#control.0.step CYCLE*1
+control.0 txrelaysim.src.InvObserver
+control.0.protocol 2
+control.0.step CYCLE * 100
-control.1 sim.src.PoisonedChunksObserver
-control.1.protocol 2
-control.1.step CYCLE*1
diff --git a/results/flood-erlay/README.md b/results/flood-erlay/README.md
new file mode 100644
index 0000000..942f3f0
--- /dev/null
+++ b/results/flood-erlay/README.md
@@ -0,0 +1,63 @@
+# Picking the best Erlay configuration
+
+These experiments were done to explore all reasonable Erlay configurations and to pick the best one
+for the current network and for extended connectivity.
+
+For these experiments, the network had 2,000 reachable nodes and 18,000 non-reachable nodes.
+Doubling the network size doesn't change the results much.
+
+The following Erlay parameters could be tweaked:
+1. q choice
+2. reconciliation frequency
+3. in/out flooding delay
+4. in/out number of peers a node floods to
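+
+In the simulator's `config/config.txt`, these correspond to `default_q`, `reconciliation_interval`,
+the `in/out_relay_delay_recon_peer` delays, and `in/out_flood_peers_percent`, respectively.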
+
+## Latency
+We measure the time it takes for a transaction to reach 95% of the nodes, assuming every node has 8
+tx-relay peers. For this experiment, only parameters 2, 3, and 4 matter.
+
+We label the configurations as follows:
+
+| Name | In/Out flood delay | Reconciliation interval | In/out flood destinations |
+| ------------- |:-------------:| -----:| -----:|
+| Erlay-a | 2s/5s | 2s | 2/2 |
+| Erlay-b | 2s/5s | 2s | 3/3 |
+| Erlay-c | 1s/2s | 2s | 2/2 |
+| Erlay-d | 1s/2s | 2s | 3/3 |
+| Erlay-e | 1s/2s | 1s | 2/2 |
+| Erlay-f | 1s/2s | 1s | 3/3 |
+
+## Optimal bandwidth
+
+The second experiment considers the first parameter, q. We try values between 0.01 and 0.2
+and find that q=0.01 makes the most sense across the board, because it's best at conserving bandwidth.
+
+It's also possible to look at the bandwidth breakdown (how many reconciliations failed, etc.)
+and try to optimize from there, but we leave that for further research.
+
+## Conclusions
+
+For now, we conclude that Erlay-e-0.01 is the best configuration: it provides
+the lowest bandwidth overhead (except for Erlay-a-0.01, which is way slower)
+and one of the lowest latencies, on par with legacy flooding.
+
+Broken down, Erlay-e-0.01 requires sending 1.77 txids and 1.47 short-txids per transaction, in addition
+to the necessary 2 * txid (INV + GETDATA, as per the latest Erlay protocol).
+
+Since the reconciliation failure rate is very low, the first component is not caused by failed
+reconciliations, but rather by the natural fanout, which happens faster than flooding; it therefore
+can't be reduced unless we use yet another configuration.
+
+This is, however, problematic: either we increase flood delays (and increase the overall latency),
+or we make reconciliations more frequent (and thus less efficient).
+
+The latter component can be optimized by better estimating the set difference (by choosing q better),
+but it's just (1.47 * 4) bytes, which is minor compared to the overall INV traffic,
+so we don't do that for now.
+
+## Scaling with connections
+
+Erlay-e-0.01 also scales well with the number of connections. If we increase the connectivity from
+8 to 12, we get just 77 bytes of extra overhead.
diff --git a/results/flood-erlay/plots/compare_protocols.py b/results/flood-erlay/plots/compare_protocols.py
new file mode 100644
index 0000000..c74329b
--- /dev/null
+++ b/results/flood-erlay/plots/compare_protocols.py
@@ -0,0 +1,23 @@
+import matplotlib.pyplot as plt
+
+# Latency (seconds for a tx to reach 95% of nodes) and bandwidth overhead (bytes per tx),
+# taken from the simulation results for each protocol listed below.
+latencies = [4.3, 8.1, 7, 6.1, 5.1, 4.3, 3.7]
+bandwidth_overhead = [227, 74, 101, 77, 101, 73, 98]
+
+protocols = ['Legacy flooding', 'Erlay-a-0.01', 'Erlay-b-0.01', 'Erlay-c-0.01', 'Erlay-d-0.01',
+ 'Erlay-e-0.01', 'Erlay-f-0.01']
+
+
+fig, ax = plt.subplots()
+ax.set_xlabel('Latency (s)')
+ax.set_ylabel('Bandwidth overhead (bytes per tx)')
+ax.scatter(latencies, bandwidth_overhead)
+
+for i, txt in enumerate(protocols):
+ if i == 0:
+ ax.annotate(txt, (latencies[i], bandwidth_overhead[i] - 10))
+ elif i == 1:
+ ax.annotate(txt, (latencies[i] - 0.3, bandwidth_overhead[i] + 5))
+ else:
+ ax.annotate(txt, (latencies[i], bandwidth_overhead[i] + 5))
+
+plt.show()
diff --git a/results/flood-erlay/plots/erlay_configurations.png b/results/flood-erlay/plots/erlay_configurations.png
new file mode 100644
index 0000000..06f7978
Binary files /dev/null and b/results/flood-erlay/plots/erlay_configurations.png differ
diff --git a/sim-cluster/Makefile b/sim-cluster/Makefile
deleted file mode 100755
index df9ac68..0000000
--- a/sim-cluster/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-BIN = ../bin
-
-default: all
-
-M4 := $(shell which m4)
-
-ifeq ($(M4),)
-$(warning m4 not found!)
-false
-endif
-
-$(BIN)/%.py: %.py
- (echo "undefine(\`format')changequote({{,}})dnl"; cat $*.py) | sed 's/#ifdef/ifdef/' | sed 's/#)/)/' | sed 's/#,/,/' | m4 -D $(DEFS) > $@; chmod +x $@
-# cpp -D $(DEFS) < $*.py > $@; chmod +x $@
-
-PYs := $(wildcard *.py)
-EXEs += $(PYs:%.py=$(BIN)/%.py)
-
-$(BIN)/%.sh: %.sh
- cp $*.sh $@; chmod +x $@
-
-SHs := $(wildcard *.sh)
-EXEs += $(SHs:%.sh=$(BIN)/%.sh)
-
-DEFS = _DEBUG_
-#DEFS = _RELEASE_
-#DEFS = _SIMULATION_
-
-all: $(EXEs)
-
-info:
- @echo $(EXEs)
-
-clean:
- rm -f $(EXEs)
diff --git a/sim-cluster/blocking_TCP_socket.py b/sim-cluster/blocking_TCP_socket.py
deleted file mode 100755
index 5a62341..0000000
--- a/sim-cluster/blocking_TCP_socket.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: iso-8859-15 -*-
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is part of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
-
-import socket
-
-class blocking_TCP_socket(socket.socket):
-
- def __init__(self, *p):
- super(blocking_TCP_socket, self).__init__(*p)
-
- def brecv(self, size):
- data = super(blocking_TCP_socket, self).recv(size)
- while len(data) < size:
- data += super(blocking_TCP_socket, self).recv(size - len(data))
- return data
-
- def baccept(self):
- return super(blocking_TCP_socket, self).accept()
diff --git a/sim-cluster/churn.py b/sim-cluster/churn.py
deleted file mode 100644
index eb25529..0000000
--- a/sim-cluster/churn.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import random
-import time
-import datetime
-import sys
-
-#maximum possible time
-NEVER = sys.float_info.max
-
-def weibull_random(shape, scale):
- #random.weibullvariate(alpha,beta), where alpha is the scale and beta the shape
- return random.weibullvariate(scale, shape)
-
-#returns a death time in the future (drawn from the weibull distribution with shape 0.4 and the provided scale)
-#returns the maximum available time if scale == 0. This means that the peer will never die.
-#the return type is a float (number of seconds after the initial time, the epoch)
-def new_death_time(scale):
- if scale == 0:
- return NEVER #maximum float
- else:
- return time.mktime(time.localtime()) + weibull_random(0.4,scale)
-
-#returns true if the present moment in time is beyond death_time
-def time_to_die(death_time):
- return (death_time-time.mktime(time.localtime())<=0)
-
-'''
-i=0
-while i<100:
- print(weibull_random(0.4,30))
- i += 1
-'''
\ No newline at end of file
diff --git a/sim-cluster/cluster.sh b/sim-cluster/cluster.sh
deleted file mode 100755
index 29662ad..0000000
--- a/sim-cluster/cluster.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-
-set -x
-
-# Runs a real cluster (a video is transmitted and played). Flash-crowd churn.
-
-block_size=1024
-buffer_size=32 # blocks
-source_channel=\"134.ogg\"
-source_hostname=\"localhost\"
-source_port=4551
-splitter_hostname=\"localhost\"
-splitter_port=4552
-gatherer_port=$[splitter_port+1]
-number_of_peers=2
-
-usage() {
- echo $0
- echo " [-b (block size, \"$block_size\" by default)]"
- echo " [-u (buffer size, \"$buffer_size\" by default)]"
- echo " [-c (source channel, \"$source_channel\" by default)]"
- echo " [-a (source hostname, $source_hostname by default)]"
- echo " [-p (source port, $source_port by default)]"
- echo " [-n (number of peers, $number_of_peers by default)]"
- echo " [-l (splitter port, $splitter_port by default)]"
- echo " [-s (splitter hostname, $spltter_hostname by default)]"
- echo " [-v (video filename, \"$video\" by default)]"
- echo " [-? (help)]"
-}
-
-echo $0: parsing: $@
-
-while getopts "b:u:c:w:a:p:n:l:s:v:?" opt; do
- case ${opt} in
- b)
- block_size="${OPTARG}"
- ;;
- u)
- buffer_size="${OPTARG}"
- ;;
- c)
- source_channel="${OPTARG}"
- ;;
- a)
- source_hostname="${OPTARG}"
- ;;
- p)
- source_port="${OPTARG}"
- ;;
- n)
- number_of_peers="${OPTARG}"
- ;;
- l)
- splitter_port="${OPTARG}"
- ;;
- s)
- splitter_hostname="${OPTARG}"
- ;;
- v)
- video="${OPTARG}"
- ;;
- ?)
- usage
- exit 0
- ;;
- \?)
- echo "Invalid option: -${OPTARG}" >&2
- usage
- exit 1
- ;;
- :)
- echo "Option -${OPTARG} requires an argument." >&2
- usage
- exit 1
- ;;
- esac
-done
-
-#xterm -e "./splitter.py --block_size=$block_size --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --listening_port=$splitter_port" &
-
-xterm -e "./splitter.py --source_hostname=localhost" &
-
-sleep 1
-
-#xterm -e "./gatherer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --splitter_hostname=$splitter_hostname --splitter_port=$splitter_port" &
-
-xterm -e "./gatherer.py --splitter_hostname=localhost" &
-
-sleep 1
-
-vlc http://localhost:9999 &
-
-echo -n "Number of peers" = $number_of_peers
-
-echo -n "Hit enter to continue"
-
-read
-
-COUNTER=0
-while [ $COUNTER -lt $number_of_peers ];
-do
- #./peer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel="$source_channel" --source_hostname="$source_hostname" --source_port=$source_port --splitter_hostname="$splitter_hostname" --splitter_port=$splitter_port --no_player -number_of_blocks=100 &
- ./peer.py --splitter_hostname=localhost --no_player --number_of_blocks=100 &
- let COUNTER=COUNTER+1
-
-done
-
-set +x
\ No newline at end of file
diff --git a/sim-cluster/colors.py b/sim-cluster/colors.py
deleted file mode 100755
index 0d600fc..0000000
--- a/sim-cluster/colors.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Colorized printing
-
-class Color:
-
- none = '\033[0m'
- red = '\033[91m'
- green = '\033[92m'
- yellow = '\033[93m'
- blue = '\033[94m'
- purple = '\033[95m'
- cyan = '\033[96m'
- white = '\033[97m'
diff --git a/sim-cluster/common.py b/sim-cluster/common.py
deleted file mode 100644
index 5a1849c..0000000
--- a/sim-cluster/common.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Constants common for the splitter, the peer and the gatherer, such as the block size.
-
-class Common:
-
- buffer_size = 256
- block_size = 1024
- header_size = 1024*20 #long enough for the video header
-
\ No newline at end of file
diff --git a/sim-cluster/copyright.txt b/sim-cluster/copyright.txt
deleted file mode 100755
index f1c9326..0000000
--- a/sim-cluster/copyright.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Cristobal Medina López, Juan Pablo GarcÃa Ortiz,
-# Juan Alvaro Muñoz Naranjo, Leocadio González Casado and Vicente
-# González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
diff --git a/sim-cluster/create_cluster.sh b/sim-cluster/create_cluster.sh
deleted file mode 100755
index 708180f..0000000
--- a/sim-cluster/create_cluster.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-# Lauch a splitter, a gatherer and a player.
-
-block_size=1024
-buffer_size=32 # blocks
-source_channel=134.ogg
-source_hostname=localhost
-source_port=4551
-splitter_hostname=localhost
-splitter_port=4552
-gatherer_port=9999
-
-usage() {
- echo $0
- echo "Launches a splitter, a gatherer and a player."
- echo "Parameters:"
- echo " [-b (block size, \"$block_size\" by default)]"
- echo " [-u (buffer size, \"$buffer_size\" by default)]"
- echo " [-c (source channel, \"$source_channel\" by default)]"
- echo " [-a (source hostname, $source_hostname by default)]"
- echo " [-p (source port, $source_port by default)]"
- echo " [-n (number of peers, $number_of_peers by default)]"
- echo " [-l (splitter port, $splitter_port by default)]"
- echo " [-s (splitter hostname, $spltter_hostname by default)]"
- echo " [-v (video filename, \"$video\" by default)]"
- echo " [-? (help)]"
-}
-
-echo $0: parsing: $@
-
-while getopts "b:u:c:w:a:p:l:s:v:?" opt; do
- case ${opt} in
- b)
- block_size="${OPTARG}"
- ;;
- u)
- buffer_size="${OPTARG}"
- ;;
- c)
- source_channel="${OPTARG}"
- ;;
- a)
- source_hostname="${OPTARG}"
- ;;
- p)
- source_port="${OPTARG}"
- ;;
- l)
- splitter_port="${OPTARG}"
- ;;
- s)
- splitter_hostname="${OPTARG}"
- ;;
- v)
- video="${OPTARG}"
- ;;
- ?)
- usage
- exit 0
- ;;
- \?)
- echo "Invalid option: -${OPTARG}" >&2
- usage
- exit 1
- ;;
- :)
- echo "Option -${OPTARG} requires an argument." >&2
- usage
- exit 1
- ;;
- esac
-done
-
-#clear previous output files
-rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/*
-
-#xterm -e "./splitter.py --block_size=$block_size --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --listening_port=$splitter_port" &
-
-#start the splitter
-xterm -l -lf ./output/salida_splitter.txt -e "./splitter.py --source_hostname=localhost" &
-
-sleep 1
-
-#start the gatherer
-#xterm -e "./gatherer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel=$source_channel --source_hostname=$source_hostname --source_port=$source_port --splitter_hostname=$splitter_hostname --splitter_port=$splitter_port" &
-xterm -l -lf ./output/salida_gatherer.txt -e "./gatherer.py --splitter_hostname=localhost --source_hostname=localhost" &
-
-sleep 1
-
-#start the player
-vlc http://localhost:9999 &
-
-#start all peers
diff --git a/sim-cluster/drain2.py b/sim-cluster/drain2.py
deleted file mode 100755
index 71c91c7..0000000
--- a/sim-cluster/drain2.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-15 -*-
-#
-# drain.py
-#
-
-# {{{ Imports
-
-import sys
-import socket
-from colors import Color
-import struct
-from time import gmtime, strftime
-import argparse
-import threading
-from threading import Lock
-import blocking_socket
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-buffer_size = 32
-#cluster_port = 0 # OS default behavior will be used for port binding
-clients_port = 9999
-server_IP = '150.214.150.68'
-server_port = 4551
-channel = '134.ogg' # Lo indica el source
-source_IP = '150.214.150.68'
-source_port = 4552
-header_size = 1024*20*10
-block_size = 1024 # <- Ojo, valor recibido desde la fuente
-
-# {{{ Args handing
-
-parser = argparse.ArgumentParser(description='This is the drain node of a P2PSP cluster.')
-parser.add_argument('--buffer_size', help='size of the video buffer in blocks'.format(buffer_size))
-#parser.add_argument('--cluster_port', help='port used to communicate with the cluster. (Default = {})'.format(cluster_port))
-parser.add_argument('--clients_port', help='Port used to communicate with the player. (Default = {})'.format(clients_port))
-parser.add_argument('--channel', help='Name of the channel served by the streaming server. (Default = {})'.format(channel))
-parser.add_argument('--server_IP', help='IP address of the streaming server. (Default = {})'.format(server_IP))
-parser.add_argument('--server_port', help='Listening port of the streaming server. (Default = {})'.format(server_port))
-parser.add_argument('--source_IP', help='IP address of the source. (Default = {})'.format(source_IP))
-parser.add_argument('--source_port', help='Listening port of the source. (Default = {})'.format(source_port))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.clients_port:
- clients_port = int(args.player_port)
-#if args.cluster_port:
-# cluster_port = int(args.cluster_port)
-if args.channel:
- channel = args.channel
-if args.server_IP:
- server_IP = args.server_IP
-if args.server_port:
- server_port = int(args.server_port)
-if args.source_IP:
- source_IP = args.source_IP
-if args.source_port:
- source_port = args.source_port
-
-# }}}
-server = (server_IP, server_port)
-source = (source_IP, source_port)
-# {{{ Connect with the source
-
-source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-source_sock.connect(source)
-if __debug__:
- print strftime("[%Y-%m-%d %H:%M:%S]", gmtime()), \
- source_sock.getsockname(), "connected to the source"
-
-# }}}
-# {{{ Transform the peer-source TCP socket into a UDP socket
-
-cluster_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-cluster_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-print source_sock.getsockname()[PORT]
-cluster_sock.bind(('',source_sock.getsockname()[PORT]))
-print cluster_sock.getsockname()
-source_sock.close()
-
-# }}}
-# {{{ Receive blocks from the source/peers
-
-block_buffer = [None]*buffer_size
-block_numbers = [0]*buffer_size
-block_number = 0 # Last received block
-
-#cluster_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-#cluster_sock.bind(('', cluster_port))
-
-lock = Lock()
-
-class Receive_blocks(threading.Thread):
-
- def __init__(self, cluster_sock):
- threading.Thread.__init__(self)
- self.sock = cluster_sock
-
- def run(self):
- while True:
- lock.acquire()
- message, sender = self.sock.recvfrom(struct.calcsize("H1024s"))
- number, block = struct.unpack("H1024s", message)
- block_number = socket.ntohs(number)
- if __debug__:
- print strftime("[%Y-%m-%d %H:%M:%S]", gmtime()), \
- sender, block_number, Color.green + "->" + Color.none, \
- self.sock.getsockname()
- block_buffer[block_number % buffer_size] = block
- block_numbers[block_number % buffer_size] = block_number
- lock.release()
-
-Receive_blocks(cluster_sock).start()
-
-# }}}
-# {{{ Serve the clients
-
-class Client_handler(threading.Thread):
-
- def __init__(self, client):
- threading.Thread.__init__(self)
- self.client_sock, self.client_addr = client
-
- def run(self):
- global block_buffer
- global block_number
- # {{{ Create a TCP socket to Icecast
-
- #server_sock = blocking_socket(socket.AF_INET, socket.SOCK_STREAM)
- server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- print server
- server_sock.connect(server)
- server_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
- print "Client_hander:", "!"
-
- # }}}
- # {{{ Receive the video header from Icecast and send it to the client
-
- data = server_sock.recv(header_size)
- total_received = len(data)
- self.client_sock.sendall(data)
- while total_received < header_size:
- data = server_sock.recv(header_size - len(data))
- self.client_sock.sendall(data)
- total_received += len(data)
-
- print "Client_hander:", "header"
-
- # }}}
- # {{{ Close the TCP socket with the streaming server
-
- server_sock.close()
-
- # }}}
- # {{{ Now, send buffer's blocks to the client, forever
- block_to_play = block_number - buffer_size/2
- while True:
- lock.acquire()
- print len(block_buffer[block_to_play % buffer_size])
- #print block_buffer[block_to_play % buffer_size]
- self.client_sock.sendall(block_buffer[block_to_play % buffer_size])
- block_to_play = (block_to_play + 1) % 65536
- lock.release()
-
- # }}}
-
-sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-sock.bind(('', clients_port))
-sock.listen(0)
-if __debug__:
- print "Waiting for clients "
-
-while True: # Serve forever.
- client = sock.accept()
- if __debug__:
- print "\bc",
- Client_handler(client).start()
-
-# }}}
diff --git a/sim-cluster/flash_crowd.sh b/sim-cluster/flash_crowd.sh
deleted file mode 100755
index fba8fca..0000000
--- a/sim-cluster/flash_crowd.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-set -x
-
-# Simulates flash-crowd peer churn.
-
-#number_of_blocks=100
-number_of_peers=2
-
-usage() {
- echo $0
- echo "Simulates flash-crowd peer churn."
- echo "Parameters:"
-# echo " [-b (number of blocks, $number_of_blocks by default)]"
- echo " [-n (number of peers, $number_of_peers by default)]"
- echo " [-? (help)]"
-}
-
-echo $0: parsing: $@
-
-while getopts "b:n:?" opt; do
- case ${opt} in
- b)
- number_of_blocks="${OPTARG}"
- ;;
- n)
- number_of_peers="${OPTARG}"
- ;;
- ?)
- usage
- exit 0
- ;;
- \?)
- echo "Invalid option: -${OPTARG}" >&2
- usage
- exit 1
- ;;
- :)
- echo "Option -${OPTARG} requires an argument." >&2
- usage
- exit 1
- ;;
- esac
-done
-
-COUNTER=0
-while [ $COUNTER -lt $number_of_peers ];
-do
- #./peer.py --buffer_size=$buffer_size --listening_port=$[splitter_port+1] --channel="$source_channel" --source_hostname="$source_hostname" --source_port=$source_port --splitter_hostname="$splitter_hostname" --splitter_port=$splitter_port --no_player -number_of_blocks=100 &
- #./peer.py --splitter_hostname=localhost --no_player --number_of_blocks=$number_of_blocks &
- #./peer.py --splitter_hostname=localhost --no_player --logging_level=DEBUG > ./output/peer-${COUNTER} &
- ./peer.py --splitter_hostname=localhost --no_player --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=0 &
- let COUNTER=COUNTER+1
-
-done
-
-set +x
diff --git a/sim-cluster/gatherer.py b/sim-cluster/gatherer.py
deleted file mode 100755
index 5b4a689..0000000
--- a/sim-cluster/gatherer.py
+++ /dev/null
@@ -1,478 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimzed mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the gatherer node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
-
-# Try running me as:
-#
-# ./splitter.py --source_hostname="localhost"
-# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9999 &
-
-# {{{ Imports
-
-import logging
-import os
-from colors import Color
-from common import Common
-import sys
-import socket
-import struct
-import argparse
-import time
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-# Number of blocks of the buffer
-#buffer_size = 32
-buffer_size = Common.buffer_size
-
-#cluster_port = 0 # OS default behavior will be used for port binding
-
-# Port to communicate with the player
-listening_port = 9999
-
-# Splitter endpoint
-#splitter_hostname = '150.214.150.68'
-splitter_hostname = 'localhost'
-splitter_port = 4552
-
-# Estas cuatro variables las debería indicar el splitter
-channel = '134.ogg'
-#block_size = 1024
-block_size = Common.block_size
-
-# Source's end-point
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-
-# Number of bytes of the stream's header
-#header_size = 1024*20*10
-#header_size = 1024*20
-header_size = Common.header_size
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-logging_level = logging.INFO
-
-# {{{ Args handing
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is the gatherer node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
- help='size of the video buffer in blocks'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = {})'.format(channel))
-
-parser.add_argument('--listening_port',
- help='Port used to communicate with the player. (Default = {})'.format(listening_port))
-
-parser.add_argument('--logging_levelname',
- help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming source. (Default = {})'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming source. (Default = {})'.format(source_port))
-
-parser.add_argument('--splitter_hostname',
- help='Hostname of the splitter. (Default = {})'.format(splitter_hostname))
-
-parser.add_argument('--splitter_port',
- help='Listening port of the splitter. (Default = {})'.format(splitter_port))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.listening_port:
- listening_port = int(args.listening_port)
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.source_hostname:
- source_hostname = args.source_hostname
-if args.source_port:
- source_port = int(args.source_port)
-if args.splitter_hostname:
- splitter_hostname = args.splitter_hostname
-if args.splitter_port:
- splitter_port = args.splitter_port
-
-# }}}
-
-print 'This is a P2PSP gatherer node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-# {{{ Debugging initialization
-
-# create logger
-logger = logging.getLogger('gatherer (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-ch = logging.StreamHandler()
-ch.setLevel(logging_level)
-# create formatter
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-# add formatter to ch
-ch.setFormatter(formatter)
-# add ch to logger
-logger.addHandler(ch)
-
-'''
-#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D
-fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/gatherer')
-fh_timing.setLevel(logging.CRITICAL)
-logger.addHandler(fh_timing)
-'''
-# }}}
-
-source = (source_hostname, source_port)
-splitter = (splitter_hostname, splitter_port)
-
-block_format_string = "H"+str(block_size)+"s"
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-def get_player_socket():
- # {{{
-
- #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- sock.listen(0)
-
- logger.info(Color.cyan + '{}'.format(sock.getsockname()) +
- ' waiting for the player on port ' +
- str(listening_port) + Color.none)
- # }}}
-
- #sock, player = sock.baccept()
- sock, player = sock.accept()
- sock.setblocking(0)
- return sock
-
- # }}}
-player_sock = get_player_socket() # The gatherer is blocked until the
- # player establish a connection.
-# {{{ debug
-
-if __debug__:
- logger.debug(Color.cyan + '{}'.format(player_sock.getsockname()) +
- ' The player ' +
- '{}'.format(player_sock.getpeername()) +
- ' has establised a connection' + Color.none)
-
-def communicate_the_header():
- # {{{
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
-
- # {{{ Receive the video header from the source and send it to the player
-
- # Nota: este proceso puede fallar si durante la recepción de los
- # bloques el stream se acaba. Habría que realizar de nuevo la
- # petición HTTP (como hace el servidor).
-
- logger.info(Color.cyan +
- str(source_sock.getsockname()) +
- ' retrieving the header ...' +
- Color.none)
-
- data = source_sock.recv(header_size)
- total_received = len(data)
- player_sock.sendall(data)
- while total_received < header_size:
- if __debug__:
- logger.debug(str(total_received))
- data = source_sock.recv(header_size - len(data))
- player_sock.sendall(data)
- total_received += len(data)
-
- # }}}
-
- logger.info(Color.cyan +
- str(source_sock.getsockname()) +
- ' done' + Color.none)
-
- source_sock.close()
- # }}}
-
-
-communicate_the_header() # Retrieve the header of the stream from the
- # source and send it to the player.
-
-# {{{ debug
-if __debug__:
- logger.debug(" Trying to connect to the splitter at" + str(splitter))
-# }}}
-
-def connect_to_the_splitter():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect(splitter)
- return sock
-
- # }}}
-splitter_sock = connect_to_the_splitter() # Connect to the splitter in
- # order to tell it who the
- # gatherer is.
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1"
-
-logger.info(Color.cyan +
- '{}'.format(splitter_sock.getsockname()) +
- ' connected to the splitter' +
- Color.none)
-
-# {{{ The gatherer is always the first node to connect to the splitter
-# and therefore, in this momment the cluster is formed only by the
-# splitter and the gatherer. So, it is time to create a new socket to
-# receive blocks (now from the splitter and after, when at least one
-# peer be in the cluster, from the peer(s) of the cluster), but that
-# uses the UDP. This is called "cluster_sock". We also close the TCP
-# socket that the gatherer has used to connect to the splitter. }}}
-
-def create_cluster_sock():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('',splitter_sock.getsockname()[PORT]))
- return sock
-
- # }}}
-cluster_sock = create_cluster_sock()
-cluster_sock.settimeout(1)
-splitter_sock.close()
-
-# {{{ We define the buffer structure. Three components are needed: (1)
-# the buffer that stores the received blocks, (2) the
-# buffer that stores the number of the blocks and (3) the
-# buffer that stores if a block has been received or not.
-# }}}
-blocks = [None]*buffer_size
-received = [False]*buffer_size
-#numbers = [0]*buffer_size
-
-def receive():
- # {{{
-
- global splitter
-
- try:
- #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s"))
- message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string))
- #number, block = struct.unpack("H1024s", message)
- number, block = struct.unpack(block_format_string, message)
- block_number = socket.ntohs(number)
- blocks[block_number % buffer_size] = block
- received[block_number % buffer_size] = True
-
- # {{{ debug
- if __debug__:
- if sender == splitter:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender) + " (splitter)")
- else:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender) + " (peer)")
- # }}}
-
- return block_number
- except socket.timeout:
- logger.warning(Color.blue + "cluster timeout!" + Color.none)
- return -1
-
- # }}}
-
-# {{{ Now, here the gatherer's life begins (receive incomming blocks and
-# send them to the player). But in order to handle the jitter, we must
-# to prefetch some blocks before to start to send them. (Week 4/5)
-# }}}
-
-# Lets buffer data in order to handle the jitter. By default, we
-# prefetch up to the half of the buffer. This should handle a jitter
-# smaller or equal than the half of the buffer (measured in time).
-
-# {{{ debug
-if __debug__:
- logger.debug(str(cluster_sock.getsockname()) + ' buffering ...')
-# }}}
-
-logger.info(Color.cyan +
- str(cluster_sock.getsockname()) +
- ' receiving data ...' +
- Color.none)
-
-'''
-x = block_to_play = receive_a_block()
-while not received[(x+buffer_size/2) % buffer_size]:
- x = receive_a_block()
-'''
-'''
-block_to_play = receive_a_block() % buffer_size
-while not received[(receive_a_block() + buffer_size/2) % buffer_size]:
- pass
-'''
-
-'''
-#Fill half the buffer
-'''
-#WARNING!!!
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out)
-#time.time() measures wall time, this means execution time plus waiting time
-
-#start_buffering_time = time.clock()
-start_buffering_time = time.time()
-
-block_number = receive()
-while block_number<=0:
- block_number = receive()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- while receive()<=0:
- pass
-
-#end_buffering_time = time.clock()
-end_buffering_time = time.time()
-buffering_time = end_buffering_time - start_buffering_time
-
-
-logger.info(str(cluster_sock.getsockname()) + ' buffering done')
-
-#timing info
-#logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS
-#logger.critical('BUF_LEN '+str(buffer_size)+' bytes')
-
-'''
-#End buffering
-'''
-
-
-def send_a_block_to_the_player():
- # {{{
-
- global block_to_play
- '''
- while not received[(block_to_play % buffer_size)]:
- message = struct.pack("!H", block_to_play)
- cluster_sock.sendto(message, splitter)
- '''
- if not received[block_to_play]:
- message = struct.pack("!H", block_to_play)
- cluster_sock.sendto(message, splitter)
-
- logger.info(Color.cyan +
- str(cluster_sock.getsockname()) +
- ' complaining about lost block ' +
- str(block_to_play) +
- Color.none)
-
- # La mayoría de los players se sincronizan antes si en lugar
- # de no enviar nada se envía un bloque vacío. Esto habría que
- # probarlo.
-
- try:
- player_sock.sendall(blocks[block_to_play])
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' ' +
- str(block_to_play) +
- ' -> (player) ' +
- '{}'.format(player_sock.getpeername()))
-
- # }}}
-
- except socket.error:
- logger.error(Color.red + 'player disconnected!' + Color.none)
- #quit()
- except Exception as detail:
- logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none)
-
- received[block_to_play] = False
-
- # }}}
-
-while True:
- block_number = receive()
- send_a_block_to_the_player()
- block_to_play = (block_to_play + 1) % buffer_size
-
-'''
-while True:
- send_a_block_to_the_player()
- block_to_play = (block_to_play + 1) % buffer_size
- receive()
-'''
diff --git a/sim-cluster/get_results.py b/sim-cluster/get_results.py
deleted file mode 100644
index 59cc157..0000000
--- a/sim-cluster/get_results.py
+++ /dev/null
@@ -1,64 +0,0 @@
-'''
-Created on 22/04/2013
-
-@author: jalvaro
-Read timing info from files. For measuring the performance of simulations.
-Execution: python get_results.py ruta tam_buf
-'''
-import sys
-import glob
-
-sum = float(0)
-count = 0
-fail_count = 0
-avg = float(0)
-sum_errors_buf = int(0)
-count_errors_buf = int(0)
-avg = float(0)
-#files_path = glob.glob('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/*')
-tam_buf = 0
-
-try:
- if(sys.argv[1].endswith('/')):
- files_path = glob.glob(sys.argv[1]+'*')
- else:
- files_path = glob.glob(sys.argv[1]+'/*')
- tam_buf = int(sys.argv[2])
-except:
- print('Incorrect arguments. Usage: python get_timing.py ruta tam_buf')
- sys.exit()
-
-#print(files_path)
-for file_path in files_path:
- fr = open(file_path,'r')
- try:
- #read avg time
- line = fr.readline()
- time = float(line.split()[1])
- #print('Time '+str(time))
- sum += time
- count += 1
- fr.readline()
- line = fr.readline()
- num_errors_buf = float(line.split()[1])
- #print('Num errors buf '+str(num_errors_buf))
- sum_errors_buf += num_errors_buf
- count_errors_buf += 1
- except:
- fail_count += 1
-
-avg = sum / count
-avg_errors_buf = sum_errors_buf / count_errors_buf
-avg_perc_errors_buf = avg_errors_buf *100 / (tam_buf/2)
-print('')
-print('Avg time: ' + str(avg) + ' secs')
-print('Avg num errors buf ' + str(avg_errors_buf))
-print('Avg % errors buf '+str(avg_perc_errors_buf))
-print('')
-print(str(count) + ' valid values read')
-print(str(fail_count) + ' invalid values read')
-
-
-
-
-
diff --git a/sim-cluster/getpid.py b/sim-cluster/getpid.py
deleted file mode 100755
index 39892bd..0000000
--- a/sim-cluster/getpid.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-import os
-import argparse
-import daemon
-
-parser = argparse.ArgumentParser(description='Testing getpid.')
-parser.add_argument('--showpeers', help='Show the list of peers.')
-args = parser.parse_known_args()[0]
-if args.showpeers:
- print os.getpid()
diff --git a/sim-cluster/peer-h.py b/sim-cluster/peer-h.py
deleted file mode 100755
index 2ec5613..0000000
--- a/sim-cluster/peer-h.py
+++ /dev/null
@@ -1,745 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimzed mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
-
-# Try running me as:
-#
-# ./splitter.py --source_hostname="localhost"
-# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9999 &
-# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9998 &
-
-# {{{ Imports
-
-'''
-# VERSIÓN 2 DEL PEER, el peer manda mensajes de hola para presentarse ante los demás peers del cluster.
-# Usar con splitter.py y gatherer.py
-'''
-
-import os
-import logging
-from colors import Color
-from common import Common
-import sys
-import socket
-import struct
-import time
-import argparse
-import churn
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-# Number of blocks of the buffer
-#buffer_size = 32
-#buffer_size = 256
-buffer_size = Common.buffer_size
-
-#cluster_port = 0 # OS default behavior will be used for port binding
-
-# Port to communicate with the player
-listening_port = 9998
-
-# Splitter endpoint
-#splitter_hostname = '150.214.150.68'
-splitter_hostname = 'localhost'
-splitter_port = 4552
-
-# Number of bytes of the stream's header
-#header_size = 1024*20*10
-#header_size = 1024*20
-header_size = Common.header_size
-
-# Estas cuatro variables las debería indicar el splitter
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-channel = '134.ogg'
-#block_size = 1024
-block_size = Common.block_size
-
-# Controls if the stream is sent to a player
-_PLAYER_ = True
-
-# Maximun number of blocks to receive from the splitter
-number_of_blocks = 999999999
-
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-
-logging_level = logging.INFO
-
-logging_filename = ''
-
-console_logging = True
-file_logging = True
-
-weibull_scale = 0 #for churn. 0 means no churn.
-
-# {{{ Args handing
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is a peer node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
- help='size of the video buffer in blocks. (Default = {})'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = {})'.format(channel))
-
-parser.add_argument('--listening_port',
- help='Port used to communicate with the player. (Default = {})'.format(listening_port))
-
-parser.add_argument('--logging_levelname',
- help='Name of the channel served by the streaming source. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--logging_filename',
- help='Name of the logging output file. (Default = "{})'.format(logging_filename))
-
-parser.add_argument('--number_of_blocks',
- help='Maximun number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming source. (Default = {})'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming source. (Default = {})'.format(source_port))
-
-parser.add_argument('--splitter_hostname',
- help='Hostname of the splitter. (Default = {})'.format(splitter_hostname))
-
-parser.add_argument('--splitter_port',
- help='Listening port of the splitter. (Default = {})'.format(splitter_port))
-
-parser.add_argument('--no_player', help='Do no send the stream to a player.', action="store_true")
-
-parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. (Default = {})'.format(weibull_scale))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.listening_port:
- listening_port = int(args.listening_port)
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.logging_filename:
- logging_filename = args.logging_filename
-if args.number_of_blocks:
- number_of_blocks = int(args.number_of_blocks)
-if args.source_hostname:
- source_hostname = args.source_hostname
-if args.source_port:
- source_port = int(args.source_port)
-if args.splitter_hostname:
- splitter_hostname = args.splitter_hostname
-if args.splitter_port:
- splitter_port = args.splitter_port
-if args.no_player:
- _PLAYER_ = False
-if args.churn:
- weibull_scale = int(args.churn)
-
-# }}}
-
-print 'This is a P2PSP peer node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-
-# {{{ Logging initialization
-
-# create logger
-logger = logging.getLogger('peer (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-if console_logging == True:
- ch = logging.StreamHandler()
- ch.setLevel(logging_level)
- # create formatter
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- # add formatter to ch
- ch.setFormatter(formatter)
- # add ch to logger
- logger.addHandler(ch)
-
-#jalvaro
-# create file handler and set the level
-if args.logging_filename and file_logging == False:
- fh = logging.FileHandler('./output/peer-'+str(os.getpid()))
- fh.setLevel(logging_level)
- #add fh to logger
- logger.addHandler(fh)
-#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D
-fh_timing = logging.FileHandler('./timing/peer-'+str(os.getpid()))
-fh_timing.setLevel(logging.CRITICAL)
-logger.addHandler(fh_timing)
-
-# }}}
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-source = (source_hostname, source_port)
-splitter = (splitter_hostname, splitter_port)
-
-block_format_string = "H"+str(block_size)+"s"
-
-def get_player_socket():
- # {{{
-
- #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- sock.listen(0)
-
- if __debug__:
- logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none)
- print("Waiting for the player")
- # }}}
-
- #sock, player = sock.baccept()
- sock, player = sock.accept()
- sock.setblocking(0)
- return sock
-
- # }}}
-
-if _PLAYER_:
- player_sock = get_player_socket() # The peer is blocked until the
- # player establish a connection.
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' The player ' +
- '{}'.format(player_sock.getpeername()) +
- ' has establised a connection')
- print("Player connected")
- # }}}
-
-def communicate_the_header():
- # {{{
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
-
- # {{{ Receive the video header from the source and send it to the player
-
- # Nota: este proceso puede fallar si durante la recepción de los
- # bloques el stream se acaba. Habría que realizar de nuevo la
- # petición HTTP (como hace el servidor).
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none)
-
- data = source_sock.recv(header_size)
- total_received = len(data)
- player_sock.sendall(data)
- while total_received < header_size:
- if __debug__:
- logger.debug(str(total_received))
- data = source_sock.recv(header_size - len(data))
- player_sock.sendall(data)
- total_received += len(data)
-
- # }}}
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none)
-
- source_sock.close()
- # }}}
-
-if _PLAYER_:
- communicate_the_header() # Retrieve the header of the stream from the
- # source and send it to the player.
-print("Got the header")
-
-# {{{ debug
-if __debug__:
- logger.debug(" Trying to connect to the splitter at" + str(splitter))
-# }}}
-
-def connect_to_the_splitter():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect(splitter)
- return sock
-
- # }}}
-
-# COMIENZO DE BUFFERING TIME
-start_buffering_time = time.time()
-
-splitter_sock = connect_to_the_splitter() # Connect to the splitter in
- # order to tell it who the
- # gatherer is.
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1"
-
-if __debug__:
- logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none)
-
-def create_cluster_sock():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('',splitter_sock.getsockname()[PORT]))
- return sock
-
- # }}}
-cluster_sock = create_cluster_sock()
-cluster_sock.settimeout(1)
-
-# {{{ This is the list of peers of the cluster. Each peer uses this
-# structure to resend the blocks received from the splitter to these
-# nodes. }}}
-peer_list = []
-
-# {{{ This stores the insolidarity of the peers of the cluster. When
-# the insolidarity exceeds a threshold, the peer is deleted from the
-# list of peers. }}}
-peer_insolidarity = {}
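-# (A peer's counter grows by one each time this node forwards a block to
-# it, is halved whenever a received block number is a multiple of 256, and
-# the peer is dropped once the counter exceeds 64.)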
-
-#Commented due to gatherer removal
-#gatherer = None
-
-def retrieve_the_list_of_peers():
- # {{{
-
- #Commented due to gatherer removal
- #global gatherer
- number_of_peers = socket.ntohs(
- struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0])
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Cluster size = ' +
- str(number_of_peers))
-
- # }}}
-
- #Commented due to gatherer removal
- #message = splitter_sock.recv(struct.calcsize("4sH"))
- #IP_addr, port = struct.unpack("4sH", message)
- #IP_addr = socket.inet_ntoa(IP_addr)
- #port = socket.ntohs(port)
- #gatherer = (IP_addr, port)
- while number_of_peers > 0:
- message = splitter_sock.recv(struct.calcsize("4sH"))
- IP_addr, port = struct.unpack("4sH", message)
- IP_addr = socket.inet_ntoa(IP_addr)
- port = socket.ntohs(port)
- peer = (IP_addr, port)
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Peer ' +
- str(peer))
-
- # }}}
-
- peer_list.append(peer)
- peer_insolidarity[peer] = 0
-
- #say hello to other peer
-        cluster_sock.sendto('', peer) # Send an empty block (this
- # should be fast)
-
- number_of_peers -= 1
-
- # }}}
-
-retrieve_the_list_of_peers()
-
-splitter_sock.close()
-
-# {{{ At this moment, most of the other peers of the cluster are
-# sending blocks to the new peer.
-# }}}
-
-# {{{ We define the buffer structure. Two components are needed: (1)
-# the blocks buffer that stores the received blocks (2) the received
-# buffer that stores if a block has been received or not.
-# }}}
-blocks = [None]*buffer_size
-received = [False]*buffer_size
-
-# True if the peer has received "number_of_blocks" blocks.
-blocks_exhausted = False
-
-# This variable holds the last block received from the splitter. It is
-# used below to send the "last" block in the congestion avoiding mode.
-last = ''
-
-# Number of times that the last block has been sent to the cluster (we
-# send the block each time we receive a block).
-counter = 0
-
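-# receive_and_feed() receives one datagram and relays the previously
-# stored block: a block coming from the splitter flushes "last" to the
-# remaining peers in the list (burst mode), while a block coming from
-# another peer relays "last" to a single peer (congestion avoiding mode).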
-def receive_and_feed():
- global last
- global counter
- global blocks_exhausted
- global number_of_blocks
-
- try:
- # {{{ Receive and send
- #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s"))
- message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string))
- #if len(message) == struct.calcsize("H1024s"):
- if len(message) == struct.calcsize(block_format_string):
- # {{{ Received a video block
- #number, block = struct.unpack("H1024s", message)
- number, block = struct.unpack(block_format_string, message)
- block_number = socket.ntohs(number)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender))
-
- # }}}
- blocks[block_number % buffer_size] = block
- received[block_number % buffer_size] = True
-
- if sender == splitter:
- # {{{ Send the previously received block in burst mode.
-
- '''
- #Commented due to gatherer removal
- cluster_sock.sendto(message, gatherer)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (gatherer) ' +
- '{}'.format(gatherer))
-
- # }}}
- '''
-
- while( (counter < len(peer_list)) & (counter > 0)):
- peer = peer_list[counter]
- cluster_sock.sendto(last, peer)
- # if not is_player_working:
- # cluster_sock.sendto('', peer)
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none)
-
- counter += 1
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter = 0
- last = message
- '''
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-'''
- # }}}
- else:
- # {{{ Check if the peer is new
-
- if sender not in peer_list:
- # The peer is new
- peer_list.append(sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none)
- peer_insolidarity[sender] = 0
-
- # }}}
-
- if counter < len(peer_list):
- # {{{ Send the last block in congestion avoiding mode
-
- peer = peer_list[counter]
- cluster_sock.sendto(last, peer)
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none)
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter += 1
-
- # }}}
-
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-
-
- return block_number
- # }}}
- elif message=='':
- # {{{ Received a control block: "hi"
-
- if sender not in peer_list:
- peer_list.append(sender)
- peer_insolidarity[sender] = 0
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none)
- elif message=='bye':
- peer_list.remove(sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none)
- return -1
- # }}}
- # }}}
- except socket.timeout:
- # {{{
- if __debug__:
- logger.warning(Color.red + "cluster timeout!" + Color.none)
- return -2
- # }}}
-
-# {{{ debug
-if __debug__:
- logger.debug(str(cluster_sock.getsockname()) + ' buffering ...')
-# }}}
-
-if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none)
-
-'''
-#Fill half the buffer
-'''
-#WARNING!!!
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out)
-#time.time() measures wall time, this means execution time plus waiting time
-
-last_block_number = 0
-error_counter = 0
-
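-# Wait for the first valid block, then keep receiving until buffer_size/2
-# further blocks have arrived; the positions in that half of the buffer
-# that are still missing are counted as buffering errors.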
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- while receive_and_feed()<=0:
- pass
-#go through the buffer
-num_errors_buf = 0
-for x in range(block_to_play, block_to_play+(buffer_size/2)):
- if received[x%buffer_size] == False:
- num_errors_buf += 1
-
-
-'''
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- last_block_number = receive_and_feed()
- if last_block_number <= 0:
- error_counter += 1
-'''
-
-# END OF BUFFERING TIME
-end_buffering_time = time.time()
-buffering_time = end_buffering_time - start_buffering_time
-
-logger.info(str(cluster_sock.getsockname()) + ' buffering done')
-logger.info('NUM_PEERS '+str(len(peer_list)))
-
-logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS
-logger.critical('BUF_LEN '+str(buffer_size)+' blocks')
-logger.critical('NUM_ERRORS_BUF '+str(error_counter))
-percentage_errors_buf = float(error_counter*100)/float(buffer_size/2)
-logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf))
-#logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf))
-logger.critical('NUM_PEERS '+str(len(peer_list)))
-# }}}
-
-
-'''
-#End buffering
-'''
-
-player_connected = True
-
-def send_a_block_to_the_player():
- # {{{
-
- global block_to_play
- global player_sock
- global player_connected
-
- if not received[block_to_play]:
- message = struct.pack("!H", block_to_play)
- cluster_sock.sendto(message, splitter)
-
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' complaining about lost block ' + str(block_to_play) + Color.none)
-
-        # Most players synchronize sooner if an empty block is sent
-        # instead of sending nothing. This should be tested.
-
- try:
- player_sock.sendall(blocks[block_to_play])
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' ' +
- str(block_to_play) +
- ' -> (player) ' +
- '{}'.format(player_sock.getpeername()))
-
- # }}}
-
- except socket.error:
- if __debug__:
-            logger.error(Color.red + 'player disconnected!' + Color.none)
- player_connected = False
- return
- except Exception as detail:
- if __debug__:
- logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none)
- return
-
- received[block_to_play] = False
-
- # }}}
-
-#get a death time
-#death_time = churn.new_death_time(20)
-death_time = churn.new_death_time(weibull_scale)
-
-'''
-#Once the buffer is half-filled, then start operating normally
-'''
-#while player_connected and not blocks_exhausted:
-while player_connected and not churn.time_to_die(death_time):
-
- if __debug__ and death_time != churn.NEVER:
- current_time = time.localtime()
- logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none)
- logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none)
-
- block_number = receive_and_feed()
- if block_number>=0:
- if (block_number % 256) == 0:
- for i in peer_insolidarity:
- peer_insolidarity[i] /= 2
- if _PLAYER_:
- send_a_block_to_the_player()
- block_to_play = (block_to_play + 1) % buffer_size
- #elif block_number == -2: #this stops the peer after only one cluster timeout
- # break
- if __debug__:
- logger.debug('NUM PEERS '+str(len(peer_list)))
-
-if __debug__:
- logger.info(Color.cyan + 'Goodbye!' + Color.none)
-goodbye = 'bye'
-cluster_sock.sendto(goodbye, splitter)
-for x in xrange(3):
- receive_and_feed()
-for peer in peer_list:
- cluster_sock.sendto(goodbye, peer)
-
diff --git a/sim-cluster/peer-x.py b/sim-cluster/peer-x.py
deleted file mode 100755
index 7aaf998..0000000
--- a/sim-cluster/peer-x.py
+++ /dev/null
@@ -1,791 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimized mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
-
-# Try running me as:
-#
-# ./splitter.py --source_hostname="localhost"
-# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9999 &
-# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9998 &
-
-'''
-# EXCLUSIVE-BLOCK VERSION OF THE PEER: peers do not send hello messages.
-# On arrival, the peer receives an exclusive stream block and forwards it to everyone as a "hello".
-# The goal is to speed up the buffering process and to saturate the network less.
-# Use with splitter-x.py and gatherer.py
-'''
-
-# {{{ Imports
-
-import os
-import logging
-from colors import Color
-from common import Common
-import sys
-import socket
-import struct
-import time
-import argparse
-import churn
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-# Number of blocks of the buffer
-#buffer_size = 32
-#buffer_size = 256
-buffer_size = Common.buffer_size
-
-#cluster_port = 0 # OS default behavior will be used for port binding
-
-# Port to communicate with the player
-listening_port = 9998
-
-# Splitter endpoint
-#splitter_hostname = '150.214.150.68'
-splitter_hostname = 'localhost'
-splitter_port = 4552
-
-# These four variables should be provided by the splitter
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-channel = '134.ogg'
-#block_size = 1024
-block_size = Common.block_size
-
-# Number of bytes of the stream's header
-#header_size = 1024*20*10
-#header_size = 1024*20
-header_size = Common.header_size
-
-# Controls if the stream is sent to a player
-_PLAYER_ = True
-
-# Maximum number of blocks to receive from the splitter
-number_of_blocks = 999999999
-
-# The buffer of stream blocks
-blocks = [None]*buffer_size
-received = [False]*buffer_size
-
-# This variable holds the last block received from the splitter. It is
-# used below to send the "last" block in the congestion avoiding mode.
-last = ''
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-
-logging_level = logging.INFO
-
-logging_filename = ''
-
-console_logging = False
-file_logging = False
-
-weibull_scale = 0 #for churn. 0 means no churn.
-
-# {{{ Args handling
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is a peer node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
- help='size of the video buffer in blocks. (Default = {})'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = {})'.format(channel))
-
-parser.add_argument('--listening_port',
- help='Port used to communicate with the player. (Default = {})'.format(listening_port))
-
-parser.add_argument('--logging_levelname',
-                    help='Name of the logging level. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--logging_filename',
-                    help='Name of the logging output file. (Default = "{}")'.format(logging_filename))
-
-parser.add_argument('--number_of_blocks',
-                    help='Maximum number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming source. (Default = {})'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming source. (Default = {})'.format(source_port))
-
-parser.add_argument('--splitter_hostname',
- help='Hostname of the splitter. (Default = {})'.format(splitter_hostname))
-
-parser.add_argument('--splitter_port',
- help='Listening port of the splitter. (Default = {})'.format(splitter_port))
-
-parser.add_argument('--no_player', help='Do not send the stream to a player.', action="store_true")
-
-parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. (Default = {})'.format(weibull_scale))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.listening_port:
- listening_port = int(args.listening_port)
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.logging_filename:
- logging_filename = args.logging_filename
-if args.number_of_blocks:
- number_of_blocks = int(args.number_of_blocks)
-if args.source_hostname:
- source_hostname = args.source_hostname
-if args.source_port:
- source_port = int(args.source_port)
-if args.splitter_hostname:
- splitter_hostname = args.splitter_hostname
-if args.splitter_port:
-    splitter_port = int(args.splitter_port)
-if args.no_player:
- _PLAYER_ = False
-if args.churn:
- weibull_scale = int(args.churn)
-
-# }}}
-
-print 'This is a P2PSP peer node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-
-# {{{ Logging initialization
-
-# create logger
-logger = logging.getLogger('peer (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-if console_logging == True:
- ch = logging.StreamHandler()
- ch.setLevel(logging_level)
- # create formatter
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- # add formatter to ch
- ch.setFormatter(formatter)
- # add ch to logger
- logger.addHandler(ch)
-
-#jalvaro
-# create file handler and set the level
-if args.logging_filename and file_logging == True:
- fh = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/peer-'+str(os.getpid()))
- fh.setLevel(logging_level)
- #add fh to logger
- logger.addHandler(fh)
-#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D
-fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/peer-'+str(os.getpid()))
-fh_timing.setLevel(logging.CRITICAL)
-logger.addHandler(fh_timing)
-
-# }}}
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-source = (source_hostname, source_port)
-splitter = (splitter_hostname, splitter_port)
-
-block_format_string = "H"+str(block_size)+"s"
-
-def get_player_socket():
- # {{{
-
- #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- sock.listen(0)
-
- if __debug__:
- logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none)
- # }}}
-
- #sock, player = sock.baccept()
- sock, player = sock.accept()
- sock.setblocking(0)
- return sock
-
- # }}}
-
-if _PLAYER_:
- player_sock = get_player_socket() # The peer is blocked until the
-                                      # player establishes a connection.
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' The player ' +
- '{}'.format(player_sock.getpeername()) +
-                     ' has established a connection')
-
- # }}}
-
-def communicate_the_header():
- # {{{
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
-
- # {{{ Receive the video header from the source and send it to the player
-
-    # Note: this process can fail if the stream ends while the blocks
-    # are being received. In that case the HTTP request would have to be
-    # issued again (as the server does).
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none)
-
- data = source_sock.recv(header_size)
- total_received = len(data)
- player_sock.sendall(data)
- while total_received < header_size:
- if __debug__:
- logger.debug(str(total_received))
- data = source_sock.recv(header_size - len(data))
- player_sock.sendall(data)
- total_received += len(data)
-
- # }}}
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none)
-
- source_sock.close()
- # }}}
-
-if _PLAYER_:
- communicate_the_header() # Retrieve the header of the stream from the
- # source and send it to the player.
-
-# {{{ debug
-if __debug__:
- logger.debug(" Trying to connect to the splitter at" + str(splitter))
-# }}}
-
-def connect_to_the_splitter():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect(splitter)
- return sock
-
- # }}}
-
-# START OF BUFFERING TIME
-start_buffering_time = time.time()
-
-splitter_sock = connect_to_the_splitter() # Connect to the splitter in
- # order to tell it who the
- # gatherer is.
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1"
-
-if __debug__:
- logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none)
-
-def create_cluster_sock():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('',splitter_sock.getsockname()[PORT]))
- return sock
-
- # }}}
-cluster_sock = create_cluster_sock()
-cluster_sock.settimeout(1)
-
-# {{{ This is the list of peers of the cluster. Each peer uses this
-# structure to resend the blocks received from the splitter to these
-# nodes. }}}
-peer_list = []
-
-# {{{ This stores the insolidarity of the peers of the cluster. When
-# the insolidarity exceeds a threshold, the peer is deleted from the
-# list of peers. }}}
-peer_insolidarity = {}
-
-gatherer = None
-
-def retrieve_first_block():
- global block_number
- global buffer_size
- global blocks
-
- #message = splitter_sock.recv(struct.calcsize("H1024s"))
- message = splitter_sock.recv(struct.calcsize(block_format_string))
- print("First block received from splitter. "+str(len(message))+" bytes")
-
- #number, block = struct.unpack("H1024s", message)
- number, block = struct.unpack(block_format_string, message)
- block_number = socket.ntohs(number)
- if __debug__:
- logger.debug("First block number: "+str(block_number))
- logger.debug("First block in buffer position: "+str(block_number%buffer_size))
- # {{{ debug
- '''
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender))
- '''
- # }}}
- blocks[block_number % buffer_size] = block
- received[block_number % buffer_size] = True
- return message
-
-#first_payload contains (block_number,block)
-first_payload = retrieve_first_block()
-last = first_payload
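-# This exclusive first block doubles as the "hello" message: it is
-# forwarded to the gatherer and to every peer returned by the splitter
-# in retrieve_the_list_of_peers() below.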
-
-def retrieve_the_list_of_peers():
- # {{{
- global gatherer
- global first_payload
-
- number_of_peers = socket.ntohs(
- struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0])
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Cluster size = ' +
- str(number_of_peers))
-
- # }}}
- message = splitter_sock.recv(struct.calcsize("4sH"))
- IP_addr, port = struct.unpack("4sH", message)
- IP_addr = socket.inet_ntoa(IP_addr)
- port = socket.ntohs(port)
- gatherer = (IP_addr, port)
- #send the first block to the gatherer, very important!
- cluster_sock.sendto(first_payload, gatherer)
- while number_of_peers > 0:
- message = splitter_sock.recv(struct.calcsize("4sH"))
- IP_addr, port = struct.unpack("4sH", message)
- IP_addr = socket.inet_ntoa(IP_addr)
- port = socket.ntohs(port)
- peer = (IP_addr, port)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Peer ' +
- str(peer))
- # }}}
- peer_list.append(peer)
- peer_insolidarity[peer] = 0
-# cluster_sock.sendto('', peer) # Send a empty block (this
- # should be fast)
- #send the block
- cluster_sock.sendto(first_payload, peer)
- number_of_peers -= 1
- # }}}
-
-retrieve_the_list_of_peers()
-
-splitter_sock.close()
-
-# {{{ At this moment, most of the other peers of the cluster are
-# sending blocks to the new peer.
-# }}}
-
-# {{{ We define the buffer structure. Two components are needed: (1)
-# the blocks buffer that stores the received blocks (2) the received
-# buffer that stores if a block has been received or not.
-# }}}
-#blocks = [None]*buffer_size
-#received = [False]*buffer_size
-
-# True if the peer has received "number_of_blocks" blocks.
-blocks_exhausted = False
-
-
-
-# Number of times that the last block has been sent to the cluster (we
-# send the block each time we receive a block).
-counter = 0
-
-def receive_and_feed():
- global last
- global counter
- global blocks_exhausted
- global number_of_blocks
-
- try:
- # {{{ Receive and send
- #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s"))
- message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string))
- #if len(message) == struct.calcsize("H1024s"):
- if len(message) == struct.calcsize(block_format_string):
- # {{{ Received a video block
- #number, block = struct.unpack("H1024s", message)
- number, block = struct.unpack(block_format_string, message)
- block_number = socket.ntohs(number)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender))
-
- # }}}
- blocks[block_number % buffer_size] = block
- received[block_number % buffer_size] = True
-
- if sender == splitter:
- # {{{ Send the previously received block in burst mode.
-
- cluster_sock.sendto(message, gatherer)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (gatherer) ' +
- '{}'.format(gatherer))
-
- # }}}
-
- if __debug__:
- logger.debug("Sending block "+str(block_number)+" in burst mode")
- logger.debug("Counter value: "+str(counter))
-
- # finish sending the last block to all peers in "burst mode" before sending the new one
- while( (counter < len(peer_list)) & (counter > 0)):
- peer = peer_list[counter]
-
- if __debug__:
- logger.debug("Counter: "+str(counter)+", Peer"+str(peer))
-
- cluster_sock.sendto(last, peer)
- # if not is_player_working:
- # cluster_sock.sendto('', peer)
-
- if __debug__ and last=='':
- logger.debug("I'M SENDING A '' MESSAGE")
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none)
-
- counter += 1
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter = 0
- last = message
- '''
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-'''
- # }}}
- else:
- # the sender is not the splitter, hence it's a peer
- # {{{ Check if the peer is new
-
- if sender not in peer_list:
- # The peer is new
- #peer_list.append(sender)
- #peer_list.insert(0,sender)
- peer_list.insert(counter,sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none)
- peer_insolidarity[sender] = 0
-
- # }}}
-
- if counter < len(peer_list):
- # {{{ Send the last block in congestion avoiding mode
-
- peer = peer_list[counter]
- cluster_sock.sendto(last, peer)
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none)
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter += 1
-
- # }}}
-
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-
-
- return block_number
- # }}}
- elif message=='bye':
- # {{{ Received a goodbye control block
-
- #if sender not in peer_list:
- # peer_list.append(sender)
- # peer_insolidarity[sender] = 0
- # if __debug__:
- # logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none)
- #else:
- try:
- peer_list.remove(sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none)
- except:
- pass
- return -1
- # }}}
- # }}}
- except socket.timeout:
- # {{{
- if __debug__:
- logger.warning(Color.red + "cluster timeout!" + Color.none)
- return -2
- # }}}
-
-# {{{ debug
-if __debug__:
- logger.debug(str(cluster_sock.getsockname()) + ' buffering ...')
-# }}}
-
-if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none)
-
-'''
-#Fill half the buffer
-'''
-#WARNING!!!
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out)
-#time.time() measures wall time, this means execution time plus waiting time
-
-last_block_number = 0
-error_counter = 0
-#start_buffering_time = time.time()
-
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- while receive_and_feed()<=0:
- pass
-#go through the buffer
-num_errors_buf = 0
-for x in range(block_to_play, block_to_play+(buffer_size/2)):
- if received[x%buffer_size] == False:
- num_errors_buf += 1
-
-'''
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- last_block_number = receive_and_feed()
- if last_block_number <= 0:
- error_counter += 1
-'''
-# END OF BUFFERING TIME
-end_buffering_time = time.time()
-buffering_time = end_buffering_time - start_buffering_time
-
-if __debug__:
- logger.info(str(cluster_sock.getsockname()) + ' buffering done')
- logger.info('NUM_PEERS '+str(len(peer_list)))
- logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS
-    logger.critical('BUF_LEN '+str(buffer_size)+' blocks')
- logger.critical('NUM_ERRORS_BUF '+str(error_counter))
- percentage_errors_buf = float(error_counter*100)/float(buffer_size/2)
- logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf))
- #logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf))
- logger.critical('NUM_PEERS '+str(len(peer_list)))
- # }}}
-
-'''
-#End buffering
-'''
-
-player_connected = True
-
-def send_a_block_to_the_player():
- # {{{
-
- global block_to_play
- global player_sock
- global player_connected
-
- if not received[block_to_play]:
- message = struct.pack("!H", block_to_play)
- cluster_sock.sendto(message, splitter)
-
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' complaining about lost block ' + str(block_to_play) + Color.none)
-
-        # Most players synchronize sooner if an empty block is sent
-        # instead of sending nothing. This should be tested.
-
- try:
- player_sock.sendall(blocks[block_to_play])
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' ' +
- str(block_to_play) +
- ' -> (player) ' +
- '{}'.format(player_sock.getpeername()))
-
- # }}}
-
- except socket.error:
- if __debug__:
-            logger.error(Color.red + 'player disconnected!' + Color.none)
- player_connected = False
- return
- except Exception as detail:
- if __debug__:
- logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none)
- return
-
- received[block_to_play] = False
-
- # }}}
-
-#get a death time
-#death_time = churn.new_death_time(20)
-death_time = churn.new_death_time(weibull_scale)
-
-'''
-#Once the buffer is half-filled, then start operating normally
-'''
-#while player_connected and not blocks_exhausted:
-while player_connected and not churn.time_to_die(death_time):
-
- if __debug__ and death_time != churn.NEVER:
- current_time = time.localtime()
- logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none)
- logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none)
-
- block_number = receive_and_feed()
- if block_number>=0:
- if (block_number % 256) == 0:
- for i in peer_insolidarity:
- peer_insolidarity[i] /= 2
- if not received[block_to_play]:
- print(str(cluster_sock.getsockname())+"Block "+str(block_to_play)+" missed")
- if _PLAYER_:
- send_a_block_to_the_player()
- block_to_play = (block_to_play + 1) % buffer_size
- #elif block_number == -2: #this stops the peer after only one cluster timeout
- # break
- if __debug__:
- logger.debug('NUM PEERS '+str(len(peer_list)))
-
-if __debug__:
- logger.info(Color.cyan + 'Goodbye!' + Color.none)
-goodbye = 'bye'
-cluster_sock.sendto(goodbye, splitter)
-for x in xrange(3):
- receive_and_feed()
-for peer in peer_list:
- cluster_sock.sendto(goodbye, peer)
-
diff --git a/sim-cluster/peer.py b/sim-cluster/peer.py
deleted file mode 100755
index 7162d05..0000000
--- a/sim-cluster/peer.py
+++ /dev/null
@@ -1,740 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimized mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the peer node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# }}}
-
-# Try running me as:
-#
-# ./splitter.py --source_hostname="localhost"
-# ./gatherer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9999 &
-# ./peer.py --splitter_hostname="localhost" --source_hostname="localhost"
-# vlc http://localhost:9998 &
-
-# {{{ Imports
-
-'''
-# BASIC VERSION OF THE PEER: peers do not send hello messages.
-# Use with splitter.py and gatherer.py (without numbers)
-'''
-
-import os
-import logging
-from colors import Color
-from common import Common
-import sys
-import socket
-import struct
-import time
-import argparse
-import churn
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-# Number of blocks of the buffer
-#buffer_size = 32
-#buffer_size = 256
-buffer_size = Common.buffer_size
-
-#cluster_port = 0 # OS default behavior will be used for port binding
-
-# Port to communicate with the player
-listening_port = 9998
-
-# Splitter endpoint
-#splitter_hostname = '150.214.150.68'
-splitter_hostname = 'localhost'
-splitter_port = 4552
-
-# Number of bytes of the stream's header
-#header_size = 1024*20*10
-#header_size = 1024*20
-header_size = Common.header_size
-
-# These four variables should be provided by the splitter
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-channel = '134.ogg'
-#block_size = 1024
-block_size = Common.block_size
-
-# Controls if the stream is sent to a player
-_PLAYER_ = True
-
-# Maximum number of blocks to receive from the splitter
-number_of_blocks = 999999999
-
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-
-logging_level = logging.INFO
-
-logging_filename = ''
-
-console_logging = False
-file_logging = True
-
-weibull_scale = 0 #for churn. 0 means no churn.
-
-# {{{ Args handling
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is a peer node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
- help='size of the video buffer in blocks. (Default = {})'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = {})'.format(channel))
-
-parser.add_argument('--listening_port',
- help='Port used to communicate with the player. (Default = {})'.format(listening_port))
-
-parser.add_argument('--logging_levelname',
-                    help='Name of the logging level. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--logging_filename',
-                    help='Name of the logging output file. (Default = "{}")'.format(logging_filename))
-
-parser.add_argument('--number_of_blocks',
-                    help='Maximum number of blocks to receive from the splitter. (Default = {}). If not specified, the peer runs forever.'.format(number_of_blocks))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming source. (Default = {})'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming source. (Default = {})'.format(source_port))
-
-parser.add_argument('--splitter_hostname',
- help='Hostname of the splitter. (Default = {})'.format(splitter_hostname))
-
-parser.add_argument('--splitter_port',
- help='Listening port of the splitter. (Default = {})'.format(splitter_port))
-
-parser.add_argument('--no_player', help='Do not send the stream to a player.', action="store_true")
-
-parser.add_argument('--churn', help='Scale parameter for the Weibull function, 0 means no churn. (Default = {})'.format(weibull_scale))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.listening_port:
- listening_port = int(args.listening_port)
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.logging_filename:
- logging_filename = args.logging_filename
-if args.number_of_blocks:
- number_of_blocks = int(args.number_of_blocks)
-if args.source_hostname:
- source_hostname = args.source_hostname
-if args.source_port:
- source_port = int(args.source_port)
-if args.splitter_hostname:
- splitter_hostname = args.splitter_hostname
-if args.splitter_port:
-    splitter_port = int(args.splitter_port)
-if args.no_player:
- _PLAYER_ = False
-if args.churn:
- weibull_scale = int(args.churn)
-
-# }}}
-
-print 'This is a P2PSP peer node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-
-# {{{ Logging initialization
-
-# create logger
-logger = logging.getLogger('peer (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-if console_logging == True:
- ch = logging.StreamHandler()
- ch.setLevel(logging_level)
- # create formatter
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- # add formatter to ch
- ch.setFormatter(formatter)
- # add ch to logger
- logger.addHandler(ch)
-
-#jalvaro
-# create file handler and set the level
-if args.logging_filename and file_logging == True:
- fh = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/peer-'+str(os.getpid()))
- fh.setLevel(logging_level)
- #add fh to logger
- logger.addHandler(fh)
-#jalvaro: create a file handler for the critical level, to store times. I know I shouldn't be using critical :D
-fh_timing = logging.FileHandler('/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/peer-'+str(os.getpid()))
-fh_timing.setLevel(logging.CRITICAL)
-logger.addHandler(fh_timing)
-
-# }}}
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-source = (source_hostname, source_port)
-splitter = (splitter_hostname, splitter_port)
-
-block_format_string = "H"+str(block_size)+"s"
-
-def get_player_socket():
- # {{{
-
- #sock = blocking_TCP_socket.blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- sock.listen(0)
-
- if __debug__:
- logger.info(Color.cyan + '{}'.format(sock.getsockname()) + ' waiting for the player on port ' + str(listening_port) + Color.none)
- # }}}
-
- #sock, player = sock.baccept()
- sock, player = sock.accept()
- sock.setblocking(0)
- return sock
-
- # }}}
-
-if _PLAYER_:
- player_sock = get_player_socket() # The peer is blocked until the
-                                      # player establishes a connection.
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' The player ' +
- '{}'.format(player_sock.getpeername()) +
-                     ' has established a connection')
-
- # }}}
-
-def communicate_the_header():
- # {{{
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
-
- # {{{ Receive the video header from the source and send it to the player
-
-    # Note: this process can fail if the stream ends while the blocks
-    # are being received. In that case the HTTP request would have to be
-    # issued again (as the server does).
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' retrieving the header ...' + Color.none)
-
- data = source_sock.recv(header_size)
- total_received = len(data)
- player_sock.sendall(data)
- while total_received < header_size:
- if __debug__:
- logger.debug(str(total_received))
- data = source_sock.recv(header_size - len(data))
- player_sock.sendall(data)
- total_received += len(data)
-
- # }}}
-
- if __debug__:
- logger.info(Color.cyan + str(source_sock.getsockname()) + ' done' + Color.none)
-
- source_sock.close()
- # }}}
-
-if _PLAYER_:
- communicate_the_header() # Retrieve the header of the stream from the
- # source and send it to the player.
-
-# {{{ debug
-if __debug__:
- logger.debug(" Trying to connect to the splitter at" + str(splitter))
-# }}}
-
-def connect_to_the_splitter():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect(splitter)
- return sock
-
- # }}}
-
-# START OF BUFFERING TIME
-start_buffering_time = time.time()
-
-splitter_sock = connect_to_the_splitter() # Connect to the splitter in
- # order to tell it who the
- # gatherer is.
-splitter = splitter_sock.getpeername() # "localhost" -> "127.0.0.1"
-
-if __debug__:
- logger.info(Color.cyan + '{}'.format(splitter_sock.getsockname()) + ' connected to the splitter' + Color.none)
-
-def create_cluster_sock():
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('',splitter_sock.getsockname()[PORT]))
- return sock
-
- # }}}
-cluster_sock = create_cluster_sock()
-cluster_sock.settimeout(1)
-
-# {{{ This is the list of peers of the cluster. Each peer uses this
-# structure to resend the blocks received from the splitter to these
-# nodes. }}}
-peer_list = []
-
-# {{{ This stores the insolidarity of the peers of the cluster. When
-# the insolidarity exceeds a threshold, the peer is deleted from the
-# list of peers. }}}
-peer_insolidarity = {}
-
-gatherer = None
-
-def retrieve_the_list_of_peers():
- # {{{
- global gatherer
- number_of_peers = socket.ntohs(
- struct.unpack("H",splitter_sock.recv(struct.calcsize("H")))[0])
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Cluster size = ' +
- str(number_of_peers))
-
- # }}}
- message = splitter_sock.recv(struct.calcsize("4sH"))
- IP_addr, port = struct.unpack("4sH", message)
- IP_addr = socket.inet_ntoa(IP_addr)
- port = socket.ntohs(port)
- gatherer = (IP_addr, port)
- while number_of_peers > 0:
- message = splitter_sock.recv(struct.calcsize("4sH"))
- IP_addr, port = struct.unpack("4sH", message)
- IP_addr = socket.inet_ntoa(IP_addr)
- port = socket.ntohs(port)
- peer = (IP_addr, port)
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(splitter_sock.getsockname()) +
- ' <- ' +
- '{}'.format(splitter_sock.getpeername()) +
- ' Peer ' +
- str(peer))
-
- # }}}
-
- peer_list.append(peer)
- peer_insolidarity[peer] = 0
-
-        cluster_sock.sendto('', peer) # Send an empty block (this
- # should be fast)
-
- number_of_peers -= 1
-
- # }}}
-
-retrieve_the_list_of_peers()
-
-splitter_sock.close()
-
-# {{{ At this moment, most of the other peers of the cluster are
-# sending blocks to the new peer.
-# }}}
-
-# {{{ We define the buffer structure. Two components are needed: (1)
-# the blocks buffer that stores the received blocks (2) the received
-# buffer that stores if a block has been received or not.
-# }}}
-blocks = [None]*buffer_size
-received = [False]*buffer_size
-
-# True if the peer has received "number_of_blocks" blocks.
-blocks_exhausted = False
-
-# This variable holds the last block received from the splitter. It is
-# used below to send the "last" block in the congestion avoiding mode.
-last = ''
-
-# Number of times that the last block has been sent to the cluster (we
-# send the block each time we receive a block).
-counter = 0
-
-def receive_and_feed():
- global last
- global counter
- global blocks_exhausted
- global number_of_blocks
-
- try:
- # {{{ Receive and send
- #message, sender = cluster_sock.recvfrom(struct.calcsize("H1024s"))
- message, sender = cluster_sock.recvfrom(struct.calcsize(block_format_string))
- #if len(message) == struct.calcsize("H1024s"):
- if len(message) == struct.calcsize(block_format_string):
- # {{{ Received a video block
- #number, block = struct.unpack("H1024s", message)
- number, block = struct.unpack(block_format_string, message)
- block_number = socket.ntohs(number)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- " <- " +
- '{}'.format(block_number) +
- ' ' +
- '{}'.format(sender))
-
- # }}}
- blocks[block_number % buffer_size] = block
- received[block_number % buffer_size] = True
-
- if sender == splitter:
- # {{{ Send the previously received block in burst mode.
-
- cluster_sock.sendto(message, gatherer)
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (gatherer) ' +
- '{}'.format(gatherer))
-
- # }}}
-
- if __debug__:
- logger.debug("Sending block "+str(block_number)+" in burst mode")
- logger.debug("Counter value: "+str(counter))
-
- while( (counter < len(peer_list)) & (counter > 0)):
- peer = peer_list[counter]
-
- if __debug__:
- logger.debug("Counter: "+str(counter)+", Peer"+str(peer))
-
- cluster_sock.sendto(last, peer)
- # if not is_player_working:
- # cluster_sock.sendto('', peer)
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed' + Color.none)
-
- counter += 1
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter = 0
- last = message
- '''
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-'''
- # }}}
- else:
- # {{{ Check if the peer is new
-
- if sender not in peer_list:
- # The peer is new
- peer_list.append(sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by data block' + Color.none)
- peer_insolidarity[sender] = 0
-
- # }}}
-
- if counter < len(peer_list):
- # {{{ Send the last block in congestion avoiding mode
-
- peer = peer_list[counter]
- cluster_sock.sendto(last, peer)
-
- peer_insolidarity[peer] += 1
- if peer_insolidarity[peer] > 64: # <- Important parameter!!
- del peer_insolidarity[peer]
- peer_list.remove(peer)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(peer) + ' removed by unsupportive' + Color.none)
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' ' +
- str(block_number) +
- ' -> (peer) ' +
- '{}'.format(peer))
-
- # }}}
-
- counter += 1
-
- # }}}
-
- if args.number_of_blocks:
- number_of_blocks -= 1
- if number_of_blocks <= 0:
- blocks_exhausted = True
-
-
- return block_number
- # }}}
- elif message=='':
- # {{{ Received a control block
-
- if sender not in peer_list:
- peer_list.append(sender)
- peer_insolidarity[sender] = 0
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' added by control block' + Color.none)
- else:
- peer_list.remove(sender)
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' peer ' + str(sender) + ' removed by control block' + Color.none)
- return -1
- # }}}
- # }}}
- except socket.timeout:
- # {{{
- if __debug__:
- logger.warning(Color.red + "cluster timeout!" + Color.none)
- return -2
- # }}}
-
-# {{{ debug
-if __debug__:
- logger.debug(str(cluster_sock.getsockname()) + ' buffering ...')
-# }}}
-
-if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' receiving data ...' + Color.none)
-
-'''
-#Fill half the buffer
-'''
-#WARNING!!!
-#time.clock() measures the time spent by the process (so the time spent waiting for an execution slot in the processor is left out)
-#time.time() measures wall time, this means execution time plus waiting time
-
-last_block_number = 0
-error_counter = 0
-
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- while receive_and_feed()<=0:
- pass
-#go through the buffer
-num_errors_buf = 0
-for x in range(block_to_play, block_to_play+(buffer_size/2)):
- if received[x%buffer_size] == False:
- num_errors_buf += 1
-
-'''
-block_number = receive_and_feed()
-while block_number<=0:
- block_number = receive_and_feed()
-block_to_play = block_number % buffer_size
-for x in xrange(buffer_size/2):
- last_block_number = receive_and_feed()
- if last_block_number <= 0:
- error_counter += 1
-'''
-
-# END OF BUFFERING TIME
-end_buffering_time = time.time()
-buffering_time = end_buffering_time - start_buffering_time
-
-if __debug__:
- logger.info(str(cluster_sock.getsockname()) + ' buffering done')
- logger.info('NUM_PEERS '+str(len(peer_list)))
- logger.critical('BUF_TIME '+str(buffering_time)+' secs') #buffering time in SECONDS
-    logger.critical('BUF_LEN '+str(buffer_size)+' blocks')
- logger.critical('NUM_ERRORS_BUF '+str(error_counter))
- percentage_errors_buf = float(error_counter*100)/float(buffer_size/2)
- logger.critical('PERCENTAGE_ERRORS_BUF ' + str(percentage_errors_buf))
- #logger.critical('PERCENTAGE_ERRORS_BUF {:.2}%'.format(percentage_errors_buf))
- logger.critical('NUM_PEERS '+str(len(peer_list)))
- # }}}
-
-'''
-#End buffering
-'''
-
-player_connected = True
-
-def send_a_block_to_the_player():
- # {{{
-
- global block_to_play
- global player_sock
- global player_connected
-
- if not received[block_to_play]:
- message = struct.pack("!H", block_to_play)
- cluster_sock.sendto(message, splitter)
-
- if __debug__:
- logger.info(Color.cyan + str(cluster_sock.getsockname()) + ' complaining about lost block ' + str(block_to_play) + Color.none)
-
-        # Most players synchronize sooner if an empty block is sent
-        # instead of sending nothing. This should be tested.
-
- try:
- player_sock.sendall(blocks[block_to_play])
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(player_sock.getsockname()) +
- ' ' +
- str(block_to_play) +
- ' -> (player) ' +
- '{}'.format(player_sock.getpeername()))
-
- # }}}
-
- except socket.error:
- if __debug__:
-            logger.error(Color.red + 'player disconnected!' + Color.none)
- player_connected = False
- return
- except Exception as detail:
- if __debug__:
- logger.error(Color.red + 'unhandled exception ' + str(detail) + Color.none)
- return
-
- received[block_to_play] = False
-
- # }}}
-
-#get a death time
-#death_time = churn.new_death_time(20)
-death_time = churn.new_death_time(weibull_scale)
-
-'''
-#Once the buffer is half-filled, then start operating normally
-'''
-#while player_connected and not blocks_exhausted:
-while player_connected and not churn.time_to_die(death_time):
-
- if __debug__ and death_time != churn.NEVER:
- current_time = time.localtime()
- logger.debug(Color.green+'Current time is '+str(current_time.tm_hour).zfill(2)+':'+str(current_time.tm_min).zfill(2)+':'+str(current_time.tm_sec).zfill(2)+Color.none)
- logger.debug(Color.green+'Scheduled death time is '+str(time.localtime(death_time).tm_hour).zfill(2)+':'+str(time.localtime(death_time).tm_min).zfill(2)+':'+str(time.localtime(death_time).tm_sec).zfill(2)+Color.none)
-
- block_number = receive_and_feed()
- if block_number>=0:
- if (block_number % 256) == 0:
- for i in peer_insolidarity:
- peer_insolidarity[i] /= 2
- if _PLAYER_:
- send_a_block_to_the_player()
- block_to_play = (block_to_play + 1) % buffer_size
- #elif block_number == -2: #this stops the peer after only one cluster timeout
- # break
- if __debug__:
- logger.debug('NUM PEERS '+str(len(peer_list)))
-
-if __debug__:
- logger.info(Color.cyan + 'Goodbye!' + Color.none)
-goodbye = ''
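-# In this version the goodbye control block is an empty datagram: a peer
-# already in the receiver's peer list is removed when it arrives.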
-cluster_sock.sendto(goodbye, splitter)
-for x in xrange(3):
- receive_and_feed()
-for peer in peer_list:
- cluster_sock.sendto(goodbye, peer)
-
diff --git a/sim-cluster/run_oggfwd.sh b/sim-cluster/run_oggfwd.sh
deleted file mode 100755
index 3b1013d..0000000
--- a/sim-cluster/run_oggfwd.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-icecast_name="localhost"
-icecast_port=4551
-video=/home/jalvaro/workspace/sim/gnagl.ogg
-#video=/home/jalvaro/workspaces-eclipse/P2PSP/Big_Buck_Bunny_small.ogv
-#video=/home/jalvaro/workspaces-eclipse/P2PSP/sample48.ogg
-password=1qaz
-channel=134.ogg
-
-usage() {
- echo $0
- echo " [-c (icecast mount-point, \"$channel\" by default)]"
- echo " [-w (icecast password, \"$password\" by default)]"
- echo " [-a (icecast hostname, $icecast_name by default)]"
- echo " [-p (icecast port, $icecast_port by default)]"
- echo " [-v (video file-name, \"$video\" by default)]"
- echo " [-? (help)]"
-}
-
-echo $0: parsing: $@
-
-while getopts "c:w:a:p:v:?" opt; do
- case ${opt} in
- c)
- channel="${OPTARG}"
- ;;
- w)
- password="${OPTARG}"
- ;;
- a)
- icecast_name="${OPTARG}"
- ;;
- p)
- icecast_port="${OPTARG}"
- ;;
- v)
- video="${OPTARG}"
- ;;
- ?)
- usage
- exit 0
- ;;
- \?)
- echo "Invalid option: -${OPTARG}" >&2
- usage
- exit 1
- ;;
- :)
- echo "Option -${OPTARG} requires an argument." >&2
- usage
- exit 1
- ;;
- esac
-done
-
-#old_IFS=$IFS
-#IFS=":"
-#icecast_host=${icecast[0]}
-#icecast_port=${icecast[1]}
-#IFS=$old_IFS
-
-echo "Feeding http://$icecast_name:$icecast_port/$channel with \"$video\" forever ..."
-
-set -x
-
-while true
-do
- oggfwd $icecast_name $icecast_port $password $channel < $video
-done
-
-set +x
diff --git a/sim-cluster/simulation.sh b/sim-cluster/simulation.sh
deleted file mode 100755
index 857667d..0000000
--- a/sim-cluster/simulation.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-set -x
-
-# Simulates flash-crowd peer churn.
-
-#number_of_blocks=100
-number_of_peers=2
-churn_scale=0
-buffer_size=32
-block_size=1024
-
-usage() {
- echo $0
- echo "Simulates flash-crowd peer churn."
- echo "Parameters:"
-# echo " [-b (number of blocks, $number_of_blocks by default)]"
- echo " [-n (number of peers, $number_of_peers by default)]"
- echo " [-c (churn scale, $churn_scale by default, meaning no churn)]"
- echo " [-s (buffer size in blocks, $buffer_size by default)]"
- echo " [-l (block size in bytes, $block_size by default)]"
- echo " [-? (help)]"
-}
-
-echo $0: parsing: $@
-
-while getopts "b:n:c:s:l:?" opt; do
- case ${opt} in
- b)
- number_of_blocks="${OPTARG}"
- ;;
- n)
- number_of_peers="${OPTARG}"
- ;;
- c)
- churn_scale="${OPTARG}"
- ;;
- s)
- buffer_size="${OPTARG}"
- ;;
- l)
- block_size="${OPTARG}"
- ;;
- ?)
- usage
- exit 0
- ;;
- \?)
- echo "Invalid option: -${OPTARG}" >&2
- usage
- exit 1
- ;;
- :)
- echo "Option -${OPTARG} requires an argument." >&2
- usage
- exit 1
- ;;
- esac
-done
-
-echo "block_size is ${block_size}"
-
-#clear previous output files
-rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/output/*
-rm /home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster/timing/*
-
-#start the splitter
-xterm -l -lf ./output/salida_splitter.txt -e "./splitter.py --source_hostname=localhost --logging_level=INFO --buffer_size=$buffer_size --block_size=$block_size"&
-
-#sleep 1
-
-#start the gatherer
-#xterm -l -lf ./output/salida_gatherer.txt -e "./gatherer.py --splitter_hostname=localhost --source_hostname=localhost --logging_level=INFO --buffer_size=$buffer_size --block_size=$block_size" &
-
-sleep 1s
-
-#start the player
-#vlc http://localhost:9999 &
-
-sleep 5s
-#x(){
-COUNTER=0
-while [ $COUNTER -lt $((number_of_peers-1)) ];
-do
- ./peer-h.py --splitter_hostname=localhost --source_hostname=localhost --no_player --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=${churn_scale} --buffer_size=${buffer_size} --block_size=$block_size&
- let COUNTER=COUNTER+1
-done
-#}
-rm ./output/salida_peer_player.txt
-xterm -l -lf ./output/salida_peer_player.txt -e "./peer-h.py --splitter_hostname=localhost --source_hostname=localhost --logging_level=DEBUG --logging_file=./output/peer-${COUNTER} --churn=${churn_scale} --buffer_size=${buffer_size} --block_size=$block_size"&
-
-sleep 1s
-vlc http://localhost:9998 &
-set +x
diff --git a/sim-cluster/simulator.sh b/sim-cluster/simulator.sh
deleted file mode 100755
index 31cc881..0000000
--- a/sim-cluster/simulator.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-set -x
-home="/home/jalvaro/workspaces-eclipse/P2PSP-sim-cluster/sim/sim-cluster"
-strategy="bloque-exclusivo-nuevos-siguiente-block768"
-sleep_time=100s
-sleep_time_oggfwd=30s
-num_peers_array=(10 50 100 300)
-#upper_limit=400
-
-cd ${home}/timing/
-rm -rf ./*
-cd ..
-
-pkill oggfwd
-
-for num_peers in ${num_peers_array[*]}
-do
-
-##32 BITS
- echo "Experiment 1 for ${num_peers} peers, buffer 32"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/1/${strategy}/buffer-32bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/1/${strategy}/buffer-32bits/${num_peers}
- pkill oggfwd
-
- echo "Experiment 2 for ${num_peers} peers, buffer 32"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/2/${strategy}/buffer-32bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/2/${strategy}/buffer-32bits/${num_peers}
- pkill oggfwd
-
- echo "Experiment 3 for ${num_peers} peers, buffer 32"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 32 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/3/${strategy}/buffer-32bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/3/${strategy}/buffer-32bits/${num_peers}
- pkill oggfwd
-
-#256 BITS
- echo "Experiment 1 for ${num_peers} peers, buffer 256"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/1/${strategy}/buffer-256bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/1/${strategy}/buffer-256bits/${num_peers}
- pkill oggfwd
-
- echo "Experiment 2 for ${num_peers} peers, buffer 256"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/2/${strategy}/buffer-256bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/2/${strategy}/buffer-256bits/${num_peers}
- pkill oggfwd
-
- echo "Experiment 3 for ${num_peers} peers, buffer 256"
- ./run_oggfwd.sh &
- sleep ${sleep_time_oggfwd}
- ${home}/simulation.sh -c 0 -n ${num_peers} -s 256 &
- sleep ${sleep_time}
- ${home}/stop_simulation.sh
- mkdir -p ${home}/timing/3/${strategy}/buffer-256bits/${num_peers}
- mv ${home}/timing/peer* ${home}/timing/3/${strategy}/buffer-256bits/${num_peers}
- pkill oggfwd
-
-done
-set +x
-
diff --git a/sim-cluster/splitter-x.py b/sim-cluster/splitter-x.py
deleted file mode 100755
index 69a39dd..0000000
--- a/sim-cluster/splitter-x.py
+++ /dev/null
@@ -1,693 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimized mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# }}}
-
-# Try running me as:
-#
-# xterm -e "./splitter.py" &
-# xterm -e './gatherer.py --splitter_hostname="localhost"' &
-# vlc http://localhost:9999 &
-
-'''
-# "EXCLUSIVE BLOCK" VERSION OF THE SPLITTER.
-# The splitter sends an exclusive stream block to each incoming peer. The peer then forwards that block to everyone as a kind of "hello".
-# The goal is to speed up the buffering process and put less strain on the network.
-# Use with peer-x.py and gatherer.py
-'''
-
-# {{{ imports
-
-import logging
-from colors import Color
-from common import Common
-import socket
-from blocking_TCP_socket import blocking_TCP_socket
-import sys
-import struct
-import time
-#import thread
-from threading import Thread
-from threading import Lock
-from threading import RLock
-from time import gmtime, strftime
-import os
-import argparse
-
-# }}}
-
-total_blocks = 1 #starts in 1 to avoid div-by-zero issues when calculating the percentage
-total_blocks = long(total_blocks) #to declare it long. Alternatively: total_blocks = 0L
-loss_percentage = 0
-loss_percentage = float(loss_percentage) #the same with the percentage of loss
-
-IP_ADDR = 0
-PORT = 1
-
-
-#buffer_size = 32 # Buffer size in the peers and the gatherer
-#block_size = 1024
-buffer_size = Common.buffer_size
-block_size = Common.block_size
-
-channel = '134.ogg'
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-listening_port = 4552
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-logging_level = logging.INFO
-
-# {{{ Args handling
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is the splitter node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
-                    help='Size of the video buffer in blocks. (Default = {})'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel))
-
-parser.add_argument('--logging_levelname',
-                    help='Logging level: DEBUG, INFO, WARNING, ERROR or CRITICAL. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming server. (Default = "{}")'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming server. (Default = {})'.format(source_port))
-
-parser.add_argument('--listening_port',
- help='Port to talk with the gatherer and peers. (Default = {})'.format(listening_port))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.source_hostname:
- source_hostname = str(args.source_hostname)
-if args.source_port:
- source_port = int(args.source_port)
-if args.listening_port:
- listening_port = int(args.listening_port)
-
-# }}}
-
-print 'This is a P2PSP splitter node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-# {{{ Logging initialization
-
-# Take a look at logging.config.
-
-# create logger
-logger = logging.getLogger('splitter (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-ch = logging.StreamHandler()
-ch.setLevel(logging_level)
-
-# create formatter
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-#formatter = logging.Formatter("%(asctime)s [%(funcName)s: %(filename)s,%(lineno)d] %(message)s")
-
-# add formatter to ch
-ch.setFormatter(formatter)
-
-# add ch to logger
-logger.addHandler(ch)
-
-#logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S',
-# level=logging.DEBUG)
-# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S')
-# else:
-# print 'Running in release mode'
-# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S',
-# level=logging.CRITICAL)
-
-# }}}
-
-source = (source_hostname, source_port)
-
-# }}}
-
-# The list of peers (included the gatherer)
-peer_list = []
-
-# The number of the last received block from the streaming server
-block_number = 0
-
-# Used to find the peer to which a block has been sent
-destination_of_block = [('0.0.0.0',0) for i in xrange(buffer_size)]
-
-# Unreliability rate of a peer
-unreliability = {}
-
-# Complaining rate of a peer
-complains = {}
-
-# The peer_list iterator
-peer_index = 0
-
-# A lock to perform mutual exclusion for accessing the list of peers
-peer_list_lock = Lock()
-# A lock for source_sock
-source_sock_lock = Lock()
-
-gatherer = None
-
-block_format_string = "H"+str(block_size)+"s"
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-# {{{ Handle one telnet client
-
-class get_the_state(Thread):
- # {{{
-
- global peer_list
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port+1))
-
- logger.info(Color.cyan +
- '{}'.format(sock.getsockname()) +
- ' listening (telnet) on port ' +
- str(listening_port+1) +
- Color.none)
-
- sock.listen(0)
- try:
- while True:
- connection = sock.accept()[0]
- message = 'a'
- while message[0] != 'q':
- connection.sendall('Gatherer = ' + str(gatherer) + '\n')
- connection.sendall('Number of peers = ' + str(len(peer_list)) + '\n')
- counter = 0
- for p in peer_list:
- loss_percentage = float(unreliability[p]*100)/float(total_blocks)
- connection.sendall(str(counter) +
- '\t' + str(p) +
- '\t' + 'unreliability=' + str(unreliability[p]) +' ({:.2}%)'.format(loss_percentage)+
- '\t' + 'complains=' + str(complains[p]) +
- '\n')
- counter += 1
- connection.sendall('\n Total blocks sent = '+str(total_blocks))
-                    connection.sendall(Color.cyan + '\nEnter a line that begins with "q" to exit or any other key to continue\n' + Color.none)
- message = connection.recv(2)
-
- connection.close()
-
- except:
- pass
-
-get_the_state().setDaemon(True)
-get_the_state().daemon=True
-get_the_state().start()
-
-# }}}
-
-# Return the connection socket used to establish the connections of the
-# peers (and the gatherer) (Week 3)
-
-def get_peer_connection_socket():
- #sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
-
- sock.bind( ('', listening_port) )
- sock.listen(100)
-
- return sock
-
-peer_connection_sock = get_peer_connection_socket()
-
-logger.info(Color.cyan +
- '{}'.format(peer_connection_sock.getsockname()) +
- ' waiting for the gatherer on port ' +
- str(listening_port) +
- Color.none)
-
-gatherer = peer_connection_sock.accept()[1]
-
-logger.info(Color.cyan +
- '{}'.format(peer_connection_sock.getsockname()) +
- ' the gatherer is ' +
- str(gatherer) +
- Color.none)
-
-# {{{ Handle peer arrivals.
-
-class handle_arrivals(Thread):
- # {{{
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
-
- global block_number
- global total_blocks
- global destination_of_block
- global unreliability
- global complains
-
- while True:
- # {{{ Wait for the connection from the peer /PS0/
-
- peer_serve_socket, peer = peer_connection_sock.accept()
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(peer_serve_socket.getsockname()) +
- ' Accepted connection from peer ' +
- str(peer))
- # }}}
-
- # }}}
-
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(peer_serve_socket.getsockname()) +
- ' Sending the list of peers')
- # }}}
-
- try:
- #get a new stream block for the incoming peer
- block = receive_next_block()
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(source_sock.getsockname()) +
- Color.green + ' <- ' + Color.none +
- '{}'.format(source_sock.getpeername()) + " (source)" +
- ' ' +
- '{}'.format(block_number))
- # }}}
-
-
- try:
- peer_list_lock.acquire() #get the lock
- temp_peer_list = list(peer_list) #for later use outside the critical section. http://henry.precheur.org/python/copy_list
-
- peer_list.append(peer)
-                temp_block_number = block_number #for later use outside the critical section
- total_blocks += 1
- destination_of_block[block_number % buffer_size] = peer
- block_number = (block_number + 1) % 65536
- except Exception:
- print("Exception adding the peer to the peer list in handle arrivals")
- finally:
- peer_list_lock.release() #release the lock
-
- if __debug__:
- logger.debug("First block sent to peer "+str(peer)+" : "+str(temp_block_number))
- logger.debug("First block sent to peer "+str(peer)+" in buffer position : "+str((temp_block_number)%buffer_size))
-
- unreliability[peer] = 0
- complains[peer] = 0
-
- #send the block
- #message = struct.pack("H1024s", socket.htons(temp_block_number), block)
- message = struct.pack(block_format_string, socket.htons(temp_block_number), block)
- peer_serve_socket.sendall(message)
-
- #send the list of peers
- message = struct.pack("H", socket.htons(len(temp_peer_list)))
- peer_serve_socket.sendall(message)
- message = struct.pack("4sH", socket.inet_aton(gatherer[IP_ADDR]),socket.htons(gatherer[PORT]))
- peer_serve_socket.sendall(message)
- for p in temp_peer_list:
- message = struct.pack("4sH", socket.inet_aton(p[IP_ADDR]),socket.htons(p[PORT]))
- peer_serve_socket.sendall(message)
-
- # {{{ debug
-
- if __debug__:
- logger.debug(str(len(temp_peer_list)) + ' peers sent (plus gatherer)')
-
- # }}}
-
- # }}}
-
- # {{{ Close the TCP socket with the peer/gatherer
-
- peer_serve_socket.close()
-
- # }}}
-
- logger.info(Color.cyan +
- str(peer) +
- ' has joined the cluster' +
- Color.none)
- except:
- print("Exception in handle_arrivals")
-
-
- # }}}
-
-handle_arrivals().setDaemon(True)
-handle_arrivals().daemon=True
-handle_arrivals().start()
-
-# }}}
-
-# {{{ Create the socket to send the blocks of stream to the peers/gatherer
-
-def create_cluster_sock(listening_port):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- #peer_socket.bind(('',peer_connection_sock.getsockname()[PORT]))
-
- return sock
-
-cluster_sock = create_cluster_sock(listening_port)
-
-# }}}
-
-# {{{ Handle peer/gatherer complaints and goodbye messages (Week 10)
-
-class listen_to_the_cluster(Thread):
- # {{{
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
-
- global peer_index
-
- while True:
- # {{{ debug
- if __debug__:
- logger.debug('waiting for messages from the cluster')
- # }}}
- message, sender = cluster_sock.recvfrom(struct.calcsize("H"))
-
- #if len(message) == 0:
- if message == 'bye':
- try:
- peer_list_lock.acquire() #get the lock
- peer_list.remove(sender)
- logger.info(Color.cyan + str(sender) + ' has left the cluster' + Color.none)
- except:
-                logger.warning(Color.blue + 'Received a goodbye message from ' + str(sender) + ' which is not in the list of peers' + Color.none)
- pass
- finally:
- peer_list_lock.release() #release the lock
- else:
- # The sender of the packet complains and the packet
- # comes with the index of a lost block
- try:
- peer_list_lock.acquire() #get the lock
- lost_block = struct.unpack("!H",message)[0]
- destination = destination_of_block[lost_block]
-
- logger.debug(Color.cyan + str(sender) + ' complains about lost block ' + str(lost_block) + ' sent to ' + str(destination) + Color.none)
- unreliability[destination] += 1
- finally:
- peer_list_lock.release() #release the lock
-
-'''jalvaro: I'm commenting this so peers are not expelled
-#if the sender of the complaint is the gatherer then the splitter removes the offender immediately
- if sender == gatherer:
- try:
- peer_list.remove(destination)
- del unreliability[destination]
- del complains[destination]
-
- logger.info(Color.cyan +
- str(destination) +
- ' has been removed' +
- Color.none)
- except:
- pass
-
- else:
- try:
- unreliability[destination] += 1
- if unreliability[destination] > len(peer_list):
-                    # Too many complaints about an unsupportive peer
- peer_list.remove(destination)
- del unreliability[destination]
- del complains[destination]
-
- logger.info(Color.cyan +
- str(destination) +
- ' has been removed' +
- Color.none)
-
- except:
- pass
-'''
-
- # }}}
-
-listen_to_the_cluster().setDaemon(True)
-listen_to_the_cluster().daemon=True
-listen_to_the_cluster().start()
-
-# }}}
-
-# {{{ Connect to the streaming server and request the channel (week 2)
-
-source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
-source_sock.connect(source)
-
-# {{{ debug
-
-if __debug__:
- logger.debug('{}'.format(source_sock.getsockname()) +
- ' connected to the video source ' +
- '{}'.format(source_sock.getpeername()))
-
-# }}}
-
-# {{{ Request the video to the source
-
-GET_message = 'GET /' + channel + " HTTP/1.1\r\n\r\n"
-source_sock.sendall(GET_message)
-
-# }}}
-
-# {{{ debug
-
-if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' sending the rest of the stream ...')
-
-# }}}
-
-# {{{ Feed the peers
-
-while True:
-
- # (Week 2)
- def receive_next_block():
- # {{{
-
- global source_sock
-
- source_sock_lock.acquire() #get the lock
- try:
- block = source_sock.recv(block_size)
- tries = 0
- while len(block) < block_size:
- tries += 1
- if tries > 3:
-
- # {{{ debug
- if __debug__:
- logger.debug('GET')
- # }}}
-
- time.sleep(1)
- source_sock.close()
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall(GET_message)
-
- #block += source_sock.recv(1024-len(block))
- block += source_sock.recv(block_size - len(block))
- finally:
- source_sock_lock.release() #release the lock
- return block
-
- # }}}
-
- block = receive_next_block()
- #block = source_sock.brecv(block_size)
-
- # {{{ debug
- if __debug__:
-
- logger.debug('{}'.format(source_sock.getsockname()) +
- Color.green + ' <- ' + Color.none +
- '{}'.format(source_sock.getpeername()) + " (source)" +
- ' ' +
- '{}'.format(block_number))
- # }}}
-
- '''
-	New code
- '''
- peer_list_lock.acquire() #get peer_list_lock
- try:
- len_peer_list = len(peer_list)
- try:
- peer = peer_list[peer_index]
- except:
- try:
- peer = peer_list[0]
- except:
- peer = gatherer
- len_peer_list = 1
- destination_of_block[block_number % buffer_size] = peer
- peer_index = (peer_index + 1) % len_peer_list
- temp_block_number = block_number #for later use outside the critical section
- block_number = (block_number + 1) % 65536
- total_blocks += 1
- finally:
- peer_list_lock.release() # release peer_list_lock
-
- #message = struct.pack("H1024s", socket.htons(temp_block_number), block)
- message = struct.pack(block_format_string, socket.htons(temp_block_number), block)
- cluster_sock.sendto(message, peer)
-
- if peer == gatherer:
- logger.debug('{}'.format(cluster_sock.getsockname())+Color.green+' -> '+Color.none+ str(peer)+' (gatherer) '+str(temp_block_number))
- else:
- logger.debug('{}'.format(cluster_sock.getsockname())+Color.green+' -> '+Color.none+ str(peer)+' (peer) '+str(temp_block_number))
-
- logger.debug("NUM_PEERS "+str(len(peer_list)))
-
- '''
-	End of the new code
- '''
-
- '''
-	#Old code
- #with peer_list_lock:
- #peer_list_lock.acquire()
- len_peer_list = len(peer_list)
- #if peer_index < len_peer_list:
- try:
- peer = peer_list[peer_index]
- except:
- try:
- peer = peer_list[0]
- except:
- peer = gatherer
- len_peer_list = 1
- #peer_list_lock.release()
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- Color.green + ' -> ' + Color.none +
- '{}'.format(peer) +
- ' ' +
- '{}'.format(block_number))
- # }}}
-
- message = struct.pack("H1024s", socket.htons(block_number), block)
- #if not (block_number%2)==0:
- cluster_sock.sendto(message, peer)
-	# Careful, it sometimes crashes with: "IndexError: list index out of range"
- destination_of_block[block_number % buffer_size] = peer
-
- peer_index = (peer_index + 1) % len_peer_list
-
- block_number = (block_number + 1) % 65536
-
- total_blocks += 1
-	#End of the old code
- '''
-
- '''
- #decrement unreliability and complaints after every 256 packets
- if (block_number % 256) == 0:
- for i in unreliability:
- unreliability[i] /= 2
- for i in complains:
- complains[i] /= 2
- '''
-
-# }}}
diff --git a/sim-cluster/splitter.py b/sim-cluster/splitter.py
deleted file mode 100755
index ba2f8ad..0000000
--- a/sim-cluster/splitter.py
+++ /dev/null
@@ -1,730 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# Note: if you run the python interpreter in the optimized mode (-O),
-# debug messages will be disabled.
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Vicente González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# }}}
-
-# Try running me as:
-#
-# xterm -e "./splitter.py" &
-# xterm -e './gatherer.py --splitter_hostname="localhost"' &
-# vlc http://localhost:9999 &
-
-# {{{ imports
-
-import logging
-from colors import Color
-from common import Common
-import socket
-from blocking_TCP_socket import blocking_TCP_socket
-import sys
-import struct
-import time
-#import thread
-from threading import Thread
-from threading import Lock
-from threading import RLock
-from time import gmtime, strftime
-import os
-import argparse
-
-# }}}
-
-total_blocks = 1 #starts in 1 to avoid div-by-zero issues when calculating the percentage
-total_blocks = long(total_blocks) #to declare it long. Alternatively: total_blocks = 0L
-loss_percentage = 0
-loss_percentage = float(loss_percentage) #the same with the percentage of loss
-
-IP_ADDR = 0
-PORT = 1
-
-# Buffer size in the peers and the gatherer
-#buffer_size = 32
-#block_size = 1024
-buffer_size = Common.buffer_size
-block_size = Common.block_size
-
-channel = '134.ogg'
-#source_hostname = '150.214.150.68'
-source_hostname = 'localhost'
-source_port = 4551
-listening_port = 4552
-
-logging_levelname = 'INFO' # 'DEBUG' (default), 'INFO' (cyan),
- # 'WARNING' (blue), 'ERROR' (red),
- # 'CRITICAL' (yellow)
-logging_level = logging.INFO
-
-# {{{ Args handling
-
-print 'Argument List:', str(sys.argv)
-
-parser = argparse.ArgumentParser(
- description='This is the splitter node of a P2PSP network.')
-
-parser.add_argument('--buffer_size',
-                    help='Size of the video buffer in blocks. (Default = {})'.format(buffer_size))
-
-parser.add_argument('--block_size',
- help='Block size in bytes. (Default = {})'.format(block_size))
-
-parser.add_argument('--channel',
- help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel))
-
-parser.add_argument('--logging_levelname',
-                    help='Logging level: DEBUG, INFO, WARNING, ERROR or CRITICAL. (Default = "{}")'.format(logging_levelname))
-
-parser.add_argument('--source_hostname',
- help='Hostname of the streaming server. (Default = "{}")'.format(source_hostname))
-
-parser.add_argument('--source_port',
- help='Listening port of the streaming server. (Default = {})'.format(source_port))
-
-parser.add_argument('--listening_port',
- help='Port to talk with the gatherer and peers. (Default = {})'.format(listening_port))
-
-args = parser.parse_known_args()[0]
-if args.buffer_size:
- buffer_size = int(args.buffer_size)
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.logging_levelname == 'DEBUG':
- logging_level = logging.DEBUG
-if args.logging_levelname == 'INFO':
- logging_level = logging.INFO
-if args.logging_levelname == 'WARNING':
- logging_level = logging.WARNING
-if args.logging_levelname == 'ERROR':
- logging_level = logging.ERROR
-if args.logging_levelname == 'CRITICAL':
- logging_level = logging.CRITICAL
-if args.source_hostname:
- source_hostname = str(args.source_hostname)
-if args.source_port:
- source_port = int(args.source_port)
-if args.listening_port:
- listening_port = int(args.listening_port)
-
-# }}}
-
-print 'This is a P2PSP splitter node ...',
-if __debug__:
- print 'running in debug mode'
-else:
- print 'running in release mode'
-
-# {{{ Logging initialization
-
-# Take a look at logging.config.
-
-# create logger
-logger = logging.getLogger('splitter (' + str(os.getpid()) + ')')
-logger.setLevel(logging_level)
-
-# create console handler and set the level
-ch = logging.StreamHandler()
-ch.setLevel(logging_level)
-
-# create formatter
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-#formatter = logging.Formatter("%(asctime)s [%(funcName)s: %(filename)s,%(lineno)d] %(message)s")
-
-# add formatter to ch
-ch.setFormatter(formatter)
-
-# add ch to logger
-logger.addHandler(ch)
-
-#logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S',
-# level=logging.DEBUG)
-# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S')
-# else:
-# print 'Running in release mode'
-# logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
-# datefmt='%H:%M:%S',
-# level=logging.CRITICAL)
-
-# }}}
-
-source = (source_hostname, source_port)
-
-# }}}
-
-# The list of peers (included the gatherer)
-peer_list = []
-
-# The number of the last received block from the streaming server
-block_number = 0
-
-
-
-# Used to find the peer to which a block has been sent
-destination_of_block = [('0.0.0.0',0) for i in xrange(buffer_size)]
-
-# Unreliability rate of a peer
-unreliability = {}
-
-# Complaining rate of a peer
-complains = {}
-
-# The peer_list iterator
-peer_index = 0
-
-# A lock to perform mutual exclusion for accessing the list of peers
-peer_list_lock = Lock()
-
-#gatherer = None
-
-block_format_string = "H"+str(block_size)+"s"
-
-print("Buffer size: "+str(buffer_size)+" blocks")
-print("Block size: "+str(block_size)+" bytes")
-logger.info("Buffer size: "+str(buffer_size)+" blocks")
-logger.info("Block size: "+str(block_size)+" bytes")
-
-# {{{ Handle one telnet client
-
-class get_the_state(Thread):
- # {{{
-
- global peer_list
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port+1))
-
- logger.info(Color.cyan +
- '{}'.format(sock.getsockname()) +
- ' listening (telnet) on port ' +
- str(listening_port+1) +
- Color.none)
-
- sock.listen(0)
- try:
- while True:
- connection = sock.accept()[0]
- message = 'a'
- while message[0] != 'q':
- #Commented due to gatherer removal
- #connection.sendall('Gatherer = ' + str(gatherer) + '\n')
- connection.sendall('Number of peers = ' + str(len(peer_list)) + '\n')
- counter = 0
- for p in peer_list:
- loss_percentage = float(unreliability[p]*100)/float(total_blocks)
- connection.sendall(str(counter) +
- '\t' + str(p) +
- '\t' + 'unreliability=' + str(unreliability[p]) +' ({:.2}%)'.format(loss_percentage)+
- '\t' + 'complains=' + str(complains[p]) +
- '\n')
- counter += 1
- connection.sendall('\n Total blocks sent = '+str(total_blocks))
-                    connection.sendall(Color.cyan + '\nEnter a line that begins with "q" to exit or any other key to continue\n' + Color.none)
- message = connection.recv(2)
-
- connection.close()
-
- except:
- pass
-
-get_the_state().setDaemon(True)
-get_the_state().daemon=True
-get_the_state().start()
-
-# }}}
-
-# Return the connection socket used to establish the connections of the
-# peers (and the gatherer) (Week 3)
-
-def get_peer_connection_socket():
- #sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
-
- sock.bind( ('', listening_port) )
- #sock.listen(5)
- sock.listen(socket.SOMAXCONN) #set the connection queue to the max!
-
- return sock
-
-peer_connection_sock = get_peer_connection_socket()
-
-'''
-#Commented due to gatherer removal
-logger.info(Color.cyan +
- '{}'.format(peer_connection_sock.getsockname()) +
- ' waiting for the gatherer on port ' +
- str(listening_port) +
- Color.none)
-
-gatherer = peer_connection_sock.accept()[1]
-
-logger.info(Color.cyan +
- '{}'.format(peer_connection_sock.getsockname()) +
- ' the gatherer is ' +
- str(gatherer) +
- Color.none)
-'''
-
-# {{{ Handle the arrival of a peer. This class is called by handle_arrivals
-class handle_one_arrival(Thread):
- peer_serve_socket = ""
- peer = ""
-
- def __init__(self, peer_serve_socket, peer):
- Thread.__init__(self)
- self.peer_serve_socket = peer_serve_socket
- self.peer = peer
-
- def run(self):
-
- global peer_list
- global unreliability
- global complains
- global logger
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(self.peer_serve_socket.getsockname()) +
- ' Accepted connection from peer ' +
- str(self.peer))
- # }}}
-
- # {{{ Send the list of peers to the peer /PS4/
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(self.peer_serve_socket.getsockname()) +
- ' Sending the list of peers')
- # }}}
-
- #get a copy of peer_list to prevent race conditions!
- #list slicing ([:]) is faster than the list() method according to http://stackoverflow.com/questions/2612802/how-to-clone-a-list-in-python
- #peer_list_copy = peer_list[:]
-
- #message = struct.pack("H", socket.htons(len(peer_list_copy)))
- message = struct.pack("H", socket.htons(len(peer_list)))
- self.peer_serve_socket.sendall(message)
-
- #Commented due to gatherer removal
- #message = struct.pack("4sH", socket.inet_aton(gatherer[IP_ADDR]),socket.htons(gatherer[PORT]))
- #self.peer_serve_socket.sendall(message)
-
- #for p in peer_list_copy:
- for p in peer_list:
- message = struct.pack(
- "4sH", socket.inet_aton(p[IP_ADDR]),
- socket.htons(p[PORT]))
- self.peer_serve_socket.sendall(message)
-
- # {{{ debug
-
- if __debug__:
- logger.debug(str(len(peer_list)) + ' peers sent')
-
- # }}}
-
- # }}}
-
- # {{{ Close the TCP socket with the peer/gatherer
-
- self.peer_serve_socket.close()
-
- # }}}
-
- #add peer to the REAL peer_list
- peer_list.append(self.peer)
- unreliability[self.peer] = 0
- complains[self.peer] = 0
-
- logger.info(Color.cyan +
- str(self.peer) +
- ' has joined the cluster' +
- Color.none)
- # }}}
-
-# {{{ Main handler peer arrivals.
-class handle_arrivals(Thread):
- # {{{
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
- #peer_connection_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
- #peer_connection_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- #peer_connection_sock.bind(("", listening_port)) # Listen to any interface
- #peer_connection_sock.listen(5)
- #global peer_connection_sock
- while True:
- # {{{ Wait for the connection from the peer /PS0/
-
- peer_serve_socket, peer = peer_connection_sock.accept()
- handle_one_arrival(peer_serve_socket, peer).start()
-
-
-			#the thread body starts here
- '''
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(peer_serve_socket.getsockname()) +
- ' Accepted connection from peer ' +
- str(peer))
- # }}}
-
- # }}}
-
- # {{{ Send the last block to the peer /PS3/
-
-			# Request a new block from Icecast and send it to the peer
- #block = block_buffer[last_block % buffer_size]
- #payload = struct.pack("H1024s", socket.htons(last_block), block)
- #peer_serve_socket.sendall(payload)
-
- # }}}
-
- # {{{ Send the list of peers to the peer /PS4/
-
- # {{{ debug
-
- if __debug__:
- logger.debug('{}'.format(peer_serve_socket.getsockname()) +
- ' Sending the list of peers')
- # }}}
-
- message = struct.pack("H", socket.htons(len(peer_list)))
- peer_serve_socket.sendall(message)
- message = struct.pack(
- "4sH", socket.inet_aton(gatherer[IP_ADDR]),
- socket.htons(gatherer[PORT]))
- peer_serve_socket.sendall(message)
- for p in peer_list:
- message = struct.pack(
- "4sH", socket.inet_aton(p[IP_ADDR]),
- socket.htons(p[PORT]))
- peer_serve_socket.sendall(message)
-
- # {{{ debug
-
- if __debug__:
- logger.debug(str(len(peer_list)) + ' peers sent')
-
- # }}}
-
- # }}}
-
- # {{{ Close the TCP socket with the peer/gatherer
-
- peer_serve_socket.close()
-
- # }}}
-
-			# Upon the first peer arrival, the first entry of the list
-			# of peers is replaced by the peer.
- #if peer_list[0] == gatherer:
- # peer_list[0] = peer
- #else:
- #with peer_list_lock:
- #peer_list_lock.acquire()
- peer_list.append(peer)
- #peer_list_lock.release()
- unreliability[peer] = 0
- complains[peer] = 0
-
- logger.info(Color.cyan +
- str(peer) +
- ' has joined the cluster' +
- Color.none)
- '''
-		#end of the thread
- # }}}
-
-print("Peer list length: "+str(len(peer_list)))
-
-handle_arrivals().setDaemon(True) #setting the thread as daemon makes it die when the main process ends. Otherwise, it'd never stop since it runs a while(true).
-handle_arrivals().daemon=True
-handle_arrivals().start()
-
-# }}}
-
-# {{{ Create the socket to send the blocks of stream to the peers/gatherer
-
-def create_cluster_sock(listening_port):
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- # This does not work in Windows systems.
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- #peer_socket.bind(('',peer_connection_sock.getsockname()[PORT]))
-
- return sock
-
-cluster_sock = create_cluster_sock(listening_port)
-
-# }}}
-
-# {{{ Handle peer/gatherer complaints and goodbye messages (Week 10)
-
-class listen_to_the_cluster(Thread):
- # {{{
-
- def __init__(self):
- Thread.__init__(self)
-
- def run(self):
-
- global peer_index
-
- while True:
- # {{{ debug
- if __debug__:
- logger.debug('waiting for messages from the cluster')
- # }}}
- message, sender = cluster_sock.recvfrom(struct.calcsize("H"))
-
- #if len(message) == 0:
- if message == 'bye':
- try:
- peer_list.remove(sender)
- logger.info(Color.cyan +
- str(sender) +
- ' has left the cluster' +
- Color.none)
- except:
- logger.warning(Color.blue +
-					'Received a goodbye message from ' +
- str(sender) +
- ' which is not in the list of peers' +
- Color.none)
- pass
- else:
- # The sender of the packet complains and the packet
- # comes with the index of a lost block
- lost_block = struct.unpack("!H",message)[0]
- destination = destination_of_block[lost_block]
-
- logger.info(Color.cyan +
- str(sender) +
- ' complains about lost block ' +
- str(lost_block) +
- ' sent to ' +
- str(destination) +
- Color.none)
- unreliability[destination] += 1
-'''jalvaro: I'm commenting this so peers are not expelled
-#if the sender of the complaint is the gatherer then the splitter removes the offender immediately
- if sender == gatherer:
- try:
- peer_list.remove(destination)
- del unreliability[destination]
- del complains[destination]
-
- logger.info(Color.cyan +
- str(destination) +
- ' has been removed' +
- Color.none)
- except:
- pass
-
- else:
- try:
- unreliability[destination] += 1
- if unreliability[destination] > len(peer_list):
-                    # Too many complaints about an unsupportive peer
- peer_list.remove(destination)
- del unreliability[destination]
- del complains[destination]
-
- logger.info(Color.cyan +
- str(destination) +
- ' has been removed' +
- Color.none)
-
- except:
- pass
-'''
-
- # }}}
-
-listen_to_the_cluster().setDaemon(True)
-listen_to_the_cluster().daemon=True
-listen_to_the_cluster().start()
-
-# }}}
-
-# {{{ Connect to the streaming server and request the channel (week 2)
-
-source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
-source_sock.connect(source)
-
-# {{{ debug
-
-if __debug__:
- logger.debug('{}'.format(source_sock.getsockname()) +
- ' connected to the video source ' +
- '{}'.format(source_sock.getpeername()))
-
-# }}}
-
-# {{{ Request the video to the source
-
-GET_message = 'GET /' + channel + " HTTP/1.1\r\n\r\n"
-source_sock.sendall(GET_message)
-
-# }}}
-
-# {{{ debug
-
-if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- ' sending the rest of the stream ...')
-
-# }}}
-
-# {{{ Feed the peers
-
-while True:
-
- # (Week 2)
- def receive_next_block():
- # {{{
-
- global source_sock
-
- block = source_sock.recv(block_size)
- tries = 0
- while len(block) < block_size:
- tries += 1
- if tries > 3:
-
- # {{{ debug
- if __debug__:
- logger.debug('GET')
- # }}}
-
- time.sleep(1)
- source_sock.close()
- source_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- source_sock.connect(source)
- source_sock.sendall(GET_message)
-
- #block += source_sock.recv(1024-len(block))
- block += source_sock.recv(block_size - len(block))
- return block
-
- # }}}
-
- block = receive_next_block()
- #block = source_sock.brecv(block_size)
-
- # {{{ debug
- if __debug__:
-
- logger.debug('{}'.format(source_sock.getsockname()) +
- Color.green + ' <- ' + Color.none +
- '{}'.format(source_sock.getpeername()) +
- ' ' +
- '{}'.format(block_number))
- # }}}
- print("Block "+str(block_number)+" received")
-
- #with peer_list_lock:
- #peer_list_lock.acquire()
- len_peer_list = len(peer_list)
- print("Length of peer_list: "+str(len_peer_list))
- #if peer_index < len_peer_list:
- try:
- peer = peer_list[peer_index]
-		print("Destination peer: "+str(peer))
- except:
- try:
- peer = peer_list[0]
-			print("Destination peer 2: "+str(peer))
- except:
- #Commented due to gatherer removal
- #peer = gatherer
- #len_peer_list = 1
- peer = None
- len_peer_list = 1 #should be zero but that would raise a modulo by zero exception
-			print("No peers")
-
- #peer_list_lock.release()
-
- # {{{ debug
- if __debug__:
- logger.debug('{}'.format(cluster_sock.getsockname()) +
- Color.green + ' -> ' + Color.none +
- '{}'.format(peer) +
- ' ' +
- '{}'.format(block_number))
- # }}}
-
- print("peer != None: "+str(peer!=None))
- if peer != None:
- print("Sending block "+str(block_number))
- #message = struct.pack("H1024s", socket.htons(block_number), block)
- message = struct.pack(block_format_string, socket.htons(block_number), block)
- #if not (block_number%2)==0:
- cluster_sock.sendto(message, peer)
- #print("Block "+str(block_number)+"sent to "+str(peer))
-
-		# Careful, it sometimes crashes with: "IndexError: list index out of range"
- destination_of_block[block_number % buffer_size] = peer
-
- peer_index = (peer_index + 1) % len_peer_list
-
- block_number = (block_number + 1) % 65536
-
- total_blocks += 1
-
- '''
- #decrement unreliability and complaints after every 256 packets
- if (block_number % 256) == 0:
- for i in unreliability:
- unreliability[i] /= 2
- for i in complains:
- complains[i] /= 2
- '''
-
-# }}}
diff --git a/sim-cluster/stop_simulation.sh b/sim-cluster/stop_simulation.sh
deleted file mode 100755
index 727da19..0000000
--- a/sim-cluster/stop_simulation.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-#Kill every related process. Don't kill oggfwd!
-pkill splitter
-pkill gatherer
-pkill vlc
-pkill peer
diff --git a/sim-cluster/test_get.py b/sim-cluster/test_get.py
deleted file mode 100755
index 22cb149..0000000
--- a/sim-cluster/test_get.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-15 -*-
-
-# {{{ GNU GENERAL PUBLIC LICENSE
-
-# This is the splitter node of the P2PSP (Peer-to-Peer Simple Protocol)
-# .
-#
-# Copyright (C) 2013 Cristobal Medina López, Juan Pablo García Ortiz,
-# Juan Alvaro Muñoz Naranjo, Leocadio González Casado and Vicente
-# González Ruiz.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# }}}
-
-# Test the connection with the streaming server.
-
-# {{{ imports
-
-import logging
-import socket
-from blocking_TCP_socket import blocking_TCP_socket
-import sys
-import struct
-import time
-from threading import Thread
-from threading import Lock
-from colors import Color
-import signal
-from time import gmtime, strftime
-import os
-import argparse
-
-# }}}
-
-IP_ADDR = 0
-PORT = 1
-
-block_size = 1024
-channel = '134.ogg'
-source_name = '150.214.150.68'
-source_port = 4551
-listening_port = 4552
-
-# {{{ Args handing
-
-parser = argparse.ArgumentParser(description='This is a test_get of a P2PSP cluster.')
-parser.add_argument('--block_size', help='Block size in bytes. (Default = {})'.format(block_size))
-parser.add_argument('--channel', help='Name of the channel served by the streaming source. (Default = "{}")'.format(channel))
-parser.add_argument('--source_name', help='Name of the streaming server. (Default = "{}")'.format(source_name))
-parser.add_argument('--source_port', help='Listening port of the streaming server. (Default = {})'.format(source_port))
-parser.add_argument('--listening_port', help='Port to talk with the drain and peers. (Default = {})'.format(listening_port))
-
-args = parser.parse_known_args()[0]
-if args.block_size:
- block_size = int(args.block_size)
-if args.channel:
- channel = args.channel
-if args.source_name:
- source_name = args.source_name
-if args.source_port:
- source_port = int(args.source_port)
-if args.listening_port:
- listening_port = int(args.listening_port)
-
-source = (source_name, source_port)
-
-# {{{ debug
-if __debug__:
- print 'Running in debug mode'
- logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
- datefmt='%H:%M:%S',
- level=logging.WARNING)
-else:
- print 'Running in release mode'
- logging.basicConfig(format='%(asctime)s.%(msecs)d %(message)s',
- datefmt='%H:%M:%S',
- level=logging.CRITICAL)
-# }}}
-
-# {{{ The drain is blocked until a player establishes a connection. (Week 4/6)
-
-def get_player_socket():
-
- # {{{
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- # In Windows systems this call doesn't work!
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except:
- pass
- sock.bind(('', listening_port))
- sock.listen(0)
-
- # {{{ debug
- if __debug__:
- logging.warning('{}'.format(sock.getsockname())
- + ' Waiting for the player connection ...')
- # }}}
-
- sock, player = sock.accept()
- #sock.setblocking(0)
- return sock
-
- # }}}
-
-player_sock = get_player_socket()
-
-# }}}
-
-source_sock = blocking_TCP_socket(socket.AF_INET, socket.SOCK_STREAM)
-source_sock.connect(source)
-source_sock.sendall("GET /" + channel + " HTTP/1.1\r\n\r\n")
-
-header_size = 1000000
-
-'''
-data = source_sock.recv(header_size)
-total_received = len(data)
-player_sock.sendall(data)
-while total_received < header_size:
- if __debug__:
- logging.warning('h')
- data = source_sock.recv(header_size - len(data))
- player_sock.sendall(data)
- total_received += len(data)
-'''
-
-
-block = source_sock.brecv(block_size)
-total_received = len(block)
-player_sock.sendall(block)
-print total_received
-while total_received < header_size:
- if __debug__:
- logging.warning(str(len(block)))
- block = source_sock.brecv(block_size)
- player_sock.sendall(block)
- total_received += block_size
-
diff --git a/sim-cluster/test_p2psp.sh b/sim-cluster/test_p2psp.sh
deleted file mode 100755
index 3baa5c5..0000000
--- a/sim-cluster/test_p2psp.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-xterm -e "./splitter.py" &
-xterm -e './gatherer.py --splitter_name="localhost"' &
-vlc http://localhost:9999 &
diff --git a/src/InvObserver.java b/src/InvObserver.java
new file mode 100755
index 0000000..46ce109
--- /dev/null
+++ b/src/InvObserver.java
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2003-2005 The BISON Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+package txrelaysim.src;
+
+import txrelaysim.src.helpers.*;
+
+import peersim.config.*;
+import peersim.core.*;
+import peersim.util.*;
+
+import java.util.Map;
+import java.util.Iterator;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.HashMap;
+import java.util.*;
+
+
+public class InvObserver implements Control
+{
+ /**
+ * The protocol to operate on.
+ * @config
+ */
+ private static final String PAR_PROT = "protocol";
+
+ /** The name of this observer in the configuration */
+ private final String name;
+
+ /** Protocol identifier */
+ private final int pid;
+
+ /**
+ * Standard constructor that reads the configuration parameters.
+ * Invoked by the simulation engine.
+ * @param name the configuration prefix for this class
+ */
+ public InvObserver(String name) {
+ this.name = name;
+ pid = Configuration.getPid(name + "." + PAR_PROT);
+ }
+
+ public enum Protocol {
+ ERLAY,
+ LEGACY,
+ }
+ public enum NodeType {
+ REACHABLE,
+ PRIVATE,
+ }
+
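+	/**
+	 * Aggregates the per-node counters at the end of the run and prints bandwidth and latency
+	 * statistics. As a PeerSim Control, returning false lets the simulation continue.
+	 */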
+ public boolean execute() {
+ // Track how many invs and txs were sent.
+		HashMap<Protocol, Integer> invsByProtocol = new HashMap<>();
+		HashMap<Protocol, Integer> txsByProtocol = new HashMap<>();
+		HashMap<NodeType, Integer> invsByNodeType = new HashMap<>();
+		HashMap<NodeType, Integer> txsByNodeType = new HashMap<>();
+		HashMap<NodeType, Integer> shortInvsByNodeType = new HashMap<>();
+
+ // Track reconciliation results across experiments.
+		ArrayList<Integer> successRecons = new ArrayList<>();
+		ArrayList<Integer> failedRecons = new ArrayList<>();
+		// Track how quickly transactions propagated across the network.
+		HashMap<Integer, ArrayList<Long>> txArrivalTimes = new HashMap<Integer, ArrayList<Long>>();
+ int blackHoles = 0, reconcilingNodes = 0, reachableNodes = 0;
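+		// "Black holes" are presumably nodes that accept transactions but never announce them
+		// further; their counters are skipped below, so they do not distort the bandwidth stats.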
+
+ for(int i = 1; i < Network.size(); i++) {
+ Peer peer = (Peer) Network.get(i).getProtocol(pid);
+
+ // Store all arrival times (at every node) for all transactions. We will later use this
+ // to calculate latency.
+ Iterator it = peer.txArrivalTimes.entrySet().iterator();
+ while (it.hasNext()) {
+ Map.Entry pair = (Map.Entry)it.next();
+ Integer txId = (Integer)pair.getKey();
+ Long arrivalTime = (Long)pair.getValue();
+ if (txArrivalTimes.get(txId) == null) {
+ txArrivalTimes.put(txId, new ArrayList<>());
+ }
+ txArrivalTimes.get(txId).add(arrivalTime);
+ }
+
+ // See how many black holes there were.
+ if (peer.isBlackHole) {
+ ++blackHoles;
+ continue;
+ }
+
+ if (peer.isReachable) {
+ invsByNodeType.put(NodeType.REACHABLE, invsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.invsSent);
+ txsByNodeType.put(NodeType.REACHABLE, txsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.txSent);
+ reachableNodes++;
+ } else {
+ invsByNodeType.put(NodeType.PRIVATE, invsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.invsSent);
+ txsByNodeType.put(NodeType.PRIVATE, txsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.txSent);
+ }
+
+ // See how many inv/shortinv/tx messages every node sent.
+ if (peer.reconcile) {
+ invsByProtocol.put(Protocol.ERLAY, invsByProtocol.getOrDefault(Protocol.ERLAY, 0) + peer.invsSent);
+ txsByProtocol.put(Protocol.ERLAY, txsByProtocol.getOrDefault(Protocol.ERLAY, 0) + peer.txSent);
+ reconcilingNodes++;
+ successRecons.add(peer.successRecons);
+ failedRecons.add(peer.failedRecons);
+
+ if (peer.isReachable) {
+ shortInvsByNodeType.put(NodeType.REACHABLE, shortInvsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + peer.shortInvsSent);
+ } else {
+ shortInvsByNodeType.put(NodeType.PRIVATE, shortInvsByNodeType.getOrDefault(NodeType.PRIVATE, 0) + peer.shortInvsSent);
+ }
+ } else {
+ invsByProtocol.put(Protocol.LEGACY, invsByProtocol.getOrDefault(Protocol.LEGACY, 0) + peer.invsSent);
+ txsByProtocol.put(Protocol.LEGACY, txsByProtocol.getOrDefault(Protocol.LEGACY, 0) + peer.txSent);
+ }
+ }
+
+ int allTxs = txArrivalTimes.size();
+
+ if (allTxs == 0) {
+ return false;
+ }
+
+		// Measure the delay it took for each transaction to reach the majority of the nodes (based on arrival times).
+		ArrayList<Long> avgTxArrivalDelay = new ArrayList<>();
+ Iterator it = txArrivalTimes.entrySet().iterator();
+ while (it.hasNext()) {
+ Map.Entry pair = (Map.Entry)it.next();
+ // A workaround to avoid unchecked cast.
+			ArrayList<?> ar = (ArrayList<?>) pair.getValue();
+			ArrayList<Long> arrivalTimes = new ArrayList<>();
+
+ if (ar.size() < (Network.size() - 1) * 0.99) {
+ // Don't bother printing results if relay is in progress (some nodes didn't receive
+ // the transactions yet).
+ continue;
+ }
+
+ for (Object x : ar) {
+ arrivalTimes.add((Long) x);
+ }
+
+ Collections.sort(arrivalTimes);
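+			// Latency metric: the gap between the first arrival and the 95th-percentile arrival,
+			// i.e. roughly how long the transaction took to cover almost the whole network.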
+ int percentile95Index = (int)(arrivalTimes.size() * 0.95);
+ Long percentile95delay = (arrivalTimes.get(percentile95Index) - arrivalTimes.get(0));
+ avgTxArrivalDelay.add(percentile95delay);
+ }
+
+ System.err.println("");
+ System.err.println("");
+ System.err.println("");
+ System.err.println("-----------RESULTS--------");
+ System.err.println("Relayed txs: " + allTxs);
+
+ double avgMaxDelay = avgTxArrivalDelay.stream().mapToLong(val -> val).average().orElse(0.0);
+ System.out.println("Avg max latency: " + avgMaxDelay);
+
+ if (blackHoles != 0) {
+ // Note that black holes are only useful to measure latency
+ // impact, measuring/comparing bandwidth is currently not supported because it depends
+ // on how exactly black holes operate (do they reconcile with empty sketches? or drop
+ // sketches/requests on the floor?).
+ return false;
+ }
+ System.out.println("Total bandwidth per tx");
+ int shortInvsTotal = shortInvsByNodeType.getOrDefault(NodeType.REACHABLE, 0) + shortInvsByNodeType.getOrDefault(NodeType.PRIVATE, 0);
+ int invsTotal = invsByNodeType.get(NodeType.REACHABLE) + invsByNodeType.get(NodeType.PRIVATE);
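+		// Short invs are weighted at 0.25 of a full inv entry here, presumably reflecting the
+		// smaller short-id encoding used during reconciliation.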
+ System.out.println("INV items: " + (invsTotal + shortInvsTotal * 0.25) / allTxs / (Network.size() - 1));
+
+ System.out.println("");
+ System.out.println("Total bandwidth per tx based on the protocol");
+ int legacyNodes = Network.size() - reconcilingNodes - 1;
+ if (legacyNodes > 0) {
+ System.out.println("Legacy:");
+ System.out.println("INV items: " + (invsByProtocol.get(Protocol.LEGACY) * 1.0 / allTxs / legacyNodes));
+ System.out.println("TX items: " + (txsByProtocol.get(Protocol.LEGACY) * 1.0 / allTxs / legacyNodes));
+ }
+ if (reconcilingNodes > 0) {
+ System.out.println("Reconciling:");
+ System.out.println("INV items: " + (invsByProtocol.get(Protocol.ERLAY) + shortInvsTotal * 0.25) / allTxs / reconcilingNodes);
+ System.out.println("Of them short invs: " + (shortInvsTotal * 0.25 / allTxs / reconcilingNodes));
+ System.out.println("TX items: " + (txsByProtocol.get(Protocol.ERLAY) * 1.0 / allTxs / reconcilingNodes));
+
+ double avgSuccessRecons = successRecons.stream().mapToInt(val -> val).average().orElse(0.0);
+ if (avgSuccessRecons > 0) {
+ System.out.println(avgSuccessRecons + " successful recons on average.");
+
+ double avgFailedRecons = failedRecons.stream().mapToInt(val -> val).average().orElse(0.0);
+ System.out.println(avgFailedRecons + " failed recons on average.");
+ }
+ }
+
+ System.out.println("");
+ System.out.println("Total bandwidth per tx based on reachability");
+ int privateNodes = Network.size() - reachableNodes - 1;
+ System.out.println("Reachable:");
+ System.out.println("INV items: " + (invsByNodeType.get(NodeType.REACHABLE) * 1.0 / allTxs / reachableNodes));
+ System.out.println("TX items: " + (txsByNodeType.get(NodeType.REACHABLE) * 1.0 / allTxs / reachableNodes));
+
+ System.out.println("Private:");
+ System.out.println("INV items: " + (invsByNodeType.get(NodeType.PRIVATE) * 1.0 / allTxs / privateNodes));
+ System.out.println("TX items: " + (txsByNodeType.get(NodeType.PRIVATE) * 1.0 / allTxs / privateNodes));
+ System.err.println("");
+ return false;
+ }
+}
diff --git a/src/Neighbor.java b/src/Neighbor.java
deleted file mode 100644
index 51930e5..0000000
--- a/src/Neighbor.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package sim.src;
-
-import peersim.core.Node;
-
-public class Neighbor {
-
- private Node node;
-
- public Neighbor(Node node) {
- this.node = node;
- }
-
- public Node getNode() {
- return this.node;
- }
-
-}
diff --git a/src/Peer.java b/src/Peer.java
index c5f8a6d..78ec726 100755
--- a/src/Peer.java
+++ b/src/Peer.java
@@ -1,148 +1,411 @@
-package sim.src;
+package txrelaysim.src;
+
+import txrelaysim.src.helpers.*;
import java.util.ArrayList;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.HashMap;
+import java.util.Queue;
+import java.util.Map;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Random;
+import java.util.Collections;
import peersim.cdsim.CDProtocol;
import peersim.config.Configuration;
import peersim.config.FastConfig;
import peersim.core.Network;
import peersim.core.Node;
+import peersim.core.CommonState;
import peersim.edsim.*;
import peersim.transport.Transport;
public class Peer implements CDProtocol, EDProtocol
{
+ /* System */
public static int pidPeer;
- public boolean isPeer = false;
- private int bufferSize;
- public IntMessage[] buffer;
- public ArrayList peerList;
- public ArrayList badPeerList;
- public boolean isMalicious = false;
- public boolean isTrusted = false;
+
+ /* Constants and delays. Reconciliation only! */
+ public double inFloodLimitPercent;
+ public double outFloodLimitPercent;
+ public int reconciliationInterval;
+ public int inRelayDelay;
+ public int outRelayDelay;
+ public double defaultQ;
+
+ /* State */
+ public boolean isReachable = false;
+ public boolean isBlackHole = false;
+ public ArrayList<Node> outboundPeers;
+ public ArrayList<Node> inboundPeers;
+ public HashMap<Integer, Long> txArrivalTimes;
+ public HashMap<Node, HashSet<Integer>> peerKnowsTxs;
+ public long nextFloodInbound = 0;
+
+ /* Reconciliation state */
+ public boolean reconcile = false;
+ public Queue<Node> reconciliationQueue;
+ public long nextRecon = 0;
+ // Per-peer sets of transactions queued for reconciliation. A peer's presence in this map
+ // also indicates that it supports reconciliation.
+ private HashMap<Node, HashSet<Integer>> reconSets;
+
+ /* Stats */
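+ // Per-node message counters; the observer aggregates them across the network at the end of a run.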
+ public int invsSent;
+ public int shortInvsSent;
+ public int txSent;
+
+ public int successRecons;
+ public int extSuccessRecons;
+ public int failedRecons;
public Peer(String prefix) {
- bufferSize = Configuration.getInt(prefix+".buffer_size", 32);
- buffer = new IntMessage[bufferSize];
- peerList = new ArrayList();
- badPeerList = new ArrayList();
+ inboundPeers = new ArrayList<>();
+ outboundPeers = new ArrayList<>();
+ reconciliationQueue = new LinkedList<>();
+ reconSets = new HashMap<>();
+ peerKnowsTxs = new HashMap<>();
+ txArrivalTimes = new HashMap<>();
+ }
+
+ public Object clone() {
+ return new Peer("");
}
-
+
+ @Override
+ public void nextCycle(Node node, int pid) {
+ if (reconcile) {
+ // If reconciliation is enabled on this node, it should periodically request reconciliations
+ // with a queue of its reconciling peers.
+ long curTime = CommonState.getTime();
+ if (reconciliationQueue.peek() != null && curTime > nextRecon) {
+ Node recipient = reconciliationQueue.poll();
+
+ SimpleMessage request = new SimpleMessage(SimpleEvent.RECON_REQUEST, node);
+ ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, request, Peer.pidPeer);
+
+ // Move this node to the end of the queue, schedule the next reconciliation.
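+ // Spacing requests by reconciliationInterval / queue size keeps each peer reconciled roughly
+ // once per reconciliationInterval (round-robin over the queue).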
+ reconciliationQueue.offer(recipient);
+ nextRecon = curTime + (reconciliationInterval / reconciliationQueue.size());
+ }
+ }
+ }
+
@Override
- public void nextCycle(Node node, int pid) {}
-
- /**
- * The last packet FROM THE SOURCE from anyone is resent to everyone
- * @Override
- */
public void processEvent(Node node, int pid, Object event) {
SimpleEvent castedEvent = (SimpleEvent)event;
switch (castedEvent.getType()) {
- case SimpleEvent.CHUNK:
- processChunkMessage(node, pid, (IntMessage)castedEvent);
+ case SimpleEvent.INV:
+ // INV received from a peer.
+ handleInvMessage(node, pid, (IntMessage)castedEvent);
break;
- case SimpleEvent.PEERLIST:
- processPeerlistMessage(node, pid, (ArrayListMessage)castedEvent);
+ case SimpleEvent.RECON_REQUEST:
+ // Reconciliation request from a peer.
+ handleReconRequest(node, pid, (SimpleMessage)castedEvent);
+ break;
+ case SimpleEvent.SKETCH:
+ // Sketch from a peer in response to reconciliation request.
+ ArrayListMessage ar = (ArrayListMessage) castedEvent;
+ ArrayList<Integer> remoteSet = new ArrayList<>();
+ for (Object x : ar.getArrayList()) {
+ remoteSet.add((Integer) x);
+ }
+ handleSketchMessage(node, pid, ar.getSender(), remoteSet);
break;
- case SimpleEvent.HELLO:
- processHelloMessage(node, pid, (SimpleMessage)castedEvent);
+ case SimpleEvent.SCHEDULED_INV:
+ // Self-scheduled INV to be sent to a peer.
+ executeScheduledInv(node, pid, (TupleMessage)castedEvent);
break;
- case SimpleEvent.GOODBYE:
- processGoodbyeMessage(node, pid, (SimpleMessage)castedEvent);
- break;
- case SimpleEvent.BAD_PEER:
- processBadPeerMessage(node, pid, (IntMessage)castedEvent);
+ case SimpleEvent.RECON_FINALIZATION:
+ // We use this to track how many inv/short inv messages were sent, for stats.
+ handleReconFinalization(node, pid, (ArrayListMessage)castedEvent);
+ break;
+ case SimpleEvent.GETDATA:
+ // We use this just for bandwidth accounting; the actual txId (what we need) was already
+ // communicated, so there is nothing else to do here.
+ ++txSent;
break;
}
}
-
- private void processChunkMessage(Node node, int pid, IntMessage message) {
- storeInBuffer(node, message);
- if(message.getSender().getIndex() == SourceInitializer.sourceIndex) { //the sender is the source
- int latencySum = 0;
- for (Neighbor peer : peerList) {
- IntMessage chunkMessage = new IntMessage(SimpleEvent.CHUNK, node, message.getInteger() * (this.isMalicious ? -1 : 1));
- latencySum += chunkMessage.getLatency(peer.getNode(), pid);
- EDSimulator.add(latencySum, chunkMessage, peer.getNode(), pid);
- }
- } else {
- if (this.isTrusted) {
- TupleMessage chunkCheckMessage = new TupleMessage(SimpleEvent.CHUNK_CHECK, node, message.getSender().getIndex(), message.getInteger());
- long latency = chunkCheckMessage.getLatency(Network.get(0), pid);
- EDSimulator.add(latency, chunkCheckMessage, Network.get(0), Source.pidSource);
- }
- if (!isInBadPeerList(message.getSender().getIndex())) {
- addNewNeighbor(message.getSender());
+
+ // Handle a transaction announcement (INV) from a peer. Remember when the transaction was
+ // announced, and schedule it for further relay to other peers.
+ private void handleInvMessage(Node node, int pid, IntMessage message) {
+ int txId = message.getInteger();
+ Node sender = message.getSender();
+
+ if (sender.getID() != 0) {
+ // The INV did not come from the source.
+ peerKnowsTxs.get(sender).add(txId);
+ if (reconcile && reconSets.containsKey(sender)) {
+ removeFromReconSet(node, txId, sender);
}
}
+
+ if (!txArrivalTimes.keySet().contains(txId)) {
+ SimpleMessage getdata = new SimpleMessage(SimpleEvent.GETDATA, node);
+ ((Transport)sender.getProtocol(FastConfig.getTransport(pid))).send(node, sender, getdata, Peer.pidPeer);
+ txArrivalTimes.put(txId, CommonState.getTime());
+ relayTx(node, pid, txId, sender);
+ }
}
-
- private void storeInBuffer(Node node, IntMessage message) {
- if (!isInBadPeerList(message.getSender().getIndex())) {
- buffer[Math.abs(message.getInteger()) % buffer.length] = message;
+
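+ // Respond to a reconciliation request: send the requesting peer a sketch (modeled here simply
+ // as the list of txIds queued for it), mark those txs as known to the peer, and clear the set.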
+ private void handleReconRequest(Node node, int pid, SimpleMessage message) {
+ Node sender = message.getSender();
+
+ long curTime = CommonState.getTime();
+ HashSet<Integer> reconSet = reconSets.get(sender);
+ ArrayListMessage sketch = new ArrayListMessage(SimpleEvent.SKETCH, node, new ArrayList<Integer>(reconSet));
+ ((Transport)sender.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, sender, sketch, Peer.pidPeer);
+ for (Integer txId: reconSet) {
+ peerKnowsTxs.get(sender).add(txId);
}
+ reconSet.clear();
}
-
- private boolean isInBadPeerList(int index) {
- boolean isInBadPeerList = false;
- for (int peer : badPeerList) {
- if (peer == index) {
- isInBadPeerList = true;
- break;
+
+ // Handle a sketch a peer sent us in response to our request. All sketch extension logic and
+ // txId exchange is done here implicitly, without actually sending messages, because it can be
+ // easily modeled and accounted for locally at this node.
+ private void handleSketchMessage(Node node, int pid, Node sender, ArrayList<Integer> remoteSet) {
+ Set<Integer> localSet = reconSets.get(sender);
+ int shared = 0, usMiss = 0, theyMiss = 0;
+ // Handle transactions the local (sketch receiving) node doesn't have.
+ for (Integer txId : remoteSet) {
+ peerKnowsTxs.get(sender).add(txId);
+ if (localSet.contains(txId)) {
+ ++shared;
+ } else {
+ ++usMiss;
+ if (!txArrivalTimes.keySet().contains(txId)) {
+ SimpleMessage getdata = new SimpleMessage(SimpleEvent.GETDATA, node);
+ ((Transport)sender.getProtocol(FastConfig.getTransport(pid))).send(node, sender, getdata, Peer.pidPeer);
+ txArrivalTimes.put(txId, CommonState.getTime());
+ relayTx(node, pid, txId, sender);
+ } else {
+ // This is an edge case: sometimes a local set doesn't have a transaction
+ // although we did receive/record it. It happens when we announce a transaction
+ // to the peer and remove it from the set while the peer is sending us a sketch
+ // including the same transaction.
+ }
}
}
- return isInBadPeerList;
- }
-
- private void processPeerlistMessage(Node node, int pid, ArrayListMessage message) {
- peerList.clear();
- for (Neighbor peer : message.getArrayList()) {
- peerList.add(peer);
- SimpleMessage helloMessage = new SimpleMessage(SimpleEvent.HELLO, node);
- long latency = helloMessage.getLatency(peer.getNode(), pid);
- EDSimulator.add(latency, helloMessage, peer.getNode(), pid);
+
+ // Handle transactions which the remote (sketch sending) node doesn't have.
+ for (Integer txId : localSet) {
+ if (!remoteSet.contains(txId)) {
+ theyMiss++;
+ IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId);
+ ((Transport)sender.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, sender, inv, Peer.pidPeer);
+ ++invsSent;
+ }
}
+
+ // Compute the cost of this sketch exchange.
+ int diff = usMiss + theyMiss;
+ // This is a technicality of the simulator: in the finalization message we will notify
+ // the node how many INVs they supposedly sent us in this reconciliation round.
+ int theySentInvs = 0, theySentShortInvs = 0;
+
+ // Although diff estimation should happen on the sketch sender's side, we do it here because
+ // it works in our simplified model and saves extra messages.
+ // To make it more detailed, we could remember the set size at request time here.
+ int localSetSize = localSet.size();
+ int remoteSetSize = remoteSet.size();
+ // TODO: Q could be dynamically updated after each reconciliation.
+ int capacity = Math.abs(localSetSize - remoteSetSize) + (int)(defaultQ * Math.min(localSetSize, remoteSetSize)) + 1;
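+ // Example: with localSetSize = 10, remoteSetSize = 8 and defaultQ = 0.1, capacity = 2 + 0 + 1 = 3,
+ // so a diff of up to 2 succeeds right away and a diff of up to 5 succeeds after extension.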
+ if (capacity > diff) {
+ // Reconciliation succeeded right away.
+ successRecons++;
+ theySentShortInvs = capacity; // account for sketch
+ shortInvsSent += usMiss;
+ theySentInvs += usMiss;
+ } else if (capacity * 2 > diff) {
+ // Reconciliation succeeded after extension.
+ extSuccessRecons++;
+ theySentShortInvs = capacity * 2; // account for sketch and extension
+ shortInvsSent += usMiss;
+ theySentInvs += usMiss;
+ } else {
+ // Reconciliation failed.
+ failedRecons++;
+ theySentShortInvs = capacity * 2; // account for sketch and extension
+ // Above, we already sent them invs they miss.
+ // Here, we just account for all the remaining full invs: what we miss, and shared txs.
+ // I think ideally the "inefficient" overlap between our set and their set should
+ // be sent by us, hence the accounting below.
+ invsSent += shared;
+ theySentInvs = usMiss;
+ }
+
+ ArrayList<Integer> finalizationData = new ArrayList<>();
+ finalizationData.add(theySentInvs);
+ finalizationData.add(theySentShortInvs);
+
+ // System.err.println(theySentShortInvs);
+
+ ArrayListMessage reconFinalization = new ArrayListMessage(
+ SimpleEvent.RECON_FINALIZATION, node, finalizationData);
+ ((Transport)sender.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(
+ node, sender, reconFinalization, Peer.pidPeer);
+
+ localSet.clear();
}
- private void processHelloMessage(Node node, int pid, SimpleMessage message) {
- addNewNeighbor(message.getSender());
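+ // The peer that received our sketch reports how many INVs and short INVs we effectively sent
+ // in this round (computed in its handleSketchMessage); add them to our counters.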
+ private void handleReconFinalization(Node node, int pid, ArrayListMessage message) {
+ invsSent += (Integer)message.getArrayList().get(0);
+ shortInvsSent += (Integer)message.getArrayList().get(1);
}
- private void processGoodbyeMessage(Node node, int pid, SimpleMessage message) {
- // remove neighbor from peerList
+ // A node previously scheduled a transaction announcement to the peer. Execute it here when
+ // this function is called by the scheduler.
+ private void executeScheduledInv(Node node, int pid, TupleMessage scheduledInv) {
+ Node recipient = scheduledInv.getX();
+ int txId = scheduledInv.getY();
+ boolean shouldFlood = scheduledInv.getZ();
+ if (!peerKnowsTxs.get(recipient).contains(txId)) {
+ peerKnowsTxs.get(recipient).add(txId);
+
+ if (reconcile && reconSets.containsKey(recipient)) {
+ if (shouldFlood) {
+ removeFromReconSet(node, txId, recipient);
+ } else {
+ reconSets.get(recipient).add(txId);
+ }
+ }
+
+ if (shouldFlood) {
+ IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId);
+ ((Transport)recipient.getProtocol(FastConfig.getTransport(Peer.pidPeer))).send(node, recipient, inv, Peer.pidPeer);
+ ++invsSent;
+ }
+ }
}
-
- private void addNewNeighbor(Node node) {
- boolean isExist = false;
- for (Neighbor peer : peerList) {
- if (peer.getNode().getID() == node.getID()) {
- isExist = true;
- break;
+
+ private void relayTx(Node node, int pid, int txId, Node sender) {
+ if (isBlackHole) {
+ // Black holes don't relay. Note that black holes are only useful to measure latency
+ // impact, measuring/comparing bandwidth is currently not supported because it depends
+ // on how exactly black holes operate (do they reconcile with empty sketches? or drop
+ // sketches/requests on the floor?).
+ return;
+ }
+
+ // Send to inbounds (flood or recon).
+ // To preserve privacy against inbound observers with multiple connections,
+ // all inbound peers share the same flood timer (as in the Bitcoin peer-to-peer layer).
+ long delay;
+ long curTime = CommonState.getTime();
+ if (nextFloodInbound < curTime) {
+ nextFloodInbound = curTime + generateRandomDelay(this.inRelayDelay);
+ delay = 0;
+ } else {
+ delay = nextFloodInbound - curTime;
+ }
+
+ // Send to inbounds.
+ // First flood to all non-reconciling peers.
+ // Then flood to a random subset of remaining reconciling peers, according to a defined
+ // fraction. For the rest, reconcile.
+ int flooded = 0;
+ for (Node peer : inboundPeers) {
+ if (!reconSets.containsKey(peer)) { // check for non-reconciling
+ scheduleInv(node, delay, peer, txId, true);
+ flooded++;
+ }
+ }
+
+ double alreadyFloodedPercent, remainsToFloodPercent;
+ Random randomNum = new Random();
+ // Now flood to a random subset of remaining (reconciling) peers, according to a defined
+ // fraction. For the rest, reconcile.
+ if (inboundPeers.size() > 0) {
+ alreadyFloodedPercent = flooded * 100.0 / inboundPeers.size();
+ // We flip a coin for every peer, for the sake of randomness (and thus privacy).
+ remainsToFloodPercent = inFloodLimitPercent - alreadyFloodedPercent;
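+ // Example: with inFloodLimitPercent = 10 and 4 of 40 inbound peers being legacy (already
+ // flooded above), remainsToFloodPercent is 0 and the reconciling inbounds only reconcile.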
+ Collections.shuffle(inboundPeers);
+ for (Node peer : inboundPeers) {
+ // Skip non-reconciling peers.
+ if (!reconSets.containsKey(peer)) continue;
+
+ boolean shouldFlood = false;
+ if (randomNum.nextInt(100) < remainsToFloodPercent) {
+ shouldFlood = true;
+ }
+ scheduleInv(node, delay, peer, txId, shouldFlood);
}
}
- if (!isExist) {
- peerList.add(new Neighbor(node));
+
+ // Send to outbounds.
+ // First flood to all non-reconciling peers.
+ flooded = 0;
+ for (Node peer : outboundPeers) {
+ if (!reconSets.containsKey(peer)) { // check for non-reconciling
+ delay = generateRandomDelay(this.outRelayDelay);
+ scheduleInv(node, delay, peer, txId, true);
+ flooded++;
+ }
+ }
+
+ // Now flood to a random subset of remaining (reconciling) peers, according to a defined
+ // fraction. For the rest, reconcile.
+ alreadyFloodedPercent = flooded * 100.0 / outboundPeers.size();
+ // We flip a coin for every peer, for the sake of randomness (and thus privacy).
+ remainsToFloodPercent = outFloodLimitPercent - alreadyFloodedPercent;
+ Collections.shuffle(outboundPeers);
+ for (Node peer : outboundPeers) {
+ // Skip non-reconciling peers.
+ if (!reconSets.containsKey(peer)) continue;
+
+ delay = generateRandomDelay(this.outRelayDelay);
+ boolean shouldFlood = false;
+ if (randomNum.nextInt(100) < remainsToFloodPercent) {
+ shouldFlood = true;
+ }
+ scheduleInv(node, delay, peer, txId, shouldFlood);
}
}
-
- private void processBadPeerMessage(Node node, int pid, IntMessage message) {
- badPeerList.add(message.getInteger());
- removeNeighbor(message.getInteger());
+
+ private void removeFromReconSet(Node node, int txId, Node target) {
+ if (reconSets.get(target).contains(txId)) {
+ reconSets.get(target).remove(txId);
+ }
}
-
- private void removeNeighbor(int index) {
- Neighbor toRemove = null;
- for (Neighbor peer : peerList) {
- if (peer.getNode().getIndex() == index) {
- toRemove = peer;
- break;
- }
+
+ // We don't announce transactions right away; a delay is usually applied to make relay
+ // more private.
+ private void scheduleInv(Node node, long delay, Node recipient, int txId, boolean shouldFlood) {
+ if (recipient.getID() == 0) {
+ // Don't send to source.
+ return;
+ }
+
+ if (peerKnowsTxs.get(recipient).contains(txId)) {
+ return;
+ }
+ TupleMessage scheduledInv = new TupleMessage(SimpleEvent.SCHEDULED_INV, node, recipient, txId, shouldFlood);
+ EDSimulator.add(delay, scheduledInv, node, Peer.pidPeer); // send to self.
+ }
+
+ // A helper for scheduling events which happen after a random delay.
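+ // The delay is uniform in [0, 2 * avgDelay], so it averages to avgDelay.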
+ private long generateRandomDelay(long avgDelay) {
+ return CommonState.r.nextLong(avgDelay * 2 + 1);
+ }
+
+ // Used for setting up the topology.
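+ // If both sides support reconciliation, outbound peers are also added to the reconciliation queue.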
+ public void addPeer(Node peer, boolean outbound, boolean supportsRecon) {
+ if (outbound) {
+ assert(!outboundPeers.contains(peer));
+ outboundPeers.add(peer);
+ } else {
+ assert(!inboundPeers.contains(peer));
+ inboundPeers.add(peer);
+ }
+ peerKnowsTxs.put(peer, new HashSet<>());
+ if (reconcile && supportsRecon) {
+ if (outbound) { reconciliationQueue.offer(peer); }
+ reconSets.put(peer, new HashSet<>());
}
- peerList.remove(toRemove);
}
-
- public Object clone() {
- return new Peer("");
- }
}
\ No newline at end of file
diff --git a/src/PeerInitializer.java b/src/PeerInitializer.java
index 5e9f088..eb800ce 100755
--- a/src/PeerInitializer.java
+++ b/src/PeerInitializer.java
@@ -1,4 +1,9 @@
-package sim.src;
+package txrelaysim.src;
+
+import txrelaysim.src.helpers.*;
+
+import java.util.HashSet;
+import java.util.HashMap;
import peersim.config.*;
import peersim.core.*;
@@ -8,50 +13,128 @@
public class PeerInitializer implements Control
{
private int pid;
- private int maliciousCount;
- private int trustedCount;
-
- private static final String PAR_PROT = "protocol";
- private static final String PAR_MALICIOUS_COUNT = "malicious_count";
- private static final String PAR_TRUSTED_COUNT = "trusted_count";
-
+ private int reachableCount;
+ private int privateBlackHolesPercent;
+ private int outPeersLegacy;
+ private int outPeersRecon;
+ private int inRelayDelayReconPeer;
+ private int outRelayDelayReconPeer;
+ private int inRelayDelayLegacyPeer;
+ private int outRelayDelayLegacyPeer;
+
+ // Reconciliation params
+ private int reconcilePercent;
+ private double outFloodPeersPercent;
+ private double inFloodPeersPercent;
+ private double defaultQ;
+ private int reconciliationInterval;
+
public PeerInitializer(String prefix) {
- pid = Configuration.getPid(prefix + "." + PAR_PROT);
- maliciousCount = Configuration.getInt(prefix + "." + PAR_MALICIOUS_COUNT);
- trustedCount = Configuration.getInt(prefix + "." + PAR_TRUSTED_COUNT);
- }
-
+ pid = Configuration.getPid(prefix + "." + "protocol");
+ reachableCount = Configuration.getInt(prefix + "." + "reachable_count");
+ outPeersLegacy = Configuration.getInt(prefix + "." + "out_peers_legacy");
+ outPeersRecon = Configuration.getInt(prefix + "." + "out_peers_recon");
+ inRelayDelayReconPeer = Configuration.getInt(prefix + "." + "in_relay_delay_recon_peer");
+ outRelayDelayReconPeer = Configuration.getInt(prefix + "." + "out_relay_delay_recon_peer");
+ inRelayDelayLegacyPeer = Configuration.getInt(prefix + "." + "in_relay_delay_legacy_peer");
+ outRelayDelayLegacyPeer = Configuration.getInt(prefix + "." + "out_relay_delay_legacy_peer");
+ privateBlackHolesPercent = Configuration.getInt(prefix + "." + "private_black_holes_percent", 0);
+ reconcilePercent = Configuration.getInt(prefix + "." + "reconcile_percent");
+ if (reconcilePercent > 0) {
+ reconciliationInterval = Configuration.getInt(prefix + "." + "reconciliation_interval");
+ defaultQ = Configuration.getDouble(prefix + "." + "default_q");
+ outFloodPeersPercent = Configuration.getDouble(prefix + "." + "out_flood_peers_percent");
+ inFloodPeersPercent = Configuration.getDouble(prefix + "." + "in_flood_peers_percent");
+ }
+ }
+
@Override
public boolean execute() {
Peer.pidPeer = pid;
-
- //set source as not peer
- ((Peer)Network.get(SourceInitializer.sourceIndex).getProtocol(pid)).isPeer = false;
-
- Node source = Network.get(0);
- while (maliciousCount > 0) {
+
+ int privateBlackHolesCount = (Network.size() - reachableCount) * privateBlackHolesPercent / 100;
+ // Set a subset of nodes to be reachable by other nodes.
+ while (reachableCount > 0) {
int r = CommonState.r.nextInt(Network.size() - 1) + 1;
- if (!((Peer)Network.get(r).getProtocol(pid)).isMalicious && !((Peer)Network.get(r).getProtocol(pid)).isTrusted) {
- ((Peer)Network.get(r).getProtocol(pid)).isMalicious = true;
- maliciousCount--;
+ if (!((Peer)Network.get(r).getProtocol(pid)).isReachable) {
+ ((Peer)Network.get(r).getProtocol(pid)).isReachable = true;
+ --reachableCount;
}
}
- while (trustedCount > 0) {
+
+ System.err.println("Black holes: " + privateBlackHolesCount);
+ while (privateBlackHolesCount > 0) {
int r = CommonState.r.nextInt(Network.size() - 1) + 1;
- if (!((Peer)Network.get(r).getProtocol(pid)).isMalicious && !((Peer)Network.get(r).getProtocol(pid)).isTrusted) {
- ((Peer)Network.get(r).getProtocol(pid)).isTrusted = true;
- trustedCount--;
+ if (!((Peer)Network.get(r).getProtocol(pid)).isReachable) {
+ ((Peer)Network.get(r).getProtocol(pid)).isBlackHole = true;
+ --privateBlackHolesCount;
}
}
+ System.err.println("Black holes: " + privateBlackHolesCount);
+
+ int reconcilingNodes = Network.size() * reconcilePercent / 100;
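+ // The first reconcilingNodes node indices get reconciliation enabled below; the rest run legacy flooding.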
+ // A list storing who is already connected to who, so that we don't make duplicate conns.
+ HashMap<Integer, HashSet<Integer>> peers = new HashMap<>();
+ for (int i = 1; i < Network.size(); i++) {
+ peers.put(i, new HashSet<>());
+ // Initial parameter setup for all nodes.
+
+ if (reconcilingNodes > 0) {
+ reconcilingNodes--;
+ ((Peer)Network.get(i).getProtocol(pid)).reconcile = true;
+ ((Peer)Network.get(i).getProtocol(pid)).reconciliationInterval = reconciliationInterval;
+ ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = inFloodPeersPercent;
+ ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = outFloodPeersPercent;
+ ((Peer)Network.get(i).getProtocol(pid)).defaultQ = defaultQ;
+ ((Peer)Network.get(i).getProtocol(pid)).inRelayDelay = inRelayDelayReconPeer;
+ ((Peer)Network.get(i).getProtocol(pid)).outRelayDelay = outRelayDelayReconPeer;
+ } else {
+ ((Peer)Network.get(i).getProtocol(pid)).reconcile = false;
+ ((Peer)Network.get(i).getProtocol(pid)).inFloodLimitPercent = 100;
+ ((Peer)Network.get(i).getProtocol(pid)).outFloodLimitPercent = 100;
+ ((Peer)Network.get(i).getProtocol(pid)).inRelayDelay = inRelayDelayLegacyPeer;
+ ((Peer)Network.get(i).getProtocol(pid)).outRelayDelay = outRelayDelayLegacyPeer;
+ }
+ }
+
+ // Connect all nodes to a limited number of reachable nodes.
for(int i = 1; i < Network.size(); i++) {
- Node node = Network.get(i);
- ((Peer)node.getProtocol(pid)).isPeer = true;
- SimpleMessage message = new SimpleMessage(SimpleEvent.HELLO, Network.get(i));
- long latency = CommonState.r.nextInt(Network.size());
- EDSimulator.add(latency, message, source, Source.pidSource);
+ Node curNode = Network.get(i);
+ int connsTarget;
+ if (((Peer)curNode.getProtocol(pid)).reconcile) {
+ connsTarget = outPeersRecon;
+ } else {
+ connsTarget = outPeersLegacy;
+ }
+ while (connsTarget > 0) {
+ int randomNodeIndex = CommonState.r.nextInt(Network.size() - 1) + 1;
+ if (randomNodeIndex == i) {
+ continue;
+ }
+
+ Node randomNode = Network.get(randomNodeIndex);
+ Peer randomNodeState = ((Peer)Network.get(randomNodeIndex).getProtocol(pid));
+
+ if (!randomNodeState.isReachable) {
+ continue;
+ }
+ if (peers.get(i).contains(randomNodeIndex) || peers.get(randomNodeIndex).contains(i)) {
+ continue;
+ }
+
+ peers.get(i).add(randomNodeIndex);
+ peers.get(randomNodeIndex).add(i);
+
+ // Actual connecting.
+ boolean curNodeSupportsRecon = ((Peer)Network.get(i).getProtocol(pid)).reconcile;
+ ((Peer)curNode.getProtocol(pid)).addPeer(randomNode, true, randomNodeState.reconcile);
+ ((Peer)randomNode.getProtocol(pid)).addPeer(curNode, false, curNodeSupportsRecon);
+ --connsTarget;
+ }
}
-
-
+
+ System.err.println("Initialized peers");
return true;
}
}
\ No newline at end of file
diff --git a/src/PeerObserver.java b/src/PeerObserver.java
deleted file mode 100755
index 8beafc4..0000000
--- a/src/PeerObserver.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2003-2005 The BISON Project
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-package sim.src;
-
-import peersim.config.*;
-import peersim.core.*;
-import peersim.util.*;
-
-/**
-* Print statistics over a vector. The vector is defined by a protocol,
-* specified by {@value #PAR_PROT}, that has to implement
-* {@link SingleValue}.
-* Statistics printed are: min, max, number of samples, average, variance,
-* number of minimal instances, number of maximal instances (using
-* {@link IncrementalStats#toString}).
-* @see IncrementalStats
-*/
-public class PeerObserver implements Control
-{
- /**
- * The protocol to operate on.
- * @config
- */
- private static final String PAR_PROT = "protocol";
-
- /** The name of this observer in the configuration */
- private final String name;
-
- /** Protocol identifier */
- private final int pid;
-
- private int cycle_length;
-
- /**
- * Standard constructor that reads the configuration parameters.
- * Invoked by the simulation engine.
- * @param name the configuration prefix for this class
- */
- public PeerObserver(String name) {
- this.name = name;
- pid = Configuration.getPid(name + "." + PAR_PROT);
- cycle_length = Configuration.getInt("CYCLE");
- }
-
- public boolean execute() {
- Peer peer;
-
- System.out.println("---------------------------------------------------------------------------------");
- System.out.println("This is PeerObserver. Buffers...");
- for(int i = 1; i < Network.size(); i++) {
- peer = (Peer) Network.get(i).getProtocol(pid);
- if (peer.isMalicious) {
- System.out.print("(A)");
- } else if (peer.isTrusted) {
- System.out.print("(T)");
- }
- System.out.print("Node "+i+" buffer: ");
- for(int j = 0; j < peer.buffer.length; j++) {
- if(peer.buffer[j] == null)
- System.out.print(" | ");
- else
- System.out.print(peer.buffer[j].getInteger() + " | ");
- }
- System.out.println();
- System.out.print("Node "+i+" neighbors: ");
- for (Neighbor neighbor : peer.peerList) {
- if (((Peer)neighbor.getNode().getProtocol(pid)).isMalicious) {
- System.out.print("(A)");
- } else if (((Peer)neighbor.getNode().getProtocol(pid)).isTrusted) {
- System.out.print("(T)");
- }
- System.out.print (neighbor.getNode().getIndex() + ", ");
- }
- System.out.println();
- }
- System.out.println("---------------------------------------------------------------------------------");
-
- return false;
- }
-}
\ No newline at end of file
diff --git a/src/PoisonedChunksObserver.java b/src/PoisonedChunksObserver.java
deleted file mode 100644
index 28a057d..0000000
--- a/src/PoisonedChunksObserver.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package sim.src;
-
-import peersim.config.Configuration;
-import peersim.core.CommonState;
-import peersim.core.Control;
-import peersim.core.Network;
-
-public class PoisonedChunksObserver implements Control {
-
- private static final String PAR_PROT = "protocol";
-
- private String name;
- private int pid;
- private int cycleLength;
- private int poisonedChunks;
-
- public PoisonedChunksObserver(String name) {
- this.name = name;
- pid = Configuration.getPid(name + "." + PAR_PROT);
- cycleLength = Configuration.getInt("CYCLE");
- poisonedChunks = 0;
- }
-
- @Override
- public boolean execute() {
- Peer peer;
- int currentPoisonedChunks = 0;
- for (int i = 1; i < Network.size(); i++) {
- peer = (Peer) Network.get(i).getProtocol(pid);
- for(int j = 0; j < peer.buffer.length; j++) {
- if(peer.buffer[j] == null) {
-
- } else {
- if (peer.buffer[j].getInteger() < 0) {
- currentPoisonedChunks++;
- }
- }
- }
- }
- if (currentPoisonedChunks >= poisonedChunks && CommonState.getEndTime() - CommonState.getTime() > 2 * cycleLength) {
- poisonedChunks = currentPoisonedChunks;
- } else {
- System.out.println("== " + poisonedChunks + " poisoned chunks" + " " + CommonState.getTime() );
- return true;
- }
-
- return false;
- }
-
-}
diff --git a/src/SimpleEvent.java b/src/SimpleEvent.java
deleted file mode 100644
index 8e4aab3..0000000
--- a/src/SimpleEvent.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package sim.src;
-
-public class SimpleEvent {
-
- public static final int HELLO = 1;
- public static final int GOODBYE = 2;
- public static final int CHUNK = 3;
- public static final int PEERLIST = 4;
- public static final int CHUNK_CHECK = 5;
- public static final int BAD_PEER = 6;
-
- private int type;
-
- public SimpleEvent(int type) {
- this.type = type;
- }
-
- public int getType() {
- return this.type;
- }
-
-}
diff --git a/src/Source.java b/src/Source.java
index d80c024..970b96b 100755
--- a/src/Source.java
+++ b/src/Source.java
@@ -1,11 +1,15 @@
-package sim.src;
+package txrelaysim.src;
+
+import txrelaysim.src.helpers.*;
import java.util.ArrayList;
import peersim.cdsim.CDProtocol;
import peersim.config.FastConfig;
+import peersim.config.Configuration;
import peersim.core.Network;
import peersim.core.Node;
+import peersim.core.CommonState;
import peersim.edsim.EDProtocol;
import peersim.edsim.EDSimulator;
import peersim.transport.Transport;
@@ -13,129 +17,52 @@
public class Source implements CDProtocol, EDProtocol
{
- public static int pidSource;
-
+ public static int pidSource;
+ public static int tps;
+
public boolean isSource = false;
- private int packetIndex = 1;
- private int recipientIndex = 1;
- private int cycle = 1;
- private ArrayList peerList;
-
+ public int txId = 0;
+ private ArrayList<Node> peerList;
+
public Source(String prefix) {
- this.peerList = new ArrayList();
+ this.peerList = new ArrayList<>();
}
-
+
@Override
public void nextCycle(Node node, int pid) {
Node recipient;
int nextNodeIndex;
-
- if(isSource == false)
+
+ if (isSource == false)
+ return;
+
+ if (CommonState.getEndTime() < CommonState.getTime() + 40 * 1000) {
+ // If the experiment ends soon, stop issuing transactions and let existing ones propagate.
return;
-
- if (peerList.size() > 0) {
- if (recipientIndex >= peerList.size()) {
- recipientIndex = 0;
- }
- recipient = peerList.get(recipientIndex).getNode();
- //next node in the list
- nextNodeIndex = (recipientIndex+1) % peerList.size();
-
- //send packet to this node, with nextNodeIndex in the resendTo field
- IntMessage chunkMessage = new IntMessage(SimpleEvent.CHUNK, node, packetIndex);
- ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, chunkMessage, Peer.pidPeer);
-
- //for next cycle
- packetIndex++;
- recipientIndex = nextNodeIndex;
}
- cycle++;
- }
- /*
- * Returns the regular peer with absolute index "index"
- */
- public Peer getPeer(int index) {
- Node node = Network.get(index);
- //look for the Peer protocol
- for(int p = 0; p < node.protocolSize(); p++)
- {
- if(node.getProtocol(p) instanceof Peer)
- return (Peer)node.getProtocol(p);
+ int randomNumberOfTxs = CommonState.r.nextInt(this.tps * 2); // anything from 0 to tps * 2 (exclusive), i.e. roughly tps per cycle on average.
+
+ for (int i = 0; i < randomNumberOfTxs; ++i) {
+ txId++;
+ int randomRecipientIndex = CommonState.r.nextInt(peerList.size() - 1) + 1;
+ recipient = peerList.get(randomRecipientIndex);
+ IntMessage inv = new IntMessage(SimpleEvent.INV, node, txId);
+ ((Transport)recipient.getProtocol(FastConfig.getTransport(pid))).send(node, recipient, inv, Peer.pidPeer);
}
-
- return null;
- }
-
- public Object clone() {
- return new Source("");
}
@Override
public void processEvent(Node node, int pid, Object event) {
- SimpleEvent castedEvent = (SimpleEvent)event;
- switch (castedEvent.getType()) {
- case SimpleEvent.HELLO:
- processHelloMessage(node, pid, (SimpleMessage)castedEvent);
- break;
- case SimpleEvent.GOODBYE:
- processGoodbyeMessage(node, pid, (SimpleMessage)castedEvent);
- break;
- case SimpleEvent.CHUNK_CHECK:
- processChunkCheckMessage(node, pid, (TupleMessage)castedEvent);
- }
+ return;
}
-
- private void processHelloMessage(Node node, int pid, SimpleMessage receivedMessage) {
- ArrayList clone = new ArrayList();
- synchronized (this.peerList) {
- for (Neighbor peer : this.peerList) {
- clone.add(peer);
- }
- }
- ArrayListMessage message = new ArrayListMessage(SimpleEvent.PEERLIST, node, clone);
- Node sender = receivedMessage.getSender();
-
- long latency = message.getLatency(sender, pid);
- EDSimulator.add(latency, message, sender, Peer.pidPeer);
-
- peerList.add(new Neighbor(receivedMessage.getSender()));
- }
-
- private void processGoodbyeMessage(Node node, int pid, SimpleMessage receivedMessage) {
- Neighbor peerToRemove = null;
- for (Neighbor peer : peerList) {
- if (peer.getNode().getID() == receivedMessage.getSender().getID()) {
- peerToRemove = peer;
- break;
- }
- }
- if (peerToRemove != null) {
- peerList.remove(peerToRemove);
- }
- }
-
- private void processChunkCheckMessage(Node node, int pid, TupleMessage receivedMessage) {
- int chunkNum = receivedMessage.getY();
- if (chunkNum < 0) { // poisoned chunk
- removeNeighbor(receivedMessage.getX());
- IntMessage badPeerMessage = new IntMessage(SimpleEvent.BAD_PEER, node, receivedMessage.getX());
- for (Neighbor peer : peerList) {
- long latency = badPeerMessage.getLatency(peer.getNode(), pid);
- EDSimulator.add(latency, badPeerMessage, peer.getNode(), Peer.pidPeer);
- }
- }
+
+ public Object clone() {
+ return new Source("");
}
-
- private void removeNeighbor(int index) {
- Neighbor toRemove = null;
- for (Neighbor peer : peerList) {
- if (peer.getNode().getIndex() == index) {
- toRemove = peer;
- break;
- }
- }
- peerList.remove(toRemove);
+
+ public void addPeer(Node peer) {
+ peerList.add(peer);
}
-
+
}
\ No newline at end of file
diff --git a/src/SourceInitializer.java b/src/SourceInitializer.java
index ebc76c6..fbd7f3f 100755
--- a/src/SourceInitializer.java
+++ b/src/SourceInitializer.java
@@ -1,4 +1,4 @@
-package sim.src;
+package txrelaysim.src;
import peersim.config.*;
import peersim.core.*;
@@ -6,26 +6,39 @@
public class SourceInitializer implements Control
{
public static final int sourceIndex = 0;
-
+
private static final String PAR_PROT = "protocol";
private final int pid;
-
+ private int tps;
+
public SourceInitializer(String prefix) {
pid = Configuration.getPid(prefix + "." + PAR_PROT);
+ tps = Configuration.getInt(prefix + ".tps");
}
@Override
public boolean execute() {
- //set the Source pid
+ // Set the Source pid.
Source.pidSource = pid;
-
- //set node 0 as source
+
+ // Set node 0 as source.
((Source) Network.get(sourceIndex).getProtocol(pid)).isSource = true;
-
- //set other nodes as not source
+ ((Source) Network.get(sourceIndex).getProtocol(pid)).tps = tps;
+
+ // Set other nodes as not source.
for(int i = 1; i < Network.size()-1; i++)
((Source) Network.get(i).getProtocol(pid)).isSource = false;
+ // Source connects to some nodes.
+ Node source = Network.get(0);
+ int sourceConns = 0;
+ while (sourceConns < 20) {
+ int randomNodeIndex = CommonState.r.nextInt(Network.size() - 1) + 1;
+ Node node = Network.get(randomNodeIndex);
+ ((Source)source.getProtocol(pid)).addPeer(node);
+ ++sourceConns;
+ }
+
return true;
}
}
diff --git a/src/TupleMessage.java b/src/TupleMessage.java
deleted file mode 100644
index 146ea57..0000000
--- a/src/TupleMessage.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package sim.src;
-
-import peersim.core.Node;
-
-public class TupleMessage extends SimpleMessage {
-
- private int x;
- private int y;
-
- public TupleMessage(int type, Node sender, int x, int y) {
- super(type, sender);
- this.x = x;
- this.y = y;
- }
-
- public int getX() {
- return this.x;
- }
-
- public int getY() {
- return this.y;
- }
-
-}
diff --git a/src/ArrayListMessage.java b/src/helpers/ArrayListMessage.java
similarity index 91%
rename from src/ArrayListMessage.java
rename to src/helpers/ArrayListMessage.java
index ace5c53..e08497d 100644
--- a/src/ArrayListMessage.java
+++ b/src/helpers/ArrayListMessage.java
@@ -1,4 +1,4 @@
-package sim.src;
+package txrelaysim.src.helpers;
import java.util.ArrayList;
@@ -7,12 +7,12 @@
public class ArrayListMessage extends SimpleMessage {
private ArrayList arrayList;
-
+
public ArrayListMessage(int type, Node sender, ArrayList arrayList) {
super(type, sender);
this.arrayList = arrayList;
}
-
+
public ArrayList getArrayList() {
return this.arrayList;
}
diff --git a/src/IntMessage.java b/src/helpers/IntMessage.java
similarity index 88%
rename from src/IntMessage.java
rename to src/helpers/IntMessage.java
index ff9ca4b..6228a48 100644
--- a/src/IntMessage.java
+++ b/src/helpers/IntMessage.java
@@ -1,16 +1,16 @@
-package sim.src;
+package txrelaysim.src.helpers;
import peersim.core.Node;
public class IntMessage extends SimpleMessage {
-
+
private int integer;
public IntMessage(int type, Node sender, int integer) {
super(type, sender);
this.integer = integer;
}
-
+
public int getInteger() {
return this.integer;
}
diff --git a/src/helpers/SimpleEvent.java b/src/helpers/SimpleEvent.java
new file mode 100644
index 0000000..a257091
--- /dev/null
+++ b/src/helpers/SimpleEvent.java
@@ -0,0 +1,23 @@
+package txrelaysim.src.helpers;
+
+public class SimpleEvent {
+
+ public static final int INV = 1;
+ public static final int GETDATA = 2;
+ public static final int RECON_REQUEST = 3;
+ public static final int SKETCH = 4;
+ public static final int RECON_FINALIZATION = 5;
+ public static final int SCHEDULED_INV = 6;
+ public static final int SCHEDULED_SKETCH = 7;
+
+ private int type;
+
+ public SimpleEvent(int type) {
+ this.type = type;
+ }
+
+ public int getType() {
+ return this.type;
+ }
+
+}
diff --git a/src/SimpleMessage.java b/src/helpers/SimpleMessage.java
similarity index 52%
rename from src/SimpleMessage.java
rename to src/helpers/SimpleMessage.java
index 1897c24..3613488 100644
--- a/src/SimpleMessage.java
+++ b/src/helpers/SimpleMessage.java
@@ -1,4 +1,4 @@
-package sim.src;
+package txrelaysim.src.helpers;
import peersim.config.FastConfig;
import peersim.core.Node;
@@ -7,23 +7,13 @@
public class SimpleMessage extends SimpleEvent {
private Node sender;
-
+
public SimpleMessage(int type, Node sender) {
super(type);
this.sender = sender;
}
-
+
public Node getSender() {
return this.sender;
}
-
- public long getLatency(Node dest, int pid) {
- Node src = this.getSender();
- long latency = ((Transport)src.getProtocol(FastConfig.getTransport(pid))).getLatency(src, dest);
- if (this.getType() != SimpleEvent.CHUNK) {
- latency = 1;
- }
- return latency;
- }
-
}
\ No newline at end of file
diff --git a/src/helpers/TupleMessage.java b/src/helpers/TupleMessage.java
new file mode 100644
index 0000000..a8d7c31
--- /dev/null
+++ b/src/helpers/TupleMessage.java
@@ -0,0 +1,29 @@
+package txrelaysim.src.helpers;
+
+import peersim.core.Node;
+
+public class TupleMessage extends SimpleMessage {
+
+ private Node x;
+ private int y;
+ private boolean z;
+
+ public TupleMessage(int type, Node sender, Node x, int y, boolean z) {
+ super(type, sender);
+ this.x = x;
+ this.y = y;
+ this.z = z;
+ }
+
+ public Node getX() {
+ return this.x;
+ }
+
+ public int getY() {
+ return this.y;
+ }
+
+ public boolean getZ() {
+ return this.z;
+ }
+}
diff --git a/utils/average.py b/utils/average.py
deleted file mode 100644
index a1e7e3d..0000000
--- a/utils/average.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import getopt, sys, re
-
-def main(argv):
- try:
- opts, args = getopt.getopt(argv, "i:", [])
- except getopt.GetoptError as err:
- print str(err)
- sys.exit(2)
- if len(opts) == 0:
- sys.exit(2)
-
- logFile = opts[0][1]
- poisonedChunksSum = 0
- experiments = 0
- p = re.compile("==\s(\d+)")
- with open(logFile) as f:
- for line in f:
- m = p.search(line)
- if m:
- experiments += 1
- poisonedChunksSum += int(m.group(1))
-
- print float(poisonedChunksSum) / experiments
-
-if __name__ == "__main__":
- main(sys.argv[1:])
\ No newline at end of file
diff --git a/utils/expected.py b/utils/expected.py
deleted file mode 100644
index f71e29f..0000000
--- a/utils/expected.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import sys, getopt
-
-def C(n, k):
- if 0 <= k <= n:
- ntok = 1
- ktok = 1
- for t in xrange(1, min(k, n - k) + 1):
- ntok *= n
- ktok *= t
- n -= 1
- return ntok // ktok
- else:
- return 0
-
-def f(n, T):
- re = 0.0
- for i in range(1, n-T+1):
- re += i * (float(C(n-i-1, T-1)) / C(n-1, T))
- return re
-
-def g(n, T, A):
- re = 0.0
- for i in range(1, A+1):
- re += f(n-i+1, T)
- return re
-
-def main(argv):
- try:
- opts, args = getopt.getopt(argv, "t:a:n:", [])
- except getopt.GetoptError as err:
- print str(err)
- sys.exit(2)
- T = 1
- A = 1
- n = 9
- for o, a in opts:
- if o == "-t":
- T = int(a)
- elif o == "-a":
- A = int(a)
- elif o == "-n":
- n = int(a)
- print g(n, T, A)
-
-if __name__ == "__main__":
- main(sys.argv[1:])
\ No newline at end of file