aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore103
-rw-r--r--.gitlab-ci.yml20
-rw-r--r--AUTHORS6
-rw-r--r--AUTHORS.txt5
-rw-r--r--LICENSE (renamed from LICENSE.txt)0
-rw-r--r--MANIFEST.in6
-rw-r--r--README.md205
-rw-r--r--doc/irati.rst11
-rw-r--r--doc/model.rst16
-rw-r--r--doc/prototype.rst2
-rw-r--r--doc/qemu.rst15
-rw-r--r--doc/rlite.rst11
-rw-r--r--doc/testbed.rst1
-rw-r--r--doc/workflow.rst6
-rwxr-xr-xexamples/congestion.py109
-rwxr-xr-xexamples/converged-operator-network.py211
-rwxr-xr-xexamples/datacenter.py78
-rwxr-xr-xexamples/docker-ouroboros.py25
-rw-r--r--examples/example-script.rsb6
-rwxr-xr-xexamples/example.py31
-rwxr-xr-xexamples/geant.py233
-rwxr-xr-xexamples/isps.py68
-rwxr-xr-xexamples/jfed-rlite.py53
-rwxr-xr-xexamples/mouse.py109
-rw-r--r--examples/ouroboros-layer-example.py66
-rwxr-xr-xexamples/rumba_example.py23
-rwxr-xr-xexamples/scalingtime.py135
-rwxr-xr-xexamples/script-example.py50
-rwxr-xr-xexamples/snake.py48
-rwxr-xr-xexamples/square.py52
-rwxr-xr-xexamples/test.py57
-rwxr-xr-xexamples/two-layers.py41
-rwxr-xr-xexamples/vpn.py35
-rw-r--r--rumba/_version.py2
-rw-r--r--rumba/command.py60
-rw-r--r--rumba/elements/experimentation.py272
-rw-r--r--rumba/elements/topology.py635
-rw-r--r--rumba/executors/docker.py20
-rw-r--r--rumba/executors/local.py33
-rw-r--r--rumba/executors/ssh.py11
-rw-r--r--rumba/irm_backend.py343
-rw-r--r--rumba/log.py146
-rw-r--r--rumba/model.py55
-rw-r--r--rumba/multiprocess.py28
-rw-r--r--rumba/process.py118
-rwxr-xr-xrumba/prototypes/enroll.py140
-rw-r--r--rumba/prototypes/irati.py467
-rw-r--r--rumba/prototypes/irati_templates.py437
-rw-r--r--rumba/prototypes/ouroboros.py241
-rw-r--r--rumba/prototypes/rlite.py194
-rw-r--r--rumba/recpoisson.py8
-rw-r--r--rumba/ssh_support.py101
-rw-r--r--rumba/storyboard.py131
-rw-r--r--rumba/testbeds/dockertb.py22
-rw-r--r--rumba/testbeds/emulab.py16
-rw-r--r--rumba/testbeds/jfed.py18
-rw-r--r--rumba/testbeds/localnet.py257
-rw-r--r--rumba/testbeds/qemu.py457
-rw-r--r--rumba/topologies.py146
-rw-r--r--rumba/utils.py77
-rw-r--r--[-rwxr-xr-x]rumba/visualizer.py88
-rwxr-xr-xsetup.py62
-rw-r--r--tools/conf-examples/dc-vpns.conf114
-rw-r--r--tools/conf-examples/geant2-renumber.conf86
-rw-r--r--tools/conf-examples/insane-stacking.conf29
-rw-r--r--tools/conf-examples/isp-sec.conf189
-rw-r--r--tools/conf-examples/resilient-square.conf16
-rw-r--r--tools/conf-examples/secure-two-layers.conf25
-rw-r--r--tools/conf-examples/seven.conf34
-rw-r--r--tools/conf-examples/star.conf7
-rw-r--r--tools/conf-examples/triangle.conf9
-rw-r--r--tools/conf-examples/tutorial1.conf4
-rw-r--r--tools/conf-examples/tutorial2.conf6
-rw-r--r--tools/conf-examples/two-layers.conf17
-rwxr-xr-xtools/democonf2rumba.py292
-rw-r--r--tools/scriptgenerator.py14
76 files changed, 3029 insertions, 4235 deletions
diff --git a/.gitignore b/.gitignore
index dc1959d..93e8e76 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,98 +1,47 @@
-# Byte-compiled / optimized / DLL files
+# Byte-compiled / optimized
__pycache__/
*.py[cod]
*$py.class
-# C extensions
-*.so
-
# Distribution / packaging
-.Python
-env/
build/
-develop-eggs/
dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
*.egg-info/
-.installed.cfg
*.egg
+.eggs/
+sdist/
+
+# Virtual environments
+.venv/
+venv/
+env/
+ENV/
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
+# IDE / editor
+.idea/
+.vscode/
+*.swp
+*.swo
+*~
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
+# Sphinx documentation
+doc/_build/
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
-.cache
-nosetests.xml
+.cache/
+.pytest_cache/
coverage.xml
-*,cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# IPython Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# dotenv
-.env
-
-# virtualenv
-venv/
-ENV/
-
-# Spyder project settings
-.spyderproject
-
-# Rope project settings
-.ropeproject
+nosetests.xml
-# emacs temporary files
-*~
+# OS files
+.DS_Store
+Thumbs.db
-*rspec
+# Rumba runtime
+/tmp/rumba/
*.pem
-
-# PyCharm metadata folder
-.idea
+rumba.log
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index dad6f1d..0000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-before_script:
- - apt-get update -qy
- - apt-get install -y python-dev python-pip bridge-utils qemu
- - pip install setuptools --upgrade
- - python setup.py install
-
-irati-test:
- script:
- - python examples/vpn.py
-
-pages:
- stage: deploy
- script:
- - pip install sphinx sphinx_rtd_theme
- - sphinx-build doc/ public/
- artifacts:
- paths:
- - public
- only:
- - master
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..634bb68
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,6 @@
+Sander Vrijders <sander@ouroboros.rocks>
+Dimitri Staessens <dimitri@ouroboros.rocks>
+Thijs Paelman <thijs@ouroboros.rocks>
+Vincenzo Maffione <v.maffione@nextworks.it>
+Marco Capitani <m.capitani@nextworks.it>
+Nick Aerts <nick.aerts@ugent.be>
diff --git a/AUTHORS.txt b/AUTHORS.txt
deleted file mode 100644
index 0e929fb..0000000
--- a/AUTHORS.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Sander Vrijders <sander.vrijders@intec.ugent.be>
-Dimitri Staessens <dimitri.staessens@ugent.be>
-Vincenzo Maffione <v.maffione@nextworks.it>
-Marco Capitani <m.capitani@nextworks.it>
-Nick Aerts <nick.aerts@ugent.be> \ No newline at end of file
diff --git a/LICENSE.txt b/LICENSE
index 19e3071..19e3071 100644
--- a/LICENSE.txt
+++ b/LICENSE
diff --git a/MANIFEST.in b/MANIFEST.in
index 21ede48..b44eea6 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include AUTHORS.txt
-include LICENSE.txt
-include README \ No newline at end of file
+include AUTHORS
+include LICENSE
+include README.md
diff --git a/README.md b/README.md
index f29c3dc..87178af 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,208 @@
Rumba is a Python framework that allows users to write Python scripts
to define recursive internet networks and run scripted experiments.
-The official documentation can be found
+The original documentation can be found
[here](https://arcfire.gitlab.io/rumba/).
+
+This version of rumba is maintained by the Ouroboros (O7s) project.
+
+The documentation can be found
+[here](https://ouroboros.rocks/wiki/Rumba).
+
+## Installation
+
+To install Rumba from a local clone:
+
+```bash
+git clone https://codeberg.org/o7s/rumba.git
+cd rumba
+python3 -m venv .venv
+source .venv/bin/activate
+pip install .
+```
+
+Optional extras:
+
+```bash
+pip install rumba[NumpyAcceleration] # faster random traffic generation
+pip install rumba[graphs] # PDF graph export via pydot
+pip install rumba[visualizer] # interactive visualization via igraph + plotly
+```
+
+On Debian/Ubuntu you may also need:
+
+```bash
+sudo apt-get install build-essential libssl-dev libffi-dev python3-dev
+```
+
+The required dependencies (`paramiko` and `docker`) are installed
+automatically.
+
+## Defining a Topology
+
+Rumba models recursive networks using **layers** and **nodes**. Layers
+correspond to Ouroboros IPCP types. Nodes declare which layers they
+belong to and how those layers are stacked via **registrations**.
+
+The example below defines four nodes connected in a ring, with a single
+unicast layer (`n1`) running over four point-to-point Ethernet links:
+
+```
+ a ---e01--- b
+ | |
+ e04 e02
+ | |
+ d ---e03--- c
+```
+
+```python
+from rumba.model import UnicastLayer, EthDixLayer, Node
+
+# A unicast layer that will span all four nodes
+n1 = UnicastLayer("n1")
+
+# Four point-to-point Ethernet links (max 2 members each)
+e01 = EthDixLayer("e01") # a <-> b
+e02 = EthDixLayer("e02") # b <-> c
+e03 = EthDixLayer("e03") # c <-> d
+e04 = EthDixLayer("e04") # d <-> a
+
+# Each node lists its layer memberships and how layers are stacked.
+# registrations = {upper: [lower, ...]} means "upper registers in lower".
+a = Node("a",
+ layers=[n1, e01, e04],
+ registrations={n1: [e01, e04]})
+
+b = Node("b",
+ layers=[n1, e01, e02],
+ registrations={n1: [e01, e02]})
+
+c = Node("c",
+ layers=[n1, e02, e03],
+ registrations={n1: [e02, e03]})
+
+d = Node("d",
+ layers=[n1, e03, e04],
+ registrations={n1: [e03, e04]})
+```
+
+Two nodes that share an `EthDixLayer` are connected by that Ethernet
+link. The `registrations` dictionary tells Rumba how to stack the
+layers: `{n1: [e01, e04]}` means the unicast layer `n1` registers in
+both `e01` and `e04` on that node, giving it connectivity over those
+links.
+
+Policies can be set on unicast layers:
+
+```python
+n1.add_policy("routing", "lfa")
+```
+
+Link quality (delay, loss, rate) can be configured on Ethernet layers:
+
+```python
+from rumba.model import EthDixLayer, Delay
+
+e01 = EthDixLayer("e01")
+e01.set_delay(delay=10, jitter=2) # 10 ms delay, 2 ms jitter
+e01.set_loss(loss=0.1) # 0.1% packet loss
+e01.set_rate(rate=100) # 100 Mbps
+```
+
+## Quick Start
+
+A complete runnable script that deploys the ring topology above on a
+local testbed:
+
+```python
+from rumba.model import *
+from rumba.utils import ExperimentManager
+
+import rumba.testbeds.local as local
+import rumba.prototypes.ouroboros as our
+
+# --- topology (from the example above) ---
+n1 = UnicastLayer("n1")
+e01 = EthDixLayer("e01")
+e02 = EthDixLayer("e02")
+e03 = EthDixLayer("e03")
+e04 = EthDixLayer("e04")
+
+a = Node("a", layers=[n1, e01, e04], registrations={n1: [e01, e04]})
+b = Node("b", layers=[n1, e01, e02], registrations={n1: [e01, e02]})
+c = Node("c", layers=[n1, e02, e03], registrations={n1: [e02, e03]})
+d = Node("d", layers=[n1, e03, e04], registrations={n1: [e03, e04]})
+
+# --- testbed and experiment ---
+tb = local.Testbed()
+exp = our.Experiment(tb, nodes=[a, b, c, d])
+
+# --- run ---
+with ExperimentManager(exp):
+ exp.swap_in() # provision the testbed
+ exp.bootstrap_prototype() # create IPCPs and enrollments
+```
+
+`swap_in()` provisions the nodes on the chosen testbed.
+`bootstrap_prototype()` creates the Ouroboros IPCPs, bootstraps them,
+and performs enrollments automatically. `ExperimentManager` handles
+cleanup when the block exits.
+
+## Key Concepts
+
+- **Layer** — a network layer aligned with an Ouroboros IPCP type.
+ Available types: `UnicastLayer`, `BroadcastLayer`, `EthDixLayer`,
+ `EthLlcLayer`, `LocalLayer`, `Udp4Layer`, `Udp6Layer`.
+- **Node** — a system declaring its layer memberships and
+ registrations (layer stacking).
+- **Testbed** — the infrastructure backend that provisions nodes
+ (see table below).
+- **Experiment** — computes IPCP placement, enrollment order, and
+ manages the prototype lifecycle.
+- **StoryBoard** — schedules client/server traffic with support for
+ Poisson arrival rates.
+- **CommandResult** — structured result from `node.execute_command()`
+ carrying exit code, stdout, and stderr.
+- **Process** — a managed background process with `is_alive()`,
+ `wait()`, and `kill()`.
+- **ExperimentManager** — context manager that handles experiment
+ setup and teardown.
+
+## Supported Testbeds
+
+| Testbed | Module | Description |
+|----------|---------------------------|------------------------------------------------|
+| Local | `rumba.testbeds.local` | Single machine, processes run locally |
+| LocalNet | `rumba.testbeds.localnet` | Linux bridges + veth pairs, supports netem |
+| Docker | `rumba.testbeds.dockertb` | Docker containers with bridge networking |
+| Emulab | `rumba.testbeds.emulab` | Emulab / iMinds testbed (SSH-based) |
+| jFed | `rumba.testbeds.jfed` | Virtual Wall, ExoGENI, CloudLab via jFed |
+
+## CLI Tools
+
+The `rumba-access` command opens an SSH session to an experiment node:
+
+```bash
+rumba-access <node-name>
+```
+
+## More Examples
+
+See the [examples/](examples/) directory for additional topologies and
+usage patterns, including multi-layer stacking, Docker deployments,
+storyboard scripting, and large-scale experiments.
+
+## Citation
+
+If you use Rumba in your research, please cite:
+
+> S. Vrijders, D. Staessens, M. Capitani, and V. Maffione,
+> "Rumba: A Python framework for automating large-scale recursive
+> internet experiments on GENI and FIRE+," *IEEE*, 2018.
+
+## License
+
+Rumba is licensed under the
+[GNU Lesser General Public License v2.1](LICENSE).
+
+See [AUTHORS](AUTHORS) for the list of contributors.
diff --git a/doc/irati.rst b/doc/irati.rst
deleted file mode 100644
index 0ad496a..0000000
--- a/doc/irati.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-IRATI
-=============
-
-`IRATI <https://github.com/IRATI/stack>`_ is an open source
-implementation of the RINA architecture targeted at the OS/Linux
-system, initially developed by the FP7-IRATI project.
-
-.. automodule:: rumba.prototypes.irati
- :member-order: bysource
- :show-inheritance:
- :inherited-members:
diff --git a/doc/model.rst b/doc/model.rst
index 1ced817..454de7e 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -6,13 +6,21 @@ blocks listed below.
.. autoclass:: rumba.model.Node
-.. autoclass:: rumba.model.DIF
+.. autoclass:: rumba.model.Layer
-.. autoclass:: rumba.model.NormalDIF
+.. autoclass:: rumba.model.UnicastLayer
-.. autoclass:: rumba.model.ShimUDPDIF
+.. autoclass:: rumba.model.BroadcastLayer
-.. autoclass:: rumba.model.ShimEthDIF
+.. autoclass:: rumba.model.EthDixLayer
+
+.. autoclass:: rumba.model.EthLlcLayer
+
+.. autoclass:: rumba.model.Udp4Layer
+
+.. autoclass:: rumba.model.Udp6Layer
+
+.. autoclass:: rumba.model.LocalLayer
.. autoclass:: rumba.model.Distribution
diff --git a/doc/prototype.rst b/doc/prototype.rst
index a827dcb..0a17719 100644
--- a/doc/prototype.rst
+++ b/doc/prototype.rst
@@ -10,6 +10,4 @@ provided by the base class.
Specific implementations of the Testbed class:
.. toctree::
- irati
- rlite
ouroboros
diff --git a/doc/qemu.rst b/doc/qemu.rst
deleted file mode 100644
index 375ab55..0000000
--- a/doc/qemu.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-QEMU
-=============
-
-`QEMU <http://wiki.qemu-project.org/Main_Page>`_ is a generic and open
-source machine emulator and virtualizer.
-
-In order to use the qemu testbed, the user should install the qemu and
-bridge-utils packages on which the testbed depends: ::
-
- $ sudo apt-get install bridge-utils qemu
-
-.. automodule:: rumba.testbeds.qemu
- :member-order: bysource
- :show-inheritance:
- :inherited-members:
diff --git a/doc/rlite.rst b/doc/rlite.rst
deleted file mode 100644
index 171c4b8..0000000
--- a/doc/rlite.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-rlite
-=============
-
-`rlite <https://gitlab.com/arcfire/rlite>`_ is a lightweight Free
-and Open Source implementation of the Recursive InterNetwork
-Architecture (RINA) for GNU/Linux operating systems.
-
-.. automodule:: rumba.prototypes.rlite
- :member-order: bysource
- :show-inheritance:
- :inherited-members:
diff --git a/doc/testbed.rst b/doc/testbed.rst
index 0158904..45ac2c8 100644
--- a/doc/testbed.rst
+++ b/doc/testbed.rst
@@ -14,4 +14,3 @@ Specific implementations of the Testbed class:
emulab
jfed
local
- qemu
diff --git a/doc/workflow.rst b/doc/workflow.rst
index d0e56a7..1c35b91 100644
--- a/doc/workflow.rst
+++ b/doc/workflow.rst
@@ -2,14 +2,14 @@ Workflow
************************
1. Define the network graph, creating instances of model.Node and
- model.DIF classes. Experiments can import rumba.model to obtain all
+ model.Layer classes. Experiments can import rumba.model to obtain all
Rumba classes needed to run an experiment.
-2. Create an instance of a specific model.Testbed class (QEMU, Docker,
+2. Create an instance of a specific model.Testbed class (Docker,
Local, Emulab or jFed).
3. Create an instance of the selected prototype.Experiment class
- (Ouroboros, rlite, IRATI), passing the testbed instance and a list
+ (Ouroboros), passing the testbed instance and a list
of Node instances.
* At the end of the base Experiment constructor, the
diff --git a/examples/congestion.py b/examples/congestion.py
new file mode 100755
index 0000000..844f136
--- /dev/null
+++ b/examples/congestion.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+# An example script using the rumba package
+
+from rumba.model import Node, UnicastLayer, EthDixLayer
+from rumba.storyboard import *
+from rumba.utils import ExperimentManager, PROMPT_SWAPOUT
+from rumba.topologies import build_star
+
+# import testbed plugins
+import rumba.testbeds.jfed as jfed
+
+# import Ouroboros prototype plugin
+import rumba.prototypes.ouroboros as our
+
+import rumba.log as log
+
+
+__all__ = ["exp", "sb", "nodes", "main", "run"]
+
+log.set_logging_level('DEBUG')
+
+n1 = UnicastLayer("n1")
+
+leaves, routerNode = build_star(
+ ["client1", "client2", "server"], n1, hub_name="router")
+clientNode1, clientNode2, serverNode = leaves
+
+nodes = ["client1", "client2", "router", "server"]
+
+tb = jfed.Testbed(exp_name='cc2',
+ cert_file='/path/to/cert.pem',
+ authority='wall1.ilabt.iminds.be',
+ image='UBUNTU18-64-STD',
+ username='username',
+ exp_hours='1',
+ proj_name='ouroborosrocks')
+
+exp = our.Experiment(tb,
+ nodes=[clientNode1, clientNode2, routerNode, serverNode],
+ # git_repo='https://codeberg.org/o7s/ouroboros',
+ git_branch='be',
+ build_options='-DCMAKE_BUILD_TYPE=Debug '
+ '-DSHM_BUFFER_SIZE=131072 '
+ '-DIPCP_ETH_PAD_RUNT_FRAMES=false '
+ '-DDISABLE_CORE_LOCK=false '
+ '-DPCP_SCHED_THR_MUL=1',
+ add_packages=['ethtool'],
+ influxdb={
+ 'ip': '127.0.0.1',
+ 'port': 8086,
+ 'org': "Ouroboros",
+ 'token': "your-influxdb-token"
+ })
+
+sb = StoryBoard(experiment=exp, duration=1500, servers=[])
+
+
+def run():
+ sb.run_command("server",
+ 'irm bind prog ocbr name ocbr_server;'
+ 'irm name register ocbr_server layer n1;'
+ 'ocbr --listen > /dev/null 2>&1 &')
+ sb.run_command("client1",
+ 'irm bind prog ocbr name ocbr_client1;'
+ 'irm name register ocbr_client1 layer n1;'
+ 'ocbr --listen > /dev/null 2>&1 &')
+ sb.run_command("server",
+ 'irm bind prog oping name oping_server;'
+ 'irm name register oping_server layer n1;'
+ 'oping --listen > /dev/null 2>&1 &')
+ sb.run_command("client1", "for sz in `seq 0 10 200 1000`;"
+ "do oping -n oping_server -i 0ms -s $sz -d 120;"
+ "done")
+ sb.run_command("client1", "ocbr -n ocbr_server -r 70M -d 600 --spin > /dev/null 2>&1 &")
+ sb.run_command("server", "ocbr -n ocbr_client1 -r 70M -d 600 --spin > /dev/null 2>&1 &")
+ time.sleep(30)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 70M -d 30 --spin > /dev/null 2>&1 &")
+ time.sleep(45)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 20M -d 30 --spin > /dev/null 2>&1 &")
+ time.sleep(45)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 1400 --spin > /dev/null 2>&1 &")
+ time.sleep(45)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 1200 --spin > /dev/null 2>&1 &")
+ time.sleep(45)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 1000 --spin > /dev/null 2>&1 &")
+ time.sleep(45)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 800 --spin > /dev/null 2>&1 &")
+ time.sleep(35)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 500 --spin > /dev/null 2>&1 &")
+ time.sleep(35)
+ sb.run_command("client2", "ocbr -n ocbr_server -r 60M -d 30 -s 200 --spin > /dev/null 2>&1 &")
+ time.sleep(200)
+# sb.ouroboros_stop_metrics_exporter(nodes)
+# exp.terminate_prototype()
+
+
+def main():
+ with ExperimentManager(exp, swap_out_strategy=PROMPT_SWAPOUT):
+ exp.swap_in()
+ exp.install_prototype()
+ exp.set_phy_link_rate_between("router", "server", 100)
+ exp.start_metrics_exporter(nodes, interval=300)
+ exp.bootstrap_prototype()
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/converged-operator-network.py b/examples/converged-operator-network.py
index 42df95d..eb604f9 100755
--- a/examples/converged-operator-network.py
+++ b/examples/converged-operator-network.py
@@ -8,176 +8,171 @@ from rumba.storyboard import *
from rumba.utils import *
# import testbed plugins
-import rumba.testbeds.emulab as emulab
import rumba.testbeds.jfed as jfed
-import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
log.set_logging_level('DEBUG')
-f1 = NormalDIF("fixed")
-l1 = NormalDIF("LTE")
-m1 = NormalDIF("metro1")
-m2 = NormalDIF("metro2")
-m3 = NormalDIF("metro3")
-c1 = NormalDIF("core")
-n1 = NormalDIF("overlay")
+f1 = UnicastLayer("fixed")
+l1 = UnicastLayer("LTE")
+m1 = UnicastLayer("metro1")
+m2 = UnicastLayer("metro2")
+m3 = UnicastLayer("metro3")
+c1 = UnicastLayer("core")
+n1 = UnicastLayer("overlay")
-f1e1 = ShimEthDIF("f1e1")
-f1e2 = ShimEthDIF("f1e2")
-f1e3 = ShimEthDIF("f1e3")
+f1e1 = EthDixLayer("f1e1")
+f1e2 = EthDixLayer("f1e2")
+f1e3 = EthDixLayer("f1e3")
-l1e1 = ShimEthDIF("l1e1")
+l1e1 = EthDixLayer("l1e1")
-m1f1 = ShimEthDIF("m1f1")
+m1f1 = EthDixLayer("m1f1")
-m1e1 = ShimEthDIF("m1e1")
-m1e2 = ShimEthDIF("m1e2")
-m1e3 = ShimEthDIF("m1e3")
-m1e4 = ShimEthDIF("m1e4")
-m1e5 = ShimEthDIF("m1e5")
-m1e6 = ShimEthDIF("m1e6")
+m1e1 = EthDixLayer("m1e1")
+m1e2 = EthDixLayer("m1e2")
+m1e3 = EthDixLayer("m1e3")
+m1e4 = EthDixLayer("m1e4")
+m1e5 = EthDixLayer("m1e5")
+m1e6 = EthDixLayer("m1e6")
-m2l1 = ShimEthDIF("m2l1")
+m2l1 = EthDixLayer("m2l1")
-m2e1 = ShimEthDIF("m2e1")
-m2e2 = ShimEthDIF("m2e2")
-m2e3 = ShimEthDIF("m2e3")
-m2e4 = ShimEthDIF("m2e4")
-m2e5 = ShimEthDIF("m2e5")
-m2e6 = ShimEthDIF("m2e6")
+m2e1 = EthDixLayer("m2e1")
+m2e2 = EthDixLayer("m2e2")
+m2e3 = EthDixLayer("m2e3")
+m2e4 = EthDixLayer("m2e4")
+m2e5 = EthDixLayer("m2e5")
+m2e6 = EthDixLayer("m2e6")
-m3e1 = ShimEthDIF("m3e1")
-m3e2 = ShimEthDIF("m3e2")
-m3e3 = ShimEthDIF("m3e3")
-m3e4 = ShimEthDIF("m3e4")
-m3e5 = ShimEthDIF("m3e5")
-m3e6 = ShimEthDIF("m3e6")
+m3e1 = EthDixLayer("m3e1")
+m3e2 = EthDixLayer("m3e2")
+m3e3 = EthDixLayer("m3e3")
+m3e4 = EthDixLayer("m3e4")
+m3e5 = EthDixLayer("m3e5")
+m3e6 = EthDixLayer("m3e6")
-m1c1 = ShimEthDIF("m1c1")
-m1c2 = ShimEthDIF("m1c2")
+m1c1 = EthDixLayer("m1c1")
+m1c2 = EthDixLayer("m1c2")
-m2c1 = ShimEthDIF("m2c1")
-m2c2 = ShimEthDIF("m2c2")
+m2c1 = EthDixLayer("m2c1")
+m2c2 = EthDixLayer("m2c2")
-m3c1 = ShimEthDIF("m3c1")
-m3c2 = ShimEthDIF("m3c2")
+m3c1 = EthDixLayer("m3c1")
+m3c2 = EthDixLayer("m3c2")
-c1e1 = ShimEthDIF("c1e1")
-c1e2 = ShimEthDIF("c1e2")
-c1e3 = ShimEthDIF("c1e3")
-c1e4 = ShimEthDIF("c1e4")
+c1e1 = EthDixLayer("c1e1")
+c1e2 = EthDixLayer("c1e2")
+c1e3 = EthDixLayer("c1e3")
+c1e4 = EthDixLayer("c1e4")
f1n1 = Node("f1n1",
- difs = [n1, f1, f1e3],
- dif_registrations = {f1 : [f1e3], n1: [f1]})
+ layers = [n1, f1, f1e3],
+ registrations = {f1 : [f1e3], n1: [f1]})
f1n2 = Node("f1n2",
- difs = [f1, f1e1, f1e2, f1e3, m1f1],
- dif_registrations = {f1: [f1e1, f1e2, f1e3, m1f1]})
+ layers = [f1, f1e1, f1e2, f1e3, m1f1],
+ registrations = {f1: [f1e1, f1e2, f1e3, m1f1]})
f1n3 = Node("f1n3",
- difs = [n1, f1, f1e1],
- dif_registrations = {f1: [f1e1], n1: [f1]})
+ layers = [n1, f1, f1e1],
+ registrations = {f1: [f1e1], n1: [f1]})
f1n4 = Node("f1n4",
- difs = [n1, f1, f1e2],
- dif_registrations = {f1: [f1e2], n1: [f1]})
+ layers = [n1, f1, f1e2],
+ registrations = {f1: [f1e2], n1: [f1]})
l1n1 = Node("l1n1",
- difs = [n1, l1, m2l1, l1e1],
- dif_registrations = {l1: [m2l1, l1e1], n1: [l1]})
+ layers = [n1, l1, m2l1, l1e1],
+ registrations = {l1: [m2l1, l1e1], n1: [l1]})
l1n2 = Node("l1n2",
- difs = [n1, l1, l1e1],
- dif_registrations = {l1: [l1e1], n1: [l1]})
+ layers = [n1, l1, l1e1],
+ registrations = {l1: [l1e1], n1: [l1]})
m1n1 = Node("m1n1",
- difs = [m1, m1e1, m1e6],
- dif_registrations = {m1: [m1e1, m1e6]})
+ layers = [m1, m1e1, m1e6],
+ registrations = {m1: [m1e1, m1e6]})
m1n2 = Node("m1n2",
- difs = [m1, m1e1, m1e2],
- dif_registrations = {m1: [m1e1, m1e2]})
+ layers = [m1, m1e1, m1e2],
+ registrations = {m1: [m1e1, m1e2]})
m1n3 = Node("m1n3",
- difs = [m1, m1e2, m1e3, m1c1],
- dif_registrations = {m1: [m1e2, m1e3, m1c1]})
+ layers = [m1, m1e2, m1e3, m1c1],
+ registrations = {m1: [m1e2, m1e3, m1c1]})
m1n4 = Node("m1n4",
- difs = [m1, m1e3, m1e4, m1c2],
- dif_registrations = {m1: [m1e3, m1e4, m1c2]})
+ layers = [m1, m1e3, m1e4, m1c2],
+ registrations = {m1: [m1e3, m1e4, m1c2]})
m1n5 = Node("m1n5",
- difs = [m1, m1e4, m1e5],
- dif_registrations = {m1: [m1e4, m1e5]})
+ layers = [m1, m1e4, m1e5],
+ registrations = {m1: [m1e4, m1e5]})
m1n6 = Node("m1n6",
- difs = [n1, m1, f1, m1e5, m1e6, m1f1],
- dif_registrations = {m1: [m1e5, m1e6],
+ layers = [n1, m1, f1, m1e5, m1e6, m1f1],
+ registrations = {m1: [m1e5, m1e6],
f1: [m1f1], n1: [f1, m1]})
m2n1 = Node("m2n1",
- difs = [m2, m2e1, m2e6],
- dif_registrations = {m2: [m2e1, m2e6]})
+ layers = [m2, m2e1, m2e6],
+ registrations = {m2: [m2e1, m2e6]})
m2n2 = Node("m2n2",
- difs = [m2, m2e1, m2e2],
- dif_registrations = {m2: [m2e1, m2e2]})
+ layers = [m2, m2e1, m2e2],
+ registrations = {m2: [m2e1, m2e2]})
m2n3 = Node("m2n3",
- difs = [m2, m2e2, m2e3, m2c1],
- dif_registrations = {m2: [m2e2, m2e3, m2c1]})
+ layers = [m2, m2e2, m2e3, m2c1],
+ registrations = {m2: [m2e2, m2e3, m2c1]})
m2n4 = Node("m2n4",
- difs = [m2, m2e3, m2e4, m2c2],
- dif_registrations = {m2: [m2e3, m2e4, m2c2]})
+ layers = [m2, m2e3, m2e4, m2c2],
+ registrations = {m2: [m2e3, m2e4, m2c2]})
m2n5 = Node("m2n5",
- difs = [m2, m2e4, m2e5],
- dif_registrations = {m2: [m2e4, m2e5]})
+ layers = [m2, m2e4, m2e5],
+ registrations = {m2: [m2e4, m2e5]})
m2n6 = Node("m2n6",
- difs = [n1, m2, l1, m2e5, m2e6, m2l1],
- dif_registrations = {m2: [m2e5, m2e6], l1: [m2l1], n1: [l1, m2]})
+ layers = [n1, m2, l1, m2e5, m2e6, m2l1],
+ registrations = {m2: [m2e5, m2e6], l1: [m2l1], n1: [l1, m2]})
m3n1 = Node("m3n1",
- difs = [m3, m3e1, m3e6],
- dif_registrations = {m3: [m3e1, m3e6]})
+ layers = [m3, m3e1, m3e6],
+ registrations = {m3: [m3e1, m3e6]})
m3n2 = Node("m3n2",
- difs = [m3, m3e1, m3e2],
- dif_registrations = {m3: [m3e1, m3e2]})
+ layers = [m3, m3e1, m3e2],
+ registrations = {m3: [m3e1, m3e2]})
m3n3 = Node("m3n3",
- difs = [m3, m3e2, m3e3, m3c1],
- dif_registrations = {m3: [m3e2, m3e3, m3c1]})
+ layers = [m3, m3e2, m3e3, m3c1],
+ registrations = {m3: [m3e2, m3e3, m3c1]})
m3n4 = Node("m3n4",
- difs = [m3, m3e3, m3e4, m3c2],
- dif_registrations = {m3: [m3e3, m3e4, m3c2]})
+ layers = [m3, m3e3, m3e4, m3c2],
+ registrations = {m3: [m3e3, m3e4, m3c2]})
m3n5 = Node("m3n5",
- difs = [m3, m3e4, m3e5],
- dif_registrations = {m3: [m3e4, m3e5]})
+ layers = [m3, m3e4, m3e5],
+ registrations = {m3: [m3e4, m3e5]})
m3n6 = Node("m3n6",
- difs = [m3, m3e5, m3e6],
- dif_registrations = {m3: [m3e5, m3e6]})
+ layers = [m3, m3e5, m3e6],
+ registrations = {m3: [m3e5, m3e6]})
c1n1 = Node("c1n1",
- difs = [n1, c1, m1, m2, m1c1, m2c1, c1e1, c1e4],
- dif_registrations = {c1: [c1e1, c1e4], m1: [c1, m1c1],
+ layers = [n1, c1, m1, m2, m1c1, m2c1, c1e1, c1e4],
+ registrations = {c1: [c1e1, c1e4], m1: [c1, m1c1],
m2: [c1, m2c1], n1: [m1, m2]})
c1n2 = Node("c1n2",
- difs = [n1, c1, m2, m3, m2c2, m3c1, c1e1, c1e2],
- dif_registrations = {c1: [c1e1, c1e2], m2: [c1, m2c2],
+ layers = [n1, c1, m2, m3, m2c2, m3c1, c1e1, c1e2],
+ registrations = {c1: [c1e1, c1e2], m2: [c1, m2c2],
m3: [c1, m3c1], n1: [m2, m3]})
c1n3 = Node("c1n3",
- difs = [n1, c1, m3, m3c2, c1e2, c1e3],
- dif_registrations = {c1: [c1e2, c1e3], m3: [c1, m3c2], n1: [m3]})
+ layers = [n1, c1, m3, m3c2, c1e2, c1e3],
+ registrations = {c1: [c1e2, c1e3], m3: [c1, m3c2], n1: [m3]})
c1n4 = Node("c1n4",
- difs = [n1, c1, m1, m1c2, c1e3, c1e4],
- dif_registrations = {c1: [c1e3, c1e4], m1: [c1, m1c2], n1: [m1]})
+ layers = [n1, c1, m1, m1c2, c1e3, c1e4],
+ registrations = {c1: [c1e3, c1e4], m1: [c1, m1c2], n1: [m1]})
tb = jfed.Testbed(exp_name = "arcfiret43",
proj_name = "rumba",
- cert_file = "/home/dstaesse/jfed/cert.pem",
+ cert_file = "/path/to/cert.pem",
authority = "exogeni.net",
- username = "dstaesse")
+ username = "username")
-exp = rl.Experiment(tb, nodes = [f1n1, f1n2, f1n3, f1n4,
+exp = our.Experiment(tb, nodes = [f1n1, f1n2, f1n3, f1n4,
l1n1, l1n2,
m1n1, m1n2, m1n3, m1n4, m1n5, m1n6,
m2n1, m2n2, m2n3, m2n4, m2n5, m2n6,
@@ -190,10 +185,10 @@ with ExperimentManager(exp, swap_out_strategy=PAUSE_SWAPOUT):
exp.swap_in()
exp.install_prototype()
exp.bootstrap_prototype()
- c1 = Client("rinaperf", options ="-i 10000 -s 1000 -c 0 -d overlay",
+ c1 = Client("operf", options ="-s 1000 -d 0",
nodes=[f1n1, f1n3, f1n4, l1n2])
- s1 = Server("rinaperf", arrival_rate=2, mean_duration=5,
- options = "-l -d overlay", nodes = [c1n1], clients = [c1])
+ s1 = Server("operf", arrival_rate=2, mean_duration=5,
+ options = "-l", nodes = [c1n1], clients = [c1])
sb = StoryBoard(experiment=exp, duration=3600, servers = [s1])
sb.generate_script()
sb.start()
diff --git a/examples/datacenter.py b/examples/datacenter.py
new file mode 100755
index 0000000..ab02a2a
--- /dev/null
+++ b/examples/datacenter.py
@@ -0,0 +1,78 @@
+from rumba.model import Node, UnicastLayer, EthDixLayer
+import rumba.log as log
+# import testbed plugins
+import rumba.testbeds.local as local
+from rumba.storyboard import *
+# import Ouroboros prototype plugin
+import rumba.prototypes.ouroboros as our
+
+
+__all__ = ["local_exp", "name_nodes", "start_net"]
+
+
+log.set_logging_level("DEBUG")
+
+
+def buildRack(nRack, numHosts=2):
+ rack = UnicastLayer(f"rack{nRack}")
+ cables, hosts = [], []
+
+ for n in range(numHosts):
+ cables.append(EthDixLayer(f"e{n}r{nRack}"))
+ hosts.append(
+ Node(
+ f"h{n}r{nRack}",
+ layers=[cables[-1], rack],
+ registrations={rack: [cables[-1]]},
+ )
+ )
+
+ ToR_switch = Node(
+ f"s{nRack}", layers=[*cables, rack], registrations={rack: cables}
+ )
+
+ return [*hosts, ToR_switch] # list of nodes, ToR_switch latest
+
+
+def build(numRacks=2, numHostsPerRack=2):
+ nodes, router_cables = [], []
+
+ for i in range(numRacks):
+ router_cables.append(EthDixLayer(f"router_{i}"))
+ nodes += buildRack(i, numHosts=numHostsPerRack)
+ nodes[-1].add_layer(router_cables[-1])
+ #nodes[-1].add_registration(router_cables[-1])
+
+ Datacenter_router = Node("router", layers=router_cables)
+
+ return [*nodes, Datacenter_router]
+
+
+nodes = build()
+
+name_nodes = list(map(lambda node: node.name, nodes))
+
+local_tb = local.Testbed()
+
+
+local_exp = our.Experiment(local_tb, nodes=nodes)
+
+
+def start_net():
+ local_exp.swap_in()
+ local_exp.install_prototype()
+ local_exp.bootstrap_prototype()
+
+
+def print_network():
+ local_exp.export_connectivity_graph("datacenter_physical.pdf")
+
+ for layer in local_exp.layer_ordering:
+ if not isinstance(layer, EthDixLayer):
+ local_exp.export_layer_graph(f"datacenter_layer_{layer.name}.pdf", layer)
+
+
+if __name__ == '__main__':
+ #start_net()
+ print_network()
+
diff --git a/examples/docker-ouroboros.py b/examples/docker-ouroboros.py
index ce30fdb..16f6f1d 100755
--- a/examples/docker-ouroboros.py
+++ b/examples/docker-ouroboros.py
@@ -4,6 +4,7 @@
from rumba.model import *
from rumba.utils import ExperimentManager
+from rumba.topologies import build_ring
# import testbed plugins
import rumba.testbeds.dockertb as docker
@@ -25,33 +26,13 @@ args = argparser.parse_args()
log.set_logging_level('DEBUG')
-n01 = NormalDIF("n01")
+n01 = UnicastLayer("n01")
if (args.nodes < 3):
print("The ouroboros must be longer than 2 nodes")
sys.exit(-1)
-nodes = []
-
-shim_prev = None
-for i in range(0, args.nodes):
- shim = ShimEthDIF("e" + str(i))
-
- if shim_prev == None and shim != None:
- node = Node("node" + str(i), difs = [n01, shim],
- dif_registrations = {n01 : [shim]})
- elif shim_prev != None and shim != None:
- node = Node("node" + str(i), difs = [n01, shim, shim_prev],
- dif_registrations = {n01 : [shim, shim_prev]})
- else:
- node = Node("node" + str(i), difs = [n01, shim_prev],
- dif_registrations = {n01 : [shim_prev]})
-
- shim_prev = shim
- nodes.append(node)
-
-nodes[0].add_dif(shim_prev)
-nodes[0].add_dif_registration(n01, shim_prev)
+nodes = build_ring(args.nodes, n01)
tb = docker.Testbed(exp_name = "ouroboros")
diff --git a/examples/example-script.rsb b/examples/example-script.rsb
index 1d49a66..495bf4c 100644
--- a/examples/example-script.rsb
+++ b/examples/example-script.rsb
@@ -35,11 +35,11 @@ echo2, 18 &ev4| $sb run_client_of $Server.server_b
# if no object ($ handle) is provided, the storyboard
# is assumed as the object
-14 | $Node.node_a set_link_state $ShimEthDIF.e1 'up'
+14 | $Node.node_a set_link_state $EthLayer.e1 'up'
-16 | $ShimEthDIF.e1 set_delay 30 10
+16 | $EthLayer.e1 set_delay 30 10
-28 | $ShimEthDIF.e1 set_loss 2
+28 | $EthLayer.e1 set_loss 2
diff --git a/examples/example.py b/examples/example.py
index 2887cd5..9febd6e 100755
--- a/examples/example.py
+++ b/examples/example.py
@@ -7,59 +7,42 @@ from rumba.utils import ExperimentManager
from rumba.storyboard import *
# import testbed plugins
-import rumba.testbeds.emulab as emulab
import rumba.testbeds.jfed as jfed
-import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import rumba.log as log
log.set_logging_level('DEBUG')
-n1 = NormalDIF("n1")
+n1 = UnicastLayer("n1")
n1.add_policy("rmt.pff", "lfa")
n1.add_policy("security-manager", "passwd")
-e1 = ShimEthDIF("e1")
+e1 = EthDixLayer("e1")
a = Node("a",
- difs=[n1, e1],
- dif_registrations={n1: [e1]})
+ layers=[n1, e1],
+ registrations={n1: [e1]})
b = Node("b",
- difs=[e1, n1],
- dif_registrations={n1: [e1]})
+ layers=[e1, n1],
+ registrations={n1: [e1]})
tb = jfed.Testbed(exp_name="example1",
username="user1",
cert_file="/home/user1/cert.pem")
-exp = rl.Experiment(tb, nodes=[a, b])
+exp = our.Experiment(tb, nodes=[a, b])
print(exp)
# General setup (can be reused in other scripts as-is)
storyboard = StoryBoard(duration=30)
-# Clients can be applications that just keep running, and will be
-# stopped by a SIGINT...
-client1 = Client("rinaperf",
- options="-t perf -s 1000 -c 0")
-
-# ... or a custom shutdown method can be provided.
-client2 = Client("rinaperf",
- options="-t perf -s 1000 -D <duration>",
- shutdown="")
-
-server = Server("rinaperf", options="-l", arrival_rate=0.5,
- mean_duration=5, clients=[client1, client2])
# Experiment-specific configuration:
diff --git a/examples/geant.py b/examples/geant.py
index 7fb59ad..720dd9e 100755
--- a/examples/geant.py
+++ b/examples/geant.py
@@ -4,16 +4,10 @@
from rumba.model import *
# import testbed plugins
-import rumba.testbeds.emulab as emulab
import rumba.testbeds.jfed as jfed
-import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
-import rumba.testbeds.dockertb as docker
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import rumba.log as log
@@ -26,55 +20,55 @@ import time
log.set_logging_level('DEBUG')
-n1 = NormalDIF("n1")
+n1 = UnicastLayer("n1")
n1.add_policy("routing", "lfa")
n1.add_policy("pff", "alternate")
-e1 = ShimEthDIF("e1")
-e2 = ShimEthDIF("e2")
-e3 = ShimEthDIF("e3")
-e4 = ShimEthDIF("e4")
-e5 = ShimEthDIF("e5")
-e6 = ShimEthDIF("e6")
-e7 = ShimEthDIF("e7")
-e8 = ShimEthDIF("e8")
-e9 = ShimEthDIF("e9")
-e10 = ShimEthDIF("e10")
-e11 = ShimEthDIF("e11")
-e12 = ShimEthDIF("e12")
-e13 = ShimEthDIF("e13")
-e14 = ShimEthDIF("e14")
-e15 = ShimEthDIF("e15")
-e16 = ShimEthDIF("e16")
-e17 = ShimEthDIF("e17")
-e18 = ShimEthDIF("e18")
-e19 = ShimEthDIF("e19")
-e20 = ShimEthDIF("e20")
-e21 = ShimEthDIF("e21")
-e22 = ShimEthDIF("e22")
-e23 = ShimEthDIF("e23")
-e24 = ShimEthDIF("e24")
-e25 = ShimEthDIF("e25")
-e26 = ShimEthDIF("e26")
-e27 = ShimEthDIF("e27")
-e28 = ShimEthDIF("e28")
-e29 = ShimEthDIF("e29")
-e30 = ShimEthDIF("e30")
-e31 = ShimEthDIF("e31")
-e32 = ShimEthDIF("e32")
-e33 = ShimEthDIF("e33")
-e34 = ShimEthDIF("e34")
-e35 = ShimEthDIF("e35")
-e36 = ShimEthDIF("e36")
-e37 = ShimEthDIF("e37")
-e38 = ShimEthDIF("e38")
-e39 = ShimEthDIF("e39")
-e40 = ShimEthDIF("e40")
-e41 = ShimEthDIF("e41")
-e42 = ShimEthDIF("e42")
-e43 = ShimEthDIF("e43")
-e44 = ShimEthDIF("e44")
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+e3 = EthDixLayer("e3")
+e4 = EthDixLayer("e4")
+e5 = EthDixLayer("e5")
+e6 = EthDixLayer("e6")
+e7 = EthDixLayer("e7")
+e8 = EthDixLayer("e8")
+e9 = EthDixLayer("e9")
+e10 = EthDixLayer("e10")
+e11 = EthDixLayer("e11")
+e12 = EthDixLayer("e12")
+e13 = EthDixLayer("e13")
+e14 = EthDixLayer("e14")
+e15 = EthDixLayer("e15")
+e16 = EthDixLayer("e16")
+e17 = EthDixLayer("e17")
+e18 = EthDixLayer("e18")
+e19 = EthDixLayer("e19")
+e20 = EthDixLayer("e20")
+e21 = EthDixLayer("e21")
+e22 = EthDixLayer("e22")
+e23 = EthDixLayer("e23")
+e24 = EthDixLayer("e24")
+e25 = EthDixLayer("e25")
+e26 = EthDixLayer("e26")
+e27 = EthDixLayer("e27")
+e28 = EthDixLayer("e28")
+e29 = EthDixLayer("e29")
+e30 = EthDixLayer("e30")
+e31 = EthDixLayer("e31")
+e32 = EthDixLayer("e32")
+e33 = EthDixLayer("e33")
+e34 = EthDixLayer("e34")
+e35 = EthDixLayer("e35")
+e36 = EthDixLayer("e36")
+e37 = EthDixLayer("e37")
+e38 = EthDixLayer("e38")
+e39 = EthDixLayer("e39")
+e40 = EthDixLayer("e40")
+e41 = EthDixLayer("e41")
+e42 = EthDixLayer("e42")
+e43 = EthDixLayer("e43")
+e44 = EthDixLayer("e44")
layers = [e1, e2, e3, e4, e5, e6, e7, e8, e9, e10,
e11, e12, e13, e14, e15, e16, e17, e18, e19, e20,
@@ -83,130 +77,129 @@ layers = [e1, e2, e3, e4, e5, e6, e7, e8, e9, e10,
e41, e42, e43, e44]
lisbon = Node("lisbon",
- difs = [n1, e1, e2],
- dif_registrations = {n1: [e1,e2]})
+ layers = [n1, e1, e2],
+ registrations = {n1: [e1,e2]})
madrid = Node("madrid",
- difs = [n1, e1, e6, e9, e11],
- dif_registrations = {n1 : [e1, e6, e9, e11]})
+ layers = [n1, e1, e6, e9, e11],
+ registrations = {n1 : [e1, e6, e9, e11]})
london = Node("london",
- difs = [n1, e2, e3, e4, e5],
- dif_registrations = {n1 : [e2, e3, e4, e5]})
+ layers = [n1, e2, e3, e4, e5],
+ registrations = {n1 : [e2, e3, e4, e5]})
dublin = Node("dublin",
- difs = [n1, e3],
- dif_registrations = {n1 : [e3]})
+ layers = [n1, e3],
+ registrations = {n1 : [e3]})
paris = Node("paris",
- difs = [n1, e4, e6, e7, e8],
- dif_registrations = {n1 : [e4, e6, e7, e8]})
+ layers = [n1, e4, e6, e7, e8],
+ registrations = {n1 : [e4, e6, e7, e8]})
brussels = Node("brussels",
- difs = [n1, e5, e12],
- dif_registrations = {n1 : [e5, e12]})
+ layers = [n1, e5, e12],
+ registrations = {n1 : [e5, e12]})
luxemburg = Node("luxemburg",
- difs = [n1, e7, e16],
- dif_registrations = {n1 : [e7, e16]})
+ layers = [n1, e7, e16],
+ registrations = {n1 : [e7, e16]})
bern = Node("bern",
- difs = [n1, e8, e9, e10, e15],
- dif_registrations = {n1 : [e8, e9, e10, e15]})
+ layers = [n1, e8, e9, e10, e15],
+ registrations = {n1 : [e8, e9, e10, e15]})
roma = Node("roma",
- difs = [n1, e10, e11, e13, e34, e28],
- dif_registrations = {n1 : [e10, e11, e13, e34, e28]})
+ layers = [n1, e10, e11, e13, e34, e28],
+ registrations = {n1 : [e10, e11, e13, e34, e28]})
amsterdam = Node("amsterdam",
- difs = [n1, e12, e14, e17, e18],
- dif_registrations = {n1 : [e12, e14, e17, e18]})
+ layers = [n1, e12, e14, e17, e18],
+ registrations = {n1 : [e12, e14, e17, e18]})
valleta = Node("valleta",
- difs = [n1, e13, e14],
- dif_registrations = {n1 : [e13, e14]})
+ layers = [n1, e13, e14],
+ registrations = {n1 : [e13, e14]})
berlin = Node("berlin",
- difs = [n1, e15, e16, e17, e30, e31],
- dif_registrations = {n1 : [e15, e16, e17, e30, e31]})
+ layers = [n1, e15, e16, e17, e30, e31],
+ registrations = {n1 : [e15, e16, e17, e30, e31]})
copenhagen = Node("copenhagen",
- difs = [n1, e18, e20, e22, e24, e19],
- dif_registrations = {n1 : [e18, e20, e22, e24, e19]})
+ layers = [n1, e18, e20, e22, e24, e19],
+ registrations = {n1 : [e18, e20, e22, e24, e19]})
oslo = Node("oslo",
- difs = [n1, e20, e21],
- dif_registrations = {n1 : [e20, e21]})
+ layers = [n1, e20, e21],
+ registrations = {n1 : [e20, e21]})
stockholm = Node("stockholm",
- difs = [n1, e21, e22],
- dif_registrations = {n1 : [e21, e22]})
+ layers = [n1, e21, e22],
+ registrations = {n1 : [e21, e22]})
tallin = Node("tallin",
- difs = [n1, e24, e25],
- dif_registrations = {n1 : [e24, e25]})
+ layers = [n1, e24, e25],
+ registrations = {n1 : [e24, e25]})
riga = Node("riga",
- difs = [n1, e25, e26],
- dif_registrations = {n1 : [e25, e26]})
+ layers = [n1, e25, e26],
+ registrations = {n1 : [e25, e26]})
vilnius = Node("vilnius",
- difs = [n1, e26, e27],
- dif_registrations = {n1 : [e26, e27]})
+ layers = [n1, e26, e27],
+ registrations = {n1 : [e26, e27]})
warsaw = Node("warsaw",
- difs = [n1, e27, e29],
- dif_registrations = {n1 : [e27, e29]})
+ layers = [n1, e27, e29],
+ registrations = {n1 : [e27, e29]})
praha = Node("praha",
- difs = [n1, e29, e30, e32],
- dif_registrations = {n1 : [e29, e30, e32]})
+ layers = [n1, e29, e30, e32],
+ registrations = {n1 : [e29, e30, e32]})
viena = Node("viena",
- difs = [n1, e32, e33, e35, e39, e28],
- dif_registrations = {n1 : [e32, e33, e35, e39, e28]})
+ layers = [n1, e32, e33, e35, e39, e28],
+ registrations = {n1 : [e32, e33, e35, e39, e28]})
budapest = Node("budapest",
- difs = [n1, e33, e37, e38, e43],
- dif_registrations = {n1 : [e33, e37, e38, e43]})
+ layers = [n1, e33, e37, e38, e43],
+ registrations = {n1 : [e33, e37, e38, e43]})
athens = Node("athens",
- difs = [n1, e39, e40, e44],
- dif_registrations = {n1 : [e39, e40, e44]})
+ layers = [n1, e39, e40, e44],
+ registrations = {n1 : [e39, e40, e44]})
ljubljana = Node("ljubljana",
- difs = [n1, e35, e36],
- dif_registrations = {n1 : [e35, e36]})
+ layers = [n1, e35, e36],
+ registrations = {n1 : [e35, e36]})
zagreb = Node("zagreb",
- difs = [n1, e36, e37],
- dif_registrations = {n1 : [e36, e37]})
+ layers = [n1, e36, e37],
+ registrations = {n1 : [e36, e37]})
sofia = Node("sofia",
- difs = [n1, e38, e40, e42, e23],
- dif_registrations = {n1 : [e38, e40, e42, e23]})
+ layers = [n1, e38, e40, e42, e23],
+ registrations = {n1 : [e38, e40, e42, e23]})
bucharest = Node("bucharest",
- difs = [n1, e42, e43, e41],
- dif_registrations = {n1 : [e42, e43, e41]})
+ layers = [n1, e42, e43, e41],
+ registrations = {n1 : [e42, e43, e41]})
nicosia = Node("nicosia",
- difs = [n1, e44, e34],
- dif_registrations = {n1 : [e44, e34]})
+ layers = [n1, e44, e34],
+ registrations = {n1 : [e44, e34]})
ankara = Node("ankara",
- difs = [n1, e23, e41],
- dif_registrations = {n1 : [e23, e41]})
+ layers = [n1, e23, e41],
+ registrations = {n1 : [e23, e41]})
moscow = Node("moscow",
- difs = [n1, e31, e19],
- dif_registrations = {n1 : [e31, e19]})
+ layers = [n1, e31, e19],
+ registrations = {n1 : [e31, e19]})
tb = jfed.Testbed(exp_name = 'geant8',
- cert_file = '/home/sander/cert.pem',
+ cert_file = '/path/to/cert.pem',
authority = 'wall1.ilabt.iminds.be',
-# authority = 'exogeni.net:umassvmsite',
- username = 'sander',
+ username = 'username',
exp_hours = '200')
nodes = [lisbon, madrid, london, paris, dublin,
@@ -217,7 +210,7 @@ nodes = [lisbon, madrid, london, paris, dublin,
bucharest, nicosia, ankara, moscow]
exp = our.Experiment(tb, nodes=nodes,
- git_repo='https://bitbucket.org/sandervrijders/ouroboros.git',
+ git_repo='https://codeberg.org/o7s/ouroboros',
git_branch='rand3')
@@ -241,7 +234,7 @@ with ExperimentManager(exp, swap_out_strategy=AUTO_SWAPOUT):
arrival_rate=0.01,
mean_duration=duration,
s_id='oping_' + node.name,
- difs=n1
+ layers=n1
)
sb.add_server_on_node(s, node)
@@ -294,9 +287,9 @@ with ExperimentManager(exp, swap_out_strategy=AUTO_SWAPOUT):
prefix = layer.name + '_' + str(i)
sb.schedule_export_lsdb_total(390, prefix + '_lsdb.csv', n1)
- sb.schedule_export_dif_bandwidth(410, prefix + '_1.csv', n1)
- sb.schedule_export_dif_bandwidth(510, prefix + '_2.csv', n1)
- sb.schedule_export_dif_bandwidth(610, prefix + '_3.csv', n1)
- sb.schedule_export_dif_bandwidth(710, prefix + '_4.csv', n1)
+ sb.schedule_export_layer_bandwidth(410, prefix + '_1.csv', n1)
+ sb.schedule_export_layer_bandwidth(510, prefix + '_2.csv', n1)
+ sb.schedule_export_layer_bandwidth(610, prefix + '_3.csv', n1)
+ sb.schedule_export_layer_bandwidth(710, prefix + '_4.csv', n1)
sb.start()
diff --git a/examples/isps.py b/examples/isps.py
index 62a17ad..335c43b 100755
--- a/examples/isps.py
+++ b/examples/isps.py
@@ -15,23 +15,23 @@ import time
log.set_logging_level('DEBUG')
-n0 = NormalDIF("n0")
+n0 = UnicastLayer("n0")
-n1 = NormalDIF("n1")
+n1 = UnicastLayer("n1")
-n2 = NormalDIF("n2")
+n2 = UnicastLayer("n2")
n2.add_policy("routing", "lfa")
n2.add_policy("pff", "alternate")
-e0 = ShimEthDIF("e0")
-e1 = ShimEthDIF("e1")
-e2 = ShimEthDIF("e2")
-e3 = ShimEthDIF("e3")
-e4 = ShimEthDIF("e4")
-e5 = ShimEthDIF("e5")
-e6 = ShimEthDIF("e6")
-e7 = ShimEthDIF("e7")
-e8 = ShimEthDIF("e8")
+e0 = EthDixLayer("e0")
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+e3 = EthDixLayer("e3")
+e4 = EthDixLayer("e4")
+e5 = EthDixLayer("e5")
+e6 = EthDixLayer("e6")
+e7 = EthDixLayer("e7")
+e8 = EthDixLayer("e8")
es = [e0, e1, e2, e3, e4, e5, e6, e7, e8]
@@ -39,50 +39,50 @@ for e in es:
e.set_delay(5)
end0 = Node("end0",
- difs = [e0, e8, n2],
- dif_registrations = {n2: [e0, e8]})
+ layers = [e0, e8, n2],
+ registrations = {n2: [e0, e8]})
end1 = Node("end1",
- difs = [e4, e5, n2],
- dif_registrations = {n2: [e4, e5]})
+ layers = [e4, e5, n2],
+ registrations = {n2: [e4, e5]})
isp10 = Node("isp10",
- difs = [e0, e1, n2, n1],
- dif_registrations = {n2: [e0, n1], n1: [e1]})
+ layers = [e0, e1, n2, n1],
+ registrations = {n2: [e0, n1], n1: [e1]})
isp11 = Node("isp11",
- difs = [e1, e2, n1],
- dif_registrations = {n1: [e1, e2]})
+ layers = [e1, e2, n1],
+ registrations = {n1: [e1, e2]})
isp12 = Node("isp12",
- difs = [e2, e3, n1],
- dif_registrations = {n1: [e2, e3]})
+ layers = [e2, e3, n1],
+ registrations = {n1: [e2, e3]})
isp13 = Node("isp13",
- difs = [e3, e4, n2, n1],
- dif_registrations = {n2: [e4, n1], n1: [e3]})
+ layers = [e3, e4, n2, n1],
+ registrations = {n2: [e4, n1], n1: [e3]})
isp00 = Node("isp00",
- difs = [e8, e7, n2, n0],
- dif_registrations = {n2: [e8, n0], n0: [e7]})
+ layers = [e8, e7, n2, n0],
+ registrations = {n2: [e8, n0], n0: [e7]})
isp01 = Node("isp01",
- difs = [e7, e6, n0],
- dif_registrations = {n0: [e7, e6]})
+ layers = [e7, e6, n0],
+ registrations = {n0: [e7, e6]})
isp02 = Node("isp02",
- difs = [e6, e5, n2, n0],
- dif_registrations = {n2: [e5, n0], n0: [e6]})
+ layers = [e6, e5, n2, n0],
+ registrations = {n2: [e5, n0], n0: [e6]})
tb = jfed.Testbed(exp_name = 'case3',
- cert_file = '/home/sander/cert.pem',
- username = 'sander',
+ cert_file = '/path/to/cert.pem',
+ username = 'username',
exp_hours = '1')
nodes = [end0, end1, isp10, isp11, isp12, isp13, isp00, isp01, isp02]
exp = our.Experiment(tb, nodes=nodes,
- git_repo='https://bitbucket.org/sandervrijders/ouroboros.git',
+ git_repo='https://codeberg.org/o7s/ouroboros',
git_branch='rand3')
duration = 120
@@ -97,7 +97,7 @@ s = Server(
arrival_rate=0.01,
mean_duration=duration + 4,
s_id='oping',
- difs=n2
+ layers=n2
)
sb.add_server_on_node(s, end1)
diff --git a/examples/jfed-rlite.py b/examples/jfed-rlite.py
deleted file mode 100755
index 5cc087f..0000000
--- a/examples/jfed-rlite.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-from rumba.model import *
-from rumba.utils import ExperimentManager
-
-import rumba.testbeds.jfed as jfed
-import rumba.prototypes.rlite as rlite
-
-import rumba.log as log
-
-import argparse
-
-
-description = "Script to run rlite on jfed"
-epilog = "2017 H2020 ARCFIRE"
-
-argparser = argparse.ArgumentParser(description = description,
- epilog = epilog)
-argparser.add_argument('--user', type = str, default = 'vmaffio',
- help = "jFed username")
-argparser.add_argument('--cert', type = str,
- help = "Absolute path to certificate (.pem) file"
- " to be used with jFed",
- default = '/home/vmaffione/Downloads/vmaffio-jfed.pem')
-argparser.add_argument('--expname', type = str, default = 'pinocchio',
- help = "Name of the experiment within the jFed testbed")
-
-args = argparser.parse_args()
-
-log.set_logging_level('DEBUG')
-
-n1 = NormalDIF("n1")
-
-e1 = ShimEthDIF("e1")
-
-a = Node("a",
- difs = [n1, e1],
- dif_registrations = {n1 : [e1]})
-
-b = Node("b",
- difs = [e1, n1],
- dif_registrations = {n1 : [e1]})
-
-tb = jfed.Testbed(exp_name = args.expname,
- cert_file = args.cert,
- username = args.user)
-
-exp = rlite.Experiment(tb, nodes = [a, b])
-
-with ExperimentManager(exp):
- exp.swap_in()
- exp.install_prototype()
- exp.bootstrap_prototype()
diff --git a/examples/mouse.py b/examples/mouse.py
index d80da28..38be6e0 100755
--- a/examples/mouse.py
+++ b/examples/mouse.py
@@ -3,107 +3,106 @@
# An example script using the rumba package
from rumba.model import *
-from rumba.utils import ExperimentManager
+from rumba.utils import ExperimentManager, PROMPT_SWAPOUT
# import testbed plugins
-import rumba.testbeds.emulab as emulab
-import rumba.testbeds.jfed as jfed
import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import rumba.log as log
+from rumba.visualizer import draw_network, get_network_from_rumba_experiment
+
log.set_logging_level('DEBUG')
-n01 = NormalDIF("n01")
+n01 = UnicastLayer("n01")
-e01 = ShimEthDIF("e01")
-e02 = ShimEthDIF("e02")
-e03 = ShimEthDIF("e03")
-e04 = ShimEthDIF("e04")
-e05 = ShimEthDIF("e05")
-e06 = ShimEthDIF("e06")
-e07 = ShimEthDIF("e07")
-e08 = ShimEthDIF("e08")
-e09 = ShimEthDIF("e09")
-e10 = ShimEthDIF("e10")
-e11 = ShimEthDIF("e11")
-e12 = ShimEthDIF("e12")
-e13 = ShimEthDIF("e13")
-e14 = ShimEthDIF("e14")
-e15 = ShimEthDIF("e15")
-e16 = ShimEthDIF("e16")
-e17 = ShimEthDIF("e17")
+e01 = EthDixLayer("e01")
+e02 = EthDixLayer("e02")
+e03 = EthDixLayer("e03")
+e04 = EthDixLayer("e04")
+e05 = EthDixLayer("e05")
+e06 = EthDixLayer("e06")
+e07 = EthDixLayer("e07")
+e08 = EthDixLayer("e08")
+e09 = EthDixLayer("e09")
+e10 = EthDixLayer("e10")
+e11 = EthDixLayer("e11")
+e12 = EthDixLayer("e12")
+e13 = EthDixLayer("e13")
+e14 = EthDixLayer("e14")
+e15 = EthDixLayer("e15")
+e16 = EthDixLayer("e16")
+e17 = EthDixLayer("e17")
a = Node("a",
- difs = [n01, e01, e06, e13 ],
- dif_registrations = {n01 : [e01, e06, e13]})
+ layers = [n01, e01, e06, e13 ],
+ registrations = {n01 : [e01, e06, e13]})
b = Node("b",
- difs = [n01, e01, e02, e04],
- dif_registrations = {n01 : [e01, e02, e04]})
+ layers = [n01, e01, e02, e04],
+ registrations = {n01 : [e01, e02, e04]})
c = Node("c",
- difs = [n01, e02, e03],
- dif_registrations = {n01 : [e02, e03]})
+ layers = [n01, e02, e03],
+ registrations = {n01 : [e02, e03]})
d = Node("d",
- difs = [n01, e03, e04, e05],
- dif_registrations = {n01 : [e03, e04, e05]})
+ layers = [n01, e03, e04, e05],
+ registrations = {n01 : [e03, e04, e05]})
e = Node("e",
- difs = [n01, e05, e06, e07],
- dif_registrations = {n01 : [e05, e06, e07]})
+ layers = [n01, e05, e06, e07],
+ registrations = {n01 : [e05, e06, e07]})
f = Node("f",
- difs = [n01, e07, e08],
- dif_registrations = {n01 : [e07, e08]})
+ layers = [n01, e07, e08],
+ registrations = {n01 : [e07, e08]})
g = Node("g",
- difs = [n01, e08, e09, e14],
- dif_registrations = {n01 : [e08, e09, e14]})
+ layers = [n01, e08, e09, e14],
+ registrations = {n01 : [e08, e09, e14]})
h = Node("h",
- difs = [n01, e09, e10, e15],
- dif_registrations = {n01 : [e09, e10, e15]})
+ layers = [n01, e09, e10, e15],
+ registrations = {n01 : [e09, e10, e15]})
i = Node("i",
- difs = [n01, e10, e11, e16],
- dif_registrations = {n01 : [e10, e11, e16]})
+ layers = [n01, e10, e11, e16],
+ registrations = {n01 : [e10, e11, e16]})
j = Node("j",
- difs = [n01, e11, e12],
- dif_registrations = {n01 : [e11, e12]})
+ layers = [n01, e11, e12],
+ registrations = {n01 : [e11, e12]})
k = Node("k",
- difs = [n01, e12, e13],
- dif_registrations = {n01 : [e12, e13]})
+ layers = [n01, e12, e13],
+ registrations = {n01 : [e12, e13]})
l = Node("l",
- difs = [n01, e14, e15],
- dif_registrations = {n01 : [e14, e15]})
+ layers = [n01, e14, e15],
+ registrations = {n01 : [e14, e15]})
m = Node("m",
- difs = [n01, e16, e17],
- dif_registrations = {n01 : [e16, e17]})
+ layers = [n01, e16, e17],
+ registrations = {n01 : [e16, e17]})
n = Node("n",
- difs = [n01, e17],
- dif_registrations = {n01 : [e17]})
+ layers = [n01, e17],
+ registrations = {n01 : [e17]})
-tb = qemu.Testbed(exp_name = "mouse2")
+tb = local.Testbed(exp_name = "mouse2")
-exp = rl.Experiment(tb, nodes = [a, b, c, d, e, f, g, h, i, j, k, l, m, n])
+exp = our.Experiment(tb, nodes = [a, b, c, d, e, f, g, h, i, j, k, l, m, n])
print(exp)
-with ExperimentManager(exp):
+with ExperimentManager(exp, swap_out_strategy=PROMPT_SWAPOUT):
+ nw = get_network_from_rumba_experiment(exp)
+ draw_network(nw)
exp.swap_in()
exp.bootstrap_prototype()
diff --git a/examples/ouroboros-layer-example.py b/examples/ouroboros-layer-example.py
new file mode 100644
index 0000000..310a204
--- /dev/null
+++ b/examples/ouroboros-layer-example.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+"""
+Example using the Ouroboros-aligned layer-centric API.
+
+Shows the naming conventions: Layer, UnicastLayer, EthDixLayer, etc.
+"""
+
+from rumba.model import *
+from rumba.utils import ExperimentManager
+
+# import testbed plugins
+import rumba.testbeds.local as local
+
+# import prototype plugins
+import rumba.prototypes.ouroboros as our
+
+import rumba.log as log
+
+log.set_logging_level('DEBUG')
+
+# -----------------------------------------------------------------------
+# Layer-centric syntax
+# -----------------------------------------------------------------------
+
+# Define layers
+top = UnicastLayer("top")
+mid = UnicastLayer("mid")
+top.add_policy("rmt.pff", "lfa")
+
+# Ethernet layers model point-to-point links
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+e3 = EthDixLayer("e3")
+
+# Define nodes and their layer memberships + registrations
+a = Node("a",
+ layers=[top, mid, e1, e2],
+ registrations={mid: [e1, e2],
+ top: [mid]})
+
+b = Node("b",
+ layers=[mid, e1, e3],
+ registrations={mid: [e1, e3],
+ })
+
+c = Node("c",
+ layers=[top, mid, e2, e3],
+ registrations={mid: [e2, e3],
+ top: [mid]})
+
+# Create testbed and experiment
+tb = local.Testbed(exp_name="layer-example")
+exp = our.Experiment(tb, nodes=[a, b, c])
+
+print(exp)
+
+# Use properties from the API
+for layer in exp.layer_ordering:
+ print("Layer: %s (type=%s, is_eth=%s, is_shim=%s)" % (
+ layer.name, layer.layer_type.value, layer.is_eth, layer.is_shim))
+
+with ExperimentManager(exp):
+ exp.swap_in()
+ exp.install_prototype()
+ exp.bootstrap_prototype()
diff --git a/examples/rumba_example.py b/examples/rumba_example.py
new file mode 100755
index 0000000..2027ffa
--- /dev/null
+++ b/examples/rumba_example.py
@@ -0,0 +1,23 @@
+from rumba.model import Node, UnicastLayer, EthDixLayer
+from rumba.topologies import build_star
+
+# import testbed plugins
+import rumba.testbeds.local as local
+
+# import Ouroboros prototype plugin
+import rumba.prototypes.ouroboros as our
+
+__all__ = ["exp", "nodes"]
+
+n1 = UnicastLayer("n1")
+
+leaves, routerNode = build_star(
+ ["client1", "client2", "server"], n1, hub_name="router")
+clientNode1, clientNode2, serverNode = leaves
+
+nodes = ["client1", "client2", "router", "server"]
+
+tb = local.Testbed()
+exp = our.Experiment(tb,
+ nodes=[clientNode1, clientNode2,
+ routerNode, serverNode])
diff --git a/examples/scalingtime.py b/examples/scalingtime.py
index 920553c..9490b4f 100755
--- a/examples/scalingtime.py
+++ b/examples/scalingtime.py
@@ -1,23 +1,19 @@
-#!/usr/bin/env python
-
from rumba.model import *
from rumba.utils import *
# import testbed plugins
import rumba.testbeds.emulab as emulab
import rumba.testbeds.jfed as jfed
-import rumba.testbeds.qemu as qemu
import rumba.testbeds.local as local
import rumba.testbeds.dockertb as docker
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import argparse
+import json
-from rumba.visualizer import draw_experiment
+from rumba.visualizer import draw_network, get_network_from_rumba_experiment
description = "Script to create the demo from the Rumba paper"
@@ -41,29 +37,16 @@ argparser.add_argument('--swapout', type=SwapOutStrategy.from_string,
default=PROMPT_SWAPOUT,
choices=list(SwapOutStrategy),
help='What action to perform on swap-out')
-argparser.add_argument('--prototype', type=str, required=True,
- choices=['irati', 'ouroboros', 'rlite'],
- help='The kind of prototype plugin to use to run'
- ' the experiment.')
+argparser.add_argument('--visualize', action="store_true",
+ help='Draw a representation of the network')
+
subparsers = argparser.add_subparsers(dest='testbed')
emulab_p = subparsers.add_parser('emulab', help='Use emulab testbed')
jfed_p = subparsers.add_parser('jfed', help='Use jfed testbed')
-qemu_p = subparsers.add_parser('qemu', help='Use qemu testbed')
local_p = subparsers.add_parser('local', help='Use local testbed')
docker_p = subparsers.add_parser('docker', help='Use Docker testbed')
-qemu_p.add_argument('--bzimage', metavar='BZIMAGE', type=str,
- help='path to the bzImage file to use')
-qemu_p.add_argument('--initramfs', metavar='INITRAMFS', type=str,
- help='path to the initramfs file to use')
-qemu_p.add_argument('--use_vhost', action='store_true',
- default=False, help='Use vhost')
-qemu_p.add_argument('--qemu_logs_dir', metavar='QEMU_LOGS', type=str,
- default=None, help='path to the folder for qemu logs')
-qemu_p.add_argument('--username', type=str, help="Username")
-qemu_p.add_argument('--password', type=str, help="Password")
-
emulab_p.add_argument('--url', metavar='URL', type=str,
default="wall2.ilabt.iminds.be",
help='Url')
@@ -102,91 +85,90 @@ args = argparser.parse_args()
log.set_logging_level(args.verbosity)
-pi = NormalDIF("pi")
-pi.dt_strategy = 'minimal'
+pi = UnicastLayer("pi")
-c = NormalDIF("core")
+c = UnicastLayer("core")
nodes = []
core_nodes = []
for i in range(0, args.metro_networks):
- m = NormalDIF("m-" + str(i))
+ m = UnicastLayer("m-" + str(i))
e_prev = None
m1 = None
mn = None
for j in range(0, args.metro_nodes):
- if j is not (args.metro_nodes - 1):
- e = ShimEthDIF("e-" + str(i) + "-" + str(j))
+ if j != (args.metro_nodes - 1):
+ e = EthDixLayer("e-" + str(i) + "-" + str(j))
else:
e = None
if e_prev is None and e is not None:
node = Node("metro-" + str(i) + "-" + str(j),
- difs=[m, pi, e],
- dif_registrations={pi: [m], m: [e]})
+ layers=[m, pi, e],
+ registrations={pi: [m], m: [e]})
m1 = node
elif e_prev is not None and e is not None:
node = Node("metro-" + str(i) + "-" + str(j),
- difs=[m, pi, e, e_prev],
- dif_registrations={pi: [m], m: [e, e_prev]})
+ layers=[m, pi, e, e_prev],
+ registrations={pi: [m], m: [e, e_prev]})
else:
node = Node("metro-" + str(i) + "-" + str(j),
- difs=[m, pi, e_prev],
- dif_registrations={pi: [m], m: [e_prev]})
+ layers=[m, pi, e_prev],
+ registrations={pi: [m], m: [e_prev]})
mn = node
nodes.append(node)
e_prev = e
# Create nodes attached to metro
for k in range(0, args.end_nodes):
- ee = ShimEthDIF("e-" + str(i) + "-" + str(j) + "-" + str(k))
+ ee = EthDixLayer("e-" + str(i) + "-" + str(j) + "-" + str(k))
end_node = Node("end-" + str(i) + "-" + str(j) + "-" + str(k),
- difs=[pi, ee],
- dif_registrations={pi: [ee]})
- node.add_dif(ee)
- node.add_dif_registration(pi, ee)
+ layers=[pi, ee],
+ registrations={pi: [ee]})
+ node.add_layer(ee)
+ node.add_registration(pi, ee)
nodes.append(end_node)
- e = ShimEthDIF("e-" + str(i) + "-" + str(args.metro_nodes))
- m1.add_dif(e)
- m1.add_dif_registration(m, e)
- mn.add_dif(e)
- mn.add_dif_registration(m, e)
+ e = EthDixLayer("e-" + str(i) + "-" + str(args.metro_nodes))
+ m1.add_layer(e)
+ m1.add_registration(m, e)
+ mn.add_layer(e)
+ mn.add_registration(m, e)
# Add 2 links from metro to core
- e0 = ShimEthDIF("c-e-" + str(i) + "-0")
- e1 = ShimEthDIF("c-e-" + str(i) + "-1")
- e2 = ShimEthDIF("c-e-" + str(i) + "-2")
+ e0 = EthDixLayer("c-e-" + str(i) + "-0")
+ e1 = EthDixLayer("c-e-" + str(i) + "-1")
+ e2 = EthDixLayer("c-e-" + str(i) + "-2")
c1 = Node("c-" + str(i) + "-0",
- difs=[c, m, pi, e0, e2],
- dif_registrations={pi: [m, c], m: [e0], c: [e2]})
+ layers=[c, m, pi, e0, e2],
+ registrations={pi: [m, c], m: [e0], c: [e2]})
c2 = Node("c-" + str(i) + "-1",
- difs=[c, m, pi, e1, e2],
- dif_registrations={pi: [m, c], m: [e1], c: [e2]})
+ layers=[c, m, pi, e1, e2],
+ registrations={pi: [m, c], m: [e1], c: [e2]})
nodes.append(c1)
nodes.append(c2)
- m1.add_dif(e0)
- m1.add_dif_registration(m, e0)
- mn.add_dif(e1)
- mn.add_dif_registration(m, e1)
+ m1.add_layer(e0)
+ m1.add_registration(m, e0)
+ mn.add_layer(e1)
+ mn.add_registration(m, e1)
for x in range(0, len(core_nodes), 2):
- ce0 = ShimEthDIF("c-c-e-" + str(i) + "-" + str(x))
- ce1 = ShimEthDIF("c-c-e-" + str(i) + "-" + str(x + 1))
+ ce0 = EthDixLayer("c-c-e-" + str(i) + "-" + str(x))
+ ce1 = EthDixLayer("c-c-e-" + str(i) + "-" + str(x + 1))
- core_nodes[x].add_dif(ce0)
- core_nodes[x].add_dif_registration(c, ce0)
- core_nodes[x + 1].add_dif(ce1)
- core_nodes[x + 1].add_dif_registration(c, ce1)
+ core_nodes[x].add_layer(ce0)
+ core_nodes[x].add_registration(c, ce0)
+ core_nodes[x + 1].add_layer(ce1)
+ core_nodes[x + 1].add_registration(c, ce1)
- c1.add_dif(ce0)
- c1.add_dif_registration(c, ce0)
- c2.add_dif(ce1)
- c2.add_dif_registration(c, ce1)
+ c1.add_layer(ce0)
+ c1.add_registration(c, ce0)
+ c2.add_layer(ce1)
+ c2.add_registration(c, ce1)
core_nodes.append(c1)
core_nodes.append(c2)
@@ -203,12 +185,6 @@ elif args.testbed == 'jfed':
for a in jfed_p._actions
if a.dest != 'help'
and getattr(args, a.dest) is not None}
-elif args.testbed == 'qemu':
- test_class = qemu.Testbed
- testbed_args = {a.dest: getattr(args, a.dest)
- for a in qemu_p._actions
- if a.dest != 'help'
- and getattr(args, a.dest) is not None}
elif args.testbed == 'local':
test_class = local.Testbed
testbed_args = {a.dest: getattr(args, a.dest)
@@ -228,25 +204,18 @@ elif args.testbed is None:
testbed = test_class(**testbed_args)
-if args.prototype == 'irati':
- exp_class = irati.Experiment
-elif args.prototype == 'ouroboros':
- exp_class = our.Experiment
-elif args.prototype == 'rlite':
- exp_class = rl.Experiment
-else:
- print('Unexpected prototype!')
- print(argparser.format_help())
- exit(1)
+exp_class = our.Experiment
exp = exp_class(testbed, nodes=nodes)
if __name__ == "__main__":
- draw_experiment(exp)
+
+ if args.visualize:
+ nw = get_network_from_rumba_experiment(exp)
+ draw_network(nw)
with ExperimentManager(exp, swap_out_strategy=args.swapout) as expM:
exp.swap_in()
if not isinstance(testbed, docker.Testbed) \
- and not isinstance(testbed, qemu.Testbed) \
and not isinstance(testbed, local.Testbed):
exp.install_prototype()
exp.bootstrap_prototype()
diff --git a/examples/script-example.py b/examples/script-example.py
index 6cfaf42..e99ce78 100755
--- a/examples/script-example.py
+++ b/examples/script-example.py
@@ -6,59 +6,57 @@ from functools import partial
from rumba.storyboard import *
from rumba.model import *
import rumba.log as log
-import rumba.testbeds.qemu as qemu
-import rumba.prototypes.rlite as rl
+import rumba.testbeds.local as local
+import rumba.prototypes.ouroboros as our
from rumba.utils import ExperimentManager
import rumba.utils as utils
log.set_logging_level(log.DEBUG)
-n1 = NormalDIF("n1")
+n1 = UnicastLayer("n1")
n1.add_policy("rmt.pff", "lfa")
n1.add_policy("security-manager", "passwd")
-e1 = ShimEthDIF("e1")
+e1 = EthDixLayer("e1")
node_a = Node("node_a",
- difs=[n1, e1],
- dif_registrations={n1: [e1]})
+ layers=[n1, e1],
+ registrations={n1: [e1]})
node_b = Node("node_b",
- difs=[e1, n1],
- dif_registrations={n1: [e1]})
+ layers=[e1, n1],
+ registrations={n1: [e1]})
-tb = qemu.Testbed(exp_name="script_test",
- username="root",
- password="root")
+tb = local.Testbed(exp_name="script_test")
-exp = rl.Experiment(tb, nodes=[node_a, node_b])
+exp = our.Experiment(tb, nodes=[node_a, node_b])
client_a = Client(
- "rinaperf",
- options="-t perf -s 1000 -D <duration>",
+ "operf",
+ options="-d <duration> -s 1000",
shutdown="",
- c_id='rinaperf_a'
+ c_id='operf_a'
)
client_b = Client(
- "rinaperf",
- options="-t perf -s 1000 -D <duration> -z rinaperfb",
+ "operf",
+ options="-d <duration> -s 1000 -n operfb",
shutdown="",
- c_id='rinaperf_b'
+ c_id='operf_b'
)
client_c = Client(
- "rinaperf",
- options="-t perf -s 1000 -D <duration> -z rinaperfc",
+ "operf",
+ options="-d <duration> -s 1000 -n operfc",
shutdown="",
- c_id='rinaperf_c'
+ c_id='operf_c'
)
server_a = Server(
- "rinaperf",
+ "operf",
options="-l",
arrival_rate=1,
mean_duration=5,
@@ -67,8 +65,8 @@ server_a = Server(
)
server_b = Server(
- "rinaperf",
- options="-l -z rinaperfb",
+ "operf",
+ options="-l -n operfb",
arrival_rate=0.5,
mean_duration=10,
clients=[client_b],
@@ -76,8 +74,8 @@ server_b = Server(
)
server_c = Server(
- "rinaperf",
- options="-l -z rinaperfc",
+ "operf",
+ options="-l -n operfc",
arrival_rate=1.6,
mean_duration=3,
clients=[client_c],
diff --git a/examples/snake.py b/examples/snake.py
index 5426e28..ac5573f 100755
--- a/examples/snake.py
+++ b/examples/snake.py
@@ -3,19 +3,16 @@
# An example script using the rumba package
from rumba.model import *
-from rumba.utils import ExperimentManager
+from rumba.utils import *
+from rumba.storyboard import *
+from rumba.topologies import build_chain
# import testbed plugins
-import rumba.testbeds.emulab as emulab
import rumba.testbeds.jfed as jfed
-import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
import rumba.log as log
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import argparse
import sys
@@ -31,40 +28,29 @@ args = argparser.parse_args()
log.set_logging_level('DEBUG')
-n01 = NormalDIF("n01")
+n01 = UnicastLayer("n01")
if (args.nodes < 2):
print("Snake must be longer than 2 nodes")
sys.exit(-1)
-nodes = []
+nodes = build_chain(args.nodes, n01)
-shim_prev = None
-for i in range(0, args.nodes):
- if i is not (args.nodes - 1):
- shim = ShimEthDIF("e" + str(i))
- else:
- shim = None
+tb = jfed.Testbed(exp_name = "snake2",
+ cert_file = '/path/to/cert.pem',
+ authority = 'wall1.ilabt.iminds.be',
+ username = 'username',
+ exp_hours = '1',
+ proj_name='ouroborosrocks')
- if shim_prev == None and shim != None:
- node = Node("node" + str(i), difs = [n01, shim],
- dif_registrations = {n01 : [shim]})
- elif shim_prev != None and shim != None:
- node = Node("node" + str(i), difs = [n01, shim, shim_prev],
- dif_registrations = {n01 : [shim, shim_prev]})
- else:
- node = Node("node" + str(i), difs = [n01, shim_prev],
- dif_registrations = {n01 : [shim_prev]})
-
- shim_prev = shim
- nodes.append(node)
-
-tb = qemu.Testbed(exp_name = "snake")
-
-exp = rl.Experiment(tb, nodes = nodes)
+exp = our.Experiment(tb, nodes=nodes,
+ git_repo='https://codeberg.org/o7s/ouroboros',
+ git_branch='be',
+ build_options='-DCMAKE_BUILD_TYPE=Debug')
print(exp)
-with ExperimentManager(exp):
+with ExperimentManager(exp, swap_out_strategy=PAUSE_SWAPOUT):
exp.swap_in()
+ exp.install_prototype()
exp.bootstrap_prototype()
diff --git a/examples/square.py b/examples/square.py
new file mode 100755
index 0000000..ce5cc41
--- /dev/null
+++ b/examples/square.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# An example script using the rumba package
+
+from rumba.model import *
+from rumba.utils import ExperimentManager
+
+# import testbed plugins
+import rumba.testbeds.local as local
+
+# import prototype plugins
+import rumba.prototypes.ouroboros as our
+
+import rumba.log as log
+
+log.set_logging_level('DEBUG')
+
+
+n01 = UnicastLayer("n01")
+n01.add_policy("routing", "ecmp")
+
+e01 = EthDixLayer("e01")
+e02 = EthDixLayer("e02")
+e03 = EthDixLayer("e03")
+e04 = EthDixLayer("e04")
+
+
+a = Node("a",
+ layers = [n01, e01, e02],
+ registrations = {n01 : [e01, e02]})
+
+b = Node("b",
+ layers = [n01, e02, e03],
+ registrations = {n01 : [e02, e03]})
+
+c = Node("c",
+ layers = [n01, e03, e04],
+ registrations = {n01 : [e03, e04]})
+
+d = Node("d",
+ layers = [n01, e01, e04],
+ registrations = {n01 : [e01, e04]})
+
+tb = local.Testbed(exp_name = "square")
+
+exp = our.Experiment(tb, nodes = [a, b, c, d])
+
+print(exp)
+
+with ExperimentManager(exp, swap_out_strategy=PROMPT_SWAPOUT):
+ exp.swap_in()
+ exp.bootstrap_prototype()
diff --git a/examples/test.py b/examples/test.py
new file mode 100755
index 0000000..e17670a
--- /dev/null
+++ b/examples/test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# An example script using the rumba package
+
+from rumba.model import *
+from rumba.utils import ExperimentManager
+from rumba.storyboard import *
+
+
+# import testbed plugins
+import rumba.testbeds.jfed as jfed
+
+# import prototype plugins
+import rumba.prototypes.ouroboros as our
+
+import rumba.log as log
+
+log.set_logging_level('DEBUG')
+
+
+n1 = UnicastLayer("n1")
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+
+clientNode = Node("client",
+ layers = [e1, n1],
+ registrations = {n1 : [e1]})
+
+routerNode = Node("router",
+ layers = [e1, e2, n1],
+ registrations = {n1 : [e1, e2]})
+
+serverNode = Node("server",
+ layers = [e2, n1],
+ registrations = {n1 : [e2]})
+
+tb = jfed.Testbed(exp_name = 'example',
+ cert_file = '/path/to/cert.pem',
+ authority = 'wall1.ilabt.iminds.be',
+ username = 'username',
+ exp_hours = '5',
+ proj_name='ouroboros')
+
+exp = our.Experiment(tb, nodes=[clientNode, routerNode, serverNode],
+ git_repo="https://codeberg.org/o7s/ouroboros",
+ git_branch="be-fix")
+
+with ExperimentManager(exp, swap_out_strategy=PAUSE_SWAPOUT):
+ exp.swap_in()
+ exp.install_prototype()
+ exp.bootstrap_prototype()
+ sb = StoryBoard(experiment=exp, duration=15, servers=[])
+ sb.schedule_command(7.5, clientNode,
+ 'echo "7.5 secs in. We are at $(hostname)"')
+ sb.schedule_command(12, serverNode,
+ 'echo "12 secs in. We are at $(hostname)"')
+ sb.start()
diff --git a/examples/two-layers.py b/examples/two-layers.py
index 3f50037..5f6600c 100755
--- a/examples/two-layers.py
+++ b/examples/two-layers.py
@@ -6,15 +6,10 @@ from rumba.model import *
from rumba.storyboard import *
# import testbed plugins
-import rumba.testbeds.emulab as emulab
-import rumba.testbeds.jfed as jfed
import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import rumba.log as log
from rumba.utils import ExperimentManager
@@ -22,36 +17,34 @@ from rumba.utils import ExperimentManager
log.set_logging_level('DEBUG')
-n1 = NormalDIF("n1")
-n2 = NormalDIF("n2")
-n3 = NormalDIF("n3")
-n4 = NormalDIF("n4")
+n1 = UnicastLayer("n1")
+n2 = UnicastLayer("n2")
+n3 = UnicastLayer("n3")
+n4 = UnicastLayer("n4")
-e1 = ShimEthDIF("e1")
-e2 = ShimEthDIF("e2")
-e3 = ShimEthDIF("e3")
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+e3 = EthDixLayer("e3")
a = Node("a",
- difs = [n3, n4, n1, e1],
- dif_registrations = {n4: [n1], n3: [n1], n1 : [e1]})
+ layers = [n3, n4, n1, e1],
+ registrations = {n4: [n1], n3: [n1], n1 : [e1]})
b = Node("b",
- difs = [n1, e1, e2],
- dif_registrations = {n1 : [e1, e2]})
+ layers = [n1, e1, e2],
+ registrations = {n1 : [e1, e2]})
c = Node("c",
- difs = [n3, n4, n1, n2, e2, e3],
- dif_registrations = {n4: [n1], n3: [n1, n2], n1 : [e2], n2: [e3]})
+ layers = [n3, n4, n1, n2, e2, e3],
+ registrations = {n4: [n1], n3: [n1, n2], n1 : [e2], n2: [e3]})
d = Node("d",
- difs = [n3, n2, e3],
- dif_registrations = {n3: [n2], n2 : [e3]})
+ layers = [n3, n2, e3],
+ registrations = {n3: [n2], n2 : [e3]})
-tb = qemu.Testbed(exp_name = "twolayers",
- username = "root",
- password = "root")
+tb = local.Testbed(exp_name="twolayers")
-exp = rl.Experiment(tb, nodes = [a, b, c, d])
+exp = our.Experiment(tb, nodes = [a, b, c, d])
print(exp)
diff --git a/examples/vpn.py b/examples/vpn.py
index 6aa8db6..8aac916 100755
--- a/examples/vpn.py
+++ b/examples/vpn.py
@@ -6,46 +6,39 @@ from rumba.model import *
from rumba.utils import ExperimentManager
# import testbed plugins
-import rumba.testbeds.emulab as emulab
-import rumba.testbeds.jfed as jfed
import rumba.testbeds.local as local
-import rumba.testbeds.qemu as qemu
# import prototype plugins
import rumba.prototypes.ouroboros as our
-import rumba.prototypes.rlite as rl
-import rumba.prototypes.irati as irati
import rumba.log as log
log.set_logging_level('DEBUG')
-n1 = NormalDIF("n1")
-n2 = NormalDIF("n2")
-e1 = ShimEthDIF("e1")
-e2 = ShimEthDIF("e2")
-e3 = ShimEthDIF("e3")
+n1 = UnicastLayer("n1")
+n2 = UnicastLayer("n2")
+e1 = EthDixLayer("e1")
+e2 = EthDixLayer("e2")
+e3 = EthDixLayer("e3")
a = Node("a",
- difs = [e1, n1, n2],
- dif_registrations = {n1 : [e1], n2 : [n1]})
+ layers = [e1, n1, n2],
+ registrations = {n1 : [e1], n2 : [n1]})
b = Node("b",
- difs = [e1, e2, n1],
- dif_registrations = {n1 : [e1, e2]})
+ layers = [e1, e2, n1],
+ registrations = {n1 : [e1, e2]})
c = Node("c",
- difs = [e2, e3, n1],
- dif_registrations = {n1 : [e2, e3]})
+ layers = [e2, e3, n1],
+ registrations = {n1 : [e2, e3]})
d = Node("d",
- difs = [e3, n1, n2],
- dif_registrations = {n1 : [e3], n2 : [n1]})
+ layers = [e3, n1, n2],
+ registrations = {n1 : [e3], n2 : [n1]})
-tb = qemu.Testbed(exp_name = 'example1',
- username = 'root',
- password = 'root')
+tb = local.Testbed(exp_name="example1")
exp = our.Experiment(tb, nodes = [a, b, c, d])
diff --git a/rumba/_version.py b/rumba/_version.py
index 4fb93d0..f1e5b84 100644
--- a/rumba/_version.py
+++ b/rumba/_version.py
@@ -24,5 +24,5 @@
# Foundation, Inc., http://www.fsf.org/about/contact/.
#
-__version_info__ = (0, 9)
+__version_info__ = (0, 23, 0)
__version__ = '.'.join(map(str, __version_info__))
diff --git a/rumba/command.py b/rumba/command.py
new file mode 100644
index 0000000..0e15c4a
--- /dev/null
+++ b/rumba/command.py
@@ -0,0 +1,60 @@
+#
+# Rumba - Structured command results
+#
+# Copyright (C) 2017-2026 imec
+#
+# Dimitri Staessens <dimitri.staessens@ugent.be>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., http://www.fsf.org/about/contact/.
+#
+
+import dataclasses
+
+
+class CommandError(Exception):
+ """Raised when a command returns a non-zero exit code."""
+
+ def __init__(self, result):
+ self.result = result
+ msg = ("Command '%s' on node '%s' failed with exit code %d.\n"
+ "stdout: %s\nstderr: %s"
+ % (result.command, result.node, result.exit_code,
+ result.stdout, result.stderr))
+ super().__init__(msg)
+
+
+@dataclasses.dataclass
+class CommandResult:
+ """Structured result of a command execution."""
+ exit_code: int
+ stdout: str
+ stderr: str
+ command: str
+ node: str
+
+ @property
+ def succeeded(self):
+ """True if the command exited with code 0."""
+ return self.exit_code == 0
+
+ def check(self):
+ """Raise CommandError if exit code is non-zero.
+
+ :returns: self (for chaining).
+ :raises CommandError: If exit code is non-zero.
+ """
+ if not self.succeeded:
+ raise CommandError(self)
+ return self
diff --git a/rumba/elements/experimentation.py b/rumba/elements/experimentation.py
index efd7a3e..2872f7f 100644
--- a/rumba/elements/experimentation.py
+++ b/rumba/elements/experimentation.py
@@ -80,9 +80,9 @@ class Testbed(object):
self._swap_in(experiment)
- for dif in experiment.dif_ordering:
- if isinstance(dif, topology.ShimEthDIF):
- dif.link_quality.apply(dif)
+ for layer in experiment.layer_ordering:
+ if layer.is_eth:
+ layer.link_quality.apply(layer)
@abc.abstractmethod
def _swap_in(self, experiment):
@@ -149,14 +149,14 @@ class Experiment(object):
self.add_packages = add_packages
self.testbed = testbed
# the strategy employed for completing the enrollment phase in
- # the different DIFs
+ # the different Layers
self.enrollment_strategy = enrollment_strategy # 'full-mesh', 'manual'
# the strategy employed for setting up the data transfer
- # network in the DIFs after enrollment
+ # network in the Layers after enrollment
self.flows_strategy = flows_strategy
- self.dif_ordering = []
- self.enrollments = [] # a list of per-DIF lists of enrollments
- self.flows = [] # a list of per-DIF lists of flows
+ self.layer_ordering = []
+ self.enrollments = [] # a list of per-Layer lists of enrollments
+ self.flows = [] # a list of per-Layer lists of flows
if self.enrollment_strategy not in ['full-mesh', 'minimal', 'manual']:
raise Exception('Unknown enrollment strategy "%s"'
@@ -209,120 +209,120 @@ class Experiment(object):
self.nodes.remove(node)
self.generate()
- # Compute registration/enrollment order for DIFs
- def compute_dif_ordering(self):
- # Compute DIFs dependency graph, as both adjacency and incidence list.
- difsdeps_adj = dict()
- difsdeps_inc = dict()
+ # Compute registration/enrollment order for Layers
+ def compute_layer_ordering(self):
+ # Compute Layers dependency graph, as both adjacency and incidence list.
+ layerdeps_adj = dict()
+ layerdeps_inc = dict()
for node in self.nodes:
- for dif in node.difs:
- if dif not in difsdeps_adj:
- difsdeps_adj[dif] = set()
-
- for upper in node.dif_registrations:
- for lower in node.dif_registrations[upper]:
- if upper not in difsdeps_inc:
- difsdeps_inc[upper] = set()
- if lower not in difsdeps_inc:
- difsdeps_inc[lower] = set()
- if upper not in difsdeps_adj:
- difsdeps_adj[upper] = set()
- if lower not in difsdeps_adj:
- difsdeps_adj[lower] = set()
- difsdeps_inc[upper].add(lower)
- difsdeps_adj[lower].add(upper)
+ for layer in node.layers:
+ if layer not in layerdeps_adj:
+ layerdeps_adj[layer] = set()
+
+ for upper in node.registrations:
+ for lower in node.registrations[upper]:
+ if upper not in layerdeps_inc:
+ layerdeps_inc[upper] = set()
+ if lower not in layerdeps_inc:
+ layerdeps_inc[lower] = set()
+ if upper not in layerdeps_adj:
+ layerdeps_adj[upper] = set()
+ if lower not in layerdeps_adj:
+ layerdeps_adj[lower] = set()
+ layerdeps_inc[upper].add(lower)
+ layerdeps_adj[lower].add(upper)
# Kahn's algorithm below only needs per-node count of
# incident edges, so we compute these counts from the
# incidence list and drop the latter.
- difsdeps_inc_cnt = dict()
- for dif in difsdeps_inc:
- difsdeps_inc_cnt[dif] = len(difsdeps_inc[dif])
- del difsdeps_inc
+ layerdeps_inc_cnt = dict()
+ for layer in layerdeps_inc:
+ layerdeps_inc_cnt[layer] = len(layerdeps_inc[layer])
+ del layerdeps_inc
- # Init difsdeps_inc_cnt for those DIFs that do not
+ # Init layerdeps_inc_cnt for those Layers that do not
# act as lower IPCPs nor upper IPCPs for registration
# operations
for node in self.nodes:
- for dif in node.difs:
- if dif not in difsdeps_inc_cnt:
- difsdeps_inc_cnt[dif] = 0
+ for layer in node.layers:
+ if layer not in layerdeps_inc_cnt:
+ layerdeps_inc_cnt[layer] = 0
# Run Kahn's algorithm to compute topological
- # ordering on the DIFs graph.
+ # ordering on the Layers graph.
frontier = set()
- self.dif_ordering = []
- for dif in difsdeps_inc_cnt:
- if difsdeps_inc_cnt[dif] == 0:
- frontier.add(dif)
+ self.layer_ordering = []
+ for layer in layerdeps_inc_cnt:
+ if layerdeps_inc_cnt[layer] == 0:
+ frontier.add(layer)
while len(frontier):
cur = frontier.pop()
- self.dif_ordering.append(cur)
- for nxt in difsdeps_adj[cur]:
- difsdeps_inc_cnt[nxt] -= 1
- if difsdeps_inc_cnt[nxt] == 0:
+ self.layer_ordering.append(cur)
+ for nxt in layerdeps_adj[cur]:
+ layerdeps_inc_cnt[nxt] -= 1
+ if layerdeps_inc_cnt[nxt] == 0:
frontier.add(nxt)
- difsdeps_adj[cur] = set()
+ layerdeps_adj[cur] = set()
- circular_set = [dif for dif in difsdeps_inc_cnt
- if difsdeps_inc_cnt[dif] != 0]
+ circular_set = [layer for layer in layerdeps_inc_cnt
+ if layerdeps_inc_cnt[layer] != 0]
if len(circular_set):
- raise Exception("Fatal error: The specified DIFs topology"
+ raise Exception("Fatal error: The specified Layers topology"
"has one or more"
"circular dependencies, involving the following"
- " DIFs: %s" % circular_set)
+ " Layers: %s" % circular_set)
- logger.debug("DIF topological ordering: %s", self.dif_ordering)
+ logger.debug("Layer topological ordering: %s", self.layer_ordering)
- # Compute all the enrollments, to be called after compute_dif_ordering()
+ # Compute all the enrollments, to be called after compute_layer_ordering()
def compute_enrollments(self):
- dif_graphs = dict()
+ layer_graphs = dict()
self.enrollments = []
self.flows = []
- for dif in self.dif_ordering:
+ for layer in self.layer_ordering:
neighsets = dict()
- dif_graphs[dif] = dict()
+ layer_graphs[layer] = dict()
first = None
- # For each N-1-DIF supporting this DIF, compute the set of nodes
- # that share such N-1-DIF. This set will be called the 'neighset' of
- # the N-1-DIF for the current DIF.
+ # For each N-1-Layer supporting this Layer, compute the set of nodes
+ # that share such N-1-Layer. This set will be called the 'neighset' of
+ # the N-1-Layer for the current Layer.
for node in self.nodes:
- if dif in node.dif_registrations:
- dif_graphs[dif][node] = [] # init for later use
+ if layer in node.registrations:
+ layer_graphs[layer][node] = [] # init for later use
if first is None: # pick any node for later use
first = node
- for lower_dif in node.dif_registrations[dif]:
- if lower_dif not in neighsets:
- neighsets[lower_dif] = []
- neighsets[lower_dif].append(node)
+ for lower_layer in node.registrations[layer]:
+ if lower_layer not in neighsets:
+ neighsets[lower_layer] = []
+ neighsets[lower_layer].append(node)
# Build the graph, represented as adjacency list
- for lower_dif in neighsets:
+ for lower_layer in neighsets:
# Each neighset corresponds to a complete (sub)graph.
- for node1 in neighsets[lower_dif]:
- for node2 in neighsets[lower_dif]:
+ for node1 in neighsets[lower_layer]:
+ for node2 in neighsets[lower_layer]:
if node1 != node2:
- dif_graphs[dif][node1].append((node2, lower_dif))
+ layer_graphs[layer][node1].append((node2, lower_layer))
self.enrollments.append([])
self.flows.append([])
if first is None:
- # This is a shim DIF, nothing to do
+ # This is a shim Layer, nothing to do
continue
er = []
- for node in dif_graphs[dif]:
- for edge in dif_graphs[dif][node]:
+ for node in layer_graphs[layer]:
+ for edge in layer_graphs[layer][node]:
er.append("%s --[%s]--> %s" % (node.name,
edge[1].name,
edge[0].name))
- logger.debug("DIF graph for %s: %s", dif, ', '.join(er))
+ logger.debug("Layer graph for %s: %s", layer, ', '.join(er))
# To generate the list of minimal enrollments
# and minimal flows, we simulate it, using
@@ -332,39 +332,39 @@ class Experiment(object):
edges_covered = set()
while len(frontier):
cur = frontier.pop()
- for edge in dif_graphs[dif][cur]:
+ for edge in layer_graphs[layer][cur]:
if edge[0] not in enrolled:
enrolled.add(edge[0])
- enrollee = edge[0].get_ipcp_by_dif(dif)
+ enrollee = edge[0].get_ipcp_by_layer(layer)
assert(enrollee is not None)
- enroller = cur.get_ipcp_by_dif(dif)
+ enroller = cur.get_ipcp_by_layer(layer)
assert(enroller is not None)
edges_covered.add((enrollee, enroller))
- self.enrollments[-1].append({'dif': dif,
+ self.enrollments[-1].append({'layer': layer,
'enrollee': enrollee,
'enroller': enroller,
- 'lower_dif': edge[1]})
+ 'lower_layer': edge[1]})
self.flows[-1].append({'src': enrollee,
'dst': enroller})
frontier.add(edge[0])
- if len(dif.members) != len(enrolled):
- raise Exception("Disconnected DIF found: %s" % (dif,))
+ if len(layer.members) != len(enrolled):
+ raise Exception("Disconnected Layer found: %s" % (layer,))
# In case of a full mesh enrollment or dt flows
- for cur in dif_graphs[dif]:
- for edge in dif_graphs[dif][cur]:
+ for cur in layer_graphs[layer]:
+ for edge in layer_graphs[layer][cur]:
if cur.name < edge[0].name:
- enrollee = cur.get_ipcp_by_dif(dif)
+ enrollee = cur.get_ipcp_by_layer(layer)
assert(enrollee is not None)
- enroller = edge[0].get_ipcp_by_dif(dif)
+ enroller = edge[0].get_ipcp_by_layer(layer)
assert(enroller is not None)
if ((enrollee, enroller) not in edges_covered and
(enroller, enrollee) not in edges_covered):
if self.enrollment_strategy == 'full-mesh':
- self.enrollments[-1].append({'dif': dif,
+ self.enrollments[-1].append({'layer': layer,
'enrollee': enrollee,
'enroller': enroller,
- 'lower_dif': edge[1]})
+ 'lower_layer': edge[1]})
if self.flows_strategy == 'full-mesh':
self.flows[-1].append({'src': enrollee,
'dst': enroller})
@@ -380,11 +380,11 @@ class Experiment(object):
log_string = "Enrollments:\n"
for el in self.enrollments:
for e in el:
- log_string += (" [%s] %s --> %s through N-1-DIF %s\n"
- % (e['dif'],
+ log_string += (" [%s] %s --> %s through N-1-Layer %s\n"
+ % (e['layer'],
e['enrollee'].name,
e['enroller'].name,
- e['lower_dif']))
+ e['lower_layer']))
logger.debug(log_string)
log_string = "Flows:\n"
@@ -402,40 +402,41 @@ class Experiment(object):
node.ipcps = []
# We want also the node.ipcps list to be generated in
# topological ordering
- for dif in self.dif_ordering:
- if dif not in node.difs:
+ for layer in self.layer_ordering:
+ if layer not in node.layers:
continue
# Create an instance of the required IPCP class
- ipcp = dif.get_ipcp_class()(
- name='%s.%s' % (dif.name, node.name),
- node=node, dif=dif)
+ ipcp = layer.get_ipcp_class()(
+ name='%s.%s' % (layer.name, node.name),
+ node=node, layer=layer)
- if dif in node.dif_registrations:
- for lower in node.dif_registrations[dif]:
+ if layer in node.registrations:
+ for lower in node.registrations[layer]:
ipcp.registrations.append(lower)
node.ipcps.append(ipcp)
- dif.ipcps.append(ipcp)
+ layer.ipcps.append(ipcp)
def compute_bootstrappers(self):
for node in self.nodes:
for ipcp in node.ipcps:
- ipcp.dif_bootstrapper = True
+ ipcp.bootstrapper = True
for el in self.enrollments:
for e in el:
- if e['dif'] != ipcp.dif:
- # Skip this DIF
+ if e['layer'] != ipcp.layer:
+ # Skip this Layer
break
if e['enrollee'] == ipcp:
- ipcp.dif_bootstrapper = False
+ ipcp.bootstrapper = False
# Exit the loops
break
- if not ipcp.dif_bootstrapper:
+ if not ipcp.bootstrapper:
break
def dump_ssh_info(self):
f = open(os.path.join(tmp_dir, 'ssh_info'), 'w')
+
for node in self.nodes:
f.write("%s;%s;%s;%s;%s\n" % (node.name,
self.testbed.username,
@@ -444,11 +445,11 @@ class Experiment(object):
node.ssh_config.proxy_server))
f.close()
- # Examine the nodes and DIFs, compute the registration and enrollment
+ # Examine the nodes and Layers, compute the registration and enrollment
# order, the list of IPCPs to create, registrations, ...
def generate(self):
start = time.time()
- self.compute_dif_ordering()
+ self.compute_layer_ordering()
self.compute_ipcps()
self.compute_enrollments()
self.compute_bootstrappers()
@@ -509,8 +510,8 @@ class Experiment(object):
logger.info("Stopping metrics exporters took %.2f seconds", end - start)
@abc.abstractmethod
- def destroy_dif(self, dif):
- raise Exception('destroy_dif() method not implemented')
+ def destroy_layer(self, layer):
+ raise Exception('destroy_layer() method not implemented')
@abc.abstractmethod
def _install_prototype(self):
@@ -596,24 +597,8 @@ class Experiment(object):
node.execute_command('reboot', as_root=True)
@abc.abstractmethod
- def export_dif_bandwidth(self, filename, dif):
- raise Exception('Export DIF bandwidth method not implemented')
-
- def to_dms_yaml(self, filename):
- """
- Generate a YAML file of the experiment which can be fed to the
- ARCFIRE DMS.
-
- :param filename: The output YAML filename.
- """
- mode = 'w'
- with open(filename, mode) as f:
- for node in self.nodes:
- f.write("---\n")
- node.to_dms_yaml(f)
- f.write("...\n")
-
- logger.info('Generated DMS YAML file')
+ def export_layer_bandwidth(self, filename, layer):
+ raise Exception('Export Layer bandwidth method not implemented')
def export_connectivity_graph(self, filename):
"""
@@ -647,11 +632,11 @@ class Experiment(object):
return
try:
- for dif in self.dif_ordering:
- if isinstance(dif, topology.ShimEthDIF):
- edge = pydot.Edge(dif.members[0].name,
- dif.members[1].name,
- label=dif.name,
+ for layer in self.layer_ordering:
+ if layer.is_eth:
+ edge = pydot.Edge(layer.members[0].name,
+ layer.members[1].name,
+ label=layer.name,
color='black')
gvizg.add_edge(edge)
except Exception as e:
@@ -668,12 +653,12 @@ class Experiment(object):
logger.error('Warning: pydot module not installed, '
'cannot produce graph images')
- def export_dif_graph(self, filename, dif):
+ def export_layer_graph(self, filename, layer):
"""
- Generate a PDF of a DIF graph.
+ Generate a PDF of a Layer graph.
:param filename: The output PDF filename.
- :param dif: The DIF to export.
+ :param layer: The Layer to export.
"""
try:
import pydot
@@ -685,7 +670,7 @@ class Experiment(object):
i = 0
try:
- for node in dif.members:
+ for node in layer.members:
g_node = pydot.Node(node.name,
label=node.name,
style="filled",
@@ -704,7 +689,7 @@ class Experiment(object):
for enroll, dt in zip(self.enrollments,
self.flows):
for e in enroll:
- if e['dif'] is not dif:
+ if e['layer'] is not layer:
continue
edge = pydot.Edge(e['enrollee'].node.name,
@@ -713,7 +698,7 @@ class Experiment(object):
gvizg.add_edge(edge)
for e in dt:
- if e['src'].dif is not dif:
+ if e['src'].layer is not layer:
continue
edge = pydot.Edge(e['src'].node.name,
@@ -726,21 +711,30 @@ class Experiment(object):
try:
gvizg.write_pdf(filename)
- logger.info('Generated PDF of DIF graph')
+ logger.info('Generated PDF of Layer graph')
except Exception as e:
logger.error('Failed to write PDF: ' + str(e))
except:
logger.error('Warning: pydot module not installed, '
- 'cannot produce DIF graph images')
+ 'cannot produce Layer graph images')
class Executor:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
- def execute_command(self, node, command, as_root=False, time_out=3):
- # Execute command on a node
+ def execute_command(self, node, command, as_root=False, time_out=3,
+ check=True):
+ """Execute command on a node.
+
+ :param node: The node to execute on.
+ :param command: The command string.
+ :param as_root: Execute as root.
+ :param time_out: Timeout in seconds.
+ :param check: If True, raise CommandError on non-zero exit.
+ :returns: A CommandResult.
+ """
return
def execute_commands(self, node, commands, as_root=False, time_out=3):
diff --git a/rumba/elements/topology.py b/rumba/elements/topology.py
index 5a3ad17..1e2c1a6 100644
--- a/rumba/elements/topology.py
+++ b/rumba/elements/topology.py
@@ -1,13 +1,10 @@
#
-# A library to manage ARCFIRE experiments
+# Rumba - Topology Model
#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
+# Copyright (C) 2017-2026 imec
#
# Sander Vrijders <sander.vrijders@ugent.be>
# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -31,27 +28,50 @@ import rumba.log as log
logger = log.get_logger(__name__)
-class DIF(object):
+# ---------------------------------------------------------------------------
+# Layer types (aligned with Ouroboros IPCP types)
+# ---------------------------------------------------------------------------
+
+class LayerType(Enum):
+ """IPCP / layer types available in Ouroboros."""
+ LOCAL = 'local'
+ UNICAST = 'unicast'
+ BROADCAST = 'broadcast'
+ ETH_LLC = 'eth-llc'
+ ETH_DIX = 'eth-dix'
+ UDP4 = 'udp4'
+ UDP6 = 'udp6'
+
+
+# ---------------------------------------------------------------------------
+# Layer classes
+# ---------------------------------------------------------------------------
+
+class Layer(object):
"""
- Base class for DIFs.
+ Base class for network layers.
+
+ A Layer represents a network layer in an Ouroboros network.
"""
def get_e_id(self):
- return "DIF." + self.name
+ return "Layer." + self.name
- def __init__(self, name, members=None):
+ def __init__(self, name, layer_type=LayerType.UNICAST, members=None):
"""
- :param name: Name of the DIF.
- :param members: List of nodes that are members of the DIF.
+ :param name: Name of the layer.
+ :param layer_type: Type of the layer (LayerType enum).
+ :param members: List of nodes that are members of this layer.
"""
self.name = name
+ self.layer_type = layer_type
if members is None:
members = list()
self.members = members
self.ipcps = list()
def __repr__(self):
- s = "DIF %s" % self.name
+ s = "Layer %s (%s)" % (self.name, self.layer_type.value)
return s
def __hash__(self):
@@ -72,49 +92,79 @@ class DIF(object):
def get_ipcp_class(self):
return IPCP
+ @property
+ def is_eth(self):
+ return self.layer_type in (LayerType.ETH_DIX, LayerType.ETH_LLC)
-# Shim over UDP
-#
-class ShimUDPDIF(DIF):
+ @property
+ def is_udp(self):
+ return self.layer_type in (LayerType.UDP4, LayerType.UDP6)
+
+ @property
+ def is_shim(self):
+ return self.layer_type in (LayerType.ETH_DIX, LayerType.ETH_LLC,
+ LayerType.UDP4, LayerType.UDP6,
+ LayerType.LOCAL)
+
+
+# Shim over UDP (IPv4)
+class Udp4Layer(Layer):
"""
- Shim over UDP.
+ Layer using the UDP over IPv4 shim IPCP.
"""
def __init__(self, name, members=None):
"""
- :param name: Name of the DIF.
- :param members: List of members of the DIF.
+ :param name: Name of the layer.
+ :param members: List of members of the layer.
"""
- DIF.__init__(self, name, members)
+ Layer.__init__(self, name, LayerType.UDP4, members)
def get_ipcp_class(self):
- return ShimUDPIPCP
+ return UdpIPCP
-# Shim over Ethernet
-#
-# @link_speed [int] Speed of the Ethernet network, in Mbps
-#
-class ShimEthDIF(DIF):
+# Shim over UDP (IPv6)
+class Udp6Layer(Layer):
+ """
+ Layer using the UDP over IPv6 shim IPCP.
+ """
+ def __init__(self, name, members=None):
+ """
+ :param name: Name of the layer.
+ :param members: List of members of the layer.
+ """
+ Layer.__init__(self, name, LayerType.UDP6, members)
+
+ def get_ipcp_class(self):
+ return UdpIPCP
+
+
+# Ethernet layer (point-to-point link)
+class EthDixLayer(Layer):
"""
- Shim over Ethernet.
+ Layer using an Ethernet shim IPCP (eth-dix).
+ Models a point-to-point Ethernet link between two nodes.
"""
def get_e_id(self):
- return "ShimEthDIF." + self.name
+ return "EthDixLayer." + self.name
- def __init__(self, name, members=None, link_quality=None):
+ def __init__(self, name, members=None, link_quality=None,
+ layer_type=LayerType.ETH_DIX):
"""
- :param name: Name of the DIF.
- :param members: List of members of the DIF.
+ :param name: Name of the layer.
+ :param members: List of members (max 2 for point-to-point).
:param link_quality: Quality of the link.
+ :param layer_type: LayerType (ETH_DIX or ETH_LLC).
"""
- DIF.__init__(self, name, members)
- self._link_quality = link_quality if link_quality is not None else LinkQuality()
+ Layer.__init__(self, name, layer_type, members)
+ self._link_quality = link_quality if link_quality is not None \
+ else LinkQuality()
def get_ipcp_class(self):
- return ShimEthIPCP
+ return EthIPCP
def add_member(self, node):
- super(ShimEthDIF, self).add_member(node)
+ super(EthDixLayer, self).add_member(node)
if len(self.members) > 2:
raise Exception("More than 2 members in %s!" % self.name)
@@ -127,9 +177,7 @@ class ShimEthDIF(DIF):
if not _link_quality:
raise ValueError("Cannot set link_quality to None, use del "
"link_quality to reset")
-
self._link_quality = _link_quality
-
_link_quality.apply(self)
@link_quality.deleter
@@ -150,8 +198,7 @@ class ShimEthDIF(DIF):
:type jitter: :py:class:`int`
:param correlation: correlation in %
:type correlation: :py:class:`int`
- :param distribution: delay distribution, defaults to a Normal
- distribution
+ :param distribution: delay distribution, defaults to Normal
:type distribution: :py:class:`.Distribution`
"""
new_delay = Delay(delay, jitter, correlation, distribution)
@@ -186,8 +233,7 @@ class ShimEthDIF(DIF):
def set_quality(self, delay, loss, rate):
"""
- Configure the basic quality parameters of the
- underlying link.
+ Configure the basic quality parameters of the underlying link.
:param delay: the link delay, in ms
:type delay: :py:class:`int`
@@ -200,9 +246,26 @@ class ShimEthDIF(DIF):
self.link_quality = new_quality
-class NormalDIF(DIF):
+class EthLlcLayer(EthDixLayer):
"""
- Normal DIF.
+ Layer using an Ethernet shim IPCP (eth-llc).
+ Models a point-to-point Ethernet link between two nodes.
+ """
+ def get_e_id(self):
+ return "EthLlcLayer." + self.name
+
+ def __init__(self, name, members=None, link_quality=None):
+ """
+ :param name: Name of the layer.
+ :param members: List of members (max 2 for point-to-point).
+ :param link_quality: Quality of the link.
+ """
+ super().__init__(name, members, link_quality, LayerType.ETH_LLC)
+
+
+class UnicastLayer(Layer):
+ """
+ Unicast network layer.
"""
def __init__(self,
name,
@@ -210,13 +273,13 @@ class NormalDIF(DIF):
policy=None,
add_default_qos_cubes=True):
"""
- :param name: The name of the DIF.
+ :param name: The name of the layer.
:param members: The list of members.
- :param policy: Policies of the normal DIF.
- :param add_default_qos_cubes: should the prototype-dependant default
- QoS cubes be added to this DIF?
+ :param policy: Policies of the layer.
+ :param add_default_qos_cubes: should prototype-default QoS cubes
+ be added?
"""
- DIF.__init__(self, name, members)
+ Layer.__init__(self, name, LayerType.UNICAST, members)
if policy is None:
policy = Policy(self)
self.policy = policy
@@ -226,7 +289,7 @@ class NormalDIF(DIF):
def add_policy(self, comp, pol, **params):
"""
- Adds a policy to the DIF.
+ Adds a policy to the layer.
:param comp: Component name.
:param pol: Policy name
@@ -236,7 +299,7 @@ class NormalDIF(DIF):
def del_policy(self, comp=None, pol=None):
"""
- Deletes a policy from the DIF.
+ Deletes a policy from the layer.
:param comp: Component name.
:param pol: Policy name
@@ -245,9 +308,9 @@ class NormalDIF(DIF):
def show(self):
"""
- :return: A string representing the policies in the DIF.
+ :return: A string representing the policies in the layer.
"""
- s = DIF.__repr__(self)
+ s = Layer.__repr__(self)
for comp, pol_dict in self.policy.get_policies().items():
for pol, params in pol_dict.items():
s += "\n Component %s has policy %s with params %s" \
@@ -256,7 +319,7 @@ class NormalDIF(DIF):
def add_qos_cube(self, name, **kwargs):
"""
- Adds a QoS Cube to this DIF
+ Adds a QoS Cube to this layer.
:param name: the name to be assigned to the QoS cube
:type name: `str`
@@ -271,12 +334,11 @@ class NormalDIF(DIF):
def del_qos_cube(self, name, strict=True):
"""
- Deletes a QoS cube from this DIF
+ Deletes a QoS cube from this layer.
:param name: the name of the cube to delete
:type name: `str`
- :param strict: if no cube with the provided name exists,
- raise an exception if and only if `strict` is `True`
+    :param strict: if True, raise ValueError when no cube with this name exists
:type strict: `bool`
"""
for i, cube in enumerate(self.qos_cubes):
@@ -285,13 +347,41 @@ class NormalDIF(DIF):
break
else: # no match
if strict:
- raise ValueError("No cube with name %s found in dif %s"
+ raise ValueError("No cube with name %s found in layer %s"
% (name, self.name))
else:
return
self.qos_cubes.pop(index)
+class BroadcastLayer(Layer):
+ """
+ Broadcast network layer.
+ """
+ def __init__(self, name, members=None):
+ """
+ :param name: The name of the layer.
+ :param members: The list of members.
+ """
+ Layer.__init__(self, name, LayerType.BROADCAST, members)
+
+
+class LocalLayer(Layer):
+ """
+ Local layer (for local loopback testing).
+ """
+ def __init__(self, name, members=None):
+ """
+ :param name: The name of the layer.
+ :param members: The list of members.
+ """
+ Layer.__init__(self, name, LayerType.LOCAL, members)
+
+
+# ---------------------------------------------------------------------------
+# Link quality classes
+# ---------------------------------------------------------------------------
+
class Distribution(Enum):
"""
An enum holding different statistical distributions.
@@ -324,23 +414,17 @@ class Delay(object):
:type jitter: :py:class:`int`
:param correlation: correlation in %
:type correlation: :py:class:`int`
- :param distribution: delay distribution, defaults to a Normal
- distribution
+ :param distribution: delay distribution, defaults to Normal
:type distribution: :py:class:`.Distribution`
"""
-
if delay < 0:
raise ValueError("Delay needs to be at least 0")
-
if jitter and not jitter > 0:
raise ValueError("Jitter needs to be higher than 0")
-
if (not jitter) and correlation:
raise ValueError("Correlation requires a value for jitter")
-
if correlation and (correlation < 0 or correlation > 100):
raise ValueError("Correlation needs to be between 0 and 100")
-
self._delay = delay
self._jitter = jitter
self._correlation = correlation
@@ -364,16 +448,12 @@ class Delay(object):
def build_command(self):
opts = ["delay %ims" % self.delay]
-
if self.jitter:
opts.append("%ims" % self.jitter)
-
if self.correlation:
opts.append("%f%%" % self.correlation)
-
if self.distribution:
opts.append("distribution %s" % self.distribution.name.lower())
-
return " ".join(opts)
@@ -392,10 +472,8 @@ class Loss(object):
"""
if loss and (loss < 0 or loss > 100):
raise ValueError("Loss needs to be between 0 and 100")
-
if correlation and (correlation < 0 or correlation > 100):
raise ValueError("Correlation needs to be between 0 and 100")
-
self._loss = loss
self._correlation = correlation
@@ -409,10 +487,8 @@ class Loss(object):
def build_command(self):
opts = ["loss %f%%" % self.loss]
-
if self.correlation:
opts.append("%f%%" % self.correlation)
-
return " ".join(opts)
@@ -425,22 +501,13 @@ class LinkQuality(object):
@classmethod
def clone(cls, old_quality, delay=None, loss=None, rate=None):
"""
- Clone old_quality, updating it with the provided parameters
- if present.
-
- :param old_quality: A :py:class:`.LinkQuality` instance to
- use as a base
- :type old_quality: :py:class:`.LinkQuality`
- :param delay: Delay object holding delay configuration
- or number corresponding to delay in ms
- :type delay: :py:class:`.Delay` or :py:class:`int`
- :param loss: Loss object holding delay configuration or
- number corresponding to loss percentage
- :type loss: :py:class:`.Loss` or :py:class:`float`
- :param rate: The rate of the link in mbit
- :type rate: :py:class:`int`
+ Clone old_quality, updating with provided parameters.
+
+ :param old_quality: A :py:class:`.LinkQuality` instance
+ :param delay: Delay object or number (ms)
+ :param loss: Loss object or number (%)
+ :param rate: Rate in mbit
:return: a new :py:class:`.LinkQuality` instance.
- :rtype: :py:class:`.LinkQuality`
"""
if delay is None:
delay = old_quality.delay
@@ -454,19 +521,12 @@ class LinkQuality(object):
"""
Link quality configuration.
- :param delay: Delay object holding delay configuration
- or number corresponding to delay in ms
- :type delay: :py:class:`.Delay` or :py:class:`int`
- :param loss: Loss object holding delay configuration or
- number corresponding to loss percentage
- :type loss: :py:class:`.Loss` or :py:class:`float`
- :param rate: The rate of the link in mbit
- :type rate: :py:class:`int`
+ :param delay: Delay object or number (ms)
+ :param loss: Loss object or number (%)
+ :param rate: Rate in mbit
"""
-
if rate and not rate > 0:
raise ValueError("Rate needs to be higher than 0")
-
if isinstance(delay, int) or isinstance(delay, float):
delay = Delay(delay)
if isinstance(loss, int) or isinstance(loss, float):
@@ -496,15 +556,18 @@ class LinkQuality(object):
cmds.append("tc qdisc del dev %s root" % ipcp.ifname)
if self.rate:
- cmds.append("tc qdisc add dev %s root handle 1: htb default 1" \
- % ipcp.ifname)
- cmds.append("tc class add dev %s parent 1: classid 1:1 htb rate %imbit" \
- % (ipcp.ifname, self.rate))
+ cmds.append(
+ "tc qdisc add dev %s root handle 1: htb default 1"
+ % ipcp.ifname)
+ cmds.append(
+ "tc class add dev %s parent 1: classid 1:1 htb rate %imbit"
+ % (ipcp.ifname, self.rate))
qref = "parent 1:1"
if self.delay or self.loss:
- netem_cmd.append("tc qdisc add dev %s %s netem" \
- % (ipcp.ifname, qref))
+ netem_cmd.append(
+ "tc qdisc add dev %s %s netem"
+ % (ipcp.ifname, qref))
if self.delay:
netem_cmd.append(self.delay.build_command())
if self.loss:
@@ -513,34 +576,39 @@ class LinkQuality(object):
return cmds
- def apply(self, shim):
+ def apply(self, layer):
if not (self.delay or self.loss or self.rate):
- self.deactivate(shim)
+ self.deactivate(layer)
else:
- for ipcp in shim.ipcps:
+ for ipcp in layer.ipcps:
if not ipcp.ifname:
- logger.error("Could not apply LinkQuality to IPCP because "
- "the interface name is None")
+ logger.error(
+ "Could not apply LinkQuality to IPCP because "
+ "the interface name is None")
continue
-
- ipcp.node.execute_commands(self.build_commands(ipcp),
- as_root=True)
+ ipcp.node.execute_commands(
+ self.build_commands(ipcp), as_root=True)
LinkQuality._active.add(ipcp)
- def deactivate(self, shim):
- for ipcp in shim.ipcps:
+ def deactivate(self, layer):
+ for ipcp in layer.ipcps:
if ipcp not in LinkQuality._active:
continue
if not ipcp.ifname:
- logger.error("Could not remove LinkQuality from IPCP because "
- "the interface name is None")
+ logger.error(
+ "Could not remove LinkQuality from IPCP because "
+ "the interface name is None")
continue
-
- ipcp.node.execute_command("tc qdisc del dev %s root"
- % ipcp.ifname, as_root=True)
+ ipcp.node.execute_command(
+ "tc qdisc del dev %s root" % ipcp.ifname,
+ as_root=True)
LinkQuality._active.remove(ipcp)
+# ---------------------------------------------------------------------------
+# SSH configuration
+# ---------------------------------------------------------------------------
+
class SSHConfig(object):
def __init__(self, hostname, port=22, proxy_server=None):
self.username = None
@@ -558,31 +626,42 @@ class SSHConfig(object):
self.password = password
+# ---------------------------------------------------------------------------
+# Node
+# ---------------------------------------------------------------------------
+
class Node(object):
"""
- A node in the experiment.
+ A node (system) in the experiment.
+
+ Nodes declare their layer memberships and registrations::
+
+ a = Node("a",
+ layers=[n1, e1],
+ registrations={n1: [e1]})
"""
+
def get_e_id(self):
return "Node." + self.name
- def __init__(self, name, difs=None, dif_registrations=None,
+ def __init__(self, name, layers=None, registrations=None,
policies=None, machine_type=None):
"""
:param name: Name of the node.
- :param difs: A list of DIFs the node is in.
- :param dif_registrations: How the DIFs are stacked.
- :param policies: Policies of a DIF specific to the node.
- :param machine_type: Type of machine to use, physical or virtual.
+ :param layers: A list of layers the node is a member of.
+    :param registrations: Dict mapping each upper layer to the list of lower layers it registers with.
+ :param policies: Policies specific to this node.
+ :param machine_type: Type of machine (physical or virtual).
"""
self.name = name
- if difs is None:
- difs = list()
- self.difs = difs
- for dif in self.difs:
- dif.add_member(self)
- if dif_registrations is None:
- dif_registrations = dict()
- self.dif_registrations = dif_registrations
+ if layers is None:
+ layers = list()
+ self.layers = layers
+ for layer in self.layers:
+ layer.add_member(self)
+ if registrations is None:
+ registrations = dict()
+ self.registrations = registrations
self.machine_type = machine_type
self.ssh_config = SSHConfig(name)
self.ipcps = []
@@ -590,50 +669,49 @@ class Node(object):
self.has_tcpdump = False
if policies is None:
policies = dict()
- for dif in self.difs:
- if hasattr(dif, 'policy'): # check if the dif supports policies
- self.policies[dif] = policies.get(dif, Policy(dif, self))
+ for layer in self.layers:
+ if hasattr(layer, 'policy'):
+ self.policies[layer] = policies.get(layer, Policy(layer, self))
- self.executor = None # will be set by testbed on swap_in
- self.startup_command = None # will be set by prototype
+ self.executor = None # set by testbed on swap_in
+ self.startup_command = None # set by prototype
self._validate()
- def get_ipcp_by_dif(self, dif):
+ def get_ipcp_by_layer(self, layer):
"""
- :param dif: The DIF to get the IPCP of.
- :return: The IPCP of the node that is in the DIF.
+ :param layer: The layer to get the IPCP of.
+ :return: The IPCP of the node that is in the layer.
"""
for ipcp in self.ipcps:
- if ipcp.dif == dif:
+ if ipcp.layer == layer:
return ipcp
- def _undeclared_dif(self, dif):
- if dif not in self.difs:
+ def _undeclared_layer(self, layer):
+ if layer not in self.layers:
raise Exception("Invalid registration: node %s is not declared "
- "to be part of DIF %s" % (self.name, dif.name))
+ "to be part of layer %s"
+ % (self.name, layer.name))
def _validate(self):
- # Check that DIFs referenced in self.dif_registrations
- # are part of self.difs
- for upper in self.dif_registrations:
- self._undeclared_dif(upper)
- for lower in self.dif_registrations[upper]:
- self._undeclared_dif(lower)
+ for upper in self.registrations:
+ self._undeclared_layer(upper)
+ for lower in self.registrations[upper]:
+ self._undeclared_layer(lower)
def __repr__(self):
s = "Node " + self.name + ":\n"
- s += " DIFs: [ "
- s += " ".join([d.name for d in self.difs])
+ s += " Layers: [ "
+ s += " ".join([l.name for l in self.layers])
s += " ]\n"
- s += " DIF registrations: [ "
+ s += " Registrations: [ "
rl = []
- for upper in self.dif_registrations:
- difs = self.dif_registrations[upper]
+ for upper in self.registrations:
+ lowers = self.registrations[upper]
x = "%s => [" % upper.name
- x += " ".join([lower.name for lower in difs])
+ x += " ".join([lower.name for lower in lowers])
x += "]"
rl.append(x)
s += ", ".join(rl)
@@ -650,224 +728,189 @@ class Node(object):
def __neq__(self, other):
return not self == other
- def to_dms_yaml(self, buffer):
- buffer.write("node: %s\n" % (self.name))
- buffer.write("registrations:\n")
-
- for dif in self.dif_registrations:
- if isinstance(dif, NormalDIF):
- buffer.write(" - %s:\n" % (dif.name))
- for reg in self.dif_registrations[dif]:
- tp = "normal"
- nm = reg.name
- if isinstance(reg, ShimEthDIF):
- tp = "eth"
- for member in reg.members:
- if member.name is not self.name:
- nm = member.name
- buffer.write(" - %sdifid: %s, diftype: %s, "
- "diftype_number: 1%s\n" % ("{", nm, tp, "}"))
-
- def add_dif(self, dif):
- """
- Adds a DIF to the list.
-
- :param dif: Name of the DIF to add.
- """
- self.difs.append(dif)
- dif.add_member(self)
- if hasattr(dif, 'policy'):
- self.policies[dif] = Policy(dif, self)
+ def add_layer(self, layer):
+ """
+ Adds a layer to this node.
+
+ :param layer: The layer to add.
+ """
+ self.layers.append(layer)
+ layer.add_member(self)
+ if hasattr(layer, 'policy'):
+ self.policies[layer] = Policy(layer, self)
self._validate()
- def del_dif(self, dif):
+ def del_layer(self, layer):
"""
- Adds a DIF to the list.
+ Removes a layer from this node.
- :param dif: Name of the DIF to add.
+ :param layer: The layer to remove.
"""
- self.difs.remove(dif)
- dif.del_member(self)
+ self.layers.remove(layer)
+ layer.del_member(self)
try:
- del self.policies[dif]
+ del self.policies[layer]
except KeyError:
- # It was not in there, so nothing to do
pass
self._validate()
- def add_dif_registration(self, upper, lower):
+ def add_registration(self, upper, lower):
"""
- Adds a DIF registration.
+ Adds a layer registration.
- :param upper: Name of the DIF that is requesting IPC.
- :param lower: Name of the DIF providing IPC.
+ :param upper: The upper layer requesting IPC.
+ :param lower: The lower layer providing IPC.
"""
- self.dif_registrations[upper].append(lower)
+ if upper not in self.registrations:
+ self.registrations[upper] = []
+ self.registrations[upper].append(lower)
self._validate()
- def del_dif_registration(self, upper, lower):
+ def del_registration(self, upper, lower):
"""
- Removes a DIF registration.
+ Removes a layer registration.
- :param upper: Name of the DIF that is requesting IPC.
- :param lower: Name of the DIF providing IPC.
+ :param upper: The upper layer.
+ :param lower: The lower layer.
"""
- self.dif_registrations[upper].remove(lower)
+ self.registrations[upper].remove(lower)
self._validate()
- def add_policy(self, dif, component_name, policy_name, **parameters):
+ def add_policy(self, layer, component_name, policy_name, **parameters):
"""
Adds a policy.
- :param dif: The name of the DIF.
+ :param layer: The layer.
:param component_name: Name of the component.
:param policy_name: Name of the policy.
:param parameters: Parameters of the policy.
"""
- self.policies[dif].add_policy(component_name, policy_name, **parameters)
+ self.policies[layer].add_policy(
+ component_name, policy_name, **parameters)
- def del_policy(self, dif, component_name=None, policy_name=None):
+ def del_policy(self, layer, component_name=None, policy_name=None):
"""
- Removes a policy.
+ Removes a policy.
- :param dif: the dif to which the policy should be applied
+ :param layer: The layer.
:param component_name: Name of the component.
:param policy_name: Name of the policy.
"""
- self.policies[dif].del_policy(component_name, policy_name)
+ self.policies[layer].del_policy(component_name, policy_name)
- def get_policy(self, dif):
+ def get_policy(self, layer):
"""
- :param dif: The DIF to get the policy of.
+ :param layer: The layer to get the policy of.
:return: Returns the policy.
"""
- return self.policies[dif]
+ return self.policies[layer]
def execute_commands(self, commands, as_root=False, time_out=3,
use_proxy=False):
"""
- Execute a list of a commands on the node.
+ Execute a list of commands on the node.
:param commands: A list of commands.
:param as_root: Execute as root?
:param time_out: Seconds before timing out.
- :param use_proxy: Use a proxy to execute the commands?
+ :param use_proxy: Use a proxy?
"""
- return self.executor.execute_commands(self,
- commands,
- as_root,
- time_out)
+ return self.executor.execute_commands(
+ self, commands, as_root, time_out)
def execute_command(self, command, as_root=False, time_out=3,
- use_proxy=False):
+ use_proxy=False, check=True):
"""
- Execute a single command on a node.
+ Execute a single command on the node.
:param command: A command.
:param as_root: Execute as root?
:param time_out: Seconds before timing out.
- :param use_proxy: Use a proxy to execute the commands?
- :return: The stdout of the command.
+ :param use_proxy: Use a proxy?
+ :param check: If True, raise CommandError on non-zero exit.
+ :return: A CommandResult.
"""
- return self.executor.execute_command(self,
- command,
- as_root,
- time_out)
+ return self.executor.execute_command(
+ self, command, as_root, time_out, check=check)
def copy_file(self, path, destination):
- """
- Copy file to node.
-
- :param path: Local location of the file.
- :param destination: Destination location of the file.
- """
+ """Copy file to node."""
self.executor.copy_file(self, path, destination)
def copy_files(self, paths, destination):
- """
- Copy files to node.
-
- :param paths: Local location of the files.
- :param destination: Destination location of the files.
- """
+ """Copy files to node."""
self.executor.copy_files(self, paths, destination)
def fetch_file(self, path, destination, sudo=False):
- """
- Fetch file from the node.
-
- :param path: Location of the files on the node.
- :param destination: Destination location of the files.
- :param sudo: The file is owned by root on the node?
- """
+ """Fetch file from node."""
self.executor.fetch_file(self, path, destination, sudo)
def fetch_files(self, paths, destination, sudo=False):
- """
- Fetch files from the node.
-
- :param paths: Location of the files on the node.
- :param destination: Destination location of the files.
- :param sudo: The file is owned by root on the node?
- """
+ """Fetch files from node."""
self.executor.fetch_files(self, paths, destination, sudo)
- def set_link_state(self, dif, state):
+ def set_link_state(self, layer, state):
"""
Change the state of a link on the node.
- :param dif: The name of the shim Ethernet DIF.
+ :param layer: The Ethernet layer.
:param state: Up or down.
"""
- ipcp = self.get_ipcp_by_dif(dif)
- self.execute_command('ip link set dev ' + ipcp.ifname + ' ' + state,
- as_root=True)
+ ipcp = self.get_ipcp_by_layer(layer)
+ self.execute_command(
+ 'ip link set dev ' + ipcp.ifname + ' ' + state,
+ as_root=True)
+
+# ---------------------------------------------------------------------------
+# IPCP classes
+# ---------------------------------------------------------------------------
class IPCP(object):
- def __init__(self, name, node, dif):
+ def __init__(self, name, node, layer):
self.name = name
self.node = node
- self.dif = dif
+ self.layer = layer
self.registrations = []
-
- # Is this IPCP the first in its DIF, so that it does not need
- # to enroll to anyone ?
- self.dif_bootstrapper = False
+ self.bootstrapper = False
def __repr__(self):
- return "{IPCP=%s,DIF=%s,N-1-DIFs=(%s)%s}" % \
- (self.name, self.dif.name,
- ' '.join([dif.name for dif in self.registrations]),
- ',bootstrapper' if self.dif_bootstrapper else ''
- )
+ return "{IPCP=%s,Layer=%s,N-1-Layers=(%s)%s}" % \
+ (self.name, self.layer.name,
+ ' '.join([l.name for l in self.registrations]),
+ ',bootstrapper' if self.bootstrapper else '')
def __hash__(self):
- return hash((self.name, self.dif.name))
+ return hash((self.name, self.layer.name))
def __eq__(self, other):
return other is not None and self.name == other.name \
- and self.dif == other.dif
+ and self.layer == other.layer
def __neq__(self, other):
return not self == other
-class ShimEthIPCP(IPCP):
- def __init__(self, name, node, dif, ifname=None):
- IPCP.__init__(self, name, node, dif)
+class EthIPCP(IPCP):
+ def __init__(self, name, node, layer, ifname=None):
+ IPCP.__init__(self, name, node, layer)
self.ifname = ifname
-class ShimUDPIPCP(IPCP):
- def __init__(self, name, node, dif):
- IPCP.__init__(self, name, node, dif)
- # TODO: add IP and port
+class UdpIPCP(IPCP):
+ def __init__(self, name, node, layer, ip_addr=None, ifname=None):
+ IPCP.__init__(self, name, node, layer)
+ self.ip_addr = ip_addr or "0.0.0.0"
+ self.ifname = ifname
+
+# ---------------------------------------------------------------------------
+# Policy
+# ---------------------------------------------------------------------------
class Policy(object):
- def __init__(self, dif, node=None, policies=None):
- self.dif = dif # type: NormalDIF
+ def __init__(self, layer, node=None, policies=None):
+ self.layer = layer
self.node = node
if policies is None:
self._dict = dict()
@@ -875,11 +918,9 @@ class Policy(object):
self._dict = policies
def add_policy(self, component_name, policy_name, **parameters):
- self._dict.setdefault(component_name, dict())[policy_name] = parameters
+ self._dict.setdefault(
+ component_name, dict())[policy_name] = parameters
- #
- # Fetches effective policy info
- #
def get_policies(self, component_name=None, policy_name=None):
policy = self._superimpose()
if component_name is None:
@@ -897,42 +938,38 @@ class Policy(object):
else:
del self._dict[component_name][policy_name]
- #
- # Merges this policy into that of its dif, obtaining
- # the effective policy acting on self.node.
- #
def _superimpose(self):
if self.node is None:
return self
- other = self.dif.policy
+ other = self.layer.policy
base = dict(other._dict)
base.update(self._dict)
- return Policy(self.dif, self.node, base)
+ return Policy(self.layer, self.node, base)
def __eq__(self, other):
if not isinstance(other, Policy):
return False
- else:
- return other.dif == self.dif \
- and other.node == self.node \
- and other._dict == self._dict
+ return (other.layer == self.layer
+ and other.node == self.node
+ and other._dict == self._dict)
def __str__(self):
node_str = (" Node: " + self.node) if self.node is not None else ""
- return "Policy[Dif: %(dif)s,%(node_str)s Dict: %(dict)s]" \
- % {"dif": self.dif, "node_str": node_str, "dict": self._dict}
+ return "Policy[Layer: %(layer)s,%(node_str)s Dict: %(dict)s]" \
+ % {"layer": self.layer, "node_str": node_str,
+ "dict": self._dict}
def __repr__(self):
node_str = (" Node: " + self.node) if self.node is not None else ""
- s = "Policy[ Dif: %(dif)s,%(node_str)s" \
- % {"dif": self.dif, "node_str": node_str}
+ s = "Policy[ Layer: %(layer)s,%(node_str)s" \
+ % {"layer": self.layer, "node_str": node_str}
comps = []
for component in self._dict:
for policy in self._dict[component]:
- comps.append("\n Component %s has policy %s with params %s"
- % (component,
- policy,
- self._dict[component][policy]))
+ comps.append(
+ "\n Component %s has policy %s with params %s"
+ % (component, policy,
+ self._dict[component][policy]))
s += ",".join(comps)
s += "\n]\n"
return s
diff --git a/rumba/executors/docker.py b/rumba/executors/docker.py
index 8587878..4be9e9b 100644
--- a/rumba/executors/docker.py
+++ b/rumba/executors/docker.py
@@ -25,6 +25,7 @@
#
from rumba import model as mod
+from rumba.command import CommandResult
import tempfile
import tarfile
@@ -44,16 +45,25 @@ class DockerExecutor(mod.Executor):
self.testbed = testbed
self.running_containers = testbed.running_containers
- def execute_command(self, node, command, sudo=False, time_out=3):
+ def execute_command(self, node, command, sudo=False, time_out=3,
+ check=True):
logger.debug("%s >> %s" % (node.name, command))
c, o = self.running_containers[node.name].exec_run(["sh", "-c",
command])
- if c:
- raise DockerException('A remote command returned an error. '
- 'Output:\n\n\t' + o.decode("utf-8"))
- return o.decode("utf-8")
+ result = CommandResult(
+ exit_code=c,
+ stdout=o.decode("utf-8").rstrip(),
+ stderr="",
+ command=command,
+ node=node.name
+ )
+
+ if check:
+ result.check()
+
+ return result
def fetch_file(self, node, path, destination, as_root=False):
if not path.startswith("/"):
diff --git a/rumba/executors/local.py b/rumba/executors/local.py
index cab626d..00a0adf 100644
--- a/rumba/executors/local.py
+++ b/rumba/executors/local.py
@@ -25,6 +25,7 @@
#
from rumba import model as mod
+from rumba.command import CommandResult
import subprocess
from shutil import copy
@@ -38,17 +39,33 @@ class LocalExecutor(mod.Executor):
def __init__(self, testbed):
self.testbed = testbed
- def execute_command(self, node, cmd, sudo=False, time_out=3):
+ def execute_command(self, node, cmd, sudo=False, time_out=3,
+ check=True):
+ if sudo:
+ cmd = "sudo " + cmd
+ logger.debug("%s >> %s" % (node.name, cmd))
try:
- logger.debug("%s >> %s" % (node.name, cmd))
- output = subprocess.check_output(cmd,
- universal_newlines=True,
- shell=True)
- return output.rstrip()
- except subprocess.CalledProcessError as e:
- logger.error("Return code was " + str(e.returncode))
+ proc = subprocess.run(cmd, shell=True,
+ capture_output=True,
+ universal_newlines=True,
+ timeout=time_out)
+ except subprocess.TimeoutExpired:
+ logger.error("Command timed out after %ss: %s", time_out, cmd)
raise
+ result = CommandResult(
+ exit_code=proc.returncode,
+ stdout=proc.stdout.rstrip(),
+ stderr=proc.stderr.rstrip(),
+ command=cmd,
+ node=node.name
+ )
+
+ if check:
+ result.check()
+
+ return result
+
def fetch_file(self, node, path, destination, as_root=False):
copy(path, destination)
diff --git a/rumba/executors/ssh.py b/rumba/executors/ssh.py
index a53b978..b3bc366 100644
--- a/rumba/executors/ssh.py
+++ b/rumba/executors/ssh.py
@@ -33,8 +33,13 @@ class SSHExecutor(Executor):
def __init__(self, testbed):
self.testbed = testbed
- def execute_command(self, node, command, as_root=False, time_out=3):
- return self.execute_commands(node, [command], as_root, time_out)
+ def execute_command(self, node, command, as_root=False, time_out=3,
+ check=True):
+ if as_root and node.ssh_config.username != 'root':
+ command = "sudo %s" % command
+
+ return execute_command(self.testbed, node.ssh_config, command,
+ time_out, node_name=node.name, check=check)
def execute_commands(self, node, commands, as_root=False, time_out=3):
if as_root:
@@ -42,7 +47,7 @@ class SSHExecutor(Executor):
commands = list(map(lambda c: "sudo %s" % (c,), commands))
return execute_commands(self.testbed, node.ssh_config, commands,
- time_out)
+ time_out, node_name=node.name)
def fetch_file(self, node, path, destination, sudo=False):
copy_file_from_testbed(self.testbed, node.ssh_config, path,
diff --git a/rumba/irm_backend.py b/rumba/irm_backend.py
new file mode 100644
index 0000000..e9cc5c6
--- /dev/null
+++ b/rumba/irm_backend.py
@@ -0,0 +1,343 @@
+#
+# Rumba - IRM Backend Abstraction
+#
+# Copyright (C) 2017-2026 imec
+#
+# Dimitri Staessens <dimitri.staessens@ugent.be>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., http://www.fsf.org/about/contact/.
+#
+
+"""
+IRM backend abstraction for Ouroboros.
+
+Provides two implementations:
+- IrmPython: uses pyouroboros CFFI bindings (in-process, local testbed)
+- IrmCLI: generates irm CLI commands for SSH execution (remote testbeds)
+"""
+
+import abc
+import rumba.log as log
+from rumba.elements.topology import LayerType
+
+logger = log.get_logger(__name__)
+
+
+class IrmBackend(abc.ABC):
+ """
+ Abstract interface for IRM operations.
+
+ Maps to the pyouroboros irm API. Implementations execute the
+ operations either locally (via CFFI) or remotely (via CLI commands).
+ """
+
+ @abc.abstractmethod
+ def create_ipcp(self, node, name, layer_type):
+ """
+ Create an IPCP process.
+
+ :param node: The node to create the IPCP on.
+ :param name: Name for the IPCP.
+ :param layer_type: LayerType enum value.
+ :return: IPCP identifier (PID for local, name for remote).
+ """
+ pass
+
+ @abc.abstractmethod
+ def bootstrap_ipcp(self, node, name, layer_type, layer_name,
+ policies=None, eth_dev=None, ip_addr=None,
+ autobind=True):
+ """
+ Bootstrap an IPCP into a layer.
+
+ :param node: The node.
+ :param name: IPCP name.
+ :param layer_type: LayerType enum value.
+ :param layer_name: Name of the layer to bootstrap.
+ :param policies: Dict of component->policy mappings.
+ :param eth_dev: Ethernet device name (for eth-dix/eth-llc).
+ :param ip_addr: IP address (for UDP shim).
+ :param autobind: Automatically bind the IPCP name.
+ """
+ pass
+
+ @abc.abstractmethod
+ def enroll_ipcp(self, node, name, dst_name, autobind=True):
+ """
+ Enroll an IPCP into an existing layer.
+
+ :param node: The node.
+ :param name: IPCP name.
+ :param dst_name: Name of the destination IPCP to enroll with.
+ :param autobind: Automatically bind the IPCP name.
+ """
+ pass
+
+ @abc.abstractmethod
+ def connect_ipcp(self, node, name, dst_name):
+ """
+ Create a data transfer connection between IPCPs.
+
+ :param node: The node.
+ :param name: IPCP name.
+ :param dst_name: Destination IPCP name.
+ """
+ pass
+
+ @abc.abstractmethod
+ def reg_name(self, node, name, ipcp_names):
+ """
+ Register a name in N-1 layers.
+
+ :param node: The node.
+ :param name: The name to register.
+ :param ipcp_names: List of IPCP names to register with.
+ """
+ pass
+
+ @abc.abstractmethod
+ def bind_process(self, node, pid, name):
+ """
+ Bind a process to a name.
+
+ :param node: The node.
+ :param pid: Process ID.
+ :param name: Name to bind to.
+ """
+ pass
+
+ @abc.abstractmethod
+ def destroy_ipcp(self, node, name):
+ """
+ Destroy an IPCP.
+
+ :param node: The node.
+ :param name: IPCP name to destroy.
+ """
+ pass
+
+
+# -----------------------------------------------------------------------
+# Remote IRM: generates CLI commands for SSH execution
+# -----------------------------------------------------------------------
+
+# Map LayerType to Ouroboros irm CLI type strings
+_LAYER_TYPE_TO_CLI = {
+ LayerType.LOCAL: 'local',
+ LayerType.UNICAST: 'unicast',
+ LayerType.BROADCAST: 'broadcast',
+ LayerType.ETH_LLC: 'eth-llc',
+ LayerType.ETH_DIX: 'eth-dix',
+ LayerType.UDP4: 'udp4',
+ LayerType.UDP6: 'udp6',
+}
+
+
+class IrmCLI(IrmBackend):
+ """
+ IRM backend that generates ``irm`` CLI commands and executes them
+ on the node via its executor (SSH, Docker exec, etc.).
+ """
+
+ def create_ipcp(self, node, name, layer_type):
+ type_str = _LAYER_TYPE_TO_CLI[layer_type]
+ cmd = "irm i c n %s type %s" % (name, type_str)
+ node.execute_command(cmd, time_out=None)
+ return name # remote: we track by name
+
+ def bootstrap_ipcp(self, node, name, layer_type, layer_name,
+ policies=None, eth_dev=None, ip_addr=None,
+ autobind=True):
+ type_str = _LAYER_TYPE_TO_CLI[layer_type]
+ cmd = "irm i b n %s type %s" % (name, type_str)
+
+ if layer_type in (LayerType.ETH_DIX, LayerType.ETH_LLC):
+ if eth_dev:
+ cmd += " dev %s" % eth_dev
+
+ if layer_type in (LayerType.UDP4, LayerType.UDP6):
+ if ip_addr:
+ cmd += " ip %s" % ip_addr
+
+ if policies:
+ for comp, pol in policies.items():
+ cmd += " %s %s" % (comp, pol)
+
+ cmd += " layer %s" % layer_name
+
+ if autobind:
+ cmd += " autobind"
+
+ node.execute_command(cmd, time_out=None)
+
+ def enroll_ipcp(self, node, name, dst_name, autobind=True):
+ cmd = "irm i e n %s dst %s" % (name, dst_name)
+ if autobind:
+ cmd += " autobind"
+ node.execute_command(cmd, time_out=None)
+
+ def connect_ipcp(self, node, name, dst_name):
+ cmd = "irm i conn n %s dst %s" % (name, dst_name)
+ node.execute_command(cmd, time_out=None)
+
+ def reg_name(self, node, name, ipcp_names):
+ cmd = "irm n r %s" % name
+ for ipcp_name in ipcp_names:
+ cmd += " ipcp %s" % ipcp_name
+ node.execute_command(cmd, time_out=None)
+
+ def bind_process(self, node, pid, name):
+ cmd = "irm b process %s name %s" % (pid, name)
+ node.execute_command(cmd, time_out=None)
+
+ def destroy_ipcp(self, node, name):
+ cmd = "irm i d n %s" % name
+ node.execute_command(cmd, time_out=None)
+
+
+# -----------------------------------------------------------------------
+# Local IRM: uses pyouroboros CFFI bindings
+# -----------------------------------------------------------------------
+
+class IrmPython(IrmBackend):
+ """
+ IRM backend that uses pyouroboros CFFI bindings for direct
+ in-process IRM calls. Only works on the local machine where
+ the Ouroboros IRMd is running.
+ """
+
+ def __init__(self):
+ try:
+ from ouroboros import irm as _irm
+ from ouroboros import cli as _cli
+ self._irm = _irm
+ self._cli = _cli
+ logger.info("pyouroboros IRM backend loaded successfully")
+ except ImportError:
+ raise ImportError(
+ "pyouroboros is required for Python backend. "
+ "Install it from the pyouroboros repository.")
+
+ def _get_ipcp_type(self, layer_type):
+ """Convert LayerType to pyouroboros IpcpType."""
+ type_map = {
+ LayerType.LOCAL: self._irm.IpcpType.LOCAL,
+ LayerType.UNICAST: self._irm.IpcpType.UNICAST,
+ LayerType.BROADCAST: self._irm.IpcpType.BROADCAST,
+ LayerType.ETH_LLC: self._irm.IpcpType.ETH_LLC,
+ LayerType.ETH_DIX: self._irm.IpcpType.ETH_DIX,
+ LayerType.UDP4: self._irm.IpcpType.UDP4,
+ LayerType.UDP6: self._irm.IpcpType.UDP6,
+ }
+ return type_map[layer_type]
+
+ def create_ipcp(self, node, name, layer_type):
+ ipcp_type = self._get_ipcp_type(layer_type)
+ pid = self._cli.create_ipcp(name, ipcp_type)
+ logger.debug("Created IPCP %s (pid=%d, type=%s)",
+ name, pid, layer_type.value)
+ return pid
+
+ def bootstrap_ipcp(self, node, name, layer_type, layer_name,
+ policies=None, eth_dev=None, ip_addr=None,
+ autobind=True):
+ pid = self._cli.pid_of(name)
+ ipcp_type = self._get_ipcp_type(layer_type)
+ irm = self._irm
+
+ # Build config based on type
+ if layer_type == LayerType.UNICAST:
+ uc_kwargs = {}
+ dt_kwargs = {}
+ routing_kwargs = {}
+ ls_kwargs = {}
+
+ # Parse policies dict into pyouroboros config
+ if policies:
+ for comp, pol in policies.items():
+ if comp == 'rmt.pff':
+ pol_map = {
+ 'lfa': irm.LinkStatePolicy.LFA,
+ 'ecmp': irm.LinkStatePolicy.ECMP,
+ 'simple': irm.LinkStatePolicy.SIMPLE,
+ }
+ if pol in pol_map:
+ ls_kwargs['pol'] = pol_map[pol]
+
+ if ls_kwargs:
+ routing_kwargs['ls'] = irm.LinkStateConfig(**ls_kwargs)
+ if routing_kwargs:
+ dt_kwargs['routing'] = irm.RoutingConfig(**routing_kwargs)
+ if dt_kwargs:
+ uc_kwargs['dt'] = irm.DtConfig(**dt_kwargs)
+
+ conf = irm.IpcpConfig(
+ ipcp_type=ipcp_type,
+ layer_name=layer_name,
+ unicast=irm.UnicastConfig(**uc_kwargs))
+
+ elif layer_type in (LayerType.ETH_DIX, LayerType.ETH_LLC):
+ eth_conf = irm.EthConfig(dev=eth_dev or "")
+ conf = irm.IpcpConfig(
+ ipcp_type=ipcp_type,
+ layer_name=layer_name,
+ eth=eth_conf)
+
+ elif layer_type == LayerType.UDP4:
+ udp4_kwargs = {}
+ if ip_addr:
+ udp4_kwargs['ip_addr'] = ip_addr
+ conf = irm.IpcpConfig(
+ ipcp_type=ipcp_type,
+ layer_name=layer_name,
+ udp4=irm.Udp4Config(**udp4_kwargs))
+
+ elif layer_type == LayerType.UDP6:
+ udp6_kwargs = {}
+ if ip_addr:
+ udp6_kwargs['ip_addr'] = ip_addr
+ conf = irm.IpcpConfig(
+ ipcp_type=ipcp_type,
+ layer_name=layer_name,
+ udp6=irm.Udp6Config(**udp6_kwargs))
+
+ else:
+ conf = irm.IpcpConfig(
+ ipcp_type=ipcp_type,
+ layer_name=layer_name)
+
+ self._cli.bootstrap_ipcp(pid, conf, autobind=autobind)
+
+ logger.debug("Bootstrapped IPCP %s in layer %s", name, layer_name)
+
+ def enroll_ipcp(self, node, name, dst_name, autobind=True):
+ pid = self._cli.pid_of(name)
+ self._cli.enroll_ipcp(pid, dst_name, autobind=autobind)
+ logger.debug("Enrolled IPCP %s via %s", name, dst_name)
+
+ def connect_ipcp(self, node, name, dst_name):
+ pid = self._cli.pid_of(name)
+ self._cli.connect_ipcp(pid, dst_name)
+ logger.debug("Connected IPCP %s to %s", name, dst_name)
+
+ def reg_name(self, node, name, ipcp_names):
+ self._cli.reg_name(name, ipcps=ipcp_names)
+
+ def bind_process(self, node, pid, name):
+ self._cli.bind_process(int(pid), name)
+
+ def destroy_ipcp(self, node, name):
+ self._cli.destroy_ipcp(name)
+ logger.debug("Destroyed IPCP %s", name)
diff --git a/rumba/log.py b/rumba/log.py
index 17ad22e..0b847fc 100644
--- a/rumba/log.py
+++ b/rumba/log.py
@@ -24,9 +24,11 @@
# Foundation, Inc., http://www.fsf.org/about/contact/.
#
+import atexit
import logging
import logging.handlers
-import multiprocessing
+from logging.handlers import QueueHandler, QueueListener
+import queue
import sys
import time
@@ -41,133 +43,13 @@ CRITICAL = logging.CRITICAL
loggers_set = set()
+_setup_done = False
-mq = multiprocessing.Queue()
+mq = queue.Queue()
logging_listener = None
-try:
- from logging.handlers import QueueHandler
-except ImportError:
- # We are in python2 code
- class QueueHandler(logging.Handler):
- """
- This handler sends events to a queue. Typically, it would be used
- together with a multiprocessing Queue to centralise logging to file
- in one process (in a multi-process application), so as to avoid file
- write contention between processes.
-
- This code is new in Python 3.2, but this class can be copy pasted into
- user code for use with earlier Python versions.
- """
-
- # Copy-pasted as per above docstring from logging
-
- def __init__(self, queue):
- logging.Handler.__init__(self)
- self.queue = queue
-
- def enqueue(self, record):
- self.queue.put_nowait(record)
-
- def prepare(self, record):
- self.format(record)
- record.msg = record.message
- record.args = None
- record.exc_info = None
- return record
-
- def emit(self, record):
- try:
- self.enqueue(self.prepare(record))
- except Exception:
- self.handleError(record)
-
-try:
- from logging.handlers import QueueListener
-except ImportError:
- # We are in python2 code
- import threading
- try:
- import Queue
- except ImportError:
- # Make it pythonX with 3.0 <= X <3.2
- import queue as Queue
-
- class QueueListener(object):
- """
- This class implements an internal threaded listener which watches for
- LogRecords being added to a queue, removes them and passes them to a
- list of handlers for processing.
- """
-
- # Also copy-pasted
- _sentinel = None
-
- def __init__(self, queue, respect_handler_level=False, *handlers):
- self.queue = queue
- self.handlers = handlers
- self._stop = threading.Event()
- self._thread = None
- self.respect_handler_level = respect_handler_level
-
- def dequeue(self, block):
- return self.queue.get(block)
-
- def start(self):
- self._thread = t = threading.Thread(target=self._monitor)
- t.setDaemon(True)
- t.start()
-
- def prepare(self , record):
- return record
-
- def handle(self, record):
- record = self.prepare(record)
- for handler in self.handlers:
- if not self.respect_handler_level:
- process = True
- else:
- process = record.levelno >= handler.level
- if process:
- handler.handle(record)
-
- def _monitor(self):
- q = self.queue
- has_task_done = hasattr(q, 'task_done')
- while not self._stop.isSet():
- try:
- record = self.dequeue(True)
- if record is self._sentinel:
- break
- self.handle(record)
- if has_task_done:
- q.task_done()
- except Queue.Empty:
- pass
- # There might still be records in the queue.
- while True:
- try:
- record = self.dequeue(False)
- if record is self._sentinel:
- break
- self.handle(record)
- if has_task_done:
- q.task_done()
- except Queue.Empty:
- break
-
- def enqueue_sentinel(self):
- self.queue.put_nowait(self._sentinel)
-
- def stop(self):
- self._stop.set()
- self.enqueue_sentinel()
- self._thread.join()
- self._thread = None
-
-
class RumbaFormatter(logging.Formatter):
"""
The `logging.Formatter` subclass used by Rumba
@@ -202,12 +84,18 @@ class RumbaFormatter(logging.Formatter):
def setup():
"""Configures the logging framework with default values."""
- global mq
+ global mq, _setup_done
+ if _setup_done:
+ return
+ # Attach QueueHandler to root logger so messages flow through the queue
queue_handler = QueueHandler(mq)
queue_handler.setLevel(logging.DEBUG)
- logging.basicConfig(handlers=[queue_handler], level=logging.DEBUG)
- logging.getLogger('').setLevel(logging.ERROR)
+ root_logger = logging.getLogger('')
+ root_logger.addHandler(queue_handler)
+ root_logger.setLevel(logging.DEBUG)
+ # Configure rumba loggers to show INFO and above
logging.getLogger('rumba').setLevel(logging.INFO)
+ _setup_done = True
# Used for the first call, in order to configure logging
@@ -302,7 +190,8 @@ def flush_and_kill_logging():
this does not happen.
"""
global logging_listener
- logging_listener.stop()
+ if logging_listener is not None:
+ logging_listener.stop()
class LogOptions(object):
@@ -315,6 +204,8 @@ class LogOptions(object):
logging_listener = QueueListener(mq)
self.log_to_console()
logging_listener.start()
+ # Apply default level configuration (safe to call here: setup() is idempotent via _setup_done)
+ setup()
@staticmethod
def _get_handlers():
@@ -366,3 +257,4 @@ class LogOptions(object):
options = LogOptions() # module instance used for configuration
+atexit.register(flush_and_kill_logging)
diff --git a/rumba/model.py b/rumba/model.py
index 7306138..19ed8ff 100644
--- a/rumba/model.py
+++ b/rumba/model.py
@@ -27,15 +27,27 @@
import os
import stat
+from rumba.command import (
+ CommandResult,
+ CommandError,
+)
+
+from rumba.process import Process
+
from rumba.elements.topology import (
- DIF,
- ShimEthDIF,
- ShimUDPDIF,
- NormalDIF,
+ LayerType,
+ Layer,
+ EthDixLayer,
+ EthLlcLayer,
+ UnicastLayer,
+ BroadcastLayer,
+ LocalLayer,
+ Udp4Layer,
+ Udp6Layer,
IPCP,
- ShimEthIPCP,
- ShimUDPIPCP,
+ EthIPCP,
+ UdpIPCP,
Node,
SSHConfig,
@@ -43,7 +55,8 @@ from rumba.elements.topology import (
LinkQuality,
Delay,
Loss,
- Distribution
+ Distribution,
+ Policy,
)
from rumba.elements.experimentation import (
@@ -55,15 +68,24 @@ from rumba.elements.experimentation import (
__all__ = [
- # Topology
- "DIF",
- "ShimEthDIF",
- "ShimUDPDIF",
- "NormalDIF",
+ "CommandResult",
+ "CommandError",
+
+ "Process",
+
+ "LayerType",
+ "Layer",
+ "EthDixLayer",
+ "EthLlcLayer",
+ "UnicastLayer",
+ "BroadcastLayer",
+ "LocalLayer",
+ "Udp4Layer",
+ "Udp6Layer",
"IPCP",
- "ShimEthIPCP",
- "ShimUDPIPCP",
+ "EthIPCP",
+ "UdpIPCP",
"Node",
"SSHConfig",
@@ -72,15 +94,14 @@ __all__ = [
"Delay",
"Loss",
"Distribution",
+ "Policy",
- # Experimentation
"Experiment",
"Testbed",
"Executor",
"tmp_dir",
- # Other
- "cache_dir"
+ "cache_dir",
]
diff --git a/rumba/multiprocess.py b/rumba/multiprocess.py
index 508c9d9..56d0f72 100644
--- a/rumba/multiprocess.py
+++ b/rumba/multiprocess.py
@@ -24,16 +24,12 @@
# Foundation, Inc., http://www.fsf.org/about/contact/.
#
-import multiprocessing.dummy as multiprocessing
-import sys
+import contextlib
+import queue
+import threading
import rumba.log as log
-if sys.version_info[0] >= 3:
- import contextlib
-else:
- import contextlib2 as contextlib
-
logger = log.get_logger(__name__)
@@ -63,7 +59,6 @@ def call_in_parallel(name_list, argument_list, executor_list):
def job(executor, name, m_queue, argument):
try:
- # m_queue.cancel_join_thread()
logger.debug('Starting process "%s".'
% (name,))
executor(argument)
@@ -75,7 +70,7 @@ def call_in_parallel(name_list, argument_list, executor_list):
m_queue.put(e)
logger.debug('About to start spawning processes.')
- queue = multiprocessing.Queue()
+ result_queue = queue.Queue()
with contextlib.ExitStack() as stack:
# This is a composite context manager.
# After exiting the 'with' block, the __exit__ method of all
@@ -84,12 +79,12 @@ def call_in_parallel(name_list, argument_list, executor_list):
for i, e in enumerate(executor_list):
stack.enter_context(ProcessContextManager(
target=job,
- args=(e, name_list[i], queue, argument_list[i])
+ args=(e, name_list[i], result_queue, argument_list[i])
))
msg_to_be_read += 1
results = []
for _ in range(len(executor_list)):
- result = queue.get() # This blocks until results are available
+ result = result_queue.get() # This blocks until results are available
msg_to_be_read -= 1
results.append(result)
for result in results:
@@ -105,7 +100,7 @@ class ProcessContextManager(object):
args = ()
if kwargs is None:
kwargs = {}
- self.process = multiprocessing.Process(
+ self.process = threading.Thread(
target=target,
args=tuple(args),
kwargs=kwargs
@@ -118,14 +113,7 @@ class ProcessContextManager(object):
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_tb is not None or exc_val is not None or exc_tb is not None:
logger.error('Subprocess error: %s.' % (type(exc_val).__name__,))
- try:
- self.process.terminate()
- self.process.join()
- except AttributeError:
- # We are using multiprocessing.dummy, so no termination.
- # We trust the threads will die with the application
- # (since we are shutting down anyway)
- pass
+ self.process.join()
return False
else:
self.process.join()
diff --git a/rumba/process.py b/rumba/process.py
new file mode 100644
index 0000000..6f37433
--- /dev/null
+++ b/rumba/process.py
@@ -0,0 +1,118 @@
+#
+# Rumba - Process lifecycle management
+#
+# Copyright (C) 2017-2026 imec
+#
+# Dimitri Staessens <dimitri.staessens@ugent.be>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., http://www.fsf.org/about/contact/.
+#
+
+import time
+import signal
+
+import rumba.log as log
+
+logger = log.get_logger(__name__)
+
+
+class Process:
+ """Managed background process with lifecycle operations.
+
+ Wraps either a local subprocess.Popen handle or a remote PID
+ tracked via node.execute_command().
+ """
+
+ def __init__(self, node, pid, command, popen=None):
+ """
+ :param node: The node the process runs on (or None for local).
+ :param pid: The process ID.
+ :param command: The command that was started.
+ :param popen: A subprocess.Popen handle (local processes only).
+ """
+ self.node = node
+ self.pid = int(pid) if pid is not None else None
+ self.command = command
+ self._popen = popen
+
+ def is_alive(self):
+ """Check if the process is still running.
+
+ :returns: True if the process is alive.
+ """
+ if self._popen is not None:
+ return self._popen.poll() is None
+
+ if self.node is None or self.pid is None:
+ return False
+
+ result = self.node.execute_command(
+ 'kill -0 %d' % self.pid, as_root=True, check=False)
+ return result.succeeded
+
+ def wait(self, timeout=None):
+ """Wait for the process to exit.
+
+ :param timeout: Maximum seconds to wait (None = forever).
+ :returns: The exit code, or None if still running after timeout; for remote processes 0 only means the process has exited (real status unknown).
+ """
+ if self._popen is not None:
+ try:
+ return self._popen.wait(timeout=timeout)
+ except Exception:
+ return None
+
+ # For remote processes, poll until dead
+ deadline = None if timeout is None else time.time() + timeout
+ while True:
+ if not self.is_alive():
+ return 0 # Process is dead; remote exit status is unobservable, so 0 is returned by convention
+ if deadline is not None and time.time() >= deadline:
+ return None
+ time.sleep(0.5)
+
+ def kill(self, sig=signal.SIGTERM):
+ """Send a signal to the process.
+
+ :param sig: Signal number (default SIGTERM).
+ """
+ if self._popen is not None:
+ self._popen.send_signal(sig)
+ return
+
+ if self.node is None or self.pid is None:
+ return
+
+ self.node.execute_command(
+ 'kill -%d %d' % (sig, self.pid), as_root=True, check=False)
+
+ def output(self):
+ """Read stdout from the process (local Popen only).
+
+ :returns: stdout string, or None if not available.
+ """
+ if self._popen is not None:
+ try:
+ out, _ = self._popen.communicate(timeout=0)
+ return out
+ except Exception:
+ return None
+ return None
+
+ def __repr__(self):
+ loc = "local" if self._popen else (
+ self.node.name if self.node else "?")
+ return "Process(pid=%s, cmd='%s', on=%s)" % (
+ self.pid, self.command, loc)
diff --git a/rumba/prototypes/enroll.py b/rumba/prototypes/enroll.py
deleted file mode 100755
index d3501cc..0000000
--- a/rumba/prototypes/enroll.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python
-#
-# A library to manage ARCFIRE experiments
-#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
-#
-# Sander Vrijders <sander.vrijders@ugent.be>
-# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., http://www.fsf.org/about/contact/.
-#
-
-import argparse
-import socket
-import time
-import re
-import sys
-
-def printalo(byt):
- print(repr(byt).replace('\\n', '\n'))
-
-
-def get_response(s):
- data = bytes()
- while 1:
- data += s.recv(1024)
- lines = str(data).replace('\\n', '\n').split('\n')
- #print(lines)
- if lines[-1].find("IPCM") != -1:
- return lines[:len(lines)-1]
-
-
-description = "Python script to enroll IPCPs"
-epilog = "2016 Vincenzo Maffione <v.maffione@nextworks.it>"
-
-argparser = argparse.ArgumentParser(description = description,
- epilog = epilog)
-argparser.add_argument('--ipcm-conf', help = "Path to the IPCM configuration file",
- type = str, required = True)
-argparser.add_argument('--enrollee-name', help = "Name of the enrolling IPCP",
- type = str, required = True)
-argparser.add_argument('--dif', help = "Name of DIF to enroll to",
- type = str, required = True)
-argparser.add_argument('--lower-dif', help = "Name of the lower level DIF",
- type = str, required = True)
-argparser.add_argument('--enroller-name', help = "Name of the remote neighbor IPCP to enroll to",
- type = str, required = True)
-args = argparser.parse_args()
-
-socket_name = None
-
-fin = open(args.ipcm_conf, 'r')
-while 1:
- line = fin.readline()
- if line == '':
- break
-
- m = re.search(r'"(\S+ipcm-console.sock)', line)
- if m != None:
- socket_name = m.group(1)
- break
-fin.close()
-
-if socket_name == None:
- print('Cannot find %s' % (socket_name))
- quit(1)
-
-s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
-connected = False
-trials = 0
-while trials < 4:
- try:
- s.connect(socket_name)
- connected = True
- break
- except:
- pass
- trials += 1
- time.sleep(1)
-
-if connected:
- try:
- # Receive the banner
- get_response(s)
-
- # Send the IPCP list command
- cmd = u'list-ipcps\n'
- s.sendall(cmd.encode('ascii'))
-
- # Get the list of IPCPs and parse it to look for the enroller ID
- print('Looking up identifier for IPCP %s' % args.enrollee_name)
- lines = get_response(s)
- print(lines)
- enrollee_id = None
- for line in lines:
- rs = r'^\s*(\d+)\s*\|\s*' + args.enrollee_name.replace('.', '\\.')
- m = re.match(rs, line)
- if m != None:
- enrollee_id = m.group(1)
-
- if enrollee_id == None:
- print('Could not find the ID of enrollee IPCP %s' \
- % args.enrollee_name)
- raise Exception()
-
- # Send the enroll command
- cmd = u'enroll-to-dif %s %s %s %s 1\n' \
- % (enrollee_id, args.dif, args.lower_dif, args.enroller_name)
- print(cmd)
-
- s.sendall(cmd.encode('ascii'))
-
- # Get the enroll command answer
- lines = get_response(s)
- print(lines)
- except:
- s.close()
- raise
-
-else:
- print('Failed to connect to "%s"' % socket_name)
- sys.exit(-1)
-
-s.close()
diff --git a/rumba/prototypes/irati.py b/rumba/prototypes/irati.py
deleted file mode 100644
index 3860d14..0000000
--- a/rumba/prototypes/irati.py
+++ /dev/null
@@ -1,467 +0,0 @@
-#
-# A library to manage ARCFIRE experiments
-#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
-#
-# Sander Vrijders <sander.vrijders@ugent.be>
-# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., http://www.fsf.org/about/contact/.
-#
-
-import copy
-import json
-import os
-import time
-
-import rumba.ssh_support as ssh
-import rumba.model as mod
-import rumba.multiprocess as m_processing
-import rumba.prototypes.irati_templates as irati_templates
-import rumba.log as log
-
-logger = log.get_logger(__name__)
-
-
-class Experiment(mod.Experiment):
- """
- Represents an IRATI experiment.
- """
-
- @staticmethod
- def make_executor(node, packages, testbed):
- def executor(commands):
- ssh.aptitude_install(testbed, node, packages)
- node.execute_commands(commands, time_out=None, use_proxy=True)
- return executor
-
- def prototype_name(self):
- return 'irati'
-
- @staticmethod
- def real_sudo(s):
- return 'sudo ' + s
-
- @staticmethod
- def fake_sudo(s):
- return s
-
- def __init__(self, testbed,
- nodes=None,
- git_repo='https://github.com/IRATI/stack',
- git_branch='arcfire',
- installpath=None,
- varpath=None,
- app_mappings=None,
- enrollment_strategy='minimal'):
- """
- Initializes the experiment class.
-
- :param testbed: The testbed to run the experiment on.
- :param nodes: The list of nodes.
- :param git_repo: The git repository to use for installation.
- :param git_branch: The branch of the git repository to use.
- :param installpath: The installation path of IRATI.
- :param varpath: The /var path of IRATI.
- :param app_mappings: a list of application -> dif mapping containing
- all application which will register to
- any given dif.
- :type app_mappings: `List[(str, str)]`
- :param enrollment_strategy: Can be 'full-mesh', 'minimal' or 'manual'.
- """
- mod.Experiment.__init__(self,
- testbed,
- nodes,
- git_repo,
- git_branch,
- prototype_logs=['/tmp/ipcmstart.log'],
- enrollment_strategy=enrollment_strategy)
- if installpath is None:
- installpath = '/usr'
- if varpath is None:
- varpath = ''
- irati_templates.env_dict['installpath'] = installpath
- irati_templates.env_dict['varpath'] = varpath
- self.manager = False
- self.conf_files = None
- self.shim2vlan = {}
-
- if app_mappings is None:
- app_mappings = []
- self.app_mappings = app_mappings
-
- if self.testbed.username == 'root':
- self.sudo = self.fake_sudo
- else:
- self.sudo = self.real_sudo
-
- self._conf_dir = os.path.join(mod.tmp_dir, 'IRATI_conf')
- try:
- os.mkdir(self._conf_dir)
- except OSError:
- # Already there, nothing to do
- pass
-
- def conf_dir(self, path):
- return os.path.join(self._conf_dir, path)
-
- def install(self):
-
- packages = ["g++", "gcc", "libtool", "linux-headers-$(uname -r)",
- "autoconf", "automake", "protobuf-compiler",
- "libprotobuf-dev", "git", "pkg-config", "libssl-dev"]
-
- cmds = [self.sudo("rm -rf ~/stack"),
- "cd ~; git clone -b " + self.git_branch + " " + self.git_repo,
- "cd ~/stack && "
- + self.sudo("./configure && ") + self.sudo("make install")]
- names = []
- executors = []
- args = []
- for node in self.nodes:
-
- executor = self.make_executor(node, packages, self.testbed)
-
- names.append(node.name)
- executors.append(executor)
- args.append(cmds)
- m_processing.call_in_parallel(names, args, executors)
-
- def bootstrap_network(self):
- for node in self.nodes:
- self.process_node(node)
- self.enroll_nodes()
-
- def _install_prototype(self):
- logger.info("installing IRATI on all the nodes")
- self.install()
- logger.info("installation complete")
-
- def _bootstrap_prototype(self):
- logger.info("setting up")
- self.conf_files = self.write_conf()
- logger.info("configuration files generated for all nodes")
- self.bootstrap_network()
- logger.info("IPCPs created and enrolled on all nodes")
-
- def process_node(self, node):
- name = node.name
-
- vlans = []
- for ipcp in node.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
- vlans.append([ipcp.ifname, self.shim2vlan[ipcp.dif.name]])
-
- if vlans:
- ssh.setup_vlans(self.testbed, node, vlans)
-
- gen_files_conf = self.conf_files[node] + ['da.map']
- dir_path = os.path.dirname(os.path.abspath(__file__))
- gen_files_bin = 'enroll.py'
- gen_files_conf_full = [self.conf_dir(x) for x in gen_files_conf]
- gen_files_bin_full = [os.path.join(dir_path, 'enroll.py')]
-
- ipcm_components = ['scripting', 'console']
- if self.manager:
- ipcm_components.append('mad')
- ipcm_components = ', '.join(ipcm_components)
-
- gen_files = gen_files_conf_full + gen_files_bin_full
-
- format_args = {'name': name,
- 'ssh': node.ssh_config.port,
- 'username': self.testbed.username,
- 'genfiles': gen_files,
- 'genfilesconf': ' '.join(gen_files_conf),
- 'genfilesbin': gen_files_bin,
- 'verb': 'DBG',
- 'ipcmcomps': ipcm_components}
-
- logger.info('Copying configuration files to node %s', node.name)
- ssh.copy_files_to_testbed(self.testbed,
- node.ssh_config,
- gen_files,
- '')
-
- cmds = [self.sudo('hostname %(name)s' % format_args),
- self.sudo('modprobe rina-irati-core'),
- self.sudo('chmod a+rw /dev/irati'),
- # The || true should be removed soon, but it's needed
- # until we update the bitbucket repo.
- self.sudo('chmod a+rw /dev/irati-ctrl || true'),
- self.sudo('mv %(genfilesconf)s /etc' % format_args),
- self.sudo('mv %(genfilesbin)s /usr/bin') % format_args,
- self.sudo('chmod a+x /usr/bin/enroll.py') % format_args]
-
- cmds += [self.sudo('modprobe rina-default-plugin'),
- self.sudo('modprobe shim-eth-vlan'),
- self.sudo('modprobe normal-ipcp'),
- self.sudo('ipcm -a \"%(ipcmcomps)s\" '
- '-c /etc/%(name)s.ipcm.conf -l %(verb)s '
- '> /tmp/ipcmstart.log 2>&1 &'
- % format_args)]
-
- logger.info('Sending setup commands to node %s.', node.name)
- ssh.execute_commands(self.testbed, node.ssh_config, cmds)
-
- def enroll_nodes(self):
- logger.info("Starting enrollment phase.")
- time.sleep(5)
- for enrollment_list in self.enrollments:
- for e in enrollment_list:
- logger.info(
- 'Enrolling %s to DIF %s against neighbor %s,'
- ' through lower DIF %s.',
- e['enrollee'].name,
- e['dif'].name,
- e['enroller'].name,
- e['lower_dif'].name)
-
- time.sleep(1) # Important!
-
- e_args = {'ldif': self.dif_name(e['lower_dif']),
- 'dif': e['dif'].name,
- 'nname': e['enrollee'].node.name,
- 'iname': e['enrollee'].name,
- 'o_iname': e['enroller'].name}
-
- cmd = self.sudo('enroll.py --lower-dif %(ldif)s --dif %(dif)s '
- '--ipcm-conf /etc/%(nname)s.ipcm.conf '
- '--enrollee-name %(iname)s.IPCP '
- '--enroller-name %(o_iname)s.IPCP'
- % e_args)
- ssh.execute_command(self.testbed,
- e['enrollee'].node.ssh_config,
- cmd)
-
- def dif_name(self, dif):
- try:
- return str(self.shim2vlan[dif.name])
- except KeyError:
- return dif.name
-
- def write_conf(self):
- # Constants and initializations
- ipcmconfs = dict()
- difconfs = dict()
- ipcp2shim_map = {}
- node2id_map = {}
- mgmt_dif_name = 'NMS'
- conf_files = {} # dict of per-nod conf files
-
- # Translating Shim Eth difs to vlan tags.
- next_vlan = 10
- for dif in self.dif_ordering:
- if isinstance(dif, mod.ShimEthDIF):
- try:
- vlan = int(dif.name)
- self.shim2vlan[dif.name] = vlan
- except ValueError:
- vlan = next_vlan
- next_vlan += 10
- self.shim2vlan[dif.name] = vlan
-
- # If some app directives were specified, use those to build da.map.
- # Otherwise, assume the standard applications are to be mapped in
- # the DIF with the highest rank.
- app_mappings = self.app_mappings
- if len(app_mappings) == 0:
- if len(self.dif_ordering) > 0:
- for adm in \
- irati_templates.da_map_base["applicationToDIFMappings"]:
- adm["difName"] = "%s" % (self.dif_ordering[-1].name,)
- else: # not yet supported
- irati_templates.da_map_base["applicationToDIFMappings"] = []
- for app, dif in app_mappings:
- irati_templates.da_map_base["applicationToDIFMappings"]\
- .append({"encodedAppName": app,
- "difName": dif
- })
-
- if self.manager:
- # Add MAD/Manager configuration
- irati_templates.get_ipcmconf_base()["addons"] = {
- "mad": {
- "managerAppName": "",
- "NMSDIFs": [{"DIF": "%s" % mgmt_dif_name}],
- "managerConnections": [{
- "managerAppName": "manager-1--",
- "DIF": "%s" % mgmt_dif_name
- }]
- }
- }
-
- node_number = 1
- for node in self.nodes: # type: mod.Node
- node2id_map[node.name] = node_number
- node_number += 1
- ipcmconfs[node.name] = copy.deepcopy(irati_templates.get_ipcmconf_base())
- if self.manager:
- ipcmconfs[node.name]["addons"]["mad"]["managerAppName"] \
- = "%s.mad-1--" % (node.name,)
-
- for dif in self.dif_ordering: # type: mod.DIF
- if isinstance(dif, mod.ShimEthDIF):
- ipcp2shim_map.update({ipcp.name: dif for ipcp in dif.ipcps})
- elif isinstance(dif, mod.NormalDIF):
- difconfs[dif.name] = dict()
- # Generate base conf
- dif_conf = copy.deepcopy(irati_templates.normal_dif_base)
- # push qos_cubes
- if len(dif.qos_cubes) != 0:
- dif_conf["qosCubes"] = []
- for cube in dif.qos_cubes:
- dif_conf["qosCubes"].append(
- irati_templates.generate_qos_cube(**cube)
- )
- if dif.add_default_qos_cubes:
- # Add basic cubes
- unreliable = copy.deepcopy(irati_templates.qos_cube_u_base)
- unreliable["id"] = len(dif_conf["qosCubes"]) + 1
- dif_conf["qosCubes"].append(unreliable)
- reliable = copy.deepcopy(irati_templates.qos_cube_r_base)
- reliable["id"] = len(dif_conf["qosCubes"]) + 1
- dif_conf["qosCubes"].append(reliable)
-
- for node in dif.members:
- difconfs[dif.name][node.name] = copy.deepcopy(
- dif_conf
- )
-
- for node in self.nodes: # type: mod.Node
- ipcmconf = ipcmconfs[node.name]
-
- for ipcp in node.ipcps: # type: mod.ShimEthIPCP
- if isinstance(ipcp, mod.ShimEthIPCP):
- shim = ipcp2shim_map[ipcp.name] # type: mod.ShimEthDIF
- shim_name = self.dif_name(shim)
- ipcmconf["ipcProcessesToCreate"].append({
- "apName": "eth.%s.IPCP" % ipcp.name,
- "apInstance": "1",
- "difName": shim_name
- })
-
- template_file_name = self.conf_dir(
- 'shimeth.%s.%s.dif'
- % (node.name, shim_name))
- ipcmconf["difConfigurations"].append({
- "name": shim_name,
- "template": os.path.basename(template_file_name)
- })
-
- fout = open(template_file_name, 'w')
- fout.write(json.dumps(
- {"difType": "shim-eth-vlan",
- "configParameters": {
- "interface-name": ipcp.ifname
- }
- },
- indent=4, sort_keys=True))
- fout.close()
- conf_files.setdefault(node, []).append(
- 'shimeth.%s.%s.dif'
- % (node.name, shim_name))
-
- # Run over dif_ordering array, to make sure each IPCM config has
- # the correct ordering for the ipcProcessesToCreate list of operations.
- # If we iterated over the difs map, the order would be randomic, and so
- # some IPCP registrations in lower DIFs may fail.
- # This would happen because at the moment of registration,
- # it may be that the IPCP of the lower DIF has not been created yet.
- shims = ipcp2shim_map.values()
- for dif in self.dif_ordering: # type: mod.NormalDIF
-
- if dif in shims:
- # Shims are managed separately, in the previous loop
- continue
-
- for node in dif.members: # type: mod.Node
- node_name = node.name
- ipcmconf = ipcmconfs[node_name]
-
- normal_ipcp = {"apName": "%s.%s.IPCP" % (dif.name, node_name),
- "apInstance": "1",
- "difName": "%s" % (dif.name,),
- "difsToRegisterAt": []}
-
- for lower_dif in node.dif_registrations[dif]: # type: mod.DIF
- normal_ipcp["difsToRegisterAt"].append(
- self.dif_name(lower_dif))
-
- ipcmconf["ipcProcessesToCreate"].append(normal_ipcp)
-
- ipcmconf["difConfigurations"].append({
- "name": "%s" % (dif.name,),
- "template": "normal.%s.%s.dif" % (node_name, dif.name,)
- })
-
- # Fill in the map of IPCP addresses.
- # This could be moved at difconfs
- for other_node in dif.members: # type: mod.Node
- difconfs[dif.name][other_node.name] \
- ["knownIPCProcessAddresses"].append({
- "apName": "%s.%s.IPCP" % (dif.name, node_name),
- "apInstance": "1",
- "address": 16 + node2id_map[node_name]})
- policy_dict = node.get_policy(dif).get_policies()
- for component in policy_dict:
- for policy_name in policy_dict[component]:
- params = policy_dict[component][policy_name].items()
- irati_templates.translate_policy(
- difconfs[dif.name][node_name],
- component,
- policy_name,
- params
- )
-
- # Dump the DIF Allocator map
- with open(self.conf_dir('da.map'), 'w') as da_map_file:
- json.dump(irati_templates.da_map_base,
- da_map_file,
- indent=4,
- sort_keys=True)
-
- for node in self.nodes:
- # Dump the IPCM configuration files
- with open(self.conf_dir('%s.ipcm.conf'
- % (node.name,)), 'w') as node_file:
- json.dump(ipcmconfs[node.name],
- node_file,
- indent=4,
- sort_keys=True)
- conf_files.setdefault(node, []).append(
- '%s.ipcm.conf' % (node.name,))
-
- for dif in self.dif_ordering: # type: mod.DIF
- dif_conf = difconfs.get(dif.name, None)
- if dif_conf:
- # Dump the normal DIF configuration files
- for node in dif.members:
- with open(self.conf_dir('normal.%s.%s.dif'
- % (node.name, dif.name)), 'w') \
- as dif_conf_file:
- json.dump(dif_conf[node.name],
- dif_conf_file,
- indent=4,
- sort_keys=True)
- conf_files.setdefault(node, []).append(
- 'normal.%s.%s.dif' % (node.name, dif.name))
- return conf_files
-
- def _terminate_prototype(self):
- return
diff --git a/rumba/prototypes/irati_templates.py b/rumba/prototypes/irati_templates.py
deleted file mode 100644
index a6dd3c4..0000000
--- a/rumba/prototypes/irati_templates.py
+++ /dev/null
@@ -1,437 +0,0 @@
-#
-# A library to manage ARCFIRE experiments
-#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
-#
-# Sander Vrijders <sander.vrijders@ugent.be>
-# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., http://www.fsf.org/about/contact/.
-#
-
-# Environment setup for VMs. Standard linux approach
-env_dict = {'installpath': '/usr', 'varpath': ''}
-
-
-# Template for a IPCM configuration file
-def get_ipcmconf_base():
- return {
- "configFileVersion": "1.4.1",
- "localConfiguration": {
- "installationPath": "%(installpath)s/bin" % env_dict,
- "libraryPath": "%(installpath)s/lib" % env_dict,
- "logPath": "%(varpath)s/var/log" % env_dict,
- "consoleSocket": "%(varpath)s/var/run/ipcm-console.sock" % env_dict,
- "pluginsPaths": [
- "%(installpath)s/lib/rinad/ipcp" % env_dict,
- "/lib/modules/4.9.28-irati/extra"
- ]
- },
-
- "ipcProcessesToCreate": [],
- "difConfigurations": [],
-}
-
-
-da_map_base = {
- "applicationToDIFMappings": [
- {
- "encodedAppName": "rina.apps.echotime.server-1--",
- "difName": "n.DIF"
- },
- {
- "encodedAppName": "traffic.generator.server-1--",
- "difName": "n.DIF"
- }
- ],
-}
-
-
-def generate_qos_cube(
- name,
- cube_id,
- initial_credit=200,
- ordered=False,
- delay=None,
- loss=None,
- reliable=False,
- data_rxms_nmax=5,
- initial_rtx_time=1000
-):
- cube = {
- "name": name,
- "id": cube_id,
- "partialDelivery": False,
- "orderedDelivery": ordered,
- "efcpPolicies": {
- "dtpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "initialATimer": 0,
- "dtcpPresent": True,
- "dtcpConfiguration": {
- "dtcpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "rtxControl": False,
- "flowControl": True,
- "flowControlConfig": {
- "rateBased": False,
- "windowBased": True,
- "windowBasedConfig": {
- "maxClosedWindowQueueLength": 10,
- "initialCredit": initial_credit
- }
- }
- }
- }
- }
- if delay is not None:
- cube["delay"] = delay
- if loss is not None:
- cube["loss"] = loss
- if reliable:
- cube["maxAllowableGap"] = 0
- cube["efcpPolicies"]["dtcpConfiguration"]["rtxControl"] = True
- cube["efcpPolicies"]["dtcpConfiguration"]["rtxControlConfig"] = {
- "dataRxmsNmax": data_rxms_nmax,
- "initialRtxTime": initial_rtx_time
- }
- return cube
-
-
-qos_cube_u_base = {
- "name": "unreliablewithflowcontrol",
- "id": 1,
- "partialDelivery": False,
- "orderedDelivery": True,
- "efcpPolicies": {
- "dtpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "initialATimer": 0,
- "dtcpPresent": True,
- "dtcpConfiguration": {
- "dtcpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "rtxControl": False,
- "flowControl": True,
- "flowControlConfig": {
- "rateBased": False,
- "windowBased": True,
- "windowBasedConfig": {
- "maxClosedWindowQueueLength": 10,
- "initialCredit": 200
- }
- }
- }
- }
-}
-
-qos_cube_r_base = {
- "name": "reliablewithflowcontrol",
- "id": 2,
- "partialDelivery": False,
- "orderedDelivery": True,
- "maxAllowableGap": 0,
- "efcpPolicies": {
- "dtpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "initialATimer": 0,
- "dtcpPresent": True,
- "dtcpConfiguration": {
- "dtcpPolicySet": {
- "name": "default",
- "version": "0"
- },
- "rtxControl": True,
- "rtxControlConfig": {
- "dataRxmsNmax": 5,
- "initialRtxTime": 1000
- },
- "flowControl": True,
- "flowControlConfig": {
- "rateBased": False,
- "windowBased": True,
- "windowBasedConfig": {
- "maxClosedWindowQueueLength": 10,
- "initialCredit": 200
- }
- }
- }
- }
-}
-
-
-# Template for a normal DIF configuration file
-normal_dif_base = {
- "difType": "normal-ipc",
- "dataTransferConstants": {
- "addressLength": 2,
- "cepIdLength": 2,
- "lengthLength": 2,
- "portIdLength": 2,
- "qosIdLength": 2,
- "rateLength": 4,
- "frameLength": 4,
- "sequenceNumberLength": 4,
- "ctrlSequenceNumberLength": 4,
- "maxPduSize": 10000,
- "maxPduLifetime": 60000
- },
-
- "qosCubes": [
- qos_cube_u_base, qos_cube_r_base
- ],
-
- "knownIPCProcessAddresses": [],
-
- "addressPrefixes": [
- {
- "addressPrefix": 0,
- "organization": "N.Bourbaki"
- }, {
- "addressPrefix": 16,
- "organization": "IRATI"
- }
- ],
-
- "rmtConfiguration": {
- "pffConfiguration": {
- "policySet": {
- "name": "default",
- "version": "0"
- }
- },
- "policySet": {
- "name": "default",
- "version": "1"
- }
- },
-
- "enrollmentTaskConfiguration": {
- "policySet": {
- "name": "default",
- "version": "1",
- "parameters": [
- {
- "name": "enrollTimeoutInMs",
- "value": "10000"
- }, {
- "name": "watchdogPeriodInMs",
- "value": "30000"
- }, {
- "name": "declaredDeadIntervalInMs",
- "value": "120000"
- }, {
- "name": "neighborsEnrollerPeriodInMs",
- "value": "0"
- }, {
- "name": "maxEnrollmentRetries",
- "value": "0"
- }
- ]
- }
- },
-
- "flowAllocatorConfiguration": {
- "policySet": {
- "name": "default",
- "version": "1"
- }
- },
-
- "namespaceManagerConfiguration": {
- "policySet": {
- "name": "default",
- "version": "1"
- }
- },
-
- "securityManagerConfiguration": {
- "policySet": {
- "name": "default",
- "version": "1"
- }
- },
-
- "resourceAllocatorConfiguration": {
- "pduftgConfiguration": {
- "policySet": {
- "name": "default",
- "version": "0"
- }
- }
- },
-
- "routingConfiguration": {
- "policySet": {
- "name": "link-state",
- "version": "1",
- "parameters": [
- {
- "name": "objectMaximumAge",
- "value": "10000"
- }, {
- "name": "waitUntilReadCDAP",
- "value": "5001"
- }, {
- "name": "waitUntilError",
- "value": "5001"
- }, {
- "name": "waitUntilPDUFTComputation",
- "value": "103"
- }, {
- "name": "waitUntilFSODBPropagation",
- "value": "101"
- }, {
- "name": "waitUntilAgeIncrement",
- "value": "997"
- }, {
- "name": "routingAlgorithm",
- "value": "Dijkstra"
- }
- ]
- }
- }
-}
-
-
-def ps_set(d, k, v, parms):
- if k not in d:
- d[k] = {'name': '', 'version': '1'}
-
- if d[k]["name"] == v and "parameters" in d[k]:
- cur_names = [p["name"] for p in d[k]["parameters"]]
- for p in parms:
- name, value = p
- if name in cur_names:
- for i in range(len(d[k]["parameters"])):
- if d[k]["parameters"][i]["name"] == name:
- d[k]["parameters"][i]["value"] = value
- break
- else:
- d[k]["parameters"].append({'name': name, 'value': value})
-
- elif len(parms) > 0:
- d[k]["parameters"] = [
- {'name': p[0], 'value': p[1]}
- for p in parms]
-
- d[k]["name"] = v
-
-
-def dtp_ps_set(d, v, parms):
- for i in range(len(d["qosCubes"])):
- ps_set(d["qosCubes"][i]["efcpPolicies"], "dtpPolicySet", v, parms)
-
-
-def dtcp_ps_set(d, v, parms):
- for i in range(len(d["qosCubes"])):
- ps_set(d["qosCubes"][i]["efcpPolicies"]["dtcpConfiguration"],
- "dtcpPolicySet", v, parms)
-
-
-policy_translator = {
- 'rmt.pff': lambda d, v, p: ps_set(d["rmtConfiguration"]["pffConfiguration"],
- "policySet", v, p),
- 'rmt': lambda d, v, p: ps_set(d["rmtConfiguration"], "policySet", v, p),
- 'enrollment-task': lambda d, v, p: ps_set(d["enrollmentTaskConfiguration"],
- "policySet", v, p),
- 'flow-allocator': lambda d, v, p: ps_set(d["flowAllocatorConfiguration"],
- "policySet", v, p),
- 'namespace-manager': lambda d, v, p: ps_set(
- d["namespaceManagerConfiguration"], "policySet", v, p),
- 'security-manager': lambda d, v, p: ps_set(
- d["securityManagerConfiguration"], "policySet", v, p),
- 'routing': lambda d, v, p: ps_set(
- d["routingConfiguration"], "policySet", v, p),
- 'resource-allocator.pduftg': lambda d, v, p: ps_set(
- d["resourceAllocatorConfiguration"]["pduftgConfiguration"], "policySet", v, p),
- 'efcp.*.dtcp': None,
- 'efcp.*.dtp': None,
-}
-
-
-def is_security_path(path):
- sp = path.split('.')
- return (len(sp) == 3) and (sp[0] == 'security-manager') \
- and (sp[1] in ['auth', 'encrypt', 'ttl', 'errorcheck'])
-
-
-# Do we know this path ?
-def policy_path_valid(path):
- if path in policy_translator:
- return True
-
- # Try to validate security configuration
- if is_security_path(path):
- return True
-
- return False
-
-
-def translate_security_path(d, path, ps, parms):
- u1, component, profile = path.split('.')
- if "authSDUProtProfiles" not in d["securityManagerConfiguration"]:
- d["securityManagerConfiguration"]["authSDUProtProfiles"] = {}
- d = d["securityManagerConfiguration"]["authSDUProtProfiles"]
-
- tr = {'auth': 'authPolicy', 'encrypt': 'encryptPolicy',
- 'ttl': 'TTLPolicy', 'errorcheck': 'ErrorCheckPolicy'}
-
- if profile == 'default':
- if profile not in d:
- d["default"] = {}
-
- ps_set(d["default"], tr[component], ps, parms)
-
- else: # profile is the name of a DIF
- if "specific" not in d:
- d["specific"] = []
- j = -1
- for i in range(len(d["specific"])):
- if d["specific"][i]["underlyingDIF"] == profile + ".DIF":
- j = i
- break
-
- if j == -1: # We need to create an entry for the new DIF
- d["specific"].append({"underlyingDIF": profile + ".DIF"})
-
- ps_set(d["specific"][j], tr[component], ps, parms)
-
-
-def translate_policy(difconf, path, ps, parms):
- if path == 'efcp.*.dtcp':
- dtcp_ps_set(difconf, ps, parms)
-
- elif path == 'efcp.*.dtp':
- dtp_ps_set(difconf, ps, parms)
-
- elif is_security_path(path):
- translate_security_path(difconf, path, ps, parms)
-
- else:
- policy_translator[path](difconf, ps, parms)
diff --git a/rumba/prototypes/ouroboros.py b/rumba/prototypes/ouroboros.py
index 9cdd3c9..ce05181 100644
--- a/rumba/prototypes/ouroboros.py
+++ b/rumba/prototypes/ouroboros.py
@@ -33,8 +33,12 @@ import rumba.model as mod
import rumba.multiprocess as m_processing
import rumba.log as log
import rumba.testbeds.local as local
+import rumba.testbeds.localnet as localnet
import rumba.testbeds.dockertb as docker
import rumba.storyboard as sb
+from rumba.elements.topology import LayerType
+from rumba.irm_backend import IrmPython, IrmCLI
+from rumba.process import Process
logger = log.get_logger(__name__)
@@ -53,7 +57,7 @@ class OurServer(sb.Server):
server.min_duration,
server.id,
server.as_root,
- server.difs
+ server.layers
)
def _make_run_cmd(self, node):
@@ -64,12 +68,12 @@ class OurServer(sb.Server):
# Build register command
r_cmd = 'irm n r %s ' % (self.id,)
- if len(self.difs) == 0:
+ if len(self.layers) == 0:
r_cmd += ' '.join('ipcp %s' % (ipcp.name,) for ipcp in node.ipcps)
else:
- for dif in self.difs:
+ for layer in self.layers:
for ipcp in node.ipcps:
- if ipcp.dif is dif:
+ if ipcp.layer is layer:
r_cmd += 'ipcp %s' % (ipcp.name,)
r_cmd += ' && '
@@ -143,6 +147,20 @@ class Experiment(mod.Experiment):
self.set_startup_command("irmd")
self.metrics_python_version = "python3.9"
+ # Create IRM backend
+ if isinstance(testbed, (local.Testbed,
+ localnet.LocalNetworkTestbed)):
+ try:
+ self.irm = IrmPython()
+ logger.info("Using pyouroboros IRM backend")
+ except ImportError:
+ self.irm = IrmCLI()
+ logger.info("pyouroboros not available, "
+ "using CLI IRM backend for local testbed")
+ else:
+ self.irm = IrmCLI()
+ logger.info("Using CLI IRM backend for remote testbed")
+
@staticmethod
def make_executor(node, packages, testbed):
def executor(commands):
@@ -171,10 +189,12 @@ class Experiment(mod.Experiment):
if isinstance(self.testbed, docker.Testbed):
return
- if isinstance(self.testbed, local.Testbed):
+ if self._is_local:
subprocess.check_call('sudo -v'.split())
- self.irmd = subprocess.Popen(["sudo", "irmd"])
- logger.info("Started IRMd, sleeping 2 seconds...")
+ popen = subprocess.Popen(["sudo", "irmd"])
+ self.irmd = Process(None, popen.pid, "irmd", popen=popen)
+ logger.info("Started IRMd (pid %d), sleeping 2 seconds...",
+ popen.pid)
time.sleep(2)
else:
for node in self.nodes:
@@ -203,8 +223,14 @@ class Experiment(mod.Experiment):
m_processing.call_in_parallel(names, args, executors)
+ @property
+ def _is_local(self):
+ """True if the testbed runs locally (single IRMd)."""
+ return isinstance(self.testbed,
+ (local.Testbed, localnet.LocalNetworkTestbed))
+
def install_ouroboros(self):
- if isinstance(self.testbed, local.Testbed):
+ if self._is_local:
return
packages = ["cmake", "protobuf-c-compiler", "git", "libfuse-dev",
@@ -221,7 +247,7 @@ class Experiment(mod.Experiment):
self._install_packages_and_execute_cmds(packages, cmds)
def update_ouroboros(self, branch):
- if isinstance(self.testbed, local.Testbed):
+ if self._is_local:
return
if branch is not None:
@@ -255,7 +281,7 @@ class Experiment(mod.Experiment):
self._install_packages_and_execute_cmds(packages, [])
def install_ouroboros_python_exporter(self):
- if isinstance(self.testbed, local.Testbed):
+ if self._is_local:
return
if self.influxdb is None:
@@ -315,97 +341,106 @@ class Experiment(mod.Experiment):
self._install_packages_and_execute_cmds([], commands, [src])
+ def _get_n1_ipcp_names(self, node, ipcp):
+ """Get N-1 IPCP names for name registration."""
+ reg_ipcps = []
+ if ipcp.layer in node.registrations:
+ for lower in node.registrations[ipcp.layer]:
+ for ipcp_b in node.ipcps:
+ if ipcp_b in lower.ipcps:
+ reg_ipcps.append(ipcp_b.name)
+ return reg_ipcps
+
def create_ipcps(self):
for node in self.nodes:
- cmds = list()
for ipcp in node.ipcps:
- cmds2 = list()
- if ipcp.dif_bootstrapper:
- cmd = "irm i b n " + ipcp.name
- else:
- cmd = "irm i c n " + ipcp.name
-
- if isinstance(ipcp.dif, mod.ShimEthDIF):
- if isinstance(self.testbed, local.Testbed):
- cmd += " type local layer " + ipcp.dif.name
- else:
- cmd += " type eth-dix dev " + ipcp.ifname
- cmd += " layer " + ipcp.dif.name
- elif isinstance(ipcp.dif, mod.NormalDIF):
- cmd += " type unicast"
- if ipcp.dif_bootstrapper:
- pols = ipcp.dif.policy.get_policies()
- for comp in pols:
- for pol in pols[comp]:
- cmd += " " + comp + " " + pol
- cmd += " layer " + ipcp.dif.name + " autobind"
-
- cmd2 = "irm n r " + ipcp.name
- for dif_b in node.dif_registrations[ipcp.dif]:
- for ipcp_b in node.ipcps:
- if ipcp_b in dif_b.ipcps:
- cmd2 += " ipcp " + ipcp_b.name
- cmds2.append(cmd2)
- cmd2 = "irm n r " + ipcp.dif.name
- for dif_b in node.dif_registrations[ipcp.dif]:
- for ipcp_b in node.ipcps:
- if ipcp_b in dif_b.ipcps:
- cmd2 += " ipcp " + ipcp_b.name
- cmds2.append(cmd2)
- elif isinstance(ipcp.dif, mod.ShimUDPDIF):
- logger.error("UDP IPCP not supported yet")
- continue
+ layer = ipcp.layer
+ layer_type = layer.layer_type
+
+ # Plain local testbed: use local IPCP type for eth layers
+ # (LocalNetworkTestbed has real veth interfaces)
+ if layer.is_eth \
+ and isinstance(self.testbed, local.Testbed) \
+ and not isinstance(self.testbed,
+ localnet.LocalNetworkTestbed):
+ layer_type = LayerType.LOCAL
+
+ if layer.is_shim:
+ # Shim IPCPs are always bootstrapped
+ eth_dev = None
+ ip_addr = None
+ if layer.is_eth \
+ and (not isinstance(self.testbed, local.Testbed)
+ or isinstance(self.testbed,
+ localnet.LocalNetworkTestbed)):
+ eth_dev = ipcp.ifname
+ if layer.is_udp and hasattr(ipcp, 'ip_addr'):
+ ip_addr = ipcp.ip_addr
+
+ self.irm.create_ipcp(node, ipcp.name, layer_type)
+ self.irm.bootstrap_ipcp(
+ node, ipcp.name, layer_type,
+ layer.name, eth_dev=eth_dev, ip_addr=ip_addr,
+ autobind=False)
+ elif ipcp.bootstrapper:
+ # Unicast/broadcast bootstrapper
+ self.irm.create_ipcp(node, ipcp.name, layer_type)
+
+ policies = {}
+ if hasattr(layer, 'policy'):
+ pols = layer.policy.get_policies()
+ for comp, pol_dict in pols.items():
+ if pol_dict:
+ policies[comp] = next(iter(pol_dict))
+
+ self.irm.bootstrap_ipcp(
+ node, ipcp.name, layer_type,
+ layer.name, policies=policies)
+
+ # Postpone N-1 registrations
+ self.r_ipcps[ipcp] = \
+ self._get_n1_ipcp_names(node, ipcp)
else:
- logger.error("Unsupported IPCP type")
- continue
+ # Non-bootstrapper: just create, will enroll later
+ self.irm.create_ipcp(node, ipcp.name, layer_type)
- cmds.append(cmd)
- # Postpone registrations
- self.r_ipcps[ipcp] = cmds2
-
- node.execute_commands(cmds, time_out=None)
-
- def enroll_dif(self, el):
+ def enroll_layer(self, el):
for e in el:
ipcp = e['enrollee']
- cmds = list()
+ node = ipcp.node
- # Execute postponed registration
+ # Execute postponed registration for enroller
if e['enroller'] in self.r_ipcps:
- e['enroller'].node.execute_commands(self.r_ipcps[e['enroller']],
- time_out=None)
- self.r_ipcps.pop(e['enroller'], None)
-
- cmd = "irm n r " + ipcp.name
- for dif_b in e['enrollee'].node.dif_registrations[ipcp.dif]:
- for ipcp_b in e['enrollee'].node.ipcps:
- if ipcp_b in dif_b.ipcps:
- cmd += " ipcp " + ipcp_b.name
- cmds.append(cmd)
- cmd = "irm i e n " + ipcp.name + " layer " + e['dif'].name + \
- " autobind"
- cmds.append(cmd)
- cmd = "irm n r " + ipcp.dif.name
- for dif_b in e['enrollee'].node.dif_registrations[ipcp.dif]:
- for ipcp_b in e['enrollee'].node.ipcps:
- if ipcp_b in dif_b.ipcps:
- cmd += " ipcp " + ipcp_b.name
- cmds.append(cmd)
-
- e['enrollee'].node.execute_commands(cmds, time_out=None)
+ reg_ipcps = self.r_ipcps[e['enroller']]
+ enroller_node = e['enroller'].node
+ self.irm.reg_name(enroller_node, e['enroller'].name,
+ reg_ipcps)
+ self.irm.reg_name(enroller_node, e['enroller'].layer.name,
+ reg_ipcps)
+ del self.r_ipcps[e['enroller']]
+
+ # Register enrollee name in N-1 layers
+ reg_ipcps = self._get_n1_ipcp_names(node, ipcp)
+ self.irm.reg_name(node, ipcp.name, reg_ipcps)
+
+ # Enroll
+ self.irm.enroll_ipcp(node, ipcp.name, e['enroller'].name)
+
+ # Register layer name
+ self.irm.reg_name(node, ipcp.layer.name, reg_ipcps)
def setup_flows(self, el):
for e in el:
ipcp = e['src']
- cmd = "irm i conn n " + ipcp.name + " dst " + e['dst'].name
retry = 0
max_retries = 3
while retry < max_retries:
time.sleep(retry * 5)
try:
- ipcp.node.execute_command(cmd, time_out=None)
+ self.irm.connect_ipcp(
+ ipcp.node, ipcp.name, e['dst'].name)
break
- except Exception as e:
+ except Exception as ex:
retry += 1
logger.error('Failed to connect IPCP, retrying: ' +
str(retry) + '/' + str(max_retries) +
@@ -422,11 +457,10 @@ class Experiment(mod.Experiment):
logger.info("Installed on all nodes...")
def _bootstrap_prototype(self):
- for dif in self.dif_ordering:
- if isinstance(dif, mod.NormalDIF):
- if len(dif.qos_cubes) != 0:
- logger.warn('QoS cubes not (yet) supported by '
- 'the Ouroboros plugin. Will ignore.')
+ for layer in self.layer_ordering:
+ if hasattr(layer, 'qos_cubes') and len(layer.qos_cubes) != 0:
+ logger.warning('QoS cubes not (yet) supported by '
+ 'the Ouroboros plugin. Will ignore.')
logger.info("Starting IRMd on all nodes...")
self.setup_ouroboros()
logger.info("Creating IPCPs")
@@ -435,7 +469,7 @@ class Experiment(mod.Experiment):
for enrolls, flows in zip(self.enrollments,
self.flows):
- self.enroll_dif(enrolls)
+ self.enroll_layer(enrolls)
self.setup_flows(flows)
logger.info("All done, have fun!")
@@ -455,7 +489,8 @@ class Experiment(mod.Experiment):
cmds.append(kill + 'ipcpd-broadcast || true')
cmds.append(kill + 'ipcpd-eth-llc || true')
cmds.append(kill + 'ipcpd-eth-dix || true')
- cmds.append(kill + 'ipcpd-udp || true')
+ cmds.append(kill + 'ipcpd-udp4 || true')
+ cmds.append(kill + 'ipcpd-udp6 || true')
cmds.append(kill + 'ipcpd-local || true')
cmds.append(kill + 'ocbr || true')
cmds.append(kill + 'oping || true')
@@ -472,10 +507,10 @@ class Experiment(mod.Experiment):
cmds.append('killall -15 irmd')
logger.info("Killing Ouroboros...")
- if isinstance(self.testbed, local.Testbed):
+ if self._is_local:
cmds = list(map(lambda c: "sudo %s" % (c,), cmds))
for cmd in cmds:
- subprocess.check_call(cmd.split())
+ subprocess.call(cmd.split())
else:
for node in self.nodes:
node.execute_commands(cmds, time_out=None, as_root=True)
@@ -486,9 +521,9 @@ class Experiment(mod.Experiment):
def _stop_metrics_exporter(self, nodes):
self.stop_ouroboros_metrics_exporter(nodes)
- def destroy_dif(self, dif):
- for ipcp in dif.ipcps:
- ipcp.node.execute_command('irm i d n ' + ipcp.name)
+ def destroy_layer(self, layer):
+ for ipcp in layer.ipcps:
+ self.irm.destroy_ipcp(ipcp.node, ipcp.name)
def parse_stats(self, lines, spaces=0):
d = {}
@@ -513,38 +548,38 @@ class Experiment(mod.Experiment):
return d
- def export_dif_bandwidth(self, filename, dif):
+ def export_layer_bandwidth(self, filename, layer):
f = open(filename, 'w')
- for node in dif.members:
- ipcp = node.get_ipcp_by_dif(dif)
+ for node in layer.members:
+ ipcp = node.get_ipcp_by_layer(layer)
# Get IPCP address
if not hasattr(ipcp, 'address'):
path = '/tmp/ouroboros/' + ipcp.name + '/dt*'
- dt_path = node.execute_command('ls -d %s' % path)
+ dt_path = node.execute_command('ls -d %s' % path).stdout
dts = dt_path.split('.')
ipcp.address = int(dts[-1])
logger.info('IPCP %s has dt component '
'with address %d' % (ipcp.name, ipcp.address))
- for node in dif.members:
- ipcp = node.get_ipcp_by_dif(dif)
+ for node in layer.members:
+ ipcp = node.get_ipcp_by_layer(layer)
dt_path = '/tmp/ouroboros/' + ipcp.name + '/dt.' + \
str(ipcp.address) + '/'
# Get flows to other endpoints
- fd = node.execute_command('ls --ignore=[01] %s' % dt_path)
+ fd = node.execute_command('ls --ignore=[01] %s' % dt_path).stdout
fds = fd.split('\n')
for fd in fds:
fd_path = dt_path + fd
- fd_file = node.execute_command('cat %s' % fd_path)
+ fd_file = node.execute_command('cat %s' % fd_path).stdout
d = self.parse_stats(fd_file.splitlines())
remote = d["Endpoint address"]
ipcp2_name = ''
- for ipcp2 in dif.ipcps:
+ for ipcp2 in layer.ipcps:
if ipcp2.address == int(remote):
ipcp2_name = ipcp2.name
diff --git a/rumba/prototypes/rlite.py b/rumba/prototypes/rlite.py
deleted file mode 100644
index e67e539..0000000
--- a/rumba/prototypes/rlite.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#
-# A library to manage ARCFIRE experiments
-#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
-#
-# Sander Vrijders <sander.vrijders@ugent.be>
-# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., http://www.fsf.org/about/contact/.
-#
-
-import rumba.ssh_support as ssh
-import rumba.model as mod
-import rumba.log as log
-import rumba.multiprocess as m_processing
-
-import time
-
-
-logger = log.get_logger(__name__)
-
-
-class Experiment(mod.Experiment):
- """
- Represents an rlite experiment.
- """
-
- def __init__(self, testbed, nodes=None,
- git_repo='https://gitlab.com/arcfire/rlite',
- git_branch='master', enrollment_strategy='minimal'):
- """
- Initializes the experiment class.
-
- :param testbed: The testbed to run the experiment on.
- :param nodes: The list of nodes.
- :param git_repo: The git repository to use for installation.
- :param git_branch: The branch of the git repository to use.
- :param enrollment_strategy: Can be 'full-mesh', 'minimal' or 'manual'.
- """
- mod.Experiment.__init__(self, testbed, nodes, git_repo, git_branch,
- prototype_logs=['/tmp/uipcp.log'],
- enrollment_strategy=enrollment_strategy)
-
- @staticmethod
- def make_executor(node, packages, testbed):
- def executor(commands):
- ssh.aptitude_install(testbed, node, packages)
- node.execute_commands(commands, time_out=None, use_proxy=True)
- return executor
-
- def prototype_name(self):
- return 'rlite'
-
- def execute_commands(self, node, cmds):
- ssh.execute_commands(self.testbed, node.ssh_config,
- cmds, time_out=None)
-
- # Prepend sudo to all commands if the user is not 'root'
- def may_sudo(self, cmds):
- if self.testbed.username != 'root':
- for i in range(len(cmds)):
- cmds[i] = "sudo %s" % cmds[i]
-
- def init_nodes(self):
- # Load kernel modules and start the uipcps daemon
- cmds = ["modprobe rlite",
- "modprobe rlite-normal",
- "modprobe rlite-shim-eth",
- "modprobe rlite-shim-udp4",
- "modprobe rlite-shim-loopback",
- "rlite-uipcps -v DBG > /tmp/uipcp.log 2>&1 &"]
- self.may_sudo(cmds)
-
- for node in self.nodes:
- self.execute_commands(node, cmds)
-
- def create_ipcps(self):
- for node in self.nodes:
- cmds = []
-
- for ipcp in node.ipcps:
- # Generate the command to create the IPCP
- if isinstance(ipcp.dif, mod.NormalDIF):
- ipcp_type = 'normal'
- elif isinstance(ipcp.dif, mod.ShimEthDIF):
- ipcp_type = 'shim-eth'
- elif isinstance(ipcp.dif, mod.ShimUDPDIF):
- ipcp_type = 'shim-udp4'
- else:
- logger.warning(
- "unknown type for DIF %s, default to loopback",
- ipcp.dif.name)
- ipcp_type = 'shim-loopback'
-
- cmds.append("rlite-ctl ipcp-create %s %s %s" %
- (ipcp.name, ipcp_type, ipcp.dif.name))
-
- # Generate the command to configure the interface
- # name for the shim-eth
- if isinstance(ipcp.dif, mod.ShimEthDIF):
- cmds.append("rlite-ctl ipcp-config %s netdev %s"
- % (ipcp.name, ipcp.ifname))
-
- if isinstance(ipcp.dif, mod.NormalDIF) \
- and ipcp.dif_bootstrapper:
- cmds.append("rlite-ctl ipcp-enroller-enable %s"
- % (ipcp.name))
-
- self.may_sudo(cmds)
- self.execute_commands(node, cmds)
-
- def register_ipcps(self):
- for node in self.nodes:
- cmds = []
-
- for ipcp in node.ipcps:
- for lower in ipcp.registrations:
- cmds.append("rlite-ctl ipcp-register %s %s"
- % (ipcp.name, lower.name))
-
- self.may_sudo(cmds)
- self.execute_commands(node, cmds)
-
- def enroll_ipcps(self):
- for el in self.enrollments:
- for e in el:
- d = {'enrollee': e['enrollee'].name,
- 'dif': e['dif'].name,
- 'lower_dif': e['lower_dif'].name,
- 'enroller': e['enroller'].name
- }
- cmd = "rlite-ctl ipcp-enroll-retry %(enrollee)s %(dif)s "\
- "%(lower_dif)s %(enroller)s" % d
- cmds = [cmd]
- self.may_sudo(cmds)
- self.execute_commands(e['enrollee'].node, cmds)
- time.sleep(1)
-
- def _install_prototype(self):
- logger.info("installing rlite on all nodes")
-
- packages = ["g++", "gcc", "cmake", "linux-headers-$(uname -r)",
- "protobuf-compiler", "libprotobuf-dev", "git"]
-
- cmds = ["rm -rf ~/rlite",
- "cd ~; git clone -b " + self.git_branch + " " + self.git_repo,
- "cd ~/rlite && ./configure && make && sudo make install",
- "cd ~/rlite && sudo make depmod"]
-
- names = []
- executors = []
- args = []
-
- for node in self.nodes:
- executor = self.make_executor(node, packages, self.testbed)
- names.append(node.name)
- executors.append(executor)
- args.append(cmds)
- m_processing.call_in_parallel(names, args, executors)
- logger.info("installation complete")
-
- def _bootstrap_prototype(self):
- for dif in self.dif_ordering:
- if isinstance(dif, mod.NormalDIF):
- if len(dif.qos_cubes) != 0:
- logger.warn('QoS cubes not (yet) supported by '
- 'the rlite plugin. Will ignore.')
- logger.info("setting up")
- self.init_nodes()
- logger.info("software initialized on all nodes")
- self.create_ipcps()
- logger.info("IPCPs created on all nodes")
- self.register_ipcps()
- logger.info("IPCPs registered to their lower DIFs on all nodes")
- self.enroll_ipcps()
- logger.info("enrollment completed in all DIFs")
-
- def _terminate_prototype(self):
- return
diff --git a/rumba/recpoisson.py b/rumba/recpoisson.py
index a4cf492..98960a4 100644
--- a/rumba/recpoisson.py
+++ b/rumba/recpoisson.py
@@ -27,13 +27,7 @@
import math
import random
-import sys
-
-if sys.version_info < (3, 2):
- from repoze.lru import lru_cache
- # from functools32 import lru_cache
-else:
- from functools import lru_cache
+from functools import lru_cache
@lru_cache(1000)
diff --git a/rumba/ssh_support.py b/rumba/ssh_support.py
index d77b59f..d17895a 100644
--- a/rumba/ssh_support.py
+++ b/rumba/ssh_support.py
@@ -26,7 +26,6 @@
import os
import paramiko
-import re
import time
from paramiko.ssh_exception import *
@@ -34,20 +33,14 @@ from paramiko.ssh_exception import *
import rumba.log as log
# Fix input reordering
-from rumba.model import Executor
+import builtins
-try:
- import builtins # Only in Python 3
+from rumba.model import Executor
- def input(prompt=''):
- log.flush_log()
- return builtins.input(prompt)
-except ImportError: # We are in Python 2
- import __builtin__
- def input(prompt=''):
- log.flush_log()
- return __builtin__.raw_input(prompt)
+def input(prompt=''):
+ log.flush_log()
+ return builtins.input(prompt)
logger = log.get_logger(__name__)
@@ -65,18 +58,16 @@ def get_ssh_client():
return ssh_client
-def _print_stream(stream):
- o = str(stream.read()).strip('b')
- o = o.strip('\'\"')
- o = o.rstrip()
- o = re.sub(r'(\\n)*$', '', o)
- if o != "":
- o_array = o.split('\\n')
- for oi in o_array:
- logger.debug(oi)
- else:
- o_array = []
- return '\n'.join(o_array)
+def _read_stream(stream):
+    """Read, decode, debug-log and return a paramiko channel stream's output."""
+ data = stream.read()
+ if isinstance(data, bytes):
+ data = data.decode('utf-8', errors='replace')
+ text = data.rstrip()
+ if text:
+ for line in text.split('\n'):
+ logger.debug(line)
+ return text
def ssh_connect(hostname, port, username, password, time_out, proxy_server):
@@ -162,7 +153,8 @@ def ssh_sftp(ssh_config, testbed):
chan.invoke_subsystem("sftp")
return paramiko.sftp_client.SFTPClient(chan)
-def execute_commands(testbed, ssh_config, commands, time_out=3):
+def execute_commands(testbed, ssh_config, commands, time_out=3,
+ node_name='', check=True):
"""
Remote execution of a list of shell command on hostname. By
default this function will exit (timeout) after 3 seconds.
@@ -173,45 +165,51 @@ def execute_commands(testbed, ssh_config, commands, time_out=3):
@param time_out: time_out value in seconds, error will be generated if
no result received in given number of seconds, the value None can
be used when no timeout is needed
+ @param node_name: name of the node (for CommandResult)
+ @param check: if True, raise CommandError on non-zero exit code
+ @return: CommandResult for the last command
"""
+ from rumba.command import CommandResult
ssh_connect_check(ssh_config, testbed, time_out)
- o = ""
+ result = None
for command in commands:
logger.debug("%s@%s:%s >> %s" % (testbed.username,
ssh_config.hostname,
ssh_config.port,
command))
envars = '. /etc/profile;'
- command = envars + ' ' + command
+ full_cmd = envars + ' ' + command
chan = ssh_chan(ssh_config, testbed, time_out)
stdout = chan.makefile()
stderr = chan.makefile_stderr()
try:
- chan.exec_command(command)
+ chan.exec_command(full_cmd)
except paramiko.ssh_exception.SSHException as e:
raise SSHException('Failed to execute command')
- o = _print_stream(stdout)
- if chan.recv_exit_status() != 0:
- # Get ready for printing stdout and stderr
- if o != "":
- list_print = ['**** STDOUT:']
- list_print += o.split('\\n')
- else:
- list_print = []
- e = _print_stream(stderr)
- if e != "":
- list_print.append('**** STDERR:')
- list_print += e.split('\\n')
- raise SSHException('A remote command returned an error. '
- 'Output:\n\n\t' +
- '\n\t'.join(list_print) + '\n')
- return o
-
-
-def execute_command(testbed, ssh_config, command, time_out=3):
+
+ out = _read_stream(stdout)
+ err = _read_stream(stderr)
+ exit_code = chan.recv_exit_status()
+
+ result = CommandResult(
+ exit_code=exit_code,
+ stdout=out,
+ stderr=err,
+ command=command,
+ node=node_name
+ )
+
+ if check:
+ result.check()
+
+ return result
+
+
+def execute_command(testbed, ssh_config, command, time_out=3,
+ node_name='', check=True):
"""
Remote execution of a list of shell command on hostname. By
default this function will exit (timeout) after 3 seconds.
@@ -222,12 +220,13 @@ def execute_command(testbed, ssh_config, command, time_out=3):
@param time_out: time_out value in seconds, error will be generated if
no result received in given number of seconds, the value None can
be used when no timeout is needed
+ @param node_name: name of the node (for CommandResult)
+ @param check: if True, raise CommandError on non-zero exit code
- @return: stdout resulting from the command
+ @return: CommandResult from the command
"""
- o = execute_commands(testbed, ssh_config, [command], time_out)
- if o is not None:
- return o
+ return execute_commands(testbed, ssh_config, [command], time_out,
+ node_name=node_name, check=check)
def write_text_to_file(testbed, ssh_config, text, file_name):
diff --git a/rumba/storyboard.py b/rumba/storyboard.py
index 59fbf6f..f82ae7a 100644
--- a/rumba/storyboard.py
+++ b/rumba/storyboard.py
@@ -262,8 +262,9 @@ class ClientProcess(_SBEntity):
self.running = True
try:
- self.pid = self.node.execute_command(cmd, as_root=self.as_root)
- except ssh_support.SSHException:
+ result = self.node.execute_command(cmd, as_root=self.as_root)
+ self.pid = result.stdout
+ except Exception:
logger.warning('Could not start client %s on node %s.',
self.ap_id, self.node.name)
logger.debug('Client app %s on node %s got pid %s.',
@@ -272,8 +273,8 @@ class ClientProcess(_SBEntity):
def run_async(self):
"""Starts this process asynchronously"""
- def callback(pid):
- self.pid = pid
+ def callback(result):
+ self.pid = result.stdout
logger.debug('Client app %s on node %s got pid %s.',
self.ap_id, self.node.name, self.pid)
@@ -325,7 +326,7 @@ class Server(_SBEntity):
def __init__(self, ap, arrival_rate, mean_duration,
options=None, max_clients=float('inf'),
clients=None, nodes=None, min_duration=2,
- s_id=None, as_root=False, difs=None):
+ s_id=None, as_root=False, layers=None):
"""
:param ap: the application binary or command which should be run
@@ -355,10 +356,10 @@ class Server(_SBEntity):
:param as_root: if true, the server app will be started
with root permissions
:type as_root: `bool`
- :param difs: the difs this server intends to register to
- (note: the effect of this parameter is prototype
- dependent, and other strategies might be required)
- :type difs: `rumba.model.DIF` or `list` thereof
+ :param layers: the layers this server intends to register to
+ (note: the effect of this parameter is prototype
+ dependent, and other strategies might be required)
+ :type layers: `rumba.model.Layer` or `list` thereof
"""
self.ap = ap
e_id = s_id if s_id is not None else self.ap.replace(' ', '_')
@@ -377,13 +378,13 @@ class Server(_SBEntity):
self.pids = {}
self.min_duration = min_duration
self.as_root = as_root
- if difs is None:
- difs = []
- elif hasattr(difs, '__iter__'):
- difs = list(difs)
+ if layers is None:
+ layers = []
+ elif hasattr(layers, '__iter__'):
+ layers = list(layers)
else:
- difs = [difs]
- self.difs = difs
+ layers = [layers]
+ self.layers = layers
def add_client(self, client):
"""
@@ -503,8 +504,8 @@ class Server(_SBEntity):
self.id, node.name
)
- def callback(pid):
- self.pids[node] = pid
+ def callback(result):
+ self.pids[node] = result.stdout
_execute_command(node, cmd, callback, as_root=self.as_root)
@@ -568,7 +569,7 @@ class StoryBoard(_SBEntity):
self.commands_list = {}
self.node_map = {}
self.shims = {}
- self.difs = {}
+ self.layers = {}
self._build_nodes_lists()
# The following must be last, because it needs the info from
# _build_nodes_list
@@ -585,10 +586,10 @@ class StoryBoard(_SBEntity):
if self.experiment is not None:
for node in self.experiment.nodes:
self.node_map[node.name] = node
- for dif in self.experiment.dif_ordering:
- self.difs[dif.name] = dif
- if isinstance(dif, model.ShimEthDIF):
- self.shims[dif.name] = dif
+ for layer in self.experiment.layer_ordering:
+ self.layers[layer.name] = layer
+ if layer.is_eth:
+ self.shims[layer.name] = layer
def _validate_and_add_server(self, s, n=None):
if self.experiment is None:
@@ -1063,79 +1064,79 @@ class StoryBoard(_SBEntity):
dst_dir = os.path.join(local_dir, node.name)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
- logs_list = node.execute_command('ls /tmp/*.rumba.log '
- '|| echo ""')
- logs_list = [x for x in logs_list.split('\n') if x != '']
+ result = node.execute_command('ls /tmp/*.rumba.log '
+ '|| echo ""')
+ logs_list = [x for x in result.stdout.split('\n') if x != '']
logger.debug('Log list is:\n%s', logs_list)
node.fetch_files(logs_list, dst_dir)
- def schedule_export_dif_bandwidth(self, t, filename, dif):
+ def schedule_export_layer_bandwidth(self, t, filename, layer):
"""
Schedules the generation of a csv file of the bandwidth used by
- flows in a certain DIF at a certain time.
+ flows in a certain layer at a certain time.
:param filename: The output csv filename.
- :param dif: The DIF to export
+ :param layer: The layer to export
"""
if self.experiment is None:
raise ValueError("An experiment is needed to schedule commands.")
- action = functools.partial(self.experiment.export_dif_bandwidth,
- filename, dif)
+ action = functools.partial(self.experiment.export_layer_bandwidth,
+ filename, layer)
self.add_event(Event(action, ev_time=t))
- def schedule_link_state(self, t, dif, state):
+ def schedule_link_state(self, t, layer, state):
"""
- Schedules a link's (`rumba.model.ShimEthDIF`) state to go
+ Schedules a link's (`rumba.model.EthLayer`) state to go
up or down at the specified time.
:param t: the time in the storyboard at which the state
change should happen
:type t: `float`
- :param dif: the DIF which should be reconfigured
- :type dif: `rumba.model.ShimEthDIF`
+ :param layer: the layer which should be reconfigured
+ :type layer: `rumba.model.EthLayer`
:param state: the desired state
:type state: `str` -- either `up` or `down`
"""
if self.experiment is None:
raise ValueError("An experiment is needed to schedule commands.")
- if not isinstance(dif, model.ShimEthDIF):
- raise ValueError("Not a Shim Ethernet DIF.")
+ if not hasattr(layer, 'is_eth') or not layer.is_eth:
+ raise ValueError("Not an Ethernet layer.")
if state not in ['up', 'down']:
raise ValueError('Only possible states are "up" and "down"')
if self._script is None:
self._script = _Script(self)
- for node in dif.members:
- action = functools.partial(node.set_link_state, dif, state)
+ for node in layer.members:
+ action = functools.partial(node.set_link_state, layer, state)
self._script.add_event(Event(action, ev_time=t))
- def schedule_link_up(self, t, dif):
+ def schedule_link_up(self, t, layer):
"""
- Schedules a link's (`rumba.model.ShimEthDIF`) state to go
+ Schedules a link's (`rumba.model.EthLayer`) state to go
up at the specified time.
:param t: the time in the storyboard at which the state
change should happen
:type t: `float`
- :param dif: the DIF which should be reconfigured
- :type dif: `rumba.model.ShimEthDIF`
+ :param layer: the layer which should be reconfigured
+ :type layer: `rumba.model.EthLayer`
"""
- self.schedule_link_state(t, dif, 'up')
+ self.schedule_link_state(t, layer, 'up')
- def schedule_link_down(self, t, dif):
+ def schedule_link_down(self, t, layer):
"""
- Schedules a link's (`rumba.model.ShimEthDIF`) state to go
+ Schedules a link's (`rumba.model.EthLayer`) state to go
down at the specified time.
:param t: the time in the storyboard at which the state
change should happen
:type t: `float`
- :param dif: the DIF which should be reconfigured
- :type dif: `rumba.model.ShimEthDIF`
+ :param layer: the layer which should be reconfigured
+ :type layer: `rumba.model.EthLayer`
"""
- self.schedule_link_state(t, dif, 'down')
+ self.schedule_link_state(t, layer, 'down')
def schedule_node_state(self, t, node, state):
"""
@@ -1157,10 +1158,10 @@ class StoryBoard(_SBEntity):
if self._script is None:
self._script = _Script(self)
- for dif in node.difs:
- if not isinstance(dif, model.ShimEthDIF):
+ for layer in node.layers:
+ if not layer.is_eth:
continue
- action = functools.partial(node.set_link_state, dif, state)
+ action = functools.partial(node.set_link_state, layer, state)
self._script.add_event(Event(action, ev_time=t))
def schedule_node_up(self, t, node):
@@ -1189,20 +1190,20 @@ class StoryBoard(_SBEntity):
"""
self.schedule_node_state(t, node, 'down')
- def schedule_destroy_dif(self, t, dif):
+ def schedule_destroy_layer(self, t, layer):
"""
- Destroys a DIF at the specified time.
+ Destroys a layer at the specified time.
:param t: the time in the storyboard at which the state
change should happen
:type t: `float`
- :param dif: the DIF which should go down
- :type dif: `rumba.model.DIF`
+ :param layer: the layer which should go down
+ :type layer: `rumba.model.Layer`
"""
if self.experiment is None:
raise ValueError("An experiment is needed to schedule commands.")
- action = functools.partial(self.experiment.destroy_dif, dif)
+ action = functools.partial(self.experiment.destroy_layer, layer)
self.add_event(Event(action, ev_time=t))
def write_script(self, buffer):
@@ -1283,7 +1284,7 @@ class StoryBoard(_SBEntity):
buffer = StringIO(string)
self.parse_script(buffer, clean)
- def capture_traffic(self, start, end, node, dif):
+ def capture_traffic(self, start, end, node, layer):
"""
Captures the traffic of an interface on a node.
@@ -1293,11 +1294,11 @@ class StoryBoard(_SBEntity):
:type end: `float`
:param node: the node to capture on.
:type node: `rumba.model.Node`
- :param dif: the node's Shim Ethernet DIF whose interface
- will be used for the capture.
- :type dif: `rumba.model.ShimEthDIF`
+ :param layer: the node's Ethernet layer whose interface
+ will be used for the capture.
+ :type layer: `rumba.model.EthLayer`
"""
- for ipcp in dif.ipcps:
+ for ipcp in layer.ipcps:
if ipcp.node is not node:
continue
# In case tcpdump is not present, this assumes a testbed
@@ -1311,7 +1312,7 @@ class StoryBoard(_SBEntity):
pcap_file = (
node.name +
'_' +
- dif.name +
+ layer.name +
'_' +
str(uuid.uuid4())[0:4] + ".pcap"
)
@@ -1533,8 +1534,10 @@ class _Script(object):
'Server': self._servers,
'Client': self._clients,
'ClientProcess': self._processes,
- 'ShimEthDIF': self._shims,
- 'DIF': self._storyboard.difs
+ 'EthLayer': self._shims,
+ 'EthDixLayer': self._shims,
+ 'EthLlcLayer': self._shims,
+ 'Layer': self._storyboard.layers
}
def add_process(self, process):
diff --git a/rumba/testbeds/dockertb.py b/rumba/testbeds/dockertb.py
index d2a1334..8323bdc 100644
--- a/rumba/testbeds/dockertb.py
+++ b/rumba/testbeds/dockertb.py
@@ -70,10 +70,7 @@ class Testbed(mod.Testbed):
docker_client = self.docker_client
if not self.base_image:
- if experiment.prototype_name() == 'ouroboros':
- self.base_image = 'arcfirerumba/ouroboros'
- else:
- raise Exception('Only Ouroboros supported for now.')
+ self.base_image = 'arcfirerumba/ouroboros'
img = self.base_image.rsplit(":", 1)
@@ -107,8 +104,8 @@ class Testbed(mod.Testbed):
if not os.path.exists("/var/run/netns"):
subprocess.check_call(cmd.split())
- for shim in experiment.dif_ordering:
- if not isinstance(shim, mod.ShimEthDIF):
+ for shim in experiment.layer_ordering:
+ if not shim.is_eth:
# Nothing to do here
continue
@@ -151,7 +148,7 @@ class Testbed(mod.Testbed):
subprocess.check_call(cmd.split())
for ipcp in node.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
+ if isinstance(ipcp, mod.EthIPCP):
if ipcp.ifname is None:
ipcp.ifname = "e%i" % node.ipcps.index(ipcp)
@@ -168,15 +165,15 @@ class Testbed(mod.Testbed):
cmd = ""
if self.use_ovs:
- cmd += ('sudo ovs-vsctl add-port %(dif)s %(node)s.%('
+ cmd += ('sudo ovs-vsctl add-port %(layer)s %(node)s.%('
'ifname)s')
else:
cmd += ('sudo ip link set %(node)s.%(ifname)s master '
- '%(dif)s')
+ '%(layer)s')
cmd = (cmd % {'node': node_name,
'ifname': ipcp.ifname,
- 'dif': ipcp.dif.name})
+ 'layer': ipcp.layer.name})
logger.debug('executing >> %s', cmd)
@@ -212,9 +209,8 @@ class Testbed(mod.Testbed):
logger.debug('Stopping node %s' % name)
container.remove(force=True)
- for shim in experiment.dif_ordering:
- if isinstance(shim, mod.ShimEthDIF) \
- and shim.name in self.active_bridges:
+ for shim in experiment.layer_ordering:
+ if shim.is_eth and shim.name in self.active_bridges:
cmd = ""
if self.use_ovs:
cmd += 'sudo ovs-vsctl del-br %(shim)s'
diff --git a/rumba/testbeds/emulab.py b/rumba/testbeds/emulab.py
index 8cd31cf..d0b36b4 100644
--- a/rumba/testbeds/emulab.py
+++ b/rumba/testbeds/emulab.py
@@ -183,14 +183,14 @@ class Testbed(mod.Testbed):
ns2_script += "tb-set-node-os $" + node.name + " " + \
self.image + "\n"
- for dif in experiment.dif_ordering:
- if isinstance(dif, mod.ShimEthDIF):
- if len(dif.ipcps) != 2:
+ for layer in experiment.layer_ordering:
+ if layer.is_eth:
+ if len(layer.ipcps) != 2:
continue
- ns2_script += "set " + dif.name + \
+ ns2_script += "set " + layer.name + \
" [$ns duplex-link $" + \
- dif.members[0].name + " $" + \
- dif.members[1].name + " 1000Mb 0ms DropTail]\n"
+ layer.members[0].name + " $" + \
+ layer.members[1].name + " 1000Mb 0ms DropTail]\n"
ns2_script += "$ns run\n"
@@ -248,7 +248,7 @@ class Testbed(mod.Testbed):
if node.name != node_name:
continue
for ipcp in node.ipcps:
- if ipcp.dif.name == link_name:
+ if ipcp.layer.name == link_name:
self.ip[ipcp] = link_ip
for node in experiment.nodes:
@@ -258,7 +258,7 @@ class Testbed(mod.Testbed):
for item in output:
item = item.split()
for ipcp in node.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
+ if isinstance(ipcp, mod.EthIPCP):
if self.ip[ipcp] == item[1]:
ipcp.ifname = item[0]
diff --git a/rumba/testbeds/jfed.py b/rumba/testbeds/jfed.py
index e9e281d..a06aa50 100644
--- a/rumba/testbeds/jfed.py
+++ b/rumba/testbeds/jfed.py
@@ -29,14 +29,10 @@ import getpass
import xml.dom.minidom as xml
import os.path
import tarfile
-import sys
from rumba.executors.ssh import SSHExecutor
-if sys.version_info[0] >= 3:
- from urllib.request import urlretrieve
-else:
- from urllib import urlretrieve
+from urllib.request import urlretrieve
import rumba.model as mod
import rumba.log as log
@@ -202,24 +198,24 @@ class Testbed(mod.Testbed):
node.ifs = 0
for ipcp in node.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
+ if isinstance(ipcp, mod.EthIPCP):
el3 = doc.createElement("interface")
self.if_id[ipcp] = node.name + ":if" + str(node.ifs)
el3.setAttribute("client_id", self.if_id[ipcp])
node.ifs += 1
el.appendChild(el3)
- for dif in experiment.dif_ordering:
- if isinstance(dif, mod.ShimEthDIF):
+ for layer in experiment.layer_ordering:
+ if layer.is_eth:
el = doc.createElement("link")
top_el.appendChild(el)
- el.setAttribute("client_id", dif.name)
+ el.setAttribute("client_id", layer.name)
el2 = doc.createElement("component_manager")
el2.setAttribute("name", self.authority)
el.appendChild(el2)
- for ipcp in dif.ipcps:
+ for ipcp in layer.ipcps:
el3 = doc.createElement("interface_ref")
el3.setAttribute("client_id", self.if_id[ipcp])
el.appendChild(el3)
@@ -370,7 +366,7 @@ class Testbed(mod.Testbed):
)
i_name = intf.getAttribute("client_id")
for ipcp in node_n.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
+ if isinstance(ipcp, mod.EthIPCP):
if self.if_id[ipcp] == i_name:
ipcp.ifname = ifname
if ifname is None or ifname == "":
diff --git a/rumba/testbeds/localnet.py b/rumba/testbeds/localnet.py
new file mode 100644
index 0000000..95a6bdc
--- /dev/null
+++ b/rumba/testbeds/localnet.py
@@ -0,0 +1,257 @@
+#
+# Rumba - Local Network Testbed (veth + bridges)
+#
+# Copyright (C) 2017-2026 imec
+#
+# Dimitri Staessens <dimitri.staessens@ugent.be>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., http://www.fsf.org/about/contact/.
+#
+
+import hashlib
+import subprocess
+import tempfile
+
+import rumba.model as mod
+import rumba.log as log
+
+from rumba.executors.local import LocalExecutor
+
+logger = log.get_logger(__name__)
+
+
+def _run(cmd):
+ """Log the command at debug level, then run it (callers prepend sudo)."""
+ logger.debug('executing >> %s', cmd)
+ subprocess.check_call(cmd.split())
+
+
+class LocalNetworkTestbed(mod.Testbed):
+ """
+ Local testbed using Linux bridges and veth pairs.
+
+ Each Ethernet layer gets a Linux bridge. Each node's Ethernet IPCP
+ gets a veth pair: one end is attached to the layer's bridge, the
+ other end is the interface that the IPCP binds to. All processes
+ (IRMd, IPCPs, applications) run in the default network namespace
+ under a single IRMd instance.
+
+ This allows testing the real eth-dix / eth-llc stack locally with
+ netem support on the veth interfaces.
+ """
+
+ def __init__(self, exp_name='localnet', username='',
+ proj_name='rumba', password=''):
+ """
+ :param exp_name: The experiment name.
+ :param username: User of the experiment.
+ :param proj_name: Project name of the experiment.
+ :param password: Password of the user.
+ """
+ mod.Testbed.__init__(self, exp_name, username, password, proj_name)
+
+ self.executor = LocalExecutor(self)
+ self.active_bridges = set()
+ self.active_veths = [] # list of (host_end, ipcp_end) tuples
+ self._hosts_backup = None # original /etc/hosts content
+
+ def _swap_in(self, experiment):
+ # Ensure sudo is primed
+ subprocess.check_call('sudo -v'.split())
+
+ # Create a bridge for each Ethernet layer
+ for layer in experiment.layer_ordering:
+ if not layer.is_eth:
+ continue
+
+ bridge = 'br-%s' % layer.name
+ _run('sudo ip link add %s type bridge' % bridge)
+ _run('sudo ip link set dev %s up' % bridge)
+ self.active_bridges.add(bridge)
+
+ # Create veth pairs for each Ethernet IPCP
+ veth_idx = 0
+ for node in experiment.nodes:
+ for ipcp in node.ipcps:
+ if not isinstance(ipcp, mod.EthIPCP):
+ continue
+
+ bridge = 'br-%s' % ipcp.layer.name
+
+ # Generate unique interface names (max 15 chars)
+ ipcp_end = 'veth%d' % veth_idx
+ host_end = 'vb%d' % veth_idx
+ veth_idx += 1
+
+ # Assign the IPCP-facing interface name
+ ipcp.ifname = ipcp_end
+
+ # Create veth pair
+ _run('sudo ip link add %s type veth peer name %s'
+ % (host_end, ipcp_end))
+
+ # Attach bridge end to the layer bridge
+ _run('sudo ip link set %s master %s' % (host_end, bridge))
+
+ # Bring both ends up
+ _run('sudo ip link set dev %s up' % host_end)
+ _run('sudo ip link set dev %s up' % ipcp_end)
+
+ self.active_veths.append((host_end, ipcp_end))
+
+ # Create veth pairs with IP addresses for each UDP layer.
+ # UDP layers are point-to-point (max 2 members), so we create
+ # a single veth pair and assign an IP to each end.
+ udp_layers_done = set()
+ udp_layer_idx = 0
+ for layer in experiment.layer_ordering:
+ if not layer.is_udp:
+ continue
+ if layer.name in udp_layers_done:
+ continue
+ udp_layers_done.add(layer.name)
+ udp_layer_idx += 1
+
+ # Collect the (at most 2) UdpIPCPs for this layer
+ udp_ipcps = [ipcp for node in experiment.nodes
+ for ipcp in node.ipcps
+ if isinstance(ipcp, mod.UdpIPCP)
+ and ipcp.layer == layer]
+
+ if len(udp_ipcps) != 2:
+ raise Exception(
+ "UDP layer %s needs exactly 2 members, got %d"
+ % (layer.name, len(udp_ipcps)))
+
+ end_a = 'veth%d' % veth_idx
+ end_b = 'veth%d' % (veth_idx + 1)
+ veth_idx += 2
+
+ _run('sudo ip link add %s type veth peer name %s'
+ % (end_a, end_b))
+
+ # Assign IP addresses (10.{layer_idx}.0.{1,2}/24)
+ ip_a = '10.%d.0.1/24' % udp_layer_idx
+ ip_b = '10.%d.0.2/24' % udp_layer_idx
+
+ _run('sudo ip addr add %s dev %s' % (ip_a, end_a))
+ _run('sudo ip addr add %s dev %s' % (ip_b, end_b))
+ _run('sudo ip link set dev %s up' % end_a)
+ _run('sudo ip link set dev %s up' % end_b)
+
+ udp_ipcps[0].ifname = end_a
+ udp_ipcps[0].ip_addr = '10.%d.0.1' % udp_layer_idx
+ udp_ipcps[1].ifname = end_b
+ udp_ipcps[1].ip_addr = '10.%d.0.2' % udp_layer_idx
+
+ self.active_veths.append((end_a, end_b))
+
+ # ---- /etc/hosts entries for UDP name resolution ----
+ # The UDP shim IPCP resolves names via DNS (falling back to
+ # /etc/hosts when dns_addr is 0.0.0.0). We add entries so
+ # that IPCP names and overlay names registered in UDP layers
+ # resolve to the correct veth IP address.
+ hosts_entries = [] # list of (ip, name) tuples; names are MD5-hashed later
+ for node in experiment.nodes:
+ # Find this node's UDP IPCP(s) and their IPs
+ for ipcp in node.ipcps:
+ if not isinstance(ipcp, mod.UdpIPCP):
+ continue
+ if ipcp.ip_addr is None or ipcp.ip_addr == '0.0.0.0':
+ continue
+ udp_ip = ipcp.ip_addr
+ udp_layer = ipcp.layer
+
+ # 1) The UDP IPCP's own name (MD5-hashed)
+ hosts_entries.append((udp_ip, ipcp.name))
+
+ # 2) Names of overlay IPCPs registered in this UDP layer,
+ # plus their layer names (needed for enrollment)
+ for other_ipcp in node.ipcps:
+ if other_ipcp == ipcp:
+ continue
+ if other_ipcp.layer in node.registrations \
+ and udp_layer in node.registrations[
+ other_ipcp.layer]:
+ hosts_entries.append((udp_ip, other_ipcp.name))
+ hosts_entries.append((udp_ip,
+ other_ipcp.layer.name))
+
+ if hosts_entries:
+ result = subprocess.run(
+ ['cat', '/etc/hosts'], capture_output=True, text=True)
+ self._hosts_backup = result.stdout
+
+ # Deduplicate and append.
+ # The UDP IPCP resolves names by their MD5 hex digest,
+ # so /etc/hosts must map the hash to the IP address.
+ seen = set()
+ lines = []
+ for ip, name in hosts_entries:
+ md5 = hashlib.md5(name.encode()).hexdigest()
+ key = (ip, md5)
+ if key not in seen:
+ seen.add(key)
+ lines.append('%s %s # %s' % (ip, md5, name))
+
+ block = '\n# --- rumba localnet UDP ---\n'
+ for line in lines:
+ block += line + '\n'
+
+ with tempfile.NamedTemporaryFile(
+ mode='w', suffix='.hosts', delete=False) as tmp:
+ tmp.write(self._hosts_backup + block)
+ tmp_path = tmp.name
+ _run('sudo cp %s /etc/hosts' % tmp_path)
+ subprocess.run(['rm', '-f', tmp_path],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+
+ logger.debug('/etc/hosts entries added: %s', lines)
+
+ logger.info("Experiment swapped in (%d bridges, %d veth pairs)",
+ len(self.active_bridges), len(self.active_veths))
+
+ def _swap_out(self, experiment):
+ # Restore /etc/hosts if we modified it
+ if self._hosts_backup is not None:
+ with tempfile.NamedTemporaryFile(
+ mode='w', suffix='.hosts', delete=False) as tmp:
+ tmp.write(self._hosts_backup)
+ tmp_path = tmp.name
+ _run('sudo cp %s /etc/hosts' % tmp_path)
+ subprocess.run(['rm', '-f', tmp_path],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+ self._hosts_backup = None
+ logger.debug('/etc/hosts restored')
+
+ # Deleting one end of a veth pair automatically removes the other
+ for host_end, _ in self.active_veths:
+ try:
+ _run('sudo ip link del %s' % host_end)
+ except subprocess.CalledProcessError:
+ logger.debug('veth %s already gone', host_end)
+
+ self.active_veths = []
+
+ for bridge in list(self.active_bridges):
+ try:
+ _run('sudo ip link del %s' % bridge)
+ except subprocess.CalledProcessError:
+ logger.debug('bridge %s already gone', bridge)
+ self.active_bridges.discard(bridge)
+
+ logger.info("Experiment swapped out")
diff --git a/rumba/testbeds/qemu.py b/rumba/testbeds/qemu.py
deleted file mode 100644
index 53a3e42..0000000
--- a/rumba/testbeds/qemu.py
+++ /dev/null
@@ -1,457 +0,0 @@
-#
-# A library to manage ARCFIRE experiments
-#
-# Copyright (C) 2017-2018 Nextworks S.r.l.
-# Copyright (C) 2017-2018 imec
-#
-# Sander Vrijders <sander.vrijders@ugent.be>
-# Dimitri Staessens <dimitri.staessens@ugent.be>
-# Vincenzo Maffione <v.maffione@nextworks.it>
-# Marco Capitani <m.capitani@nextworks.it>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., http://www.fsf.org/about/contact/.
-#
-
-import multiprocessing as old_m
-import os
-import subprocess
-import sys
-import time
-from subprocess import CalledProcessError
-
-import rumba.model as mod
-import rumba.log as log
-import rumba.multiprocess as m_processing
-
-from rumba.executors.ssh import SSHExecutor
-from rumba.ssh_support import SSHException
-
-if sys.version_info[0] >= 3:
- from urllib.request import urlretrieve
-else:
- from urllib import urlretrieve
-
-
-logger = log.get_logger(__name__)
-
-USER_HOME = os.path.expanduser("~")
-
-
-class Testbed(mod.Testbed):
- """
- Represents a QEMU testbed.
- """
- def __init__(self, exp_name='foo', bzimage_path=None, initramfs_path=None,
- proj_name="rumba", password="root", username="root",
- public_key_path=os.path.join(USER_HOME, '.ssh', 'id_rsa.pub'),
- use_vhost=True):
- """
- Initializes the testbed class.
-
- :param exp_name: The experiment name.
- :param bzimage_path: Path of the bzimage.
- :param initramfs_path: Path of the initramfs.
- :param proj_name: Project name of the experiment.
- :param password: Password of the user.
- :param username: User of the VM.
- :param public_key_path: path to the public key used to connect via
- ssh to the virtual machines.
- If `None`, password auth (root, root)
- will be used.
- :param use_vhost: Use virtual hosting or not?
-
- .. note:: In case no bzimage or initramfs is provided, Rumba
- will automatically download the latest version available
- from a repository.
- """
- mod.Testbed.__init__(self, exp_name,
- username, password,
- proj_name, system_logs=['/var/log/messages'])
- self.vms = {}
- self.shims = []
- self.vhost = use_vhost
- self.boot_processes = []
- self.bzimage_path = bzimage_path
- self.initramfs_path = initramfs_path
- self.multiproc_manager = None
- self.key_path = public_key_path
-
- self.executor = SSHExecutor(self)
-
- # Prepend sudo to all commands if the user is not 'root'
- def _may_sudo(self, cmds):
- if os.geteuid() != 0:
- for i, cmd in enumerate(cmds):
- cmds[i] = "sudo %s" % cmd
-
- @staticmethod
- def _run_command_chain(commands, results_queue,
- error_queue, ignore_errors=False):
- """
- Runs (sequentially) the command list.
-
- On error, breaks and dumps it in error_queue, and interrupts
- as soon as it is non-empty (unless ignore errors is True).
-
- :type commands: list
- :type results_queue: Queue
- :type error_queue: Queue
- :param commands: list of commands to execute
- :param results_queue: Queue of results of parallel processes
- :param error_queue: Queue of error(s) encountered
- :return: None
- """
- errors = 0
- # results_queue.cancel_join_thread()
- # error_queue.cancel_join_thread()
- for command in commands:
- # if not error_queue.empty() and not ignore_errors:
- # break
- logger.debug('executing >> %s', command)
- try:
- subprocess.check_call(command.split())
- except (subprocess.CalledProcessError, IOError) as e:
- error_queue.put(str(e))
- errors += 1
- if not ignore_errors:
- break
- except KeyboardInterrupt:
- error_queue.put('Interrupted')
- if errors == 0:
- results_queue.put("Command chain ran correctly")
- else:
- results_queue.put("Command chain ran with %d error(s)" % errors)
-
- def _recover_if_names(self, experiment):
- for node in experiment.nodes:
- for ipcp in node.ipcps:
- if isinstance(ipcp, mod.ShimEthIPCP):
- shim_name, node_name = ipcp.name.split('.')
- port_set = [x for x in self.vms[node_name]['ports']
- if x['shim'].name == shim_name]
- port = port_set[0]
- port_id = port['port_id']
- vm_id = self.vms[node_name]['id']
- mac = '00:0a:0a:0a:%02x:%02x' % (vm_id, port_id)
- logger.info('Recovering ifname for port: %s.',
- port['tap_id'])
- output = node.execute_command('mac2ifname ' + mac)
- ipcp.ifname = output
- node.execute_command(
- "ip link set dev %(ifname)s up"
- % {'ifname': ipcp.ifname})
-
- def _swap_in(self, experiment):
- """
- :type experiment mod.Experiment
- :param experiment: The experiment running
- """
-
- if os.geteuid() != 0:
- try:
- subprocess.check_call(["sudo", "-v"])
- if self.vhost and \
- (not os.access("/dev/vhost-net", os.R_OK)
- or not os.access("/dev/vhost-net", os.W_OK)
- or not os.access("/dev/kvm", os.R_OK)
- or not os.access("/dev/kvm", os.W_OK)):
- raise Exception('Cannot open vhost device. Make sure it is '
- 'available and you have rw permissions '
- 'on /dev/vhost-net')
- except subprocess.CalledProcessError:
- raise Exception('Not authenticated')
-
- logger.info("swapping in")
-
- # Download the proper buildroot images, if the user did not specify
- # local images
- url_prefix = "https://bitbucket.org/vmaffione/rina-images/downloads/"
- if not self.bzimage_path:
- bzimage = '%s.prod.bzImage' % (experiment.prototype_name())
- self.bzimage_path = os.path.join(mod.cache_dir, bzimage)
- if not os.path.exists(self.bzimage_path):
- logger.info("Downloading %s" % (url_prefix + bzimage))
- urlretrieve(url_prefix + bzimage,
- filename=self.bzimage_path)
- print("\n")
- if not self.initramfs_path:
- initramfs = '%s.prod.rootfs.cpio' % (experiment.prototype_name())
- self.initramfs_path = os.path.join(mod.cache_dir, initramfs)
- if not os.path.exists(self.initramfs_path):
- logger.info("Downloading %s" % (url_prefix + initramfs))
- urlretrieve(url_prefix + initramfs,
- filename=self.initramfs_path)
- print("\n")
-
- logger.info('Setting up interfaces.')
-
- # Building bridges and taps initialization scripts
- br_tab_scripts = []
- for shim in experiment.dif_ordering:
- if not isinstance(shim, mod.ShimEthDIF):
- # Nothing to do here
- continue
- self.shims.append(shim)
- ipcps = shim.ipcps
- command_list = []
- command_list += ('brctl addbr %(br)s\n'
- 'ip link set dev %(br)s up'
- % {'br': shim.name}
- ).split('\n')
- for node in shim.members: # type:mod.Node
- name = node.name
- vm = self.vms.setdefault(name, {'vm': node, 'ports': []})
- port_id = len(vm['ports']) + 1
- tap_id = '%s.%02x' % (name, port_id)
-
- command_list += ('ip tuntap add mode tap name %(tap)s\n'
- 'ip link set dev %(tap)s up\n'
- 'brctl addif %(br)s %(tap)s'
- % {'tap': tap_id, 'br': shim.name}
- ).split('\n')
-
- # While we're at it, build vm ports table and ipcp table
- vm['ports'].append({'tap_id': tap_id,
- 'shim': shim,
- 'port_id': port_id})
- ipcp_set = [x for x in ipcps if x in node.ipcps]
- if len(ipcp_set) > 1:
- raise Exception("Error: more than one ipcp in common "
- "between shim dif %s and node %s"
- % (shim.name, node.name))
- ipcp = ipcp_set[0] # type: mod.ShimEthIPCP
- assert ipcp.name == '%s.%s' % (shim.name, node.name), \
- 'Incorrect Shim Ipcp found: expected %s.%s, found %s' \
- % (shim.name, node.name, ipcp.name)
- ipcp.ifname = tap_id
- # TODO deal with Ip address (shim UDP DIF).
-
- br_tab_scripts.append(command_list)
- ##
- # End of shim/node parsing block
- ##
- #
- for node in experiment.nodes:
- node.has_tcpdump = True
-
- def executor(list_of_commands):
- for cmd in list_of_commands:
- logger.debug('executing >> %s', cmd)
- subprocess.check_call(cmd.split())
-
- names = []
- args = []
- executors = []
- for i, script in enumerate(br_tab_scripts):
- names.append(i)
- self._may_sudo(script)
- args.append(script)
- executors.append(executor)
-
- m_processing.call_in_parallel(names, args, executors)
-
- logger.info('Interfaces setup complete. '
- 'Building VMs (this might take a while).')
-
- # Building vms
-
- boot_batch_size = max(1, old_m.cpu_count() // 2)
- booting_budget = boot_batch_size
- boot_backoff = 12 # in seconds
- base_port = 2222
- vm_memory = 164 # in megabytes
- vm_frontend = 'virtio-net-pci'
-
- vmid = 1
-
- for node in experiment.nodes:
- name = node.name
- vm = self.vms.setdefault(name, {'vm': node, 'ports': []})
- vm['id'] = vmid
- fwdp = base_port + vmid
- fwdc = fwdp + 10000
- mac = '00:0a:0a:0a:%02x:%02x' % (vmid, 99)
- vm['ssh'] = fwdp
- vm['id'] = vmid
- node.ssh_config.hostname = "localhost"
- node.ssh_config.port = fwdp
- node.ssh_config.username = self.username
- node.ssh_config.password = self.password
- log_file = os.path.join(mod.tmp_dir, name + '.log')
-
- vars_dict = {'fwdp': fwdp, 'id': vmid, 'mac': mac,
- 'bzimage': self.bzimage_path,
- 'initramfs': self.initramfs_path,
- 'fwdc': fwdc,
- 'memory': vm_memory, 'frontend': vm_frontend,
- 'vmname': name,
- 'log_file' : log_file}
-
- host_fwd_str = 'hostfwd=tcp::%(fwdp)s-:22' % vars_dict
- vars_dict['hostfwdstr'] = host_fwd_str
-
- command = 'qemu-system-x86_64 '
- # TODO manage non default images
- command += ('-kernel %(bzimage)s '
- '-append "console=ttyS0" '
- '-initrd %(initramfs)s '
- % vars_dict)
- if os.path.exists('/dev/kvm'):
- command += '--enable-kvm '
- command += ('-vga std '
- '-display none '
- '-smp 1 '
- '-m %(memory)sM '
- '-device %(frontend)s,mac=%(mac)s,netdev=mgmt '
- '-netdev user,id=mgmt,%(hostfwdstr)s '
- '-serial file:%(log_file)s '
- % vars_dict
- )
-
- del vars_dict
-
- for port in vm['ports']:
- tap_id = port['tap_id']
- mac = '00:0a:0a:0a:%02x:%02x' % (vmid, port['port_id'])
- port['mac'] = mac
-
- command += (
- '-device %(frontend)s,mac=%(mac)s,netdev=data%(idx)s '
- '-netdev tap,ifname=%(tap)s,id=data%(idx)s,script=no,'
- 'downscript=no%(vhost)s '
- % {'mac': mac, 'tap': tap_id, 'idx': port['port_id'],
- 'frontend': vm_frontend,
- 'vhost': ',vhost=on' if self.vhost else ''}
- )
-
- booting_budget -= 1
- if booting_budget <= 0:
- logger.debug('Sleeping %s secs waiting '
- 'for the VMs to boot', boot_backoff)
-
- time.sleep(boot_backoff)
- booting_budget = boot_batch_size
-
- with open('%s/qemu_out_%s' % (mod.tmp_dir, vmid), 'w')\
- as out_file:
- logger.debug('executing >> %s', command)
- self.boot_processes.append(subprocess.Popen(command.split(),
- stdout=out_file))
-
- vmid += 1
-
- # Wait for the last batch of VMs to start
- if booting_budget < boot_batch_size:
- tsleep = boot_backoff * (boot_batch_size - booting_budget) / \
- boot_batch_size
- logger.debug('Sleeping %s secs '
- 'waiting for the last VMs to boot',
- tsleep)
- time.sleep(tsleep)
-
- logger.info('All VMs are running. Moving on...')
-
- self._recover_if_names(experiment)
-
- if self.key_path is not None:
- for node in experiment.nodes:
- try:
- node.copy_file(self.key_path, '/root/.ssh')
- node.execute_command(
- 'cd /root/.ssh; '
- 'mv authorized_keys old.authorized_keys; '
- 'cat old.authorized_keys id_rsa.pub '
- '> authorized_keys'
- )
- except SSHException as e:
- logger.warning("Could not install ssh key into node %s. "
- "%s.", node.name, str(e))
- logger.debug("Exception details:", exc_info=e)
-
- logger.info('Experiment has been successfully swapped in.')
-
- def _swap_out(self, experiment):
- """
- :rtype str
- :return: The script to tear down the experiment
- """
- logger.info('Killing qemu processes.')
- # TERM qemu processes
- for process in self.boot_processes:
- process.terminate()
-
- # Wait for them to shut down
- for process in self.boot_processes:
- process.wait()
-
- logger.info('Destroying interfaces.')
-
- names = []
- args = []
- executors = []
-
- def executor(list_of_commands):
- for cmd in list_of_commands:
- logger.debug('executing >> %s', cmd)
- try:
- subprocess.check_call(cmd.split())
- except CalledProcessError as e:
- logger.warning('Error during cleanup: %s', str(e))
-
- index = 0
- for vm_name, vm in self.vms.items():
- for port in vm['ports']:
- tap = port['tap_id']
- shim = port['shim']
-
- commands = []
- commands += ('brctl delif %(br)s %(tap)s\n'
- 'ip link set dev %(tap)s down\n'
- 'ip tuntap del mode tap name %(tap)s'
- % {'tap': tap, 'br': shim.name}
- ).split('\n')
- self._may_sudo(commands)
-
- names.append(index)
- index += 1
- args.append(commands)
- executors.append(executor)
-
- m_processing.call_in_parallel(names, args, executors)
-
- logger.info('Port tear-down complete. Destroying bridges.')
-
- names = []
- args = []
- executors = []
-
- index = 0
-
- for shim in self.shims:
- commands = []
- commands += ('ip link set dev %(br)s down\n'
- 'brctl delbr %(br)s'
- % {'br': shim.name}
- ).split('\n')
- self._may_sudo(commands)
-
- names.append(index)
- index += 1
- args.append(commands)
- executors.append(executor)
-
- m_processing.call_in_parallel(names, args, executors)
-
- logger.info('Experiment has been swapped out.')
diff --git a/rumba/topologies.py b/rumba/topologies.py
new file mode 100644
index 0000000..de0e8fc
--- /dev/null
+++ b/rumba/topologies.py
@@ -0,0 +1,146 @@
+#
+# Topology builder helpers for rumba
+#
+# Copyright (C) 2017-2018 Nextworks S.r.l.
+# Copyright (C) 2017-2018 imec
+#
+# Sander Vrijders <sander.vrijders@ugent.be>
+# Dimitri Staessens <dimitri.staessens@ugent.be>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., http://www.fsf.org/about/contact/.
+#
+
+"""Topology builder functions for common network shapes."""
+
+from rumba.model import Node, UnicastLayer, EthDixLayer
+
+
+def _node_name(names, i, prefix):
+ """Return the node name: from `names` list if given, else prefix + i."""
+ if names is not None:
+ return names[i]
+ return prefix + str(i)
+
+
+def build_chain(num_nodes, layer, prefix="node", shim_prefix="e",
+ names=None):
+ """Build a linear chain of nodes connected by EthDixLayers.
+
+ Args:
+ num_nodes: Number of nodes in the chain.
+ layer: The UnicastLayer all nodes belong to.
+ prefix: Name prefix for nodes (default: "node").
+ shim_prefix: Name prefix for EthDixLayers (default: "e").
+ names: Optional list of node names (overrides prefix).
+
+ Returns:
+ A list of Node objects forming a chain.
+ """
+ nodes = []
+ shim_prev = None
+
+ for i in range(num_nodes):
+ shim = EthDixLayer(shim_prefix + str(i)) if i < num_nodes - 1 else None
+
+ layers = [layer]
+ regs = []
+
+ if shim_prev is not None:
+ layers.append(shim_prev)
+ regs.append(shim_prev)
+ if shim is not None:
+ layers.append(shim)
+ regs.append(shim)
+
+ node = Node(_node_name(names, i, prefix), layers=layers,
+ registrations={layer: regs})
+ nodes.append(node)
+ shim_prev = shim
+
+ return nodes
+
+
+def build_ring(num_nodes, layer, prefix="node", shim_prefix="e",
+ names=None):
+ """Build a ring of nodes connected by EthDixLayers.
+
+ Like build_chain, but the last shim links back to the first node.
+
+ Args:
+ num_nodes: Number of nodes in the ring (should be >= 3; not enforced).
+ layer: The UnicastLayer all nodes belong to.
+ prefix: Name prefix for nodes (default: "node").
+ shim_prefix: Name prefix for EthDixLayers (default: "e").
+ names: Optional list of node names (overrides prefix).
+
+ Returns:
+ A list of Node objects forming a ring.
+ """
+ nodes = []
+ shim_prev = None
+
+ for i in range(num_nodes):
+ shim = EthDixLayer(shim_prefix + str(i))
+
+ layers = [layer]
+ regs = []
+
+ if shim_prev is not None:
+ layers.append(shim_prev)
+ regs.append(shim_prev)
+ layers.append(shim)
+ regs.append(shim)
+
+ node = Node(_node_name(names, i, prefix), layers=layers,
+ registrations={layer: regs})
+ nodes.append(node)
+ shim_prev = shim
+
+ # Close the ring: connect last shim back to first node
+ nodes[0].add_layer(shim_prev)
+ nodes[0].add_registration(layer, shim_prev)
+
+ return nodes
+
+
+def build_star(leaf_names, layer, hub_name="router", shim_prefix="e"):
+ """Build a star topology: leaf nodes connected to a central hub.
+
+ Each leaf is connected to the hub via its own EthDixLayer.
+
+ Args:
+ leaf_names: List of names for the leaf nodes.
+ layer: The UnicastLayer all nodes belong to.
+ hub_name: Name of the central hub node (default: "router").
+ shim_prefix: Name prefix for EthDixLayers (default: "e").
+
+ Returns:
+ A tuple (leaves, hub) where leaves is a list of leaf Nodes
+ and hub is the central Node.
+ """
+ leaves = []
+ shims = []
+
+ for i, name in enumerate(leaf_names):
+ shim = EthDixLayer(shim_prefix + str(i + 1))
+ shims.append(shim)
+ leaf = Node(name, layers=[shim, layer],
+ registrations={layer: [shim]})
+ leaves.append(leaf)
+
+ hub = Node(hub_name, layers=shims + [layer],
+ registrations={layer: shims})
+
+ return leaves, hub
diff --git a/rumba/utils.py b/rumba/utils.py
index 3a3b837..324d3bf 100644
--- a/rumba/utils.py
+++ b/rumba/utils.py
@@ -26,24 +26,19 @@
import enum
import os
+import time
import rumba.log as log
import rumba.model as model
import rumba.testbeds.local as local
# Fix input reordering
-try:
- import builtins # Only in Python 3
+import builtins
- def input(prompt=''):
- log.flush_log()
- return builtins.input(prompt)
-except ImportError: # We are in Python 2
- import __builtin__
- def input(prompt=''):
- log.flush_log()
- return __builtin__.raw_input(prompt)
+def input(prompt=''):
+ log.flush_log()
+ return builtins.input(prompt)
logger = log.get_logger(__name__)
@@ -233,4 +228,64 @@ class ExperimentManager(object):
# Make sure to print all logs before execution terminates,
# Specifically the last two error logs above.
return True
- # Suppress the exception we logged: no traceback, unless logged.
+
+
+# ---------------------------------------------------------------------------
+# Polling utilities
+# ---------------------------------------------------------------------------
+
+def wait_until(predicate, timeout=30, interval=0.5, description="condition"):
+ """
+ Poll *predicate* until it returns a truthy value or *timeout* expires.
+
+ :param predicate: A callable that returns True when the condition is met.
+ :param timeout: Maximum wait time in seconds.
+ :param interval: Polling interval in seconds.
+ :param description: Human-readable description (used in the error message).
+ :returns: The truthy value returned by *predicate*.
+ :raises TimeoutError: If *timeout* expires before *predicate* is truthy.
+ """
+ deadline = time.monotonic() + timeout
+ last_exc = None
+ while time.monotonic() < deadline:
+ try:
+ result = predicate()
+ if result:
+ return result
+ except Exception as e:
+ last_exc = e
+ time.sleep(interval)
+ msg = "Timed out after %ss waiting for %s" % (timeout, description)
+ if last_exc is not None:
+ msg += " (last error: %s: %s)" % (type(last_exc).__name__, last_exc)
+ raise TimeoutError(msg)
+
+
+def wait_for_command(node, command, timeout=30, interval=1.0,
+ description=None):
+ """
+ Poll *command* on *node* until it succeeds (exit code 0).
+
+ :param node: The node to run the command on.
+ :param command: Shell command string.
+ :param timeout: Maximum wait time in seconds.
+ :param interval: Polling interval in seconds.
+ :param description: Human-readable description for error messages.
+ :returns: A CommandResult on success.
+ :raises TimeoutError: If the command does not succeed within *timeout*.
+ """
+ if description is None:
+ description = "'%s' to succeed" % command
+
+ output = [None]
+
+ def _try():
+ try:
+ output[0] = node.execute_command(command, time_out=interval)
+ return True
+ except Exception:
+ return False
+
+ wait_until(_try, timeout=timeout, interval=interval,
+ description=description)
+ return output[0]
diff --git a/rumba/visualizer.py b/rumba/visualizer.py
index a2f35cb..76660c0 100755..100644
--- a/rumba/visualizer.py
+++ b/rumba/visualizer.py
@@ -5,26 +5,46 @@ import plotly.graph_objs as go
from math import sin, cos, pi, sqrt
-__all__ = ['draw_network', 'draw_experiment', 'get_default_test_network']
+__all__ = ['draw_network', 'get_network_from_rumba_experiment', 'get_default_test_network']
import rumba.elements.topology
+from rumba.elements.topology import LayerType
type_to_marker = {
+ 'application': 'square-open',
'eth-dix': 'diamond',
'eth-llc': 'diamond',
'eth-udp': 'diamond',
+ 'local': 'diamond',
'unicast': 'circle',
'broadcast': 'square'
}
+# Map LayerType enum to visualizer type strings
+_layer_type_to_viz = {
+ LayerType.ETH_DIX: 'eth-dix',
+ LayerType.ETH_LLC: 'eth-llc',
+ LayerType.UDP4: 'eth-udp',
+ LayerType.UDP6: 'eth-udp',
+ LayerType.LOCAL: 'local',
+ LayerType.UNICAST: 'unicast',
+ LayerType.BROADCAST: 'broadcast',
+}
+
-def rumba_to_type(_type):
- if _type == rumba.elements.topology.ShimEthIPCP:
+def rumba_to_type(_type_or_layer):
+ """Convert a layer or IPCP class to a visualizer type string."""
+ # Accept a Layer object directly
+ if hasattr(_type_or_layer, 'layer_type'):
+ return _layer_type_to_viz.get(_type_or_layer.layer_type, 'unicast')
+ # Accept IPCP class
+ if _type_or_layer == rumba.elements.topology.EthIPCP:
return 'eth-dix'
- if _type == rumba.elements.topology.ShimUDPIPCP:
+ if _type_or_layer == rumba.elements.topology.UdpIPCP:
return 'eth-udp'
- if _type == rumba.elements.topology.IPCP:
+ if _type_or_layer == rumba.elements.topology.IPCP:
return 'unicast'
+ return 'unicast'
def get_default_test_network():
@@ -66,11 +86,11 @@ def get_default_test_network():
}
-def _get_nodes_in_dif(exp, dif):
+def _get_nodes_in_layer(exp, layer):
nodes = []
n = 0
for node in exp.nodes:
- if dif in [d.name for d in node.difs]:
+ if layer in [l.name for l in node.layers]:
nodes.append(n)
n += 1
@@ -86,45 +106,46 @@ def _get_node_index(exp, node):
return 0
-def _get_network_from_rumba_experiment(exp):
+def get_network_from_rumba_experiment(exp):
print(exp)
print(exp.flows)
_nw = dict()
_nw['layers'] = dict()
_nw['nodes'] = list(range(len(exp.nodes)))
+ _nw['nnames'] = [node.name for node in exp.nodes]
_nw['registrations'] = dict()
for node in exp.nodes:
- for dif in node.difs:
- if dif.name not in _nw['layers']:
- _nw['layers'][dif.name] = dict()
- _nw['layers'][dif.name]['type'] = rumba_to_type(dif.get_ipcp_class())
- _nw['layers'][dif.name]['nodes'] = _get_nodes_in_dif(exp, dif.name)
- _nw['layers'][dif.name]['edges'] = list()
- if _nw['layers'][dif.name]['type'] != 'unicast': # shim
- nodes = _nw['layers'][dif.name]['nodes']
- _nw['layers'][dif.name]['edges'].append((nodes[0], nodes[1]))
- _nw['registrations'][dif.name] = dict()
+ for layer in node.layers:
+ if layer.name not in _nw['layers']:
+ _nw['layers'][layer.name] = dict()
+ _nw['layers'][layer.name]['type'] = rumba_to_type(layer)
+ _nw['layers'][layer.name]['nodes'] = _get_nodes_in_layer(exp, layer.name)
+ _nw['layers'][layer.name]['edges'] = list()
+ if _nw['layers'][layer.name]['type'] != 'unicast': # shim
+ nodes = _nw['layers'][layer.name]['nodes']
+ _nw['layers'][layer.name]['edges'].append((nodes[0], nodes[1]))
+ _nw['registrations'][layer.name] = dict()
for layer in exp.flows:
for flow in layer:
if 'src' in flow and 'dst' in flow:
src = _get_node_index(exp, flow['src'].node)
dst = _get_node_index(exp, flow['dst'].node)
- layer = flow['src'].dif.name
+ layer = flow['src'].layer.name
_nw['layers'][layer]['edges'].append((src, dst))
src_regs = flow['src'].registrations
dst_regs = flow['dst'].registrations
- for dif in src_regs:
- if dif.name not in _nw['registrations'][layer]:
- _nw['registrations'][layer][dif.name] = set()
- _nw['registrations'][layer][dif.name].add(src)
- for dif in dst_regs:
- if dif.name not in _nw['registrations'][layer]:
- _nw['registrations'][layer][dif.name] = set()
- _nw['registrations'][layer][dif.name].add(dst)
+ for reg in src_regs:
+ if reg.name not in _nw['registrations'][layer]:
+ _nw['registrations'][layer][reg.name] = set()
+ _nw['registrations'][layer][reg.name].add(src)
+ for reg in dst_regs:
+ if reg.name not in _nw['registrations'][layer]:
+ _nw['registrations'][layer][reg.name] = set()
+ _nw['registrations'][layer][reg.name].add(dst)
return _nw
@@ -226,7 +247,7 @@ def _create_system_coords(network):
d = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))
_min = d if _min is None else min(d, _min)
- return _min
+ return _min if _min != 0 else 0.1
def _get_ipcps_for_system(network, system):
@@ -297,11 +318,8 @@ def _create_ipcp_graph_data(network):
_create_ipcp_network(network)
_min = _create_system_coords(network)
- print("_min = %s" % _min)
_create_ipcp_coords(network, _min / 5)
- print(network)
-
def _get_ipcp_attributes(network):
coords = list()
@@ -312,7 +330,7 @@ def _get_ipcp_attributes(network):
for ipcp in network['_ipcps']:
coords.append(ipcp['coords'])
colors.append(ipcp['color'])
- labels.append(ipcp['layer'] + ' ' + str(ipcp['node']))
+ labels.append(ipcp['layer'] + ' ' + network['nnames'][ipcp['node']])
markers.append(type_to_marker[ipcp['type']])
return coords, colors, labels, markers
@@ -463,12 +481,6 @@ def draw_network(network, name='Ouroboros network'):
fig.show()
-
-def draw_experiment(exp, name='experiment'):
- _nw = _get_network_from_rumba_experiment(exp)
- draw_network(_nw, name)
-
-
if __name__ == '__main__':
nw = get_default_test_network()
draw_network(nw)
diff --git a/setup.py b/setup.py
index 058a26c..68a7253 100755
--- a/setup.py
+++ b/setup.py
@@ -1,37 +1,53 @@
#!/usr/bin/env python
+import re
import setuptools
-_locals = {}
with open('rumba/_version.py') as fp:
- exec(fp.read(), None, _locals)
-version = _locals['__version__']
+ _version_src = fp.read()
+_match = re.search(r'__version_info__\s*=\s*\((\d+),\s*(\d+),\s*(\d+)\)',
+ _version_src)
+version = '%s.%s.%s' % _match.groups()
+
+with open('README.md') as fp:
+ long_description = fp.read()
setuptools.setup(
name='Rumba',
version=version,
- url='https://gitlab.com/arcfire/rumba',
- keywords='rina measurement testbed',
- author='Sander Vrijders',
- author_email='sander.vrijders@ugent.be',
+ url='https://codeberg.org/o7s/rumba',
+ keywords='ouroboros measurement testbed',
+ author='Sander Vrijders, Dimitri Staessens',
+ author_email='sander@ouroboros.rocks, dimitri@ouroboros.rocks',
license='LGPL',
- description='Rumba measurement framework for RINA',
- packages=[
- 'rumba',
- 'rumba.testbeds',
- 'rumba.prototypes',
- 'rumba.executors',
- 'rumba.elements'
+ description='Rumba measurement framework for Ouroboros',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved '
+ ':: GNU Lesser General Public License v2 or later (LGPLv2+)',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
+ 'Programming Language :: Python :: 3.13',
+ 'Programming Language :: Python :: 3.14',
+ 'Topic :: System :: Networking',
],
+ packages=setuptools.find_packages(exclude=['examples', 'tools', 'doc']),
+ python_requires='>=3.8',
install_requires=[
- 'paramiko',
- 'docker',
- 'repoze.lru; python_version<"3.2"',
- 'contextlib2; python_version<"3.0"',
- 'enum34; python_version<"3.0"'
+ 'paramiko>=2.0,<4',
+ 'docker>=5.0,<8',
],
- extras_require={'NumpyAcceleration': ['numpy'],
- 'graphs': ['pydot'],
- 'visualizer': ['igraph', 'plotly']},
- scripts=['tools/rumba-access']
+ extras_require={
+ 'NumpyAcceleration': ['numpy'],
+ 'graphs': ['pydot'],
+ 'visualizer': ['igraph', 'plotly'],
+ 'pyouroboros': ['PyOuroboros'],
+ },
+ scripts=['tools/rumba-access'],
)
diff --git a/tools/conf-examples/dc-vpns.conf b/tools/conf-examples/dc-vpns.conf
deleted file mode 100644
index 8bfde51..0000000
--- a/tools/conf-examples/dc-vpns.conf
+++ /dev/null
@@ -1,114 +0,0 @@
-eth 110 100Mbps tor1 spine1
-eth 120 100Mbps tor1 spine2
-eth 11 25Mbps s11 tor1
-eth 12 25Mbps s12 tor1
-eth 13 25Mbps s13 tor1
-eth 14 25Mbps s14 tor1
-eth 15 25Mbps s15 tor1
-eth 16 25Mbps s16 tor1
-eth 17 25Mbps s17 tor1
-eth 18 25Mbps s18 tor1
-eth 210 100Mbps tor2 spine1
-eth 220 100Mbps tor2 spine2
-eth 21 25Mbps s21 tor2
-eth 22 25Mbps s22 tor2
-eth 23 25Mbps s23 tor2
-eth 24 25Mbps s24 tor2
-eth 25 25Mbps s25 tor2
-eth 26 25Mbps s26 tor2
-eth 27 25Mbps s27 tor2
-eth 28 25Mbps s28 tor2
-eth 310 100Mbps tor3 spine1
-eth 320 100Mbps tor3 spine2
-eth 31 25Mbps s31 tor3
-eth 32 25Mbps s32 tor3
-eth 33 25Mbps s33 tor3
-eth 34 25Mbps s34 tor3
-eth 35 25Mbps s35 tor3
-eth 36 25Mbps s36 tor3
-eth 37 25Mbps s37 tor3
-eth 38 25Mbps s38 tor3
-eth 410 100Mbps tor4 spine1
-eth 420 100Mbps tor4 spine2
-eth 41 25Mbps s41 tor4
-eth 42 25Mbps s42 tor4
-eth 43 25Mbps s43 tor4
-eth 44 25Mbps s44 tor4
-eth 45 25Mbps s45 tor4
-eth 46 25Mbps s46 tor4
-eth 47 25Mbps s47 tor4
-eth 48 25Mbps s48 tor4
-
-# DIF dcfabric
-dif dcfabric tor1 110 120
-dif dcfabric tor2 210 220
-dif dcfabric tor3 310 320
-dif dcfabric tor4 410 420
-dif dcfabric spine1 110 210 310 410
-dif dcfabric spine2 120 220 320 420
-
-# DIF VPN1
-dif vpn1 s11 11
-dif vpn1 s12 12
-dif vpn1 s13 13
-dif vpn1 s14 14
-dif vpn1 tor1 11 12 13 14 dcfabric
-dif vpn1 s21 21
-dif vpn1 s22 22
-dif vpn1 s23 23
-dif vpn1 s24 24
-dif vpn1 tor2 21 22 23 24 dcfabric
-
-# DIF VPN2
-dif vpn2 s31 31
-dif vpn2 s32 32
-dif vpn2 s33 33
-dif vpn2 s34 34
-dif vpn2 tor3 31 32 33 34 dcfabric
-dif vpn2 s41 41
-dif vpn2 s42 42
-dif vpn2 s43 43
-dif vpn2 s44 44
-dif vpn2 tor4 41 42 43 44 dcfabric
-
-# DIF VPN3
-dif vpn3 s15 15
-dif vpn3 s16 16
-dif vpn3 s17 17
-dif vpn3 s18 18
-dif vpn3 tor1 15 16 17 18 dcfabric
-dif vpn3 s25 25
-dif vpn3 s26 26
-dif vpn3 s27 27
-dif vpn3 s28 28
-dif vpn3 tor2 25 26 27 28 dcfabric
-
-# DIF VPN4
-dif vpn4 s35 35
-dif vpn4 s36 36
-dif vpn4 s37 37
-dif vpn4 s38 38
-dif vpn4 tor3 35 36 37 38 dcfabric
-dif vpn4 s45 45
-dif vpn4 s46 46
-dif vpn4 s47 47
-dif vpn4 s48 48
-dif vpn4 tor4 45 46 47 48 dcfabric
-
-#Policies
-
-#Multipath FABRIC
-#policy dcfabric spine1,spine2 rmt.pff multipath
-#policy dcfabric spine1,spine2 routing link-state routingAlgorithm=ECMPDijkstra
-#policy dcfabric * rmt cas-ps q_max=1000
-#policy dcfabric * efcp.*.dtcp cas-ps
-
-#Application to DIF mappings
-#appmap vpn1 traffic.generator.server 1
-#appmap vpn1 rina.apps.echotime.server 1
-#appmap vpn2 traffic.generator.server 1
-#appmap vpn2 rina.apps.echotime.server 1
-#appmap vpn3 traffic.generator.server 1
-#appmap vpn3 rina.apps.echotime.server 1
-#appmap vpn4 traffic.generator.server 1
-#appmap vpn4 rina.apps.echotime.server 1
diff --git a/tools/conf-examples/geant2-renumber.conf b/tools/conf-examples/geant2-renumber.conf
deleted file mode 100644
index 07c014c..0000000
--- a/tools/conf-examples/geant2-renumber.conf
+++ /dev/null
@@ -1,86 +0,0 @@
-eth 2000 100Mbps lisbon madrid
-eth 2001 100Mbps lisbon london
-eth 2002 100Mbps london dublin
-eth 2003 100Mbps london paris
-eth 2004 100Mbps london brussels
-eth 2005 100Mbps paris madrid
-eth 2006 100Mbps paris luxemburg
-eth 2007 100Mbps paris bern
-eth 2008 100Mbps madrid bern
-eth 2009 100Mbps bern roma
-eth 2010 100Mbps roma madrid
-eth 2011 100Mbps brussels amsterdam
-eth 2012 100Mbps roma valleta
-eth 2013 100Mbps amsterdam valleta
-eth 2014 100Mbps bern berlin
-eth 2015 100Mbps luxemburg berlin
-eth 2016 100Mbps amsterdam berlin
-eth 2017 100Mbps amsterdam copenhagen
-eth 2018 100Mbps berlin copenhagen
-eth 2019 100Mbps copenhagen oslo
-eth 2020 100Mbps oslo stockholm
-eth 2021 100Mbps stockholm copenhagen
-eth 2023 100Mbps copenhagen tallin
-eth 2024 100Mbps tallin riga
-eth 2025 100Mbps riga vilnius
-eth 2026 100Mbps vilnius warsaw
-eth 2027 100Mbps warsaw berlin
-eth 2028 100Mbps warsaw praha
-eth 2029 100Mbps berlin praha
-eth 2030 100Mbps berlin viena
-eth 2031 100Mbps praha viena
-eth 2032 100Mbps viena budapest
-eth 2034 100Mbps viena ljubljana
-eth 2035 100Mbps ljubljana zagreb
-eth 2036 100Mbps zagreb budapest
-eth 2037 100Mbps budapest sofia
-eth 2038 100Mbps viena athens
-eth 2039 100Mbps sofia athens
-eth 2040 100Mbps athens roma
-eth 2041 100Mbps sofia bucharest
-eth 2042 100Mbps bucharest budapest
-eth 2043 100Mbps athens nicosia
-eth 2044 100Mbps roma nicosia
-eth 2045 100Mbps sofia ankara
-eth 2046 100Mbps bucharest ankara
-eth 2047 100Mbps berlin moscow
-eth 2048 100Mbps copenhagen moscow
-eth 2049 100Mbps roma viena
-
-# DIF renumber
-dif renumber lisbon 2000 2001
-dif renumber madrid 2000 2005
-dif renumber london 2001 2002 2003 2004
-dif renumber dublin 2002
-dif renumber paris 2003 2005 2006 2007
-dif renumber brussels 2004 2011
-dif renumber luxemburg 2006 2015
-dif renumber bern 2007 2008 2009 2014
-dif renumber roma 2009 2010 2012 2040 2044 2049
-dif renumber amsterdam 2011 2013 2016 2017
-dif renumber valleta 2012 2013
-dif renumber berlin 2014 2015 2016 2018 2027 2029 2030 2047
-dif renumber copenhagen 2017 2018 2019 2021 2023 2048
-dif renumber oslo 2019 2020
-dif renumber stockholm 2020 2021
-dif renumber tallin 2023 2024
-dif renumber riga 2024 2025
-dif renumber vilnius 2025 2026
-dif renumber warsaw 2026 2027 2028
-dif renumber praha 2028 2029 2031
-dif renumber viena 2030 2031 2032 2034 2038
-dif renumber budapest 2032 2036 2037 2042
-dif renumber athens 2038 2039 2040 2043
-dif renumber ljubljana 2034 2035
-dif renumber zagreb 2035 2036
-dif renumber sofia 2037 2039 2041 2045
-dif renumber bucharest 2041 2042 2046
-dif renumber nicosia 2043 2044
-dif renumber ankara 2045 2046
-dif renumber moscow 2047 2048
-
-#Policies
-
-#address-change
-policy renumber * namespace-manager address-change useNewTimeout=20001 deprecateOldTimeout=80001 changePeriod=120001 addressRange=100
-policy renumber * routing link-state objectMaximumAge=10000 waitUntilReadCDAP=5001 waitUntilError=5001 waitUntilPDUFTComputation=103 waitUntilFSODBPropagation=101 waitUntilAgeIncrement=997 waitUntilDeprecateAddress=20001 routingAlgorithm=Dijkstra
diff --git a/tools/conf-examples/insane-stacking.conf b/tools/conf-examples/insane-stacking.conf
deleted file mode 100644
index 8032fea..0000000
--- a/tools/conf-examples/insane-stacking.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-eth 300 0Mbps a b
-
-# DIF n1 lays over shim DIF 300
-dif n1 a 300
-dif n1 b 300
-
-# n2 lays over n1
-dif n2 a n1
-dif n2 b n1
-
-# n3 lays over n2
-dif n3 a n2
-dif n3 b n2
-
-# n4 lays over n3
-dif n4 a n3
-dif n4 b n3
-
-# n5 lays over n4
-dif n5 a n4
-dif n5 b n4
-
-# n6 lays over n5
-dif n6 a n5
-dif n6 b n5
-
-# n7 lays over n6
-dif n7 a n6
-dif n7 b n6
diff --git a/tools/conf-examples/isp-sec.conf b/tools/conf-examples/isp-sec.conf
deleted file mode 100644
index 33a35a6..0000000
--- a/tools/conf-examples/isp-sec.conf
+++ /dev/null
@@ -1,189 +0,0 @@
-eth 110 0Mbps cpe11 ar1
-eth 120 0Mbps cpe12 ar1
-eth 130 0Mbps cpe13 ar1
-eth 210 0Mbps cpe21 ar2
-eth 220 0Mbps cpe22 ar2
-eth 230 0Mbps cpe23 ar2
-eth 310 0Mbps cpe31 ar3
-eth 320 0Mbps cpe32 ar3
-eth 330 0Mbps cpe33 ar3
-eth 100 0Mbps ar1 manpe1
-eth 200 0Mbps ar2 manpe1
-eth 300 0Mbps ar3 manpe2
-eth 410 0Mbps manpe1 manpe2
-eth 411 0Mbps manpe1 manpe3
-eth 412 0Mbps manpe1 manpe4
-eth 420 0Mbps manpe2 manpe3
-eth 421 0Mbps manpe2 manpe4
-eth 430 0Mbps manpe3 manpe4
-eth 510 0Mbps manpe3 ser1
-eth 520 0Mbps manpe4 ser2
-eth 600 0Mbps ser1 core1
-eth 610 0Mbps ser1 core2
-eth 620 0Mbps ser2 core1
-eth 630 0Mbps ser2 core2
-eth 700 0Mbps core1 core2
-eth 710 0Mbps core1 core3
-eth 720 0Mbps core2 core4
-eth 730 0Mbps core3 core4
-eth 640 0Mbps core3 edge1
-eth 650 0Mbps core4 edge1
-eth 660 0Mbps core3 edge2
-eth 670 0Mbps core4 edge2
-eth 800 0Mbps edge1 isp2
-eth 810 0Mbps edge1 isp3
-eth 820 0Mbps edge2 isp4
-eth 830 0Mbps edge2 isp5
-
-# DIF core
-dif core ser1 600 610
-dif core ser2 620 630
-dif core core1 600 620 700 710
-dif core core2 610 630 700 720
-dif core core3 640 660 710 730
-dif core core4 650 670 720 730
-dif core edge1 640 650
-dif core edge2 660 670
-
-# DIF access
-dif access ar1 100
-dif access ar2 200
-dif access ar3 300
-dif access manpe1 100 200 410 411 412
-dif access manpe2 300 410 420 421
-dif access manpe3 411 420 430 510
-dif access manpe4 412 421 430 520
-dif access ser1 510
-dif access ser2 520
-
-# DIF service
-dif service ar1 access
-dif service ar2 access
-dif service ar3 access
-dif service ser1 access core
-dif service ser2 access core
-dif service edge1 core
-dif service edge2 core
-
-# DIF emall1
-dif emall1 cpe11 110
-dif emall1 cpe12 120
-dif emall1 cpe21 210
-dif emall1 cpe22 220
-dif emall1 cpe31 310
-dif emall1 ar1 110 120 service
-dif emall1 ar2 210 220 service
-dif emall1 ar3 310 service
-dif emall1 edge1 service 800
-dif emall1 edge2 service 820
-dif emall1 isp2 800
-dif emall1 isp4 820
-
-# DIF emall2
-dif emall2 cpe13 130
-dif emall2 cpe23 230
-dif emall2 cpe32 320
-dif emall2 cpe33 330
-dif emall2 ar1 130 service
-dif emall2 ar2 230 service
-dif emall2 ar3 320 330 service
-dif emall2 edge1 service 810
-dif emall2 edge2 service 830
-dif emall2 isp3 810
-dif emall2 isp5 830
-
-#policies
-policy emall1 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds/ssh2 keystorePass=test
-policy emall1 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate
-policy emall1 ar1,ar2,ar3,edge1,edge2 security-manager.auth.service PSOC_authentication-none
-policy emall2 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds/ssh2 keystorePass=test
-policy emall2 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate
-policy emall2 ar1,ar2,ar3,edge1,edge2 security-manager.auth.service PSOC_authentication-none
-
-#Enrollments
-enroll access ar1 manpe1 100
-enroll access ar2 manpe1 200
-enroll access ar3 manpe2 300
-enroll access ser1 manpe3 510
-enroll access ser2 manpe4 520
-enroll access manpe1 manpe2 410
-enroll access manpe1 manpe3 411
-enroll access manpe1 manpe4 412
-enroll access manpe2 manpe3 420
-enroll access manpe2 manpe4 421
-enroll access manpe3 manpe4 430
-
-enroll core core1 core2 700
-enroll core core1 core3 710
-enroll core core2 core4 720
-enroll core core3 core4 730
-enroll core ser1 core1 600
-enroll core ser1 core2 610
-enroll core ser2 core1 620
-enroll core ser2 core2 630
-enroll core edge1 core3 640
-enroll core edge1 core4 650
-enroll core edge2 core3 660
-enroll core edge2 core4 670
-
-enroll service edge1 edge2 core
-enroll service edge1 ser1 core
-enroll service edge1 ser2 core
-enroll service edge2 ser1 core
-enroll service edge2 ser2 core
-enroll service ser1 ser2 core
-enroll service ar1 ser1 access
-enroll service ar1 ser2 access
-enroll service ar2 ser1 access
-enroll service ar2 ser2 access
-enroll service ar3 ser1 access
-enroll service ar3 ser2 access
-
-enroll emall1 cpe11 ar1 110
-enroll emall1 cpe12 ar1 120
-enroll emall1 cpe21 ar2 210
-enroll emall1 cpe22 ar2 220
-enroll emall1 cpe31 ar3 310
-enroll emall1 ar1 edge1 service
-enroll emall1 ar1 edge2 service
-enroll emall1 ar2 edge1 service
-enroll emall1 ar2 edge2 service
-enroll emall1 ar3 edge1 service
-enroll emall1 ar3 edge2 service
-enroll emall1 edge1 edge2 service
-enroll emall1 isp2 edge1 800
-enroll emall1 isp4 edge2 820
-
-enroll emall2 cpe13 ar1 130
-enroll emall2 cpe23 ar2 230
-enroll emall2 cpe32 ar3 320
-enroll emall2 cpe33 ar3 330
-enroll emall2 ar1 edge1 service
-enroll emall2 ar1 edge2 service
-enroll emall2 ar2 edge1 service
-enroll emall2 ar2 edge2 service
-enroll emall2 ar3 edge1 service
-enroll emall2 ar3 edge2 service
-enroll emall2 edge1 edge2 service
-enroll emall2 isp3 edge1 810
-enroll emall2 isp5 edge2 830
-
-#Overlays
-overlay ar1 overlays/ispsec/ar1
-overlay ar2 overlays/ispsec/ar2
-overlay ar3 overlays/ispsec/ar3
-overlay cpe11 overlays/ispsec/cpe11
-overlay cpe12 overlays/ispsec/cpe12
-overlay cpe13 overlays/ispsec/cpe13
-overlay cpe21 overlays/ispsec/cpe21
-overlay cpe22 overlays/ispsec/cpe22
-overlay cpe23 overlays/ispsec/cpe23
-overlay cpe31 overlays/ispsec/cpe31
-overlay cpe32 overlays/ispsec/cpe32
-overlay cpe33 overlays/ispsec/cpe33
-overlay edge1 overlays/ispsec/edge1
-overlay edge2 overlays/ispsec/edge2
-overlay isp2 overlays/ispsec/isp2
-overlay isp3 overlays/ispsec/isp3
-overlay isp4 overlays/ispsec/isp4
-overlay isp5 overlays/ispsec/isp5
diff --git a/tools/conf-examples/resilient-square.conf b/tools/conf-examples/resilient-square.conf
deleted file mode 100644
index 592b6a5..0000000
--- a/tools/conf-examples/resilient-square.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-# a, b and c and d are connected through p2p shim DIFs, in circle.
-# Between a and c there is an additional diagonal link.
-eth 300 100Mbps a b
-eth 400 100Mbps b c
-eth 500 100Mbps c d
-eth 600 1Mbps d a
-eth 700 100Mbps a c
-
-# DIF n1 spans over the p2p shim DIFs
-dif n1 a 300 600 700
-dif n1 b 300 400
-dif n1 c 400 500 700
-dif n1 d 500 600
-
-# Use LFA policy as PDU Forwarding Function
-policy n1 * rmt.pff lfa
diff --git a/tools/conf-examples/secure-two-layers.conf b/tools/conf-examples/secure-two-layers.conf
deleted file mode 100644
index 54c1da6..0000000
--- a/tools/conf-examples/secure-two-layers.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-eth 300 0Mbps a b
-eth 400 0Mbps b c
-eth 500 0Mbps c d
-
-# DIF n1 spans a,b and c and runs over the shims
-dif n1 a 300
-dif n1 b 300 400
-dif n1 c 400
-
-# DIF n2 spans c and d and runs over the shims
-dif n2 c 500
-dif n2 d 500
-
-# DIF n3 spans over n1 and n2
-dif n3 a n1
-dif n3 c n1 n2
-dif n3 d n2
-
-policy n3 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds keystorePass=test
-policy n3 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate
-policy n3 * security-manager.ttl.default default initialValue=50
-policy n3 * security-manager.errorcheck.default CRC32
-policy n3 * security-manager.auth.n1 PSOC_authentication-password password=kf05j.a1234.af0k
-policy n3 * security-manager.ttl.n1 default initialValue=50
-policy n3 * security-manager.errorcheck.n1 CRC32
diff --git a/tools/conf-examples/seven.conf b/tools/conf-examples/seven.conf
deleted file mode 100644
index b25f476..0000000
--- a/tools/conf-examples/seven.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-# This configuration realizes the following seven-nodes topology
-#
-# MA ---- MB ---- MC --- MD --- ME
-# | |
-# MF MG
-#
-
-# 300 is a shim-eth-vlan DIF, with nodes a and b
-eth 300 0Mbps a b
-
-# 400 is a shim-eth-vlan DIF, with nodes b and c
-eth 400 0Mbps b c
-
-# 500 is a shim-eth-vlan DIF, with nodes c and f
-eth 500 0Mbps c f
-
-# 600 is a shim-eth-vlan DIF, with nodes c and d
-eth 600 0Mbps c d
-
-# 700 is a shim-eth-vlan DIF, with nodes d and e
-eth 700 0Mbps d e
-
-# 800 is a shim-eth-vlan DIF, with nodes e and g
-eth 800 0Mbps e g
-
-# DIF n1 spans over the two shim DIFs
-dif n1 a 300
-dif n1 b 300 400
-dif n1 c 400 500 600
-dif n1 d 600 700
-dif n1 e 700 800
-dif n1 f 500
-dif n1 g 800
-
diff --git a/tools/conf-examples/star.conf b/tools/conf-examples/star.conf
deleted file mode 100644
index 8a4f6ab..0000000
--- a/tools/conf-examples/star.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-# a,b and c are in the same L2 domain
-eth 300 0Mbps a b c
-
-# DIF n1 spans over the shim DIF
-dif n1 a 300
-dif n1 b 300
-dif n1 c 300
diff --git a/tools/conf-examples/triangle.conf b/tools/conf-examples/triangle.conf
deleted file mode 100644
index f89811c..0000000
--- a/tools/conf-examples/triangle.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# a, b and c are connected through p2p shim DIFs
-eth 300 10Mbps a b
-eth 400 20Mbps b c
-eth 500 30Mbps a c
-
-# DIF n1 spans over the p2p shim DIFs
-dif n1 a 300 500
-dif n1 b 300 400
-dif n1 c 400 500
diff --git a/tools/conf-examples/tutorial1.conf b/tools/conf-examples/tutorial1.conf
deleted file mode 100644
index 8023687..0000000
--- a/tools/conf-examples/tutorial1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-eth 100 0Mbps system1 system2
-
-dif Normal system1 100
-dif Normal system2 100
diff --git a/tools/conf-examples/tutorial2.conf b/tools/conf-examples/tutorial2.conf
deleted file mode 100644
index b43fc17..0000000
--- a/tools/conf-examples/tutorial2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-eth 100 0Mbps system1 system2
-eth 101 0Mbps system2 system3
-
-dif Normal system1 100
-dif Normal system2 100 101
-dif Normal system3 101
diff --git a/tools/conf-examples/two-layers.conf b/tools/conf-examples/two-layers.conf
deleted file mode 100644
index dc1bab2..0000000
--- a/tools/conf-examples/two-layers.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-eth 300 0Mbps a b
-eth 400 0Mbps b c
-eth 500 0Mbps c d
-
-# DIF n1 spans a,b and c and runs over the shims
-dif n1 a 300
-dif n1 b 300 400
-dif n1 c 400
-
-# DIF n2 spans c and d and runs over the shims
-dif n2 c 500
-dif n2 d 500
-
-# DIF n3 spans over n1 and n2
-dif n3 a n1
-dif n3 c n1 n2
-dif n3 d n2
diff --git a/tools/democonf2rumba.py b/tools/democonf2rumba.py
deleted file mode 100755
index 795cf22..0000000
--- a/tools/democonf2rumba.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import re
-
-import rumba.model as mod
-import rumba.log as log
-
-
-def make_experiment(filename, experiment_class, experiment_kwargs,
- testbed_class, testbed_kwargs, verbosity):
- """
- :type filename str
- :param filename: path to the .conf file
- :param experiment_class: subclass of mod.Experiment
- :param experiment_kwargs: args dict for experiment constructor
- (nodes will be overwritten)
- :param testbed_class: subclass of mod.Testbed
- :param testbed_kwargs: args dict for experiment constructor
- (nodes will be overwritten)
- """
- shims = {}
- nodes = {}
- difs = {}
-
- print('Reading file %s.' % (filename,))
- print('+++++++++++++++++++')
- print()
-
- with open(filename, 'r') as conf:
-
- line_cnt = 0
-
- while 1:
- line = conf.readline()
- if line == '':
- break
- line_cnt += 1
-
- line = line.replace('\n', '').strip()
-
- if line.startswith('#') or line == "":
- continue
-
- m = re.match(r'\s*eth\s+([\w-]+)\s+(\d+)([GMK])bps\s+(\w.*)$', line)
- if m:
- shim = m.group(1)
- speed = int(m.group(2))
- speed_unit = m.group(3).lower()
- vm_list = m.group(4).split()
-
- if shim in shims or shim in difs:
- print('Error: Line %d: shim %s already defined'
- % (line_cnt, shim))
- continue
-
- if speed_unit == 'K':
- speed = speed // 1000
- if speed_unit == 'G':
- speed = speed * 1000
-
- shims[shim] = {'name': shim,
- 'speed': speed,
- 'type': 'eth'}
-
- for vm in vm_list:
- nodes.setdefault(vm, {'name': vm, 'difs': [],
- 'dif_registrations': {},
- 'registrations': {}})
- nodes[vm]['difs'].append(shim)
- continue
-
- m = re.match(r'\s*dif\s+([\w-]+)\s+([\w-]+)\s+(\w.*)$', line)
- if m:
- dif = m.group(1)
- vm = m.group(2)
- dif_list = m.group(3).split()
-
- if dif in shims:
- print('Error: Line %d: dif %s already defined as shim'
- % (line_cnt, dif))
- continue
-
- difs.setdefault(dif, {
- 'name': dif}) # Other dict contents might be policies.
-
- if vm in nodes and dif in nodes[vm]['dif_registrations']:
- print('Error: Line %d: vm %s in dif %s already specified'
- % (line_cnt, vm, dif))
- continue
-
- nodes.setdefault(vm, {'name': vm, 'difs': [],
- 'dif_registrations': {},
- 'registrations': {}})
- nodes[vm]['difs'].append(dif)
- nodes[vm]['dif_registrations'][dif] = dif_list
- # It is not defined yet, per check above.
-
- continue
-
- # No match, spit a warning
- print('Warning: Line %d unrecognized and ignored' % line_cnt)
-
- # File parsed
-
- parsed_difs = {}
-
- for shim_name, shim in shims.items():
- parsed_difs[shim_name] = (mod.ShimEthDIF(
- shim_name,
- link_quality=mod.LinkQuality(rate=shim['speed']))
- )
-
- for dif_name, dif in difs.items():
- parsed_difs[dif_name] = (mod.NormalDIF(dif_name))
-
- parsed_nodes = []
- for node, node_data in nodes.items():
- name = node_data['name']
- difs = [parsed_difs[x] for x in node_data['difs']]
- dif_registrations = {parsed_difs[x]: [parsed_difs[y] for y in l]
- for x, l in node_data['dif_registrations']
- .items()}
- parsed_nodes.append(mod.Node(name, difs, dif_registrations))
-
- log.set_logging_level(verbosity)
- print()
- print('++++++++++++++++++++')
- print('Calling constructor of testbed %s with args %s.'
- % (testbed_class, testbed_kwargs))
- print('++++++++++++++++++++')
- print()
-
- testbed = testbed_class(**testbed_kwargs)
-
- experiment_kwargs['testbed'] = testbed
- experiment_kwargs['nodes'] = parsed_nodes
-
- exp = experiment_class(**experiment_kwargs)
- try:
- exp.swap_in()
- exp.bootstrap_prototype()
- input("Press ENTER to quit")
- finally:
- exp.swap_out()
-
-
-def setup_testbed_common_args(t_p):
-
- t_p.add_argument('-E', '--exp_name', metavar='EXP_NAME', type=str,
- required=True,
- help='Experiment name')
- t_p.add_argument('-U', '--username', metavar='USERNAME', type=str,
- required=True,
- help='Testbed user name')
- t_p.add_argument('-P', '--proj_name', metavar='PROJECT_NAME', type=str,
- help='Project name')
- t_p.add_argument('-W', '--password', metavar='PASSWORD', type=str,
- help='Testbed password')
-
-
-if __name__ == '__main__':
- description = "Demonstrator config file to rumba script converter"
- epilog = "2017 Marco Capitani <m.capitani@nextworks.it>"
-
- parser = argparse.ArgumentParser(description=description,
- epilog=epilog)
-
- parser.add_argument('-P', '--prototype', type=str, required=True,
- choices=['irati', 'ouroboros', 'rlite'],
- help='The kind of prototype plugin to use to run'
- ' the experiment.')
-
- parser.add_argument('-C', '--conf', metavar='CONFIG', type=str,
- required=True,
- help='Path to the config file to parse')
-
- parser.add_argument(
- '--verbosity', metavar='VERBOSITY', type=str,
- default='INFO',
- choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
- help='Rumba output verbosity')
-
- subparsers = parser.add_subparsers(dest='testbed')
- emulab_p = subparsers.add_parser('emulab', help='Use emulab testbed')
- jfed_p = subparsers.add_parser('jfed', help='Use jfed testbed')
- qemu_p = subparsers.add_parser('qemu', help='Use qemu testbed')
- fake_p = subparsers.add_parser('fake', help='Use fake testbed')
-
- for t in [emulab_p, jfed_p, qemu_p, fake_p]:
- setup_testbed_common_args(t)
-
- qemu_p.add_argument('-B', '--bzimage-path', metavar='BZIMAGE', type=str,
- required=True,
- help='path to the bzImage file to use')
- qemu_p.add_argument('-I', '--initramfs-path', metavar='INITRAMFS', type=str,
- required=True,
- help='path to the initramfs file to use')
- qemu_p.add_argument('-V', '--use_vhost', action='store_true',
- default=False, help='Use vhost')
- qemu_p.add_argument('-Q', '--qemu_logs_dir', metavar='QEMU_LOGS', type=str,
- default=None, help='path to the folder for qemu logs')
- qemu_p.add_argument('--public-key-path', metavar='PATH', type=str,
- default=None, help='path to the user ssh public key.')
-
- emulab_p.add_argument('-R', '--url', metavar='URL', type=str,
- default="wall2.ilabt.iminds.be",
- help='Url')
- emulab_p.add_argument('-I', '--image', metavar='IMG', type=str,
- default="UBUNTU14-64-STD",
- help='Ubuntu image')
-
- jfed_p.add_argument('-C', '--cert_file', metavar='CERT', type=str,
- required=True,
- help='Certificate file')
- jfed_p.add_argument('-H', '--exp_hours', metavar='HOURS', type=str,
- default="2", help='Experiment hours')
- jfed_p.add_argument('-A', '--authority', metavar='AUTH', type=str,
- default="wall2.ilabt.iminds.be",
- help='Authority')
- jfed_p.add_argument('-I', '--image', metavar='IMAGE', type=str,
- default=None,
- help='Image to be used')
- jfed_p.add_argument('--image-custom', metavar='I_CUSTOM', type=bool,
- default=False,
- help='Is the image a custom one?')
- jfed_p.add_argument('--image-owner', metavar='I_OWNER', type=str,
- default=None,
- help='Creator of the image')
- jfed_p.add_argument('--use-physical-machines', metavar='USE_PM', type=bool,
- default=None,
- help='Try to allocate physical machines if True')
-
- args = parser.parse_args()
-
- if args.testbed == 'emulab':
- import rumba.testbeds.emulab as emulab
- test_class = emulab.Testbed
- testbed_args = {a.dest: getattr(args, a.dest)
- for a in emulab_p._actions
- if a.dest != 'help'
- and getattr(args, a.dest) is not None}
- elif args.testbed == 'jfed':
- import rumba.testbeds.jfed as jfed
- test_class = jfed.Testbed
- testbed_args = {a.dest: getattr(args, a.dest)
- for a in jfed_p._actions
- if a.dest != 'help'
- and getattr(args, a.dest) is not None}
- elif args.testbed == 'qemu':
- import rumba.testbeds.qemu as qemu
- test_class = qemu.Testbed
- testbed_args = {a.dest: getattr(args, a.dest)
- for a in qemu_p._actions
- if a.dest != 'help'
- and getattr(args, a.dest) is not None}
- elif args.testbed == 'local':
- import rumba.testbeds.local as local
- test_class = local.Testbed
- testbed_args = {a.dest: getattr(args, a.dest)
- for a in fake_p._actions
- if a.dest != 'help'
- and getattr(args, a.dest) is not None}
- else:
- if args.testbed is None:
- print('Testbed type must be specified!')
- print(parser.format_help())
- exit(1)
- raise ValueError('Unexpected testbed: %s.' % args.testbed)
-
- if args.prototype == 'irati':
- import rumba.prototypes.irati as irati
- exp_class = irati.Experiment
- elif args.prototype == 'ouroboros':
- import rumba.prototypes.ouroboros as ouroboros
- exp_class = ouroboros.Experiment
- elif args.prototype == 'rlite':
- import rumba.prototypes.rlite as rlite
- exp_class = rlite.Experiment
- else:
- raise ValueError('Unexpected prototype: %s.' % args.testbed)
-
- try:
- make_experiment(args.conf,
- experiment_class=exp_class,
- experiment_kwargs={},
- testbed_class=test_class,
- testbed_kwargs=testbed_args,
- verbosity=args.verbosity)
-
- except KeyboardInterrupt:
- print("Interrupted. Closing down.")
diff --git a/tools/scriptgenerator.py b/tools/scriptgenerator.py
index cc3e1ea..f986185 100644
--- a/tools/scriptgenerator.py
+++ b/tools/scriptgenerator.py
@@ -6,22 +6,22 @@ import rumba.log as log
from rumba.utils import ExperimentManager, PAUSE_SWAPOUT
client1 = Client(
- "rinaperf",
- options="-t perf -s 1000 -c 0",
- c_id='rinaperf_^C' # To differentiate
+ "operf",
+ options="-s 1000 -d 0",
+ c_id='operf_flood' # To differentiate
)
client2 = Client(
- "rinaperf",
- options="-t perf -s 1000 -D <duration>",
+ "operf",
+ options="-s 1000 -d <duration>",
shutdown="",
- c_id='rinaperf_-D' # To differentiate
+ c_id='operf_timed' # To differentiate
)
server = Server(
- "rinaperf",
+ "operf",
options="-l",
arrival_rate=0.5,
mean_duration=5,