-rw-r--r-- | rumba/model.py                             |  56
-rwxr-xr-x | rumba/prototypes/enroll.py                 | 117
-rw-r--r-- | rumba/prototypes/irati.py                  | 346
-rw-r--r-- | rumba/prototypes/irati_templates.py        | 349
-rw-r--r-- | rumba/ssh_support.py                       | 112
-rw-r--r-- | rumba/testbeds/jfed.py                     |   5
-rw-r--r-- | rumba/testbeds/qemu.py                     | 160
-rw-r--r-- | tools/conf-examples/dc-vpns.conf           | 114
-rw-r--r-- | tools/conf-examples/geant2-renumber.conf   |  86
-rw-r--r-- | tools/conf-examples/insane-stacking.conf   |  29
-rw-r--r-- | tools/conf-examples/isp-sec.conf           | 189
-rw-r--r-- | tools/conf-examples/resilient-square.conf  |  16
-rw-r--r-- | tools/conf-examples/secure-two-layers.conf |  25
-rw-r--r-- | tools/conf-examples/seven.conf             |  34
-rw-r--r-- | tools/conf-examples/star.conf              |   7
-rw-r--r-- | tools/conf-examples/triangle.conf          |   9
-rw-r--r-- | tools/conf-examples/tutorial1.conf         |   4
-rw-r--r-- | tools/conf-examples/tutorial2.conf         |   6
-rw-r--r-- | tools/conf-examples/two-layers.conf        |  17
-rwxr-xr-x | tools/democonf2rumba.py                    |  19
20 files changed, 1595 insertions, 105 deletions
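
The proxied-SSH path ties several of the files below together: rumba/model.py adds a proxycommand field to SSHConfig, rumba/ssh_support.py wraps it in paramiko.ProxyCommand when opening the connection, and rumba/testbeds/jfed.py fills it in with a bastion-host command. A minimal usage sketch follows; only SSHConfig and execute_commands are taken from the diff, while the stand-in credentials object, hostnames and bastion command are illustrative assumptions.

    import rumba.model as mod
    import rumba.ssh_support as ssh

    class _Creds:
        # Stand-in for a real Testbed object; execute_commands() only reads
        # username and password from it (see rumba/ssh_support.py below).
        username = 'root'
        password = 'root'

    # Hostnames and the bastion command are made up for illustration.
    cfg = mod.SSHConfig(
        'node0.example.wall1.ilabt.iminds.be',
        port=22,
        proxycommand='ssh -o StrictHostKeyChecking=no '
                     'someuser@bastion.test.iminds.be nc '
                     'node0.example.wall1.ilabt.iminds.be 22')

    # With proxycommand set, execute_commands() routes the paramiko
    # connection through the ProxyCommand, then runs each command in turn.
    ssh.execute_commands(_Creds(), cfg, ['hostname', 'uname -r'], time_out=None)
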
| diff --git a/rumba/model.py b/rumba/model.py index 613a6be..0d93fcd 100644 --- a/rumba/model.py +++ b/rumba/model.py @@ -20,7 +20,7 @@  # MA  02110-1301  USA  import abc -import re +  # Represents generic testbed info  # @@ -65,7 +65,7 @@ class DIF:          return hash(self.name)      def __eq__(self, other): -        return other != None and self.name == other.name +        return other is not None and self.name == other.name      def __neq__(self, other):          return not self == other @@ -79,6 +79,7 @@ class DIF:      def get_ipcp_class(self):          return IPCP +  # Shim over UDP  #  class ShimUDPDIF(DIF): @@ -88,6 +89,7 @@ class ShimUDPDIF(DIF):      def get_ipcp_class(self):          return ShimUDPIPCP +  # Shim over Ethernet  #  # @link_speed [int] Speed of the Ethernet network, in Mbps @@ -102,6 +104,7 @@ class ShimEthDIF(DIF):      def get_ipcp_class(self):          return ShimEthIPCP +  # Normal DIF  #  # @policies [dict] Policies of the normal DIF @@ -125,12 +128,15 @@ class NormalDIF(DIF):              s += "\n       Component %s has policy %s" % (comp, pol)          return s +  # SSH Configuration  #  class SSHConfig: -    def __init__(self, hostname, port=22): +    def __init__(self, hostname, port=22, proxycommand=None):          self.hostname = hostname          self.port = port +        self.proxycommand = proxycommand +  # A node in the experiment  # @@ -164,7 +170,7 @@ class Node:      def _undeclared_dif(self, dif):          if dif not in self.difs: -            raise Exception("Invalid registration: node %s is not declared "\ +            raise Exception("Invalid registration: node %s is not declared "                              "to be part of DIF %s" % (self.name, dif.name))      def _validate(self): @@ -206,8 +212,8 @@ class Node:          s += " ]\n"          s += "  Bindings: [ " -        s += ", ".join(["'%s' => '%s'"  % (ap, self.bindings[ap]) \ -                                            for ap in self.bindings]) +        s += ", ".join(["'%s' => '%s'" % (ap, self.bindings[ap]) +                       for ap in self.bindings])          s += " ]\n"          return s @@ -216,7 +222,7 @@ class Node:          return hash(self.name)      def __eq__(self, other): -        return other != None and self.name == other.name +        return other is not None and self.name == other.name      def __neq__(self, other):          return not self == other @@ -255,6 +261,7 @@ class Node:          del self.bindings[name]          self._validate() +  # Base class representing an IPC Process to be created in the experiment  #  # @name [string]: IPCP name @@ -277,28 +284,31 @@ class IPCP:                  (self.name, self.dif.name,                   ' '.join([dif.name for dif in self.registrations]),                   ',bootstrapper' if self.dif_bootstrapper else '' -                ) +                 )      def __hash__(self):          return hash((self.name, self.dif.name))      def __eq__(self, other): -        return other != None and self.name == other.name \ +        return other is not None and self.name == other.name \                                  and self.dif == other.dif      def __neq__(self, other):          return not self == other +  class ShimEthIPCP(IPCP):      def __init__(self, name, node, dif, ifname=None):          IPCP.__init__(self, name, node, dif)          self.ifname = ifname +  class ShimUDPIPCP(IPCP):      def __init__(self, name, node, dif):          IPCP.__init__(self, name, node, dif)          # TODO: add IP and port +  # Base class for 
ARCFIRE experiments  #  # @name [string] Name of the experiment @@ -312,7 +322,7 @@ class Experiment:          self.testbed = testbed          self.enrollment_strategy = 'minimal'  # 'full-mesh', 'manual'          self.dif_ordering = [] -        self.enrollments = [] # a list of per-DIF lists of enrollments +        self.enrollments = []  # a list of per-DIF lists of enrollments          # Generate missing information          self.generate() @@ -360,8 +370,8 @@ class Experiment:              difsdeps_inc_cnt[dif] = len(difsdeps_inc[dif])          del difsdeps_inc -        #print(difsdeps_adj) -        #print(difsdeps_inc_cnt) +        # print(difsdeps_adj) +        # print(difsdeps_inc_cnt)          # Run Kahn's algorithm to compute topological          # ordering on the DIFs graph. @@ -380,12 +390,12 @@ class Experiment:                      frontier.add(nxt)              difsdeps_adj[cur] = set() -        circular_set = [dif for dif in difsdeps_inc_cnt \ +        circular_set = [dif for dif in difsdeps_inc_cnt                          if difsdeps_inc_cnt[dif] != 0]          if len(circular_set): -            raise Exception("Fatal error: The specified DIFs topology" \ -                            "has one or more" \ -                            "circular dependencies, involving the following" \ +            raise Exception("Fatal error: The specified DIFs topology" +                            "has one or more" +                            "circular dependencies, involving the following"                              " DIFs: %s" % circular_set)          print("DIF topological ordering: %s" % self.dif_ordering) @@ -406,8 +416,8 @@ class Experiment:              for node in self.nodes:                  if dif in node.dif_registrations: -                    dif_graphs[dif][node] = [] # init for later use -                    if first is None: # pick any node for later use +                    dif_graphs[dif][node] = []  # init for later use +                    if first is None:  # pick any node for later use                          first = node                      for lower_dif in node.dif_registrations[dif]:                          if lower_dif not in neighsets: @@ -468,11 +478,11 @@ class Experiment:          print("Enrollments:")          for el in self.enrollments:              for e in el: -                print("    [%s] %s --> %s through N-1-DIF %s" % \ -                                    (e['dif'], -                                     e['enrollee'].name, -                                     e['enroller'].name, -                                     e['lower_dif'])) +                print("    [%s] %s --> %s through N-1-DIF %s" % +                      (e['dif'], +                       e['enrollee'].name, +                       e['enroller'].name, +                       e['lower_dif']))      def compute_ipcps(self):          # For each node, compute the required IPCP instances, and associated diff --git a/rumba/prototypes/enroll.py b/rumba/prototypes/enroll.py new file mode 100755 index 0000000..458736a --- /dev/null +++ b/rumba/prototypes/enroll.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# +# Author: Vincenzo Maffione <v.maffione@nextworks.it> +# + +import argparse +import socket +import time +import re + +def printalo(byt): +    print(repr(byt).replace('\\n', '\n')) + + +def get_response(s): +    data = bytes() +    while 1: +        data += s.recv(1024) +        lines = str(data).replace('\\n', '\n').split('\n') +        #print(lines) +        if 
lines[-1].find("IPCM") != -1: +            return lines[:len(lines)-1] + + +description = "Python script to enroll IPCPs" +epilog = "2016 Vincenzo Maffione <v.maffione@nextworks.it>" + +argparser = argparse.ArgumentParser(description = description, +                                    epilog = epilog) +argparser.add_argument('--ipcm-conf', help = "Path to the IPCM configuration file", +                       type = str, required = True) +argparser.add_argument('--enrollee-name', help = "Name of the enrolling IPCP", +                       type = str, required = True) +argparser.add_argument('--dif', help = "Name of DIF to enroll to", +                       type = str, required = True) +argparser.add_argument('--lower-dif', help = "Name of the lower level DIF", +                       type = str, required = True) +argparser.add_argument('--enroller-name', help = "Name of the remote neighbor IPCP to enroll to", +                       type = str, required = True) +args = argparser.parse_args() + +socket_name = None + +fin = open(args.ipcm_conf, 'r') +while 1: +    line = fin.readline() +    if line == '': +        break + +    m = re.search(r'"(\S+ipcm-console.sock)', line) +    if m != None: +        socket_name = m.group(1) +        break +fin.close() + +if socket_name == None: +    print('Cannot find %s' % (socket_name)) +    quit(1) + +s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +connected = False +trials = 0 +while trials < 4: +    try: +        s.connect(socket_name) +        connected = True +        break +    except: +        pass +    trials += 1 +    time.sleep(1) + +if connected: +    try: +        # Receive the banner +        get_response(s) + +        # Send the IPCP list command +        cmd = 'list-ipcps\n' +        s.sendall(bytes(cmd, 'ascii')) + +        # Get the list of IPCPs and parse it to look for the enroller ID +        print('Looking up identifier for IPCP %s' % args.enrollee_name) +        lines = get_response(s) +        print(lines) +        enrollee_id = None +        for line in lines: +            rs = r'^\s*(\d+)\s*\|\s*' + args.enrollee_name.replace('.', '\\.') +            m = re.match(rs, line) +            if m != None: +                enrollee_id = m.group(1) + +        if enrollee_id == None: +            print('Could not find the ID of enrollee IPCP %s' \ +                    % args.enrollee_name) +            raise Exception() + +        # Send the enroll command +        cmd = 'enroll-to-dif %s %s %s %s 1\n' \ +                % (enrollee_id, args.dif, args.lower_dif, args.enroller_name) +        print(cmd) + +        s.sendall(bytes(cmd, 'ascii')) + +        # Get the enroll command answer +        lines = get_response(s) +        print(lines) +    except: +        s.close() +        raise + +else: +    print('Failed to connect to "%s"' % socket_name) + +s.close() diff --git a/rumba/prototypes/irati.py b/rumba/prototypes/irati.py index e8766da..1965069 100644 --- a/rumba/prototypes/irati.py +++ b/rumba/prototypes/irati.py @@ -2,6 +2,7 @@  # Commands to setup and instruct IRATI  #  #    Vincenzo Maffione <v.maffione@nextworks.it> +#    Marco Capitani <m.capitani@nextworks.it>  #  # This library is free software; you can redistribute it and/or  # modify it under the terms of the GNU Lesser General Public @@ -17,32 +18,351 @@  # License along with this library; if not, write to the Free Software  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,  # MA  02110-1301  USA +import copy +import json + +import subprocess + +import 
os  import rumba.ssh_support as ssh  import rumba.model as mod +import rumba.prototypes.irati_templates as irati_templates +  # An experiment over the IRATI implementation  class Experiment(mod.Experiment): + +    @staticmethod +    def real_sudo(s): +        return 'sudo ' + s + +    @staticmethod +    def fake_sudo(s): +        return s +      def __init__(self, testbed, nodes=None):          mod.Experiment.__init__(self, testbed, nodes) +        self.manager = False +        self.conf_files = None + +        if self.testbed.username == 'root': +            self.sudo = self.fake_sudo +        else: +            self.sudo = self.real_sudo + +        self._conf_dir = os.path.join(os.getcwd(), 'IRATI_conf') +        try: +            os.mkdir(self._conf_dir) +        except OSError: +            # Already there, nothing to do +            pass + +    def conf_dir(self, path): +        return os.path.join(self._conf_dir, path)      def setup(self): -        cmds = list() +        """Installs IRATI on the vms.""" +        setup_irati = False +        if setup_irati: +            cmds = list() -        cmds.append("sudo apt-get update") -        cmds.append("sudo apt-get install g++ gcc " -                    "protobuf-compiler libprotobuf-dev git --yes") -        cmds.append("sudo rm -rf ~/irati") -        cmds.append("cd && git clone https://github.com/IRATI/stack irati") -        cmds.append("cd ~/irati && sudo ./install-from-scratch") -        cmds.append("sudo nohup ipcm &> ipcm.log &") +            cmds.append("sudo apt-get update") +            cmds.append("sudo apt-get install g++ gcc " +                        "protobuf-compiler libprotobuf-dev git --yes") +            cmds.append("sudo rm -rf ~/irati") +            cmds.append("cd && git clone https://github.com/IRATI/stack irati") +            cmds.append("cd ~/irati && sudo ./install-from-scratch") +            cmds.append("sudo nohup ipcm &> ipcm.log &") +            for node in self.nodes: +                ssh.execute_commands(self.testbed, node.ssh_config, +                                     cmds, time_out=None) + +    def bootstrap_network(self): +        """Creates the network by enrolling and configuring the nodes"""          for node in self.nodes: -            ssh.execute_commands(self.testbed, node.ssh_config, -                                 cmds, time_out=None) +            self.process_node(node) +        self.enroll_nodes()      def run_prototype(self): -        print("[IRATI experiment] start") -        print("Setting up IRATI on the nodes...") +        print("irati: setting up")          self.setup() -        print("[IRATI experiment] end") +        print("irati: software initialized on all nodes") +        self.conf_files = self.write_conf() +        print("irati: configuration files generated for all nodes") +        self.bootstrap_network() +        print("irati: IPCPs created and enrolled on all nodes") + +    def process_node(self, node): +        """ +        Installs the configuration and boots up rina on a node +        :type node: mod.Node +        :param node:  +        :return:  +        """ +        name = node.name +        gen_files_conf = self.conf_files[node] + ['da.map'] +        dir_path = os.path.dirname(os.path.abspath(__file__)) +        gen_files_bin = 'enroll.py' +        gen_files_conf_full = [self.conf_dir(x) for x in gen_files_conf] +        gen_files_bin_full = [os.path.join(dir_path, 'enroll.py')] + +        ipcm_components = ['scripting', 'console'] +        if self.manager: +          
  ipcm_components.append('mad') +        ipcm_components = ', '.join(ipcm_components) + +        gen_files = gen_files_conf_full + gen_files_bin_full + +        format_args = {'name': name, +                       'ssh': node.ssh_config.port, +                       'username': self.testbed.username, +                       'genfiles': gen_files, +                       'genfilesconf': ' '.join(gen_files_conf), +                       'genfilesbin': gen_files_bin, +                       'installpath': '/usr', +                       'verb': 'DBG', +                       'ipcmcomps': ipcm_components} +        try: +            # TODO: watch out for empty path... +            ssh.copy_paths_to_testbed(self.testbed, +                                      node.ssh_config, +                                      gen_files, +                                      '') +        except subprocess.CalledProcessError as e: +            raise Exception(str(e)) + +        # TODO: review ssh opts through ssh support + +        cmds = [self.sudo('hostname %(name)s' % format_args), +                self.sudo('chmod a+rw /dev/irati'), +                self.sudo('mv %(genfilesconf)s /etc' % format_args), +                self.sudo('mv %(genfilesbin)s /usr/bin') % format_args, +                self.sudo('chmod a+x /usr/bin/enroll.py') % format_args] + +        # TODO: is the port up on the VM at this point? + +        cmds += [self.sudo('modprobe rina-default-plugin'), +                 self.sudo('%(installpath)s/bin/ipcm -a \"%(ipcmcomps)s\" ' +                           '-c /etc/%(name)s.ipcm.conf -l %(verb)s &> log &' +                           % format_args)] + +        print('DEBUG: sending node setup via ssh.') +        # print('Credentials:') +        # print(node.ssh_config.hostname, node.ssh_config.port, +        #       self.testbed.username, self.testbed.password) +        ssh.execute_commands(self.testbed, node.ssh_config, cmds) + +    def enroll_nodes(self): +        """Runs the enrollments one by one, respecting dependencies""" +        for enrollment_list in self.enrollments: +            for e in enrollment_list: +                print( +                    'I am going to enroll %s to DIF %s against neighbor %s,' +                    ' through lower DIF %s' +                    % (e['enrollee'], +                       e['dif'].name, +                       e['enroller'], +                       e['lower_dif'].name)) + +                subprocess.check_call('sleep 2'. split())  # Important! 
+ +                e_args = {'ldif': e['lower_dif'].name, +                          'dif': e['dif'].name, +                          'name': e['enrollee'].name, +                          'o_name': e['enroller'].name} + +                cmd = self.sudo('enroll.py --lower-dif %(ldif)s --dif %(dif)s ' +                                '--ipcm-conf /etc/%(name)s.ipcm.conf ' +                                '--enrollee-name %(dif)s.%(name)s.IPCP ' +                                '--enroller-name %(dif)s.%(o_name)s.IPCP' +                                % e_args) +                print('DEBUG: sending enrollment operation via ssh.') +                # print('Credentials:') +                # print(e['enrollee'].ssh_config.hostname, +                #       e['enrollee'].ssh_config.port, +                #       self.testbed.username, self.testbed.password) +                ssh.execute_command(self.testbed, +                                    e['enrollee'].ssh_config, +                                    cmd) + +    def write_conf(self): +        """Write the configuration files""" +        # Constants and initializations +        ipcmconfs = dict() +        difconfs = dict() +        ipcp2shim_map = {} +        node2id_map = {} +        mgmt_dif_name = 'NMS' +        conf_files = {}  # dict of per-nod conf files + +        # TODO: what format are the mappings registered in? Is this ok? +        app_mappings = [] +        for node in self.nodes: +            app_mappings += [{'name': app, 'dif': dif.name} +                             for app in node.registrations +                             for dif in node.registrations[app]] + +        # If some app directives were specified, use those to build da.map. +        # Otherwise, assume the standard applications are to be mapped in +        # the DIF with the highest rank. +        if len(app_mappings) == 0: +            if len(self.dif_ordering) > 0: +                for adm in \ +                        irati_templates.da_map_base["applicationToDIFMappings"]: +                    adm["difName"] = "%s" % (self.dif_ordering[-1],) +        else: +            irati_templates.da_map_base["applicationToDIFMappings"] = [] +            for apm in app_mappings: +                irati_templates.da_map_base["applicationToDIFMappings"]\ +                    .append({"encodedAppName": apm['name'], +                             "difName": "%s" % (apm['dif']) +                             }) + +        # TODO ask: I guess this will need to be added, +        # and in that case we should add it to the qemu plugin too... +        # Where should we take it in input? 
+ +        if self.manager: +            # Add MAD/Manager configuration +            irati_templates.ipcmconf_base["addons"] = { +                "mad": { +                    "managerAppName": "", +                    "NMSDIFs": [{"DIF": "%s" % mgmt_dif_name}], +                    "managerConnections": [{ +                        "managerAppName": "manager-1--", +                        "DIF": "%s" % mgmt_dif_name +                    }] +                } +            } + +        node_number = 1 +        for node in self.nodes:  # type: mod.Node +            node2id_map[node.name] = node_number +            node_number += 1 +            ipcmconfs[node.name] = copy.deepcopy(irati_templates.ipcmconf_base) +            if self.manager: +                ipcmconfs[node.name]["addons"]["mad"]["managerAppName"] \ +                    = "%s.mad-1--" % (node.name,) + +        for dif in self.dif_ordering:  # type: mod.DIF +            if isinstance(dif, mod.ShimEthDIF): +                ipcp2shim_map.update({ipcp.name: dif for ipcp in dif.ipcps}) +            elif isinstance(dif, mod.NormalDIF): +                difconfs[dif.name] = dict() +                for node in dif.members: +                    difconfs[dif.name][node.name] = copy.deepcopy( +                        irati_templates.normal_dif_base +                    ) + +        for node in self.nodes:  # type: mod.Node +            ipcmconf = ipcmconfs[node.name] + +            for ipcp in node.ipcps:  # type: mod.ShimEthIPCP +                if isinstance(ipcp, mod.ShimEthIPCP): +                    shim = ipcp2shim_map[ipcp.name]  # type: mod.ShimEthDIF +                    ipcmconf["ipcProcessesToCreate"].append({ +                        "apName": "eth.%s.IPCP" % ipcp.name, +                        "apInstance": "1", +                        "difName": shim.name +                    }) + +                    template_file_name = self.conf_dir('shimeth.%s.%s.dif' +                                                       % (node.name, shim.name)) +                    ipcmconf["difConfigurations"].append({ +                        "name": shim.name, +                        "template": os.path.basename(template_file_name) +                    }) + +                    fout = open(template_file_name, 'w') +                    fout.write(json.dumps( +                        {"difType": "shim-eth-vlan", +                         "configParameters": { +                             "interface-name": ipcp.ifname +                         } +                         }, +                        indent=4, sort_keys=True)) +                    fout.close() +                    conf_files.setdefault(node, []).append( +                        'shimeth.%s.%s.dif' % (node.name, shim.name)) + +        # Run over dif_ordering array, to make sure each IPCM config has +        # the correct ordering for the ipcProcessesToCreate list of operations. +        # If we iterated over the difs map, the order would be randomic, and so +        # some IPCP registrations in lower DIFs may fail. +        #  This would happen because at the moment of registration, +        #  it may be that the IPCP of the lower DIF has not been created yet. 
+        shims = ipcp2shim_map.values() +        for dif in self.dif_ordering:  # type: mod.NormalDIF + +            if dif in shims: +                # Shims are managed separately, in the previous loop +                continue + +            for node in dif.members:  # type: mod.Node +                node_name = node.name +                ipcmconf = ipcmconfs[node_name] + +                normal_ipcp = {"apName": "%s.%s.IPCP" % (dif.name, node_name), +                               "apInstance": "1", +                               "difName": "%s" % (dif.name,), +                               "difsToRegisterAt": []} + +                for lower_dif in node.dif_registrations[dif]:  # type: mod.DIF +                    normal_ipcp["difsToRegisterAt"].append(lower_dif.name) + +                ipcmconf["ipcProcessesToCreate"].append(normal_ipcp) + +                ipcmconf["difConfigurations"].append({ +                    "name": "%s" % (dif.name,), +                    "template": "normal.%s.%s.dif" % (node_name, dif.name,) +                }) + +                # Fill in the map of IPCP addresses. +                # This could be moved at difconfs +                for other_node in dif.members:  # type: mod.Node +                    difconfs[dif.name][other_node.name] \ +                        ["knownIPCProcessAddresses"].append({ +                         "apName": "%s.%s.IPCP" % (dif.name, node_name), +                         "apInstance": "1", +                         "address": 16 + node2id_map[node_name]}) +                for path, ps in dif.policies.items(): +                    # if policy['nodes'] == [] or vmname in policy['nodes']: +                    # TODO: manage per-node-policies +                    irati_templates.translate_policy( +                        difconfs[dif.name][node_name], path, ps, parms=[]) + +        # Dump the DIF Allocator map +        with open(self.conf_dir('da.map'), 'w') as da_map_file: +            json.dump(irati_templates.da_map_base, +                      da_map_file, +                      indent=4, +                      sort_keys=True) + +        for node in self.nodes: +            # Dump the IPCM configuration files +            with open(self.conf_dir('%s.ipcm.conf' +                                    % (node.name,)), 'w') as node_file: +                json.dump(ipcmconfs[node.name], +                          node_file, +                          indent=4, +                          sort_keys=True) +            conf_files.setdefault(node, []).append( +                '%s.ipcm.conf' % (node.name,)) + +        for dif in self.dif_ordering:  # type: mod.DIF +            dif_conf = difconfs.get(dif.name, None) +            if dif_conf: +                # Dump the normal DIF configuration files +                for node in dif.members: +                    with open(self.conf_dir('normal.%s.%s.dif' +                                            % (node.name, dif.name)), 'w') \ +                            as dif_conf_file: +                        json.dump(dif_conf[node.name], +                                  dif_conf_file, +                                  indent=4, +                                  sort_keys=True) +                    conf_files.setdefault(node, []).append( +                        'normal.%s.%s.dif' % (node.name, dif.name)) +        return conf_files diff --git a/rumba/prototypes/irati_templates.py b/rumba/prototypes/irati_templates.py new file mode 100644 index 0000000..b8d9788 --- /dev/null +++ 
b/rumba/prototypes/irati_templates.py @@ -0,0 +1,349 @@ +# Environment setup for VMs. Standard linux approach +env_dict = {'installpath': '/usr', 'varpath': ''} + +# Template for a IPCM configuration file +ipcmconf_base = { +    "configFileVersion": "1.4.1", +    "localConfiguration": { +        "installationPath": "%(installpath)s/bin" % env_dict, +        "libraryPath": "%(installpath)s/lib" % env_dict, +        "logPath": "%(varpath)s/var/log" % env_dict, +        "consoleSocket": "%(varpath)s/var/run/ipcm-console.sock" % env_dict, +        "pluginsPaths": [ +                "%(installpath)s/lib/rinad/ipcp" % env_dict, +                "/lib/modules/4.1.33-irati/extra" +        ] +        }, + +    "ipcProcessesToCreate": [], +    "difConfigurations": [], +} + + +da_map_base = { +    "applicationToDIFMappings": [ +        { +            "encodedAppName": "rina.apps.echotime.server-1--", +            "difName": "n.DIF" +        }, +        { +            "encodedAppName": "traffic.generator.server-1--", +            "difName": "n.DIF" +        } +    ], +} + + +# Template for a normal DIF configuration file +normal_dif_base = { +    "difType": "normal-ipc", +    "dataTransferConstants": { +        "addressLength": 2, +        "cepIdLength": 2, +        "lengthLength": 2, +        "portIdLength": 2, +        "qosIdLength": 2, +        "rateLength": 4, +        "frameLength": 4, +        "sequenceNumberLength": 4, +        "ctrlSequenceNumberLength": 4, +        "maxPduSize": 10000, +        "maxPduLifetime": 60000 +    }, + +    "qosCubes": [ +        { +            "name": "unreliablewithflowcontrol", +            "id": 1, +            "partialDelivery": False, +            "orderedDelivery": True, +            "efcpPolicies": { +                "dtpPolicySet": { +                    "name": "default", +                    "version": "0" +                }, +                "initialATimer": 0, +                "dtcpPresent": True, +                "dtcpConfiguration": { +                    "dtcpPolicySet": { +                        "name": "default", +                        "version": "0" +                    }, +                    "rtxControl": False, +                    "flowControl": True, +                    "flowControlConfig": { +                        "rateBased": False, +                        "windowBased": True, +                        "windowBasedConfig": { +                            "maxClosedWindowQueueLength": 10, +                            "initialCredit": 200 +                        } +                    } +                } +            } +        }, { +            "name": "reliablewithflowcontrol", +            "id": 2, +            "partialDelivery": False, +            "orderedDelivery": True, +            "maxAllowableGap": 0, +            "efcpPolicies": { +                "dtpPolicySet": { +                    "name": "default", +                    "version": "0" +                }, +                "initialATimer": 0, +                "dtcpPresent": True, +                "dtcpConfiguration": { +                    "dtcpPolicySet": { +                        "name": "default", +                        "version": "0" +                    }, +                    "rtxControl": True, +                    "rtxControlConfig": { +                        "dataRxmsNmax": 5, +                        "initialRtxTime": 1000 +                    }, +                    "flowControl": True, +                    "flowControlConfig": { +                        
"rateBased": False, +                        "windowBased": True, +                        "windowBasedConfig": { +                            "maxClosedWindowQueueLength": 10, +                            "initialCredit": 200 +                        } +                    } +                } +            } +        } +    ], + +    "knownIPCProcessAddresses": [], + +    "addressPrefixes": [ +        { +            "addressPrefix": 0, +            "organization": "N.Bourbaki" +        }, { +            "addressPrefix": 16, +            "organization": "IRATI" +        } +    ], + +    "rmtConfiguration": { +        "pffConfiguration": { +            "policySet": { +                "name": "default", +                "version": "0" +            } +        }, +        "policySet": { +            "name": "default", +            "version": "1" +        } +    }, + +    "enrollmentTaskConfiguration": { +        "policySet": { +            "name": "default", +            "version": "1", +            "parameters": [ +                { +                    "name": "enrollTimeoutInMs", +                    "value": "10000" +                }, { +                    "name": "watchdogPeriodInMs", +                    "value": "30000" +                }, { +                    "name": "declaredDeadIntervalInMs", +                    "value": "120000" +                }, { +                    "name": "neighborsEnrollerPeriodInMs", +                    "value": "0" +                }, { +                    "name": "maxEnrollmentRetries", +                    "value": "0" +                } +            ] +        } +    }, + +    "flowAllocatorConfiguration": { +        "policySet": { +            "name": "default", +            "version": "1" +        } +    }, + +    "namespaceManagerConfiguration": { +        "policySet": { +            "name": "default", +            "version": "1" +        } +    }, + +    "securityManagerConfiguration": { +        "policySet": { +            "name": "default", +            "version": "1" +        } +    }, + +    "resourceAllocatorConfiguration": { +        "pduftgConfiguration": { +            "policySet": { +                "name": "default", +                "version": "0" +            } +        } +    }, + +    "routingConfiguration": { +        "policySet": { +            "name": "link-state", +            "version": "1", +            "parameters": [ +                { +                    "name": "objectMaximumAge", +                    "value": "10000" +                }, { +                    "name": "waitUntilReadCDAP", +                    "value": "5001" +                }, { +                    "name": "waitUntilError", +                    "value": "5001" +                }, { +                    "name": "waitUntilPDUFTComputation", +                    "value": "103" +                }, { +                    "name": "waitUntilFSODBPropagation", +                    "value": "101" +                }, { +                    "name": "waitUntilAgeIncrement", +                    "value": "997" +                }, { +                    "name": "routingAlgorithm", +                    "value": "Dijkstra" +                } +            ] +        } +    } +} + + +def ps_set(d, k, v, parms): +    if k not in d: +        d[k] = {'name': '', 'version': '1'} + +    if d[k]["name"] == v and "parameters" in d[k]: +        cur_names = [p["name"] for p in d[k]["parameters"]] +        for p in parms: +            name, value = p.split('=') +            if 
name in cur_names: +                for i in range(len(d[k]["parameters"])): +                    if d[k]["parameters"][i]["name"] == name: +                        d[k]["parameters"][i]["value"] = value +                        break +            else: +                d[k]["parameters"].append({'name': name, 'value': value}) + +    elif len(parms) > 0: +        d[k]["parameters"] = [ +            {'name': p.split('=')[0], 'value': p.split('=')[1]} +            for p in parms] + +    d[k]["name"] = v + + +def dtp_ps_set(d, v, parms): +    for i in range(len(d["qosCubes"])): +        ps_set(d["qosCubes"][i]["efcpPolicies"], "dtpPolicySet", v, parms) + + +def dtcp_ps_set(d, v, parms): +    for i in range(len(d["qosCubes"])): +        ps_set(d["qosCubes"][i]["efcpPolicies"]["dtcpConfiguration"], +               "dtcpPolicySet", v, parms) + + +policy_translator = { +    'rmt.pff': lambda d, v, p: ps_set(d["rmtConfiguration"]["pffConfiguration"], +                                      "policySet", v, p), +    'rmt': lambda d, v, p: ps_set(d["rmtConfiguration"], "policySet", v, p), +    'enrollment-task': lambda d, v, p: ps_set(d["enrollmentTaskConfiguration"], +                                              "policySet", v, p), +    'flow-allocator': lambda d, v, p: ps_set(d["flowAllocatorConfiguration"], +                                             "policySet", v, p), +    'namespace-manager': lambda d, v, p: ps_set( +        d["namespaceManagerConfiguration"], "policySet", v, p), +    'security-manager': lambda d, v, p: ps_set( +        d["securityManagerConfiguration"], "policySet", v, p), +    'routing': lambda d, v, p: ps_set( +        d["routingConfiguration"], "policySet", v, p), +    'resource-allocator.pduftg': lambda d, v, p: ps_set( +        d["resourceAllocatorConfiguration"], "policySet", v, p), +    'efcp.*.dtcp': None, +    'efcp.*.dtp': None, +} + + +def is_security_path(path): +    sp = path.split('.') +    return (len(sp) == 3) and (sp[0] == 'security-manager') \ +        and (sp[1] in ['auth', 'encrypt', 'ttl', 'errorcheck']) + + +# Do we know this path ? 
+def policy_path_valid(path): +    if path in policy_translator: +        return True + +    # Try to validate security configuration +    if is_security_path(path): +            return True + +    return False + + +def translate_security_path(d, path, ps, parms): +    u1, component, profile = path.split('.') +    if "authSDUProtProfiles" not in d["securityManagerConfiguration"]: +        d["securityManagerConfiguration"]["authSDUProtProfiles"] = {} +    d = d["securityManagerConfiguration"]["authSDUProtProfiles"] + +    tr = {'auth': 'authPolicy', 'encrypt': 'encryptPolicy', +          'ttl': 'TTLPolicy', 'errorcheck': 'ErrorCheckPolicy'} + +    if profile == 'default': +        if profile not in d: +            d["default"] = {} + +        ps_set(d["default"], tr[component], ps, parms) + +    else:  # profile is the name of a DIF +        if "specific" not in d: +            d["specific"] = [] +        j = -1 +        for i in range(len(d["specific"])): +            if d["specific"][i]["underlyingDIF"] == profile + ".DIF": +                j = i +                break + +        if j == -1:  # We need to create an entry for the new DIF +            d["specific"].append({"underlyingDIF": profile + ".DIF"}) + +        ps_set(d["specific"][j], tr[component], ps, parms) + + +def translate_policy(difconf, path, ps, parms): +    if path == 'efcp.*.dtcp': +        dtcp_ps_set(difconf, ps, parms) + +    elif path == 'efcp.*.dtp': +        dtp_ps_set(difconf, ps, parms) + +    elif is_security_path(path): +        translate_security_path(difconf, path, ps, parms) + +    else: +        policy_translator[path](difconf, ps, parms) diff --git a/rumba/ssh_support.py b/rumba/ssh_support.py index 30ada62..0179c5d 100644 --- a/rumba/ssh_support.py +++ b/rumba/ssh_support.py @@ -17,9 +17,10 @@  # License along with this library; if not, write to the Free Software  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,  # MA  02110-1301  USA - +import os  import paramiko +  def get_ssh_client():      ssh_client = paramiko.SSHClient()      ssh_client.load_system_host_keys() @@ -27,6 +28,7 @@ def get_ssh_client():      return ssh_client +  def _print_stream(stream):          o = str(stream.read()).strip('b\'\"\\n')          if o != "": @@ -35,6 +37,7 @@ def _print_stream(stream):                  print(oi)          return o +  def execute_commands(testbed, ssh_config, commands, time_out=3):      '''      Remote execution of a list of shell command on hostname. By @@ -49,10 +52,16 @@ def execute_commands(testbed, ssh_config, commands, time_out=3):      '''      ssh_client = get_ssh_client() +    if ssh_config.proxycommand != None: +        proxy = paramiko.ProxyCommand(ssh_config.proxycommand) +    else: +        proxy = None +      try:          ssh_client.connect(ssh_config.hostname, ssh_config.port,                             testbed.username, testbed.password, -                           look_for_keys=True, timeout=time_out) +                           look_for_keys=True, timeout=time_out, +                           sock=proxy)          o = ""          for command in commands:              print("%s@%s:%s >> %s" % (testbed.username, @@ -71,6 +80,7 @@ def execute_commands(testbed, ssh_config, commands, time_out=3):          print(str(e))          return +  def execute_command(testbed, ssh_config, command, time_out=3):      '''      Remote execution of a list of shell command on hostname. 
By @@ -89,6 +99,7 @@ def execute_command(testbed, ssh_config, command, time_out=3):      if o != None:          return o +  def copy_file_to_testbed(testbed, ssh_config, text, file_name):      '''      Write a string to a given remote file. @@ -101,11 +112,17 @@ def copy_file_to_testbed(testbed, ssh_config, text, file_name):      '''      ssh_client = get_ssh_client() +    if ssh_config.proxycommand != None: +        proxy = paramiko.ProxyCommand(ssh_config.proxycommand) +    else: +        proxy = None +      try:          ssh_client.connect(ssh_config.hostname, ssh_config.port,                             testbed.username,                             testbed.password, -                           look_for_keys=True) +                           look_for_keys=True, +                           sock=proxy)          cmd = "touch " + file_name + \                "; chmod a+rwx " + file_name @@ -125,6 +142,58 @@ def copy_file_to_testbed(testbed, ssh_config, text, file_name):      except Exception as e:          print(str(e)) + +def copy_paths_to_testbed(testbed, ssh_config, paths, destination): +    ''' +    Write a string to a given remote file. +    Overwrite the complete file if it already exists! + +    @param testbed: testbed info +    @param ssh_config: ssh config of the node +    @param paths: source paths (local) as an iterable +    @param destination: destination folder name (remote) +    ''' +    ssh_client = get_ssh_client() + +    if destination is not '' and not destination.endswith('/'): +        destination = destination + '/' + +    try: +        ssh_client.connect(ssh_config.hostname, ssh_config.port, +                           testbed.username, +                           testbed.password, +                           look_for_keys=True) + +        sftp_client = ssh_client.open_sftp() + +        for path in paths: +            file_name = os.path.basename(path) +            dest_file = destination + file_name +            print("Copying %s to %s@%s:%s path %s" % ( +                path, +                testbed.username, +                ssh_config.hostname, +                ssh_config.port, +                dest_file)) +            sftp_client.put(path, dest_file) + +    except Exception as e: +        print(str(e)) + + +def copy_path_to_testbed(testbed, ssh_config, path, destination): +    ''' +    Write a string to a given remote file. +    Overwrite the complete file if it already exists! + +    @param testbed: testbed info +    @param ssh_config: ssh config of the node +    @param path: source path (local) +    @param destination: destination folder name (remote) +    ''' +    copy_paths_to_testbed(testbed, ssh_config, [path], destination) + +  def setup_vlan(testbed, node, vlan_id, int_name):      '''      Gets the interface (ethx) to link mapping @@ -134,22 +203,27 @@ def setup_vlan(testbed, node, vlan_id, int_name):      @param vlan_id: the VLAN id      @param int_name: the name of the interface      ''' +    if testbed.username == 'root': +        def sudo(s): +            return s +    else: +        def sudo(s): +            return 'sudo ' + s +      print("Setting up VLAN on node " + node.name) -    cmds = list() -    cmd = "sudo ip link add link " + \ -          str(int_name) + \ -          " name " + str(int_name) + \ -          "." + str(vlan_id) + \ -          " type vlan id " + str(vlan_id) -    cmds.append(cmd) -    cmd = "sudo ifconfig " + \ -          str(int_name) + "." 
+ \ -          str(vlan_id) + " up" -    cmds.append(cmd) -    cmd = "sudo ethtool -K " + \ -          str(int_name) + " rxvlan off" -    cmds.append(cmd) -    cmd = "sudo ethtool -K " + \ -          str(int_name) + " txvlan off" +    args = {'ifname': str(int_name), 'vlan': str(vlan_id)} + +    cmds = [sudo("ip link set %(ifname)s up" +                 % args), +            sudo("ip link add link %(ifname)s name " +                 "%(ifname)s.%(vlan)s type vlan id %(vlan)s" +                 % args), +            sudo("ifconfig %(ifname)s.%(vlan)s up" +                 % args)] +    # TODO: is ethtool needed? Should install or check if it is present. +    # cmds += [sudo("ethtool -K %(ifname)s rxvlan off" +    #               % args), +    #          sudo("ethtool -K %(ifname)s txvlan off" +    #               % args)]      execute_commands(testbed, node.ssh_config, cmds) diff --git a/rumba/testbeds/jfed.py b/rumba/testbeds/jfed.py index 33d89f8..8ad173b 100644 --- a/rumba/testbeds/jfed.py +++ b/rumba/testbeds/jfed.py @@ -128,6 +128,11 @@ class Testbed(mod.Testbed):              auth_name_r = self.auth_name.replace(".", "-")              node.ssh_config.hostname = node.name + "." + self.exp_name + "." + \                                         auth_name_r + "." + self.auth_name +            node.ssh_config.proxycommand = "ssh -i '" + self.cert_file + \ +                                           "' -o StrictHostKeyChecking=no " + \ +                                           self.username + \ +                                           "@bastion.test.iminds.be nc " + \ +                                           node.ssh_config.hostname + " 22"          subprocess.call(["java", "-jar", self.jfed_jar, "create", "-S", \                           self.proj_name, "--rspec", \ diff --git a/rumba/testbeds/qemu.py b/rumba/testbeds/qemu.py index 3573554..d998625 100644 --- a/rumba/testbeds/qemu.py +++ b/rumba/testbeds/qemu.py @@ -2,6 +2,7 @@  # QEMU testbed for Rumba  #  #    Vincenzo Maffione  <v.maffione@nextworks.it> +#    Marco Capitani <m.capitani@nextworks.it>  #  # This library is free software; you can redistribute it and/or  # modify it under the terms of the GNU Lesser General Public @@ -23,10 +24,12 @@ import subprocess  import os  import rumba.model as mod +from rumba import ssh_support  class Testbed(mod.Testbed): -    def __init__(self, exp_name, username, bzimage, initramfs, proj_name="ARCFIRE", password="", +    def __init__(self, exp_name, bzimage, initramfs, proj_name="ARCFIRE", +                 password="root", username="root",                   use_vhost=True, qemu_logs_dir=None):          mod.Testbed.__init__(self, exp_name, username, password, proj_name)          self.vms = {} @@ -34,15 +37,18 @@ class Testbed(mod.Testbed):          self.bzimage = bzimage          self.initramfs = initramfs          self.vhost = use_vhost -        self.qemu_logs_dir = os.getcwd() if qemu_logs_dir is None else qemu_logs_dir +        self.qemu_logs_dir = os.getcwd() if qemu_logs_dir is None \ +            else qemu_logs_dir          self.boot_processes = []      @staticmethod -    def _run_command_chain(commands, results_queue, error_queue, ignore_errors=False): +    def _run_command_chain(commands, results_queue, +                           error_queue, ignore_errors=False):          """          Runs (sequentially) the command list. -        On error, breaks and dumps it in error_queue, and interrupts as soon as it is non-empty. 
+        On error, breaks and dumps it in error_queue, and interrupts +        as soon as it is non-empty (unless ignore errors is True).          :type commands: list          :type results_queue: Queue @@ -56,7 +62,7 @@ class Testbed(mod.Testbed):          for command in commands:              if not error_queue.empty() and not ignore_errors:                  break -            print('DEBUG: executing >> %s' % command) +            print('qemu: executing >> %s' % command)              try:                  subprocess.check_call(command.split())              except subprocess.CalledProcessError as e: @@ -69,6 +75,37 @@ class Testbed(mod.Testbed):          else:              results_queue.put("Command chain ran with %d errors" % errors) +    def recover_if_names(self, experiment): +        next_vlan = 10 +        assigned_vlan = {} +        for node in experiment.nodes: +            for ipcp in node.ipcps: +                if isinstance(ipcp, mod.ShimEthIPCP): +                    shim_name, node_name = ipcp.name.split('.') +                    port_set = [x for x in self.vms[node_name]['ports'] +                                if x['shim'].name == shim_name] +                    port = port_set[0] +                    port_id = port['port_id'] +                    vm_id = self.vms[node_name]['id'] +                    mac = '00:0a:0a:0a:%02x:%02x' % (vm_id, port_id) +                    print('qemu: recovering ifname for port: ' +                          + port['tap_id'] + '.') +                    output = ssh_support.execute_command( +                        self, +                        node.ssh_config, +                        'mac2ifname ' + mac) +                    ipcp.ifname = output +                    try: +                        vlan = int(port['shim'].name) +                    except ValueError: +                        vlan = assigned_vlan.get(port['shim'].name, None) +                        if vlan is None: +                            vlan = next_vlan +                            next_vlan += 10 +                            assigned_vlan[port['shim'].name] = vlan +                    ssh_support.setup_vlan(self, node, +                                           vlan, ipcp.ifname) +      def swap_in(self, experiment):          """          :type experiment mod.Experiment @@ -80,19 +117,19 @@ class Testbed(mod.Testbed):              except subprocess.CalledProcessError:                  raise Exception('Not authenticated') -        print("[QEMU testbed] swapping in") +        print("qemu: swapping in")          # Building bridges and taps          shim_processes = []          r_queue = multiprocessing.Queue()          e_queue = multiprocessing.Queue() -        print(experiment.dif_ordering)          for shim in experiment.dif_ordering: -            command_list = []              if not isinstance(shim, mod.ShimEthDIF):                  # Nothing to do here                  continue              self.shims.append(shim) +            ipcps = shim.ipcps +            command_list = []              command_list += ('sudo brctl addbr %(br)s\n'                               'sudo ip link set %(br)s up'                               % {'br': shim.name} @@ -113,23 +150,39 @@ class Testbed(mod.Testbed):                      speed = '%dmbit' % shim.link_speed                      # Rate limit the traffic transmitted on the TAP interface -                    command_list += ('sudo tc qdisc add dev %(tap)s handle 1: root ' -                                     'htb default 11\n' -   
                                  'sudo tc class add dev %(tap)s parent 1: classid ' -                                     '1:1 htb rate 10gbit\n' -                                     'sudo tc class add dev %(tap)s parent 1:1 classid ' -                                     '1:11 htb rate %(speed)s' -                                     % {'tap': tap_id, 'speed': speed} -                                     ).split('\n') - -                vm['ports'].append({'tap_id': tap_id, 'shim': shim, 'port_id': port_id}) +                    command_list += ( +                        'sudo tc qdisc add dev %(tap)s handle 1: root ' +                        'htb default 11\n' +                        'sudo tc class add dev %(tap)s parent 1: classid ' +                        '1:1 htb rate 10gbit\n' +                        'sudo tc class add dev %(tap)s parent 1:1 classid ' +                        '1:11 htb rate %(speed)s' +                        % {'tap': tap_id, 'speed': speed} +                    ).split('\n') + +                vm['ports'].append({'tap_id': tap_id, +                                    'shim': shim, +                                    'port_id': port_id}) +                ipcp_set = [x for x in ipcps if x in node.ipcps] +                if len(ipcp_set) > 1: +                    raise Exception("Error: more than one ipcp in common " +                                    "between shim dif %s and node %s" +                                    % (shim.name, node.name)) +                ipcp = ipcp_set[0]  # type: mod.ShimEthIPCP +                assert ipcp.name == '%s.%s' % (shim.name, node.name), \ +                    'Incorrect Shim Ipcp found: expected %s.%s, found %s' \ +                    % (shim.name, node.name, ipcp.name) +                ipcp.ifname = tap_id                  # TODO deal with Ip address (shim UDP DIF).              # Avoid stacking processes if one failed before.              if not e_queue.empty():                  break              # Launch commands asynchronously -            process = multiprocessing.Process(target=self._run_command_chain, args=(command_list, r_queue, e_queue)) +            process = multiprocessing.Process(target=self._run_command_chain, +                                              args=(command_list, +                                                    r_queue, +                                                    e_queue))              shim_processes.append(process)              process.start() @@ -142,14 +195,15 @@ class Testbed(mod.Testbed):              # Check for errors              if not e_queue.empty():                  error_str = str(e_queue.get()) -                print('Testbed instantiation failed: %s' % error_str) +                print('qemu: Testbed instantiation failed: %s' % error_str)                  raise Exception('Failure: %s' % error_str)              try:                  # Check for results                  result = r_queue.get(timeout=1)                  if result == "Command chain ran correctly.":                      over_processes += 1 -                    print('DEBUG: %s of %s processes completed.' % (over_processes, total_processes)) +                    print('qemu: %s of %s processes completed.' 
+                          % (over_processes, total_processes))              except:                  max_waiting_time -= 1 @@ -157,9 +211,9 @@ class Testbed(mod.Testbed):          boot_batch_size = max(1, multiprocessing.cpu_count() // 2)          booting_budget = boot_batch_size -        boot_backoff = 12 # in seconds +        boot_backoff = 12  # in seconds          base_port = 2222 -        vm_memory = 164 # in megabytes +        vm_memory = 164  # in megabytes          vm_frontend = 'virtio-net-pci'          vmid = 1 @@ -167,6 +221,7 @@ class Testbed(mod.Testbed):          for node in experiment.nodes:              name = node.name              vm = self.vms.setdefault(name, {'vm': node, 'ports': []}) +            vm['id'] = vmid              fwdp = base_port + vmid              fwdc = fwdp + 10000              mac = '00:0a:0a:0a:%02x:%02x' % (vmid, 99) @@ -199,7 +254,6 @@ class Testbed(mod.Testbed):                          '-device %(frontend)s,mac=%(mac)s,netdev=mgmt '                          '-netdev user,id=mgmt,%(hostfwdstr)s '                          '-vga std ' -                        '-pidfile rina-%(id)s.pid '                          '-serial file:%(vmname)s.log '                          % vars_dict                          ) @@ -211,23 +265,29 @@ class Testbed(mod.Testbed):                  mac = '00:0a:0a:0a:%02x:%02x' % (vmid, port['port_id'])                  port['mac'] = mac -                command += ('-device %(frontend)s,mac=%(mac)s,netdev=data%(idx)s ' -                            '-netdev tap,ifname=%(tap)s,id=data%(idx)s,script=no,' -                            'downscript=no%(vhost)s ' -                            % {'mac': mac, 'tap': tap_id, 'idx': port['port_id'], -                               'frontend': vm_frontend, 'vhost': ',vhost=on' if self.vhost else ''} -                            ) +                command += ( +                    '-device %(frontend)s,mac=%(mac)s,netdev=data%(idx)s ' +                    '-netdev tap,ifname=%(tap)s,id=data%(idx)s,script=no,' +                    'downscript=no%(vhost)s ' +                    % {'mac': mac, 'tap': tap_id, 'idx': port['port_id'], +                       'frontend': vm_frontend, +                       'vhost': ',vhost=on' if self.vhost else ''} +                )              booting_budget -= 1              if booting_budget <= 0: -                print('Sleeping %s secs waiting for the VMs to boot' % boot_backoff) + +                print('qemu: Sleeping %s secs waiting ' +                      'for the VMs to boot' % boot_backoff) +                  time.sleep(boot_backoff)                  booting_budget = boot_batch_size -            with open('%s/qemu_out_%s' % (self.qemu_logs_dir, vmid), 'w') as out_file: -                print('DEBUG: executing >> %s' % command) -                self.boot_processes.append(subprocess.Popen(command.split(), stdout=out_file)) -                pass +            with open('%s/qemu_out_%s' % (self.qemu_logs_dir, vmid), 'w')\ +                    as out_file: +                print('qemu: executing >> %s' % command) +                self.boot_processes.append(subprocess.Popen(command.split(), +                                                            stdout=out_file))              vmid += 1 @@ -235,9 +295,14 @@ class Testbed(mod.Testbed):          if booting_budget < boot_backoff:              tsleep = boot_backoff * (boot_batch_size - booting_budget) / \                                              boot_batch_size -            print('Sleeping %s secs 
waiting for the last VMs to boot' % tsleep) +            print('qemu: Sleeping %s secs waiting for the last VMs to boot' % tsleep)              time.sleep(tsleep) +        # TODO: to be removed, we should loop in the ssh part +        print('qemu: Sleeping 5 seconds, just to be on the safe side') +        time.sleep(5) + +        self.recover_if_names(experiment)      def swap_out(self, experiment):          """ @@ -267,9 +332,10 @@ class Testbed(mod.Testbed):                               'sudo ip tuntap del mode tap name %(tap)s'                               % {'tap': tap, 'br': shim.name}                               ).split('\n') -                process = multiprocessing.Process(target=self._run_command_chain, -                                                  args=(commands, results_queue, error_queue), -                                                  kwargs={'ignore_errors': True}) +                process = multiprocessing.Process( +                    target=self._run_command_chain, +                    args=(commands, results_queue, error_queue), +                    kwargs={'ignore_errors': True})                  port_processes.append(process)                  process.start() @@ -280,14 +346,16 @@ class Testbed(mod.Testbed):          while max_waiting_time > 0 and over_processes < total_processes:              # Check for errors              if not error_queue.empty(): -                print('Failure while shutting down: %s' % str(error_queue.get())) +                print('qemu:Failure while shutting down: %s'\ +                         % str(error_queue.get()))                  over_processes += 1              try:                  # Check for results                  result = results_queue.get(timeout=1)                  if result == "Command chain ran correctly.":                      over_processes += 1 -                    print('DEBUG: %s of %s tear-down port processes completed.' % (over_processes, total_processes)) +                    print('qemu: %s of %s tear-down port processes completed.' 
+                          % (over_processes, total_processes))              except:                  max_waiting_time -= 1 @@ -302,7 +370,9 @@ class Testbed(mod.Testbed):                           % {'br': shim.name}                           ).split('\n')              process = multiprocessing.Process(target=self._run_command_chain, -                                              args=(commands, results_queue, error_queue), +                                              args=(commands, +                                                    results_queue, +                                                    error_queue),                                                kwargs={'ignore_errors': True})              shim_processes.append(process)              process.start() @@ -314,13 +384,15 @@ class Testbed(mod.Testbed):          while max_waiting_time > 0 and over_processes < total_processes:              # Check for errors              if not error_queue.empty(): -                print('Failure while shutting down: %s' % str(error_queue.get())) +                print('qemu: Failure while shutting down: %s' +                      % str(error_queue.get()))                  over_processes += 1              try:                  # Check for results                  result = results_queue.get(timeout=1)                  if result == "Command chain ran correctly.":                      over_processes += 1 -                    print('DEBUG: %s of %s tear-down shim processes completed.' % (over_processes, total_processes)) +                    print('qemu: %s of %s tear-down shim processes completed.' +                          % (over_processes, total_processes))              except:                  max_waiting_time -= 1 diff --git a/tools/conf-examples/dc-vpns.conf b/tools/conf-examples/dc-vpns.conf new file mode 100644 index 0000000..8bfde51 --- /dev/null +++ b/tools/conf-examples/dc-vpns.conf @@ -0,0 +1,114 @@ +eth 110 100Mbps tor1 spine1 +eth 120 100Mbps tor1 spine2 +eth 11 25Mbps s11 tor1 +eth 12 25Mbps s12 tor1 +eth 13 25Mbps s13 tor1 +eth 14 25Mbps s14 tor1 +eth 15 25Mbps s15 tor1 +eth 16 25Mbps s16 tor1 +eth 17 25Mbps s17 tor1 +eth 18 25Mbps s18 tor1 +eth 210 100Mbps tor2 spine1 +eth 220 100Mbps tor2 spine2 +eth 21 25Mbps s21 tor2 +eth 22 25Mbps s22 tor2 +eth 23 25Mbps s23 tor2 +eth 24 25Mbps s24 tor2 +eth 25 25Mbps s25 tor2 +eth 26 25Mbps s26 tor2 +eth 27 25Mbps s27 tor2 +eth 28 25Mbps s28 tor2 +eth 310 100Mbps tor3 spine1 +eth 320 100Mbps tor3 spine2 +eth 31 25Mbps s31 tor3 +eth 32 25Mbps s32 tor3 +eth 33 25Mbps s33 tor3 +eth 34 25Mbps s34 tor3 +eth 35 25Mbps s35 tor3 +eth 36 25Mbps s36 tor3 +eth 37 25Mbps s37 tor3 +eth 38 25Mbps s38 tor3 +eth 410 100Mbps tor4 spine1 +eth 420 100Mbps tor4 spine2 +eth 41 25Mbps s41 tor4 +eth 42 25Mbps s42 tor4 +eth 43 25Mbps s43 tor4 +eth 44 25Mbps s44 tor4 +eth 45 25Mbps s45 tor4 +eth 46 25Mbps s46 tor4 +eth 47 25Mbps s47 tor4 +eth 48 25Mbps s48 tor4 + +# DIF dcfabric   +dif dcfabric tor1 110 120 +dif dcfabric tor2 210 220 +dif dcfabric tor3 310 320 +dif dcfabric tor4 410 420 +dif dcfabric spine1 110 210 310 410 +dif dcfabric spine2 120 220 320 420 + +# DIF VPN1 +dif vpn1 s11 11 +dif vpn1 s12 12 +dif vpn1 s13 13 +dif vpn1 s14 14 +dif vpn1 tor1 11 12 13 14 dcfabric +dif vpn1 s21 21 +dif vpn1 s22 22 +dif vpn1 s23 23 +dif vpn1 s24 24 +dif vpn1 tor2 21 22 23 24 dcfabric + +# DIF VPN2 +dif vpn2 s31 31 +dif vpn2 s32 32 +dif vpn2 s33 33 +dif vpn2 s34 34 +dif vpn2 tor3 31 32 33 34 dcfabric +dif vpn2 s41 41 +dif vpn2 s42 42 +dif vpn2 s43 43 +dif vpn2 s44 44 +dif 
vpn2 tor4 41 42 43 44 dcfabric + +# DIF VPN3 +dif vpn3 s15 15 +dif vpn3 s16 16 +dif vpn3 s17 17 +dif vpn3 s18 18 +dif vpn3 tor1 15 16 17 18 dcfabric +dif vpn3 s25 25 +dif vpn3 s26 26 +dif vpn3 s27 27 +dif vpn3 s28 28 +dif vpn3 tor2 25 26 27 28  dcfabric + +# DIF VPN4 +dif vpn4 s35 35 +dif vpn4 s36 36 +dif vpn4 s37 37 +dif vpn4 s38 38 +dif vpn4 tor3 35 36 37 38 dcfabric +dif vpn4 s45 45 +dif vpn4 s46 46 +dif vpn4 s47 47 +dif vpn4 s48 48 +dif vpn4 tor4 45 46 47 48 dcfabric + +#Policies + +#Multipath FABRIC +#policy dcfabric spine1,spine2 rmt.pff multipath +#policy dcfabric spine1,spine2 routing link-state routingAlgorithm=ECMPDijkstra +#policy dcfabric * rmt cas-ps q_max=1000 +#policy dcfabric * efcp.*.dtcp cas-ps + +#Application to DIF mappings +#appmap vpn1 traffic.generator.server 1 +#appmap vpn1 rina.apps.echotime.server 1 +#appmap vpn2 traffic.generator.server 1 +#appmap vpn2 rina.apps.echotime.server 1 +#appmap vpn3 traffic.generator.server 1 +#appmap vpn3 rina.apps.echotime.server 1 +#appmap vpn4 traffic.generator.server 1 +#appmap vpn4 rina.apps.echotime.server 1 diff --git a/tools/conf-examples/geant2-renumber.conf b/tools/conf-examples/geant2-renumber.conf new file mode 100644 index 0000000..07c014c --- /dev/null +++ b/tools/conf-examples/geant2-renumber.conf @@ -0,0 +1,86 @@ +eth 2000 100Mbps lisbon madrid +eth 2001 100Mbps lisbon london +eth 2002 100Mbps london dublin +eth 2003 100Mbps london paris +eth 2004 100Mbps london brussels +eth 2005 100Mbps paris madrid +eth 2006 100Mbps paris luxemburg +eth 2007 100Mbps paris bern +eth 2008 100Mbps madrid bern +eth 2009 100Mbps bern roma +eth 2010 100Mbps roma madrid +eth 2011 100Mbps brussels amsterdam +eth 2012 100Mbps roma valleta +eth 2013 100Mbps amsterdam valleta +eth 2014 100Mbps bern berlin +eth 2015 100Mbps luxemburg berlin +eth 2016 100Mbps amsterdam berlin +eth 2017 100Mbps amsterdam copenhagen +eth 2018 100Mbps berlin copenhagen +eth 2019 100Mbps copenhagen oslo +eth 2020 100Mbps oslo stockholm +eth 2021 100Mbps stockholm copenhagen +eth 2023 100Mbps copenhagen tallin +eth 2024 100Mbps tallin riga +eth 2025 100Mbps riga vilnius +eth 2026 100Mbps vilnius warsaw +eth 2027 100Mbps warsaw berlin +eth 2028 100Mbps warsaw praha +eth 2029 100Mbps berlin praha +eth 2030 100Mbps berlin viena +eth 2031 100Mbps praha viena +eth 2032 100Mbps viena budapest +eth 2034 100Mbps viena ljubljana +eth 2035 100Mbps ljubljana zagreb +eth 2036 100Mbps zagreb budapest +eth 2037 100Mbps budapest sofia +eth 2038 100Mbps viena athens +eth 2039 100Mbps sofia athens +eth 2040 100Mbps athens roma +eth 2041 100Mbps sofia bucharest +eth 2042 100Mbps bucharest budapest +eth 2043 100Mbps athens nicosia +eth 2044 100Mbps roma nicosia +eth 2045 100Mbps sofia ankara +eth 2046 100Mbps bucharest ankara +eth 2047 100Mbps berlin moscow +eth 2048 100Mbps copenhagen moscow +eth 2049 100Mbps roma viena + +# DIF renumber   +dif renumber lisbon 2000 2001 +dif renumber madrid 2000 2005 +dif renumber london 2001 2002 2003 2004 +dif renumber dublin 2002 +dif renumber paris 2003 2005 2006 2007 +dif renumber brussels 2004 2011 +dif renumber luxemburg 2006 2015 +dif renumber bern 2007 2008 2009 2014 +dif renumber roma 2009 2010 2012 2040 2044 2049 +dif renumber amsterdam 2011 2013 2016 2017 +dif renumber valleta 2012 2013 +dif renumber berlin 2014 2015 2016 2018 2027 2029 2030 2047 +dif renumber copenhagen 2017 2018 2019 2021 2023 2048 +dif renumber oslo 2019 2020 +dif renumber stockholm 2020 2021 +dif renumber tallin 2023 2024 +dif renumber riga 2024 2025 +dif renumber 
vilnius 2025 2026 +dif renumber warsaw 2026 2027 2028 +dif renumber praha 2028 2029 2031 +dif renumber viena 2030 2031 2032 2034 2038 +dif renumber budapest 2032 2036 2037 2042 +dif renumber athens 2038 2039 2040 2043 +dif renumber ljubljana 2034 2035 +dif renumber zagreb 2035 2036 +dif renumber sofia 2037 2039 2041 2045 +dif renumber bucharest 2041 2042 2046 +dif renumber nicosia 2043 2044 +dif renumber ankara 2045 2046 +dif renumber moscow 2047 2048 + +#Policies + +#address-change +policy renumber * namespace-manager address-change useNewTimeout=20001 deprecateOldTimeout=80001 changePeriod=120001 addressRange=100 +policy renumber * routing link-state objectMaximumAge=10000 waitUntilReadCDAP=5001 waitUntilError=5001 waitUntilPDUFTComputation=103 waitUntilFSODBPropagation=101 waitUntilAgeIncrement=997 waitUntilDeprecateAddress=20001 routingAlgorithm=Dijkstra diff --git a/tools/conf-examples/insane-stacking.conf b/tools/conf-examples/insane-stacking.conf new file mode 100644 index 0000000..8032fea --- /dev/null +++ b/tools/conf-examples/insane-stacking.conf @@ -0,0 +1,29 @@ +eth 300 0Mbps a b + +# DIF n1 lays over shim DIF 300 +dif n1 a 300 +dif n1 b 300 + +# n2 lays over n1 +dif n2 a n1 +dif n2 b n1 + +# n3 lays over n2 +dif n3 a n2 +dif n3 b n2 + +# n4 lays over n3 +dif n4 a n3 +dif n4 b n3 + +# n5 lays over n4 +dif n5 a n4 +dif n5 b n4 + +# n6 lays over n5 +dif n6 a n5 +dif n6 b n5 + +# n7 lays over n6 +dif n7 a n6 +dif n7 b n6 diff --git a/tools/conf-examples/isp-sec.conf b/tools/conf-examples/isp-sec.conf new file mode 100644 index 0000000..33a35a6 --- /dev/null +++ b/tools/conf-examples/isp-sec.conf @@ -0,0 +1,189 @@ +eth 110 0Mbps cpe11 ar1 +eth 120 0Mbps cpe12 ar1 +eth 130 0Mbps cpe13 ar1 +eth 210 0Mbps cpe21 ar2 +eth 220 0Mbps cpe22 ar2 +eth 230 0Mbps cpe23 ar2 +eth 310 0Mbps cpe31 ar3 +eth 320 0Mbps cpe32 ar3 +eth 330 0Mbps cpe33 ar3 +eth 100 0Mbps ar1 manpe1 +eth 200 0Mbps ar2 manpe1 +eth 300 0Mbps ar3 manpe2 +eth 410 0Mbps manpe1 manpe2 +eth 411 0Mbps manpe1 manpe3 +eth 412 0Mbps manpe1 manpe4 +eth 420 0Mbps manpe2 manpe3 +eth 421 0Mbps manpe2 manpe4 +eth 430 0Mbps manpe3 manpe4 +eth 510 0Mbps manpe3 ser1 +eth 520 0Mbps manpe4 ser2 +eth 600 0Mbps ser1 core1 +eth 610 0Mbps ser1 core2 +eth 620 0Mbps ser2 core1 +eth 630 0Mbps ser2 core2 +eth 700 0Mbps core1 core2 +eth 710 0Mbps core1 core3 +eth 720 0Mbps core2 core4 +eth 730 0Mbps core3 core4 +eth 640 0Mbps core3 edge1 +eth 650 0Mbps core4 edge1 +eth 660 0Mbps core3 edge2 +eth 670 0Mbps core4 edge2 +eth 800 0Mbps edge1 isp2 +eth 810 0Mbps edge1 isp3 +eth 820 0Mbps edge2 isp4 +eth 830 0Mbps edge2 isp5 + +# DIF core +dif core ser1 600 610 +dif core ser2 620 630 +dif core core1 600 620 700 710 +dif core core2 610 630 700 720 +dif core core3 640 660 710 730  +dif core core4 650 670 720 730 +dif core edge1 640 650 +dif core edge2 660 670 + +# DIF access +dif access ar1 100 +dif access ar2 200 +dif access ar3 300 +dif access manpe1 100 200 410 411 412 +dif access manpe2 300 410 420 421 +dif access manpe3 411 420 430 510 +dif access manpe4 412 421 430 520 +dif access ser1 510 +dif access ser2 520 + +# DIF service +dif service ar1 access +dif service ar2 access +dif service ar3 access +dif service ser1 access core +dif service ser2 access core +dif service edge1 core +dif service edge2 core + +# DIF emall1 +dif emall1 cpe11 110 +dif emall1 cpe12 120 +dif emall1 cpe21 210 +dif emall1 cpe22 220 +dif emall1 cpe31 310 +dif emall1 ar1 110 120 service +dif emall1 ar2 210 220 service +dif emall1 ar3 310 service +dif emall1 edge1 service 800 +dif 
emall1 edge2 service 820 +dif emall1 isp2 800 +dif emall1 isp4 820 + +# DIF emall2 +dif emall2 cpe13 130 +dif emall2 cpe23 230 +dif emall2 cpe32 320 +dif emall2 cpe33 330 +dif emall2 ar1 130 service +dif emall2 ar2 230 service +dif emall2 ar3 320 330 service +dif emall2 edge1 service 810 +dif emall2 edge2 service 830 +dif emall2 isp3 810 +dif emall2 isp5 830 + +#policies +policy emall1 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds/ssh2 keystorePass=test +policy emall1 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate  +policy emall1 ar1,ar2,ar3,edge1,edge2 security-manager.auth.service PSOC_authentication-none +policy emall2 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds/ssh2 keystorePass=test +policy emall2 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate +policy emall2 ar1,ar2,ar3,edge1,edge2 security-manager.auth.service PSOC_authentication-none + +#Enrollments +enroll access ar1 manpe1 100 +enroll access ar2 manpe1 200 +enroll access ar3 manpe2 300 +enroll access ser1 manpe3 510 +enroll access ser2 manpe4 520 +enroll access manpe1 manpe2 410 +enroll access manpe1 manpe3 411 +enroll access manpe1 manpe4 412 +enroll access manpe2 manpe3 420 +enroll access manpe2 manpe4 421 +enroll access manpe3 manpe4 430 + +enroll core core1 core2 700 +enroll core core1 core3 710 +enroll core core2 core4 720 +enroll core core3 core4 730 +enroll core ser1 core1 600 +enroll core ser1 core2 610 +enroll core ser2 core1 620 +enroll core ser2 core2 630 +enroll core edge1 core3 640 +enroll core edge1 core4 650 +enroll core edge2 core3 660 +enroll core edge2 core4 670 + +enroll service edge1 edge2 core +enroll service edge1 ser1 core +enroll service edge1 ser2 core +enroll service edge2 ser1 core +enroll service edge2 ser2 core +enroll service ser1 ser2 core +enroll service ar1 ser1 access +enroll service ar1 ser2 access +enroll service ar2 ser1 access +enroll service ar2 ser2 access +enroll service ar3 ser1 access +enroll service ar3 ser2 access + +enroll emall1 cpe11 ar1 110 +enroll emall1 cpe12 ar1 120 +enroll emall1 cpe21 ar2 210 +enroll emall1 cpe22 ar2 220 +enroll emall1 cpe31 ar3 310 +enroll emall1 ar1 edge1 service +enroll emall1 ar1 edge2 service +enroll emall1 ar2 edge1 service +enroll emall1 ar2 edge2 service +enroll emall1 ar3 edge1 service +enroll emall1 ar3 edge2 service +enroll emall1 edge1 edge2 service +enroll emall1 isp2 edge1 800 +enroll emall1 isp4 edge2 820 + +enroll emall2 cpe13 ar1 130 +enroll emall2 cpe23 ar2 230 +enroll emall2 cpe32 ar3 320 +enroll emall2 cpe33 ar3 330 +enroll emall2 ar1 edge1 service +enroll emall2 ar1 edge2 service +enroll emall2 ar2 edge1 service +enroll emall2 ar2 edge2 service +enroll emall2 ar3 edge1 service +enroll emall2 ar3 edge2 service +enroll emall2 edge1 edge2 service +enroll emall2 isp3 edge1 810 +enroll emall2 isp5 edge2 830 + +#Overlays +overlay ar1 overlays/ispsec/ar1 +overlay ar2 overlays/ispsec/ar2 +overlay ar3 overlays/ispsec/ar3 +overlay cpe11 overlays/ispsec/cpe11 +overlay cpe12 overlays/ispsec/cpe12 +overlay cpe13 overlays/ispsec/cpe13 +overlay cpe21 overlays/ispsec/cpe21 +overlay cpe22 overlays/ispsec/cpe22 +overlay cpe23 overlays/ispsec/cpe23 +overlay cpe31 overlays/ispsec/cpe31 +overlay cpe32 overlays/ispsec/cpe32 +overlay cpe33 overlays/ispsec/cpe33 +overlay edge1 overlays/ispsec/edge1 +overlay edge2 overlays/ispsec/edge2 +overlay isp2 overlays/ispsec/isp2 +overlay 
isp3 overlays/ispsec/isp3 +overlay isp4 overlays/ispsec/isp4 +overlay isp5 overlays/ispsec/isp5 diff --git a/tools/conf-examples/resilient-square.conf b/tools/conf-examples/resilient-square.conf new file mode 100644 index 0000000..592b6a5 --- /dev/null +++ b/tools/conf-examples/resilient-square.conf @@ -0,0 +1,16 @@ +# a, b and c and d are connected through p2p shim DIFs, in circle. +# Between a and c there is an additional diagonal link. +eth 300 100Mbps a b +eth 400 100Mbps b c +eth 500 100Mbps c d +eth 600 1Mbps   d a +eth 700 100Mbps a c + +# DIF n1 spans over the p2p shim DIFs +dif n1 a 300 600 700 +dif n1 b 300 400 +dif n1 c 400 500 700 +dif n1 d 500 600 + +# Use LFA policy as PDU Forwarding Function +policy n1 * rmt.pff lfa diff --git a/tools/conf-examples/secure-two-layers.conf b/tools/conf-examples/secure-two-layers.conf new file mode 100644 index 0000000..54c1da6 --- /dev/null +++ b/tools/conf-examples/secure-two-layers.conf @@ -0,0 +1,25 @@ +eth 300 0Mbps a b +eth 400 0Mbps b c +eth 500 0Mbps c d + +# DIF n1 spans a,b and c and runs over the shims +dif n1 a 300 +dif n1 b 300 400 +dif n1 c 400 + +# DIF n2 spans c and d and runs over the shims +dif n2 c 500 +dif n2 d 500 + +# DIF n3 spans over n1 and n2 +dif n3 a n1 +dif n3 c n1 n2 +dif n3 d n2 + +policy n3 * security-manager.auth.default PSOC_authentication-ssh2 keyExchangeAlg=EDH keystore=/creds keystorePass=test +policy n3 * security-manager.encrypt.default default encryptAlg=AES128 macAlg=SHA256 compressAlg=deflate +policy n3 * security-manager.ttl.default default initialValue=50 +policy n3 * security-manager.errorcheck.default CRC32 +policy n3 * security-manager.auth.n1 PSOC_authentication-password password=kf05j.a1234.af0k +policy n3 * security-manager.ttl.n1 default initialValue=50 +policy n3 * security-manager.errorcheck.n1 CRC32 diff --git a/tools/conf-examples/seven.conf b/tools/conf-examples/seven.conf new file mode 100644 index 0000000..b25f476 --- /dev/null +++ b/tools/conf-examples/seven.conf @@ -0,0 +1,34 @@ +# This configuration realizes the following seven-nodes topology +# +# MA ---- MB ---- MC --- MD --- ME +#                 |             | +#                 MF            MG +# + +# 300 is a shim-eth-vlan DIF, with nodes a and b +eth 300 0Mbps a b + +# 400 is a shim-eth-vlan DIF, with nodes b and c +eth 400 0Mbps b c + +# 500 is a shim-eth-vlan DIF, with nodes c and f +eth 500 0Mbps c f + +# 600 is a shim-eth-vlan DIF, with nodes c and d +eth 600 0Mbps c d + +# 700 is a shim-eth-vlan DIF, with nodes d and e +eth 700 0Mbps d e + +# 800 is a shim-eth-vlan DIF, with nodes e and g +eth 800 0Mbps e g + +# DIF n1 spans over the two shim DIFs +dif n1 a 300 +dif n1 b 300 400 +dif n1 c 400 500 600 +dif n1 d 600 700 +dif n1 e 700 800 +dif n1 f 500 +dif n1 g 800 + diff --git a/tools/conf-examples/star.conf b/tools/conf-examples/star.conf new file mode 100644 index 0000000..8a4f6ab --- /dev/null +++ b/tools/conf-examples/star.conf @@ -0,0 +1,7 @@ +# a,b and c are in the same L2 domain +eth 300 0Mbps a b c + +# DIF n1 spans over the shim DIF +dif n1 a 300 +dif n1 b 300 +dif n1 c 300 diff --git a/tools/conf-examples/triangle.conf b/tools/conf-examples/triangle.conf new file mode 100644 index 0000000..f89811c --- /dev/null +++ b/tools/conf-examples/triangle.conf @@ -0,0 +1,9 @@ +# a, b and c are connected through p2p shim DIFs +eth 300 10Mbps a b +eth 400 20Mbps b c +eth 500 30Mbps a c + +# DIF n1 spans over the p2p shim DIFs +dif n1 a 300 500 +dif n1 b 300 400 +dif n1 c 400 500 diff --git 
a/tools/conf-examples/tutorial1.conf b/tools/conf-examples/tutorial1.conf new file mode 100644 index 0000000..8023687 --- /dev/null +++ b/tools/conf-examples/tutorial1.conf @@ -0,0 +1,4 @@ +eth 100 0Mbps system1 system2 + +dif Normal system1 100 +dif Normal system2 100 diff --git a/tools/conf-examples/tutorial2.conf b/tools/conf-examples/tutorial2.conf new file mode 100644 index 0000000..b43fc17 --- /dev/null +++ b/tools/conf-examples/tutorial2.conf @@ -0,0 +1,6 @@ +eth 100 0Mbps system1 system2 +eth 101 0Mbps system2 system3 + +dif Normal system1 100 +dif Normal system2 100 101 +dif Normal system3 101 diff --git a/tools/conf-examples/two-layers.conf b/tools/conf-examples/two-layers.conf new file mode 100644 index 0000000..dc1bab2 --- /dev/null +++ b/tools/conf-examples/two-layers.conf @@ -0,0 +1,17 @@ +eth 300 0Mbps a b +eth 400 0Mbps b c +eth 500 0Mbps c d + +# DIF n1 spans a,b and c and runs over the shims +dif n1 a 300 +dif n1 b 300 400 +dif n1 c 400 + +# DIF n2 spans c and d and runs over the shims +dif n2 c 500 +dif n2 d 500 + +# DIF n3 spans over n1 and n2 +dif n3 a n1 +dif n3 c n1 n2 +dif n3 d n2 diff --git a/tools/democonf2rumba.py b/tools/democonf2rumba.py index cda112c..c708e8e 100755 --- a/tools/democonf2rumba.py +++ b/tools/democonf2rumba.py @@ -90,8 +90,7 @@ def make_experiment(filename, experiment_class, experiment_kwargs,                                        'dif_registrations': {},                                        'registrations': {}})                  nodes[vm]['difs'].append(dif) -                nodes[vm]['dif_registrations'] \ -                         [dif] = dif_list +                nodes[vm]['dif_registrations'][dif] = dif_list                  # It is not defined yet, per check above.                  continue @@ -202,22 +201,30 @@ if __name__ == '__main__':          import rumba.testbeds.emulab as emulab          test_class = emulab.Testbed          testbed_args = {a.dest: getattr(args, a.dest) -                        for a in emulab_p._actions if a.dest != 'help'} +                        for a in emulab_p._actions +                        if a.dest != 'help' +                        and getattr(args, a.dest) is not None}      elif args.testbed == 'jfed':          import rumba.testbeds.jfed as jfed          test_class = jfed.Testbed          testbed_args = {a.dest: getattr(args, a.dest) -                        for a in jfed_p._actions if a.dest != 'help'} +                        for a in jfed_p._actions +                        if a.dest != 'help' +                        and getattr(args, a.dest) is not None}      elif args.testbed == 'qemu':          import rumba.testbeds.qemu as qemu          test_class = qemu.Testbed          testbed_args = {a.dest: getattr(args, a.dest) -                        for a in qemu_p._actions if a.dest != 'help'} +                        for a in qemu_p._actions +                        if a.dest != 'help' +                        and getattr(args, a.dest) is not None}      elif args.testbed == 'fake':          import rumba.testbeds.faketestbed as fake          test_class = fake.Testbed          testbed_args = {a.dest: getattr(args, a.dest) -                        for a in fake_p._actions if a.dest != 'help'} +                        for a in fake_p._actions +                        if a.dest != 'help' +                        and getattr(args, a.dest) is not None}      else:          if args.testbed is None:              print('Testbed type must be specified!') | 
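The democonf2rumba.py hunk above changes how the testbed keyword arguments are assembled: argparse options the user did not supply stay at None and are now filtered out, so they no longer override the defaults of the testbed constructor. Below is a minimal, self-contained sketch of that pattern; the FakeTestbed class and the option names are illustrative placeholders, not rumba's real interface.

import argparse


class FakeTestbed:
    # Illustrative stand-in for a rumba testbed class.
    def __init__(self, exp_name, username='root', image_path='buildroot.img'):
        self.exp_name = exp_name
        self.username = username
        self.image_path = image_path


parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', required=True)
parser.add_argument('--username')      # stays None when omitted
parser.add_argument('--image-path')    # stays None when omitted
args = parser.parse_args(['--exp-name', 'demo'])

# Keep only the options that were actually given on the command line,
# so the constructor defaults apply for everything else.
testbed_args = {key: value for key, value in vars(args).items()
                if value is not None}
testbed = FakeTestbed(**testbed_args)
print(testbed.username)   # prints 'root', the constructor default

Without the `is not None` filter, the dictionary would carry username=None and image_path=None, and those explicit None values would silently replace the constructor defaults.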

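The new files under tools/conf-examples appear to follow the demonstrator configuration syntax consumed by democonf2rumba.py: `eth <vlan-id> <speed> <node...>` declares a shim Ethernet DIF over the listed nodes, `dif <name> <node> <lower...>` states, per node, which shim or normal DIFs the named DIF stacks on, and the optional `policy`, `enroll`, `appmap` and `overlay` lines refine it. The sketch below is a hypothetical reader for the two core line types only, written to make that grammar concrete; it is not the parser actually used by democonf2rumba.py.

from collections import defaultdict


def read_conf(path):
    """Collect eth/dif declarations; policy, enroll, appmap and overlay
    lines are ignored in this illustration."""
    shims = {}                # vlan id -> {'speed': ..., 'nodes': [...]}
    difs = defaultdict(dict)  # dif name -> node -> list of lower DIFs
    with open(path) as conf:
        for line in conf:
            tokens = line.split('#', 1)[0].split()  # drop comments, split fields
            if not tokens:
                continue
            if tokens[0] == 'eth':
                vlan, speed, nodes = tokens[1], tokens[2], tokens[3:]
                shims[vlan] = {'speed': speed, 'nodes': nodes}
            elif tokens[0] == 'dif':
                name, node, lowers = tokens[1], tokens[2], tokens[3:]
                difs[name][node] = lowers
    return shims, difs


# Reading tools/conf-examples/two-layers.conf this way would give
# shims['300'] == {'speed': '0Mbps', 'nodes': ['a', 'b']} and
# difs['n3']['c'] == ['n1', 'n2'].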