author     vmaffione <v.maffione@gmail.com>    2017-04-15 07:34:21 +0000
committer  vmaffione <v.maffione@gmail.com>    2017-04-15 07:34:21 +0000
commit     28d6a8729fac3d109e68afed1bbacc27d526048b (patch)
tree       cacdb7db6e44712c0a4ca2dc617afb355bc2c852 /rumba/prototypes
parent     ca1d77df271defb08d5f73b54398491d1049c9f9 (diff)
parent     6eceae4bf7ee823d6eed276935741b7c107f6105 (diff)
Merge branch 'master-marco' into 'master'
IRATI config file generation (and other changes). See merge request !22
Diffstat (limited to 'rumba/prototypes')
-rwxr-xr-x  rumba/prototypes/enroll.py            117
-rw-r--r--  rumba/prototypes/irati.py             343
-rw-r--r--  rumba/prototypes/irati_templates.py   349
3 files changed, 799 insertions, 10 deletions
diff --git a/rumba/prototypes/enroll.py b/rumba/prototypes/enroll.py
new file mode 100755
index 0000000..458736a
--- /dev/null
+++ b/rumba/prototypes/enroll.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+#
+# Author: Vincenzo Maffione <v.maffione@nextworks.it>
+#
+
+import argparse
+import socket
+import time
+import re
+
+def printalo(byt):
+ print(repr(byt).replace('\\n', '\n'))
+
+
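+# Read from the IPCM console socket until the "IPCM" prompt appears,
+# returning the response as a list of lines (prompt excluded).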
+def get_response(s):
+ data = bytes()
+ while 1:
+ data += s.recv(1024)
+ lines = str(data).replace('\\n', '\n').split('\n')
+ #print(lines)
+ if lines[-1].find("IPCM") != -1:
+            return lines[:-1]
+
+
+description = "Python script to enroll IPCPs"
+epilog = "2016 Vincenzo Maffione <v.maffione@nextworks.it>"
+
+argparser = argparse.ArgumentParser(description = description,
+ epilog = epilog)
+argparser.add_argument('--ipcm-conf', help = "Path to the IPCM configuration file",
+ type = str, required = True)
+argparser.add_argument('--enrollee-name', help = "Name of the enrolling IPCP",
+ type = str, required = True)
+argparser.add_argument('--dif', help = "Name of DIF to enroll to",
+ type = str, required = True)
+argparser.add_argument('--lower-dif', help = "Name of the lower level DIF",
+ type = str, required = True)
+argparser.add_argument('--enroller-name', help = "Name of the remote neighbor IPCP to enroll to",
+ type = str, required = True)
+args = argparser.parse_args()
+
+socket_name = None
+
+fin = open(args.ipcm_conf, 'r')
+while 1:
+ line = fin.readline()
+ if line == '':
+ break
+
+ m = re.search(r'"(\S+ipcm-console.sock)', line)
+    if m is not None:
+ socket_name = m.group(1)
+ break
+fin.close()
+
+if socket_name is None:
+    print('Cannot find the IPCM console socket path in %s' % args.ipcm_conf)
+    quit(1)
+
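+# Connect to the IPCM console socket, retrying a few times in case the
+# socket is not immediately available.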
+s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+connected = False
+trials = 0
+while trials < 4:
+ try:
+ s.connect(socket_name)
+ connected = True
+ break
+ except:
+ pass
+ trials += 1
+ time.sleep(1)
+
+if connected:
+ try:
+ # Receive the banner
+ get_response(s)
+
+ # Send the IPCP list command
+ cmd = 'list-ipcps\n'
+ s.sendall(bytes(cmd, 'ascii'))
+
+ # Get the list of IPCPs and parse it to look for the enroller ID
+ print('Looking up identifier for IPCP %s' % args.enrollee_name)
+ lines = get_response(s)
+ print(lines)
+ enrollee_id = None
+ for line in lines:
+ rs = r'^\s*(\d+)\s*\|\s*' + args.enrollee_name.replace('.', '\\.')
+ m = re.match(rs, line)
+ if m != None:
+ enrollee_id = m.group(1)
+
+ if enrollee_id == None:
+ print('Could not find the ID of enrollee IPCP %s' \
+ % args.enrollee_name)
+ raise Exception()
+
+ # Send the enroll command
+ cmd = 'enroll-to-dif %s %s %s %s 1\n' \
+ % (enrollee_id, args.dif, args.lower_dif, args.enroller_name)
+ print(cmd)
+
+ s.sendall(bytes(cmd, 'ascii'))
+
+ # Get the enroll command answer
+ lines = get_response(s)
+ print(lines)
+ except:
+ s.close()
+ raise
+
+else:
+ print('Failed to connect to "%s"' % socket_name)
+
+s.close()
diff --git a/rumba/prototypes/irati.py b/rumba/prototypes/irati.py
index e8766da..89c4fe4 100644
--- a/rumba/prototypes/irati.py
+++ b/rumba/prototypes/irati.py
@@ -2,6 +2,7 @@
# Commands to setup and instruct IRATI
#
# Vincenzo Maffione <v.maffione@nextworks.it>
+# Marco Capitani <m.capitani@nextworks.it>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -17,32 +18,354 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
+import copy
+import json
+
+import subprocess
+
+import os
import rumba.ssh_support as ssh
import rumba.model as mod
+import rumba.prototypes.irati_templates as irati_templates
+
# An experiment over the IRATI implementation
class Experiment(mod.Experiment):
+
+ @staticmethod
+ def real_sudo(s):
+ return 'sudo ' + s
+
+ @staticmethod
+ def fake_sudo(s):
+ return s
+
def __init__(self, testbed, nodes=None):
mod.Experiment.__init__(self, testbed, nodes)
+ self.manager = False
+ self.conf_files = None
+
+ if self.testbed.username == 'root':
+ self.sudo = self.fake_sudo
+ else:
+ self.sudo = self.real_sudo
+
+ self._conf_dir = os.path.join(os.getcwd(), 'IRATI_conf')
+ try:
+ os.mkdir(self._conf_dir)
+ except OSError:
+ # Already there, nothing to do
+ pass
+
+ def conf_dir(self, path):
+ return os.path.join(self._conf_dir, path)
def setup(self):
- cmds = list()
+        """Installs IRATI on the VMs (currently disabled by the setup_irati flag)."""
+ setup_irati = False
+ if setup_irati:
+ cmds = list()
- cmds.append("sudo apt-get update")
- cmds.append("sudo apt-get install g++ gcc "
- "protobuf-compiler libprotobuf-dev git --yes")
- cmds.append("sudo rm -rf ~/irati")
- cmds.append("cd && git clone https://github.com/IRATI/stack irati")
- cmds.append("cd ~/irati && sudo ./install-from-scratch")
- cmds.append("sudo nohup ipcm &> ipcm.log &")
+ cmds.append("sudo apt-get update")
+ cmds.append("sudo apt-get install g++ gcc "
+ "protobuf-compiler libprotobuf-dev git --yes")
+ cmds.append("sudo rm -rf ~/irati")
+ cmds.append("cd && git clone https://github.com/IRATI/stack irati")
+ cmds.append("cd ~/irati && sudo ./install-from-scratch")
+ cmds.append("sudo nohup ipcm &> ipcm.log &")
+ for node in self.nodes:
+ ssh.execute_commands(self.testbed, node.ssh_config,
+ cmds, time_out=None)
+
+ def bootstrap_network(self):
+ """Creates the network by enrolling and configuring the nodes"""
for node in self.nodes:
- ssh.execute_commands(self.testbed, node.ssh_config,
- cmds, time_out=None)
+ self.process_node(node)
+ self.enroll_nodes()
+
+ def run_experiment(self):
+ input('Press ENTER to quit.')
def run_prototype(self):
print("[IRATI experiment] start")
print("Setting up IRATI on the nodes...")
self.setup()
+ self.conf_files = self.write_conf()
+ self.bootstrap_network()
+ self.run_experiment()
print("[IRATI experiment] end")
+
+ def process_node(self, node):
+        """
+        Installs the configuration files and boots up RINA on a node.
+        :type node: mod.Node
+        :param node: the node to configure
+        :return: None
+        """
+ name = node.name
+ gen_files_conf = self.conf_files[node] + ['da.map']
+ dir_path = os.path.dirname(os.path.abspath(__file__))
+ gen_files_bin = 'enroll.py'
+ gen_files_conf_full = [self.conf_dir(x) for x in gen_files_conf]
+ gen_files_bin_full = [os.path.join(dir_path, 'enroll.py')]
+
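+        # IPCM components to enable; the 'mad' addon is included only when
+        # a manager is part of the experiment.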
+ ipcm_components = ['scripting', 'console']
+ if self.manager:
+ ipcm_components.append('mad')
+ ipcm_components = ', '.join(ipcm_components)
+
+ gen_files = gen_files_conf_full + gen_files_bin_full
+
+ format_args = {'name': name,
+ 'ssh': node.ssh_config.port,
+ 'username': self.testbed.username,
+ 'genfiles': gen_files,
+ 'genfilesconf': ' '.join(gen_files_conf),
+ 'genfilesbin': gen_files_bin,
+ 'installpath': '/usr',
+ 'verb': 'DBG',
+ 'ipcmcomps': ipcm_components}
+ try:
+ # TODO: watch out for empty path...
+ ssh.copy_paths_to_testbed(self.testbed,
+ node.ssh_config,
+ gen_files,
+ '')
+ except subprocess.CalledProcessError as e:
+ raise Exception(str(e))
+
+ # TODO: review ssh opts through ssh support
+
+ cmds = [self.sudo('hostname %(name)s' % format_args),
+ self.sudo('chmod a+rw /dev/irati'),
+ self.sudo('mv %(genfilesconf)s /etc' % format_args),
+                self.sudo('mv %(genfilesbin)s /usr/bin' % format_args),
+                self.sudo('chmod a+x /usr/bin/enroll.py')]
+
+ # TODO: is the port up on the VM at this point?
+
+ cmds += [self.sudo('modprobe rina-default-plugin'),
+ self.sudo('%(installpath)s/bin/ipcm -a \"%(ipcmcomps)s\" '
+ '-c /etc/%(name)s.ipcm.conf -l %(verb)s &> log &'
+ % format_args)]
+
+ print('DEBUG: sending node setup via ssh.')
+ # print('Credentials:')
+ # print(node.ssh_config.hostname, node.ssh_config.port,
+ # self.testbed.username, self.testbed.password)
+ ssh.execute_commands(self.testbed, node.ssh_config, cmds)
+
+ def enroll_nodes(self):
+ """Runs the enrollments one by one, respecting dependencies"""
+ for enrollment_list in self.enrollments:
+ for e in enrollment_list:
+ print(
+ 'I am going to enroll %s to DIF %s against neighbor %s,'
+ ' through lower DIF %s'
+ % (e['enrollee'],
+ e['dif'].name,
+ e['enroller'],
+ e['lower_dif'].name))
+
+                subprocess.check_call('sleep 2'.split())  # Important!
+
+ e_args = {'ldif': e['lower_dif'].name,
+ 'dif': e['dif'].name,
+ 'name': e['enrollee'].name,
+ 'o_name': e['enroller'].name}
+
+ cmd = self.sudo('enroll.py --lower-dif %(ldif)s --dif %(dif)s '
+ '--ipcm-conf /etc/%(name)s.ipcm.conf '
+ '--enrollee-name %(dif)s.%(name)s.IPCP '
+ '--enroller-name %(dif)s.%(o_name)s.IPCP'
+ % e_args)
+ print('DEBUG: sending enrollment operation via ssh.')
+ # print('Credentials:')
+ # print(e['enrollee'].ssh_config.hostname,
+ # e['enrollee'].ssh_config.port,
+ # self.testbed.username, self.testbed.password)
+ ssh.execute_command(self.testbed,
+ e['enrollee'].ssh_config,
+ cmd)
+
+ def write_conf(self):
+        """Writes the configuration files for every node.
+
+        Returns a dict mapping each node to the list of configuration
+        file names generated for it.
+        """
+ # Constants and initializations
+ ipcmconfs = dict()
+ difconfs = dict()
+ ipcp2shim_map = {}
+ node2id_map = {}
+ mgmt_dif_name = 'NMS'
+        conf_files = {}  # dict of per-node conf files
+
+ # TODO: what format are the mappings registered in? Is this ok?
+ app_mappings = []
+ for node in self.nodes:
+ app_mappings += [{'name': app, 'dif': dif.name}
+ for app in node.registrations
+ for dif in node.registrations[app]]
+
+ # If some app directives were specified, use those to build da.map.
+ # Otherwise, assume the standard applications are to be mapped in
+ # the DIF with the highest rank.
+ if len(app_mappings) == 0:
+ if len(self.dif_ordering) > 0:
+ for adm in \
+ irati_templates.da_map_base["applicationToDIFMappings"]:
+ adm["difName"] = "%s" % (self.dif_ordering[-1],)
+ else:
+ irati_templates.da_map_base["applicationToDIFMappings"] = []
+ for apm in app_mappings:
+ irati_templates.da_map_base["applicationToDIFMappings"]\
+ .append({"encodedAppName": apm['name'],
+ "difName": "%s" % (apm['dif'])
+ })
+
+ # TODO ask: I guess this will need to be added,
+ # and in that case we should add it to the qemu plugin too...
+ # Where should we take it in input?
+
+ if self.manager:
+ # Add MAD/Manager configuration
+ irati_templates.ipcmconf_base["addons"] = {
+ "mad": {
+ "managerAppName": "",
+ "NMSDIFs": [{"DIF": "%s" % mgmt_dif_name}],
+ "managerConnections": [{
+ "managerAppName": "manager-1--",
+ "DIF": "%s" % mgmt_dif_name
+ }]
+ }
+ }
+
+ node_number = 1
+ for node in self.nodes: # type: mod.Node
+ node2id_map[node.name] = node_number
+ node_number += 1
+ ipcmconfs[node.name] = copy.deepcopy(irati_templates.ipcmconf_base)
+ if self.manager:
+ ipcmconfs[node.name]["addons"]["mad"]["managerAppName"] \
+ = "%s.mad-1--" % (node.name,)
+
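+        # First pass over the DIFs: record which shim DIF each shim IPCP
+        # belongs to, and give every member of a normal DIF its own copy
+        # of the normal DIF configuration template.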
+ for dif in self.dif_ordering: # type: mod.DIF
+ if isinstance(dif, mod.ShimEthDIF):
+ ipcp2shim_map.update({ipcp.name: dif for ipcp in dif.ipcps})
+ elif isinstance(dif, mod.NormalDIF):
+ difconfs[dif.name] = dict()
+ for node in dif.members:
+ difconfs[dif.name][node.name] = copy.deepcopy(
+ irati_templates.normal_dif_base
+ )
+
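+        # Declare the shim-eth IPCPs in each node's IPCM configuration and
+        # dump one shim DIF template file per (node, shim DIF) pair.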
+ for node in self.nodes: # type: mod.Node
+ ipcmconf = ipcmconfs[node.name]
+
+ for ipcp in node.ipcps: # type: mod.ShimEthIPCP
+ if isinstance(ipcp, mod.ShimEthIPCP):
+ shim = ipcp2shim_map[ipcp.name] # type: mod.ShimEthDIF
+ ipcmconf["ipcProcessesToCreate"].append({
+ "apName": "eth.%s.IPCP" % ipcp.name,
+ "apInstance": "1",
+ "difName": shim.name
+ })
+
+ template_file_name = self.conf_dir('shimeth.%s.%s.dif'
+ % (node.name, shim.name))
+ ipcmconf["difConfigurations"].append({
+ "name": shim.name,
+ "template": os.path.basename(template_file_name)
+ })
+
+ fout = open(template_file_name, 'w')
+ fout.write(json.dumps(
+ {"difType": "shim-eth-vlan",
+ "configParameters": {
+ "interface-name": ipcp.ifname
+ }
+ },
+ indent=4, sort_keys=True))
+ fout.close()
+ conf_files.setdefault(node, []).append(
+ 'shimeth.%s.%s.dif' % (node.name, shim.name))
+
+        # Iterate over the dif_ordering array, to make sure each IPCM config
+        # lists the ipcProcessesToCreate operations in the correct order.
+        # If we iterated over the difs map instead, the order would be
+        # arbitrary, and some IPCP registrations in lower DIFs could fail:
+        # at the moment of registration, the IPCP of the lower DIF may not
+        # have been created yet.
+ shims = ipcp2shim_map.values()
+ for dif in self.dif_ordering: # type: mod.NormalDIF
+
+ if dif in shims:
+ # Shims are managed separately, in the previous loop
+ continue
+
+ for node in dif.members: # type: mod.Node
+ node_name = node.name
+ ipcmconf = ipcmconfs[node_name]
+
+ normal_ipcp = {"apName": "%s.%s.IPCP" % (dif.name, node_name),
+ "apInstance": "1",
+ "difName": "%s" % (dif.name,),
+ "difsToRegisterAt": []}
+
+ for lower_dif in node.dif_registrations[dif]: # type: mod.DIF
+ normal_ipcp["difsToRegisterAt"].append(lower_dif.name)
+
+ ipcmconf["ipcProcessesToCreate"].append(normal_ipcp)
+
+ ipcmconf["difConfigurations"].append({
+ "name": "%s" % (dif.name,),
+ "template": "normal.%s.%s.dif" % (node_name, dif.name,)
+ })
+
+ # Fill in the map of IPCP addresses.
+                # This could be moved to difconfs.
+ for other_node in dif.members: # type: mod.Node
+ difconfs[dif.name][other_node.name] \
+ ["knownIPCProcessAddresses"].append({
+ "apName": "%s.%s.IPCP" % (dif.name, node_name),
+ "apInstance": "1",
+ "address": 16 + node2id_map[node_name]})
+ for path, ps in dif.policies.items():
+ # if policy['nodes'] == [] or vmname in policy['nodes']:
+                    # TODO: manage per-node policies
+ irati_templates.translate_policy(
+ difconfs[dif.name][node_name], path, ps, parms=[])
+
+ # Dump the DIF Allocator map
+ with open(self.conf_dir('da.map'), 'w') as da_map_file:
+ json.dump(irati_templates.da_map_base,
+ da_map_file,
+ indent=4,
+ sort_keys=True)
+
+ for node in self.nodes:
+ # Dump the IPCM configuration files
+ with open(self.conf_dir('%s.ipcm.conf'
+ % (node.name,)), 'w') as node_file:
+ json.dump(ipcmconfs[node.name],
+ node_file,
+ indent=4,
+ sort_keys=True)
+ conf_files.setdefault(node, []).append(
+ '%s.ipcm.conf' % (node.name,))
+
+ for dif in self.dif_ordering: # type: mod.DIF
+ dif_conf = difconfs.get(dif.name, None)
+ if dif_conf:
+ # Dump the normal DIF configuration files
+ for node in dif.members:
+ with open(self.conf_dir('normal.%s.%s.dif'
+ % (node.name, dif.name)), 'w') \
+ as dif_conf_file:
+ json.dump(dif_conf[node.name],
+ dif_conf_file,
+ indent=4,
+ sort_keys=True)
+ conf_files.setdefault(node, []).append(
+ 'normal.%s.%s.dif' % (node.name, dif.name))
+ return conf_files
diff --git a/rumba/prototypes/irati_templates.py b/rumba/prototypes/irati_templates.py
new file mode 100644
index 0000000..b8d9788
--- /dev/null
+++ b/rumba/prototypes/irati_templates.py
@@ -0,0 +1,349 @@
+# Environment setup for the VMs. Standard Linux approach.
+env_dict = {'installpath': '/usr', 'varpath': ''}
+
+# Template for an IPCM configuration file
+ipcmconf_base = {
+ "configFileVersion": "1.4.1",
+ "localConfiguration": {
+ "installationPath": "%(installpath)s/bin" % env_dict,
+ "libraryPath": "%(installpath)s/lib" % env_dict,
+ "logPath": "%(varpath)s/var/log" % env_dict,
+ "consoleSocket": "%(varpath)s/var/run/ipcm-console.sock" % env_dict,
+ "pluginsPaths": [
+ "%(installpath)s/lib/rinad/ipcp" % env_dict,
+ "/lib/modules/4.1.33-irati/extra"
+ ]
+ },
+
+ "ipcProcessesToCreate": [],
+ "difConfigurations": [],
+}
+
+
+da_map_base = {
+ "applicationToDIFMappings": [
+ {
+ "encodedAppName": "rina.apps.echotime.server-1--",
+ "difName": "n.DIF"
+ },
+ {
+ "encodedAppName": "traffic.generator.server-1--",
+ "difName": "n.DIF"
+ }
+ ],
+}
+
+
+# Template for a normal DIF configuration file
+normal_dif_base = {
+ "difType": "normal-ipc",
+ "dataTransferConstants": {
+ "addressLength": 2,
+ "cepIdLength": 2,
+ "lengthLength": 2,
+ "portIdLength": 2,
+ "qosIdLength": 2,
+ "rateLength": 4,
+ "frameLength": 4,
+ "sequenceNumberLength": 4,
+ "ctrlSequenceNumberLength": 4,
+ "maxPduSize": 10000,
+ "maxPduLifetime": 60000
+ },
+
+ "qosCubes": [
+ {
+ "name": "unreliablewithflowcontrol",
+ "id": 1,
+ "partialDelivery": False,
+ "orderedDelivery": True,
+ "efcpPolicies": {
+ "dtpPolicySet": {
+ "name": "default",
+ "version": "0"
+ },
+ "initialATimer": 0,
+ "dtcpPresent": True,
+ "dtcpConfiguration": {
+ "dtcpPolicySet": {
+ "name": "default",
+ "version": "0"
+ },
+ "rtxControl": False,
+ "flowControl": True,
+ "flowControlConfig": {
+ "rateBased": False,
+ "windowBased": True,
+ "windowBasedConfig": {
+ "maxClosedWindowQueueLength": 10,
+ "initialCredit": 200
+ }
+ }
+ }
+ }
+ }, {
+ "name": "reliablewithflowcontrol",
+ "id": 2,
+ "partialDelivery": False,
+ "orderedDelivery": True,
+ "maxAllowableGap": 0,
+ "efcpPolicies": {
+ "dtpPolicySet": {
+ "name": "default",
+ "version": "0"
+ },
+ "initialATimer": 0,
+ "dtcpPresent": True,
+ "dtcpConfiguration": {
+ "dtcpPolicySet": {
+ "name": "default",
+ "version": "0"
+ },
+ "rtxControl": True,
+ "rtxControlConfig": {
+ "dataRxmsNmax": 5,
+ "initialRtxTime": 1000
+ },
+ "flowControl": True,
+ "flowControlConfig": {
+ "rateBased": False,
+ "windowBased": True,
+ "windowBasedConfig": {
+ "maxClosedWindowQueueLength": 10,
+ "initialCredit": 200
+ }
+ }
+ }
+ }
+ }
+ ],
+
+ "knownIPCProcessAddresses": [],
+
+ "addressPrefixes": [
+ {
+ "addressPrefix": 0,
+ "organization": "N.Bourbaki"
+ }, {
+ "addressPrefix": 16,
+ "organization": "IRATI"
+ }
+ ],
+
+ "rmtConfiguration": {
+ "pffConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "0"
+ }
+ },
+ "policySet": {
+ "name": "default",
+ "version": "1"
+ }
+ },
+
+ "enrollmentTaskConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "1",
+ "parameters": [
+ {
+ "name": "enrollTimeoutInMs",
+ "value": "10000"
+ }, {
+ "name": "watchdogPeriodInMs",
+ "value": "30000"
+ }, {
+ "name": "declaredDeadIntervalInMs",
+ "value": "120000"
+ }, {
+ "name": "neighborsEnrollerPeriodInMs",
+ "value": "0"
+ }, {
+ "name": "maxEnrollmentRetries",
+ "value": "0"
+ }
+ ]
+ }
+ },
+
+ "flowAllocatorConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "1"
+ }
+ },
+
+ "namespaceManagerConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "1"
+ }
+ },
+
+ "securityManagerConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "1"
+ }
+ },
+
+ "resourceAllocatorConfiguration": {
+ "pduftgConfiguration": {
+ "policySet": {
+ "name": "default",
+ "version": "0"
+ }
+ }
+ },
+
+ "routingConfiguration": {
+ "policySet": {
+ "name": "link-state",
+ "version": "1",
+ "parameters": [
+ {
+ "name": "objectMaximumAge",
+ "value": "10000"
+ }, {
+ "name": "waitUntilReadCDAP",
+ "value": "5001"
+ }, {
+ "name": "waitUntilError",
+ "value": "5001"
+ }, {
+ "name": "waitUntilPDUFTComputation",
+ "value": "103"
+ }, {
+ "name": "waitUntilFSODBPropagation",
+ "value": "101"
+ }, {
+ "name": "waitUntilAgeIncrement",
+ "value": "997"
+ }, {
+ "name": "routingAlgorithm",
+ "value": "Dijkstra"
+ }
+ ]
+ }
+ }
+}
+
+
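+# Select policy set 'v' for component 'k' of DIF configuration 'd'.
+# 'parms' is a list of 'name=value' strings; parameters are merged into
+# the existing list when the policy set is unchanged, replaced otherwise.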
+def ps_set(d, k, v, parms):
+ if k not in d:
+ d[k] = {'name': '', 'version': '1'}
+
+ if d[k]["name"] == v and "parameters" in d[k]:
+ cur_names = [p["name"] for p in d[k]["parameters"]]
+ for p in parms:
+ name, value = p.split('=')
+ if name in cur_names:
+ for i in range(len(d[k]["parameters"])):
+ if d[k]["parameters"][i]["name"] == name:
+ d[k]["parameters"][i]["value"] = value
+ break
+ else:
+ d[k]["parameters"].append({'name': name, 'value': value})
+
+ elif len(parms) > 0:
+ d[k]["parameters"] = [
+ {'name': p.split('=')[0], 'value': p.split('=')[1]}
+ for p in parms]
+
+ d[k]["name"] = v
+
+
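+# Apply a DTP (or DTCP) policy set to the EFCP policies of every QoS cube.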
+def dtp_ps_set(d, v, parms):
+ for i in range(len(d["qosCubes"])):
+ ps_set(d["qosCubes"][i]["efcpPolicies"], "dtpPolicySet", v, parms)
+
+
+def dtcp_ps_set(d, v, parms):
+ for i in range(len(d["qosCubes"])):
+ ps_set(d["qosCubes"][i]["efcpPolicies"]["dtcpConfiguration"],
+ "dtcpPolicySet", v, parms)
+
+
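+# Map each known policy path to the function that patches the corresponding
+# section of a DIF configuration; the 'efcp.*' paths are handled explicitly
+# in translate_policy() below.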
+policy_translator = {
+ 'rmt.pff': lambda d, v, p: ps_set(d["rmtConfiguration"]["pffConfiguration"],
+ "policySet", v, p),
+ 'rmt': lambda d, v, p: ps_set(d["rmtConfiguration"], "policySet", v, p),
+ 'enrollment-task': lambda d, v, p: ps_set(d["enrollmentTaskConfiguration"],
+ "policySet", v, p),
+ 'flow-allocator': lambda d, v, p: ps_set(d["flowAllocatorConfiguration"],
+ "policySet", v, p),
+ 'namespace-manager': lambda d, v, p: ps_set(
+ d["namespaceManagerConfiguration"], "policySet", v, p),
+ 'security-manager': lambda d, v, p: ps_set(
+ d["securityManagerConfiguration"], "policySet", v, p),
+ 'routing': lambda d, v, p: ps_set(
+ d["routingConfiguration"], "policySet", v, p),
+ 'resource-allocator.pduftg': lambda d, v, p: ps_set(
+ d["resourceAllocatorConfiguration"], "policySet", v, p),
+ 'efcp.*.dtcp': None,
+ 'efcp.*.dtp': None,
+}
+
+
+def is_security_path(path):
+ sp = path.split('.')
+ return (len(sp) == 3) and (sp[0] == 'security-manager') \
+ and (sp[1] in ['auth', 'encrypt', 'ttl', 'errorcheck'])
+
+
+# Do we know this path?
+def policy_path_valid(path):
+ if path in policy_translator:
+ return True
+
+ # Try to validate security configuration
+ if is_security_path(path):
+ return True
+
+ return False
+
+
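+# Translate a 'security-manager.<component>.<profile>' policy path into the
+# authSDUProtProfiles section of the security manager configuration, where
+# 'profile' is either 'default' or the name of an underlying DIF.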
+def translate_security_path(d, path, ps, parms):
+ u1, component, profile = path.split('.')
+ if "authSDUProtProfiles" not in d["securityManagerConfiguration"]:
+ d["securityManagerConfiguration"]["authSDUProtProfiles"] = {}
+ d = d["securityManagerConfiguration"]["authSDUProtProfiles"]
+
+ tr = {'auth': 'authPolicy', 'encrypt': 'encryptPolicy',
+ 'ttl': 'TTLPolicy', 'errorcheck': 'ErrorCheckPolicy'}
+
+ if profile == 'default':
+ if profile not in d:
+ d["default"] = {}
+
+ ps_set(d["default"], tr[component], ps, parms)
+
+ else: # profile is the name of a DIF
+ if "specific" not in d:
+ d["specific"] = []
+ j = -1
+ for i in range(len(d["specific"])):
+ if d["specific"][i]["underlyingDIF"] == profile + ".DIF":
+ j = i
+ break
+
+ if j == -1: # We need to create an entry for the new DIF
+ d["specific"].append({"underlyingDIF": profile + ".DIF"})
+
+ ps_set(d["specific"][j], tr[component], ps, parms)
+
+
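+# Entry point used by the IRATI experiment code: dispatch a policy path to
+# the appropriate translator above.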
+def translate_policy(difconf, path, ps, parms):
+ if path == 'efcp.*.dtcp':
+ dtcp_ps_set(difconf, ps, parms)
+
+ elif path == 'efcp.*.dtp':
+ dtp_ps_set(difconf, ps, parms)
+
+ elif is_security_path(path):
+ translate_security_path(difconf, path, ps, parms)
+
+ else:
+ policy_translator[path](difconf, ps, parms)