diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..efa407c --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..e6f1e8e --- /dev/null +++ b/README.md @@ -0,0 +1,70 @@ +# Containerlab EDA Connector tool + +> :warning: **Made for EDA version 24.8.1** + +There are two ways of creating a network and experiment with the functions that EDA provides. The first one is having real hardware, and the second one is the sandbox system. 
Both approaches have drawbacks, however: real hardware is sometimes difficult to come by and lab space / lab installation is difficult to set up and maintain - especially for large setups. The sandbox system is very flexible, although it is much more difficult to add secondary containers such as authentication servers or Linux test boxes, or to establish external connectivity. + +[Containerlab](https://containerlab.dev/) provides a very elegant solution to these problems, and this tool aims to provide a smooth experience for onboarding a containerlab topology into the EDA application. It is not a replacement for containerlab (so it won't define architectures for you - although some [examples](./example-topologies/) are provided in this repository), nor is it an extension of containerlab. This tool will not check whether the containerlab setup has been named correctly, or is ready to accept configuration from EDA. It is however created to work with a brand new containerlab setup that has not been touched manually. + +## Check this first! + +Below is a list of prerequisites before you can run the script. Please check them first. If you have checked all these prerequisites and the script is still not working correctly, please create a GitHub issue or [mail me](mailto:zeno.dhaene@nokia.com). +- your EDA setup should be set up without simulation. This requires a special parameter when EDA is initially installed. **This tool will not work with a 'typical' installation** +- you should be able to ping your containerlab's management IP from your EDA node(s) +- the containerlab should be deployed with the required [startup configuration](./startup-configurations/) +- this program does not use the proxy (e.g. `$http_proxy`) variables. Instead, optional arguments were provided if you want to specify a proxy to reach your FSS. 
Note that they have not been tested very well, so please reach out if it's not working as expected +- the software image for your node must be uploaded first using the template below (replace the version numbers as necessary). I plan to include this step in this tool, but it has not yet been done. +- change the password of the default user that connects to the remote nodes + +```yaml +--- +apiVersion: artifacts.eda.nokia.com/v1 +kind: Artifact +metadata: + name: srlinux-24.7.1-bin +spec: + repo: srlimages + filePath: srlinux.bin + remoteFileUrl: + fileUrl: http://:8080/SRLinux/srlinux-24.7.1-330.bin +--- +apiVersion: artifacts.eda.nokia.com/v1 +kind: Artifact +metadata: + name: srlinux-24.7.1-md5 +spec: + repo: srlimages + filePath: srlinux.md5 + remoteFileUrl: + fileUrl: http://:8080/SRLinux/srlinux-24.7.1-330.bin.md5 +``` + +Apply this configuration on EDA with the `kubectl apply -f artifacts.yaml` command. + +## Installation + +1. Create a new Python environment: + + `python3 -m venv venv/` +2. Activate the Python environment + + `source venv/bin/activate` +3. Upgrade pip + + `python -m pip install --upgrade pip` +4. Install the required python modules + + `python -m pip install -r requirements.txt` +5. Run the tool + + `python eda_containerlab_connector.py --help` + +## Running the tool + +The video below shows off how the tool can be run: + +![Instruction video](./assets/demo.mp4) + +## Requesting support + +You can request support through the Gitlab issues, via Discord, or personally via Teams or Mail. Note that you can run the script with the flag `-l INFO` or `-l DEBUG` flag for greater detail in where the script is failing. 
\ No newline at end of file diff --git a/assets/demo.mp4 b/assets/demo.mp4 new file mode 100644 index 0000000..8492476 Binary files /dev/null and b/assets/demo.mp4 differ diff --git a/eda_containerlab_connector.py b/eda_containerlab_connector.py new file mode 100644 index 0000000..5a0deba --- /dev/null +++ b/eda_containerlab_connector.py @@ -0,0 +1,74 @@ +import argparse +import logging +import requests +import os + +from src.integrate import IntegrateCommand +from src.remove import RemoveCommand + +subcommands = [IntegrateCommand(), RemoveCommand()] + +parser = argparse.ArgumentParser( + prog="Containerlab EDA connector", + description="Integrate an existing containerlab topology with EDA (Event-Driven Automation)", + epilog="Made by Zeno Dhaene (zeno.dhaene@nokia.com)", +) + +parser.add_argument( + "--log-level", + "-l", + type=str, + default="WARNING", + choices={"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}, + help="log level for the application", +) + +parser.add_argument( + "--http-proxy", + type=str, + default="", + help="HTTP proxy to be used to communicate with EDA", +) + +parser.add_argument( + "--https-proxy", + type=str, + default="", + help="HTTPS proxy to be used to communicate with EDA", +) + +parser.add_argument( + "--verify", action="store_true", help="Enables certificate verification for EDA" +) + +subparsers = parser.add_subparsers( + dest="subparser", + title="sub-commands", + description="valid sub-commands", + help="choose a sub-command for more information", + required=True, +) + +for command in subcommands: + command.create_parser(subparsers) + +args = parser.parse_args() +logging.basicConfig( + level=args.log_level, + format="[%(asctime)s][%(levelname)s] %(message)s", + datefmt="%H:%M:%S", +) +requests.packages.urllib3.disable_warnings() +print(args) + +# this will bite me in the ass someday +os.environ["no_proxy"] = args.eda_url + +matched_subparser = [x for x in subcommands if args.subparser in x.PARSER_ALIASES] + +if 
len(matched_subparser) > 1: + raise Exception( + f"Multiple {len(matched_subparser)} match given subparser {args.subparser}" + ) +else: + matched_subparser[0].run(args) diff --git a/example-topologies/EDA-T2.clab.yml b/example-topologies/EDA-T2.clab.yml new file mode 100644 index 0000000..23e17fa --- /dev/null +++ b/example-topologies/EDA-T2.clab.yml @@ -0,0 +1,59 @@ +name: eda_t2 + +mgmt: + network: eda_t2_mgmt + ipv4-subnet: 10.58.2.112/28 + +topology: + kinds: + srl: + image: ghcr.io/nokia/srlinux:24.7.1 + nodes: + spine-1: + kind: srl + type: ixrd3l + mgmt-ipv4: 10.58.2.115 + startup-config: startup/spine-1/config.cfg + spine-2: + kind: srl + type: ixrd3l + mgmt-ipv4: 10.58.2.116 + startup-config: startup/spine-2/config.cfg + leaf-1: + kind: srl + type: ixrd2l + mgmt-ipv4: 10.58.2.117 + startup-config: startup/leaf-1/config.cfg + leaf-2: + kind: srl + type: ixrd2l + mgmt-ipv4: 10.58.2.118 + startup-config: startup/leaf-2/config.cfg + leaf-3: + kind: srl + type: ixrd2l + mgmt-ipv4: 10.58.2.119 + startup-config: startup/leaf-3/config.cfg + leaf-4: + kind: srl + type: ixrd2l + mgmt-ipv4: 10.58.2.120 + startup-config: startup/leaf-4/config.cfg + links: + # spine - leaf + - endpoints: ["spine-1:e1-3", "leaf-1:e1-31"] + - endpoints: ["spine-1:e1-5", "leaf-1:e1-33"] + - endpoints: ["spine-1:e1-4", "leaf-2:e1-31"] + - endpoints: ["spine-1:e1-6", "leaf-2:e1-33"] + - endpoints: ["spine-1:e1-7", "leaf-3:e1-31"] + - endpoints: ["spine-1:e1-9", "leaf-3:e1-33"] + - endpoints: ["spine-1:e1-8", "leaf-4:e1-31"] + - endpoints: ["spine-1:e1-10", "leaf-4:e1-33"] + - endpoints: ["spine-2:e1-3", "leaf-1:e1-32"] + - endpoints: ["spine-2:e1-5", "leaf-1:e1-34"] + - endpoints: ["spine-2:e1-4", "leaf-2:e1-32"] + - endpoints: ["spine-2:e1-6", "leaf-2:e1-34"] + - endpoints: ["spine-2:e1-7", "leaf-3:e1-32"] + - endpoints: ["spine-2:e1-9", "leaf-3:e1-34"] + - endpoints: ["spine-2:e1-8", "leaf-4:e1-32"] + - endpoints: ["spine-2:e1-10", "leaf-4:e1-34"] diff --git 
a/example-topologies/EDA-tiny.clab.yml b/example-topologies/EDA-tiny.clab.yml new file mode 100644 index 0000000..0410aca --- /dev/null +++ b/example-topologies/EDA-tiny.clab.yml @@ -0,0 +1,32 @@ +name: eda_tiny + +mgmt: + network: eda_tiny_mgmt + ipv4-subnet: 10.58.2.128/28 + +topology: + kinds: + srl: + image: ghcr.io/nokia/srlinux:24.7.1 + nodes: + dut1: + kind: srl + type: ixrd3l + mgmt-ipv4: 10.58.2.130 + startup-config: startup/dut1/config.cfg + dut2: + kind: srl + type: ixrd3l + mgmt-ipv4: 10.58.2.131 + startup-config: startup/dut2/config.cfg + dut3: + kind: srl + type: ixrd5 + mgmt-ipv4: 10.58.2.132 + startup-config: startup/dut3/config.cfg + links: + # spine - leaf + - endpoints: ["dut1:e1-1", "dut3:e1-1"] + - endpoints: ["dut1:e1-2", "dut3:e1-2"] + - endpoints: ["dut2:e1-1", "dut3:e1-3"] + - endpoints: ["dut2:e1-2", "dut3:e1-4"] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..9d6aaf9 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +bcrypt==4.2.0 +certifi==2024.8.30 +cffi==1.17.1 +charset-normalizer==3.3.2 +cryptography==43.0.1 +idna==3.10 +Jinja2==3.1.4 +MarkupSafe==2.1.5 +paramiko==3.5.0 +pycparser==2.22 +PyNaCl==1.5.0 +PyYAML==6.0.2 +requests==2.32.3 +urllib3==2.2.3 diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/eda.py b/src/eda.py new file mode 100644 index 0000000..79c345e --- /dev/null +++ b/src/eda.py @@ -0,0 +1,314 @@ +import logging +import requests +import yaml +import json + +# configure logging +logger = logging.getLogger(__name__) + + +class EDA: + CORE_GROUP = "core.eda.nokia.com" + CORE_VERSION = "v1" + INTERFACE_GROUP = "interfaces.eda.nokia.com" + INTERFACE_VERSION = "v1alpha1" + + def __init__(self, hostname, username, password, http_proxy, https_proxy, verify): + """ + Constructor + + Parameters + ---------- + hostname: EDA hostname (IP or FQDN) + username: EDA user name + password: EDA user password + 
http_proxy: HTTP proxy to be used for communication with EDA + https_proxy: HTTPS proxy to be used for communication with EDA + verify: Whether to verify the certificate when communicating with EDA + """ + self.url = f"{hostname}" + self.username = username + self.password = password + self.proxies = {"http": http_proxy, "https": https_proxy} + self.verify = verify + self.access_token = None + self.refresh_token = None + self.version = None + self.transactions = [] + + def login(self): + """ + Retrieves an access_token and refresh_token from the EDA API + """ + payload = {"username": self.username, "password": self.password} + + response = self.post("auth/login", payload, False).json() + + if "code" in response and response["code"] != 200: + raise Exception( + f"Could not authenticate with EDA, error message: '{response['message']} {response['details']}'" + ) + + self.access_token = response["access_token"] + self.refresh_token = response["refresh_token"] + + def get_headers(self, requires_auth): + """ + Configures the right headers for an HTTP request + + Parameters + ---------- + requires_auth: Whether the request requires authorization + + Returns + ------- + A header dictionary + """ + headers = {} + if requires_auth: + if self.access_token is None: + logger.info("No access_token found, authenticating...") + self.login() + + headers["Authorization"] = f"Bearer {self.access_token}" + + return headers + + def get(self, api_path, requires_auth=True): + """ + Performs an HTTP GET request, taking the right proxy settings into account + + Parameters + ---------- + api_path: path to be appended to the base EDA hostname + requires_auth: Whether this request requires authentication + + Returns + ------- + The HTTP response + """ + url = f"{self.url}/{api_path}" + logger.info(f"Performing GET request to '{url}'") + + return requests.get( + url, + proxies=self.proxies, + verify=self.verify, + headers=self.get_headers(requires_auth), + ) + + def post(self, api_path, 
payload, requires_auth=True): + """ + Performs an HTTP POST request, taking the right proxy settings into account + + Parameters + ---------- + api_path: path to be appended to the base EDA hostname + payload: JSON data for the request + requires_auth: Whether this request requires authentication + + Returns + ------- + The HTTP response + """ + url = f"{self.url}/{api_path}" + logger.info(f"Performing POST request to '{url}'") + return requests.post( + url, + proxies=self.proxies, + verify=self.verify, + json=payload, + headers=self.get_headers(requires_auth), + ) + + def is_up(self): + """ + Gets the health of EDA + + Returns + ------- + True if EDA status is "UP", False otherwise + """ + logger.info("Checking whether EDA is up") + health = self.get("core/about/health", requires_auth=False) + logger.debug(health.json()) + return health.json()["status"] == "UP" + + def get_version(self): + """ + Retrieves the EDA version number + """ + # caching this, as it might get called a lot when backwards compatibility + # starts becoming a point of focus + if self.version is not None: + return self.version + + logger.info("Getting EDA version") + version = self.get("core/about/version").json()["eda"]["version"].split("-")[0] + logger.info(f"EDA version is {version}") + + # storing this to make the tool backwards compatible + self.version = version + return version + + def is_authenticated(self): + """ + Retrieves the version number of EDA to see if we can authenticate correctly + + Returns + ------- + True if we can authenticate in EDA, False otherwise + """ + logger.info("Checking whether we can authenticate with EDA") + self.get_version() + # if the previous method did not raise an exception, authentication was successful + return True + + def add_to_transaction(self, cr_type, payload): + """ + Adds a transaction to the basket + + Parameters + ---------- + type: action type, possible values: ['create', 'delete'] + payload: the operation's payload + + Returns + ------- + 
The newly added transaction item + """ + + item = {"type": {cr_type: payload}} + + self.transactions.append(item) + logger.debug(f"Adding item to transaction: {json.dumps(item, indent=4)}") + + return item + + def add_create_to_transaction(self, resource): + """ + Adds a 'create' operation to the transaction + + Parameters + ---------- + resource: the resource to be created + + Returns + ------- + The created item + """ + return self.add_to_transaction("create", {"value": yaml.safe_load(resource)}) + + def add_delete_to_transaction( + self, kind, name, group=CORE_GROUP, version=CORE_VERSION + ): + """ + Adds a 'delete' operation to the transaction + + Parameters + ---------- + resource: the resource to be removed + + Returns + ------- + The created item + """ + self.add_to_transaction( + "delete", + { + "gvk": { # Group, Version, Kind + "group": group, + "version": version, + "kind": kind, + }, + "name": name, + }, + ) + + def is_transaction_item_valid(self, item): + """ + Validates a transaction item + + Parameters + ---------- + item: the item to be validated + + Returns + ------- + True if the transaction is valid, False otherwise + """ + logger.info("Validating transaction item") + + response = self.post("core/transaction/v1/validate", item) + if response.status_code == 204: + logger.info("Validation successful") + return True + + response = response.json() + + if "code" in response: + message = f"{response['message']}" + if "details" in response: + message = f"{message} - {response['details']}" + logger.warning( + f"While validating a transaction item, the following validation error was returned (code {response['code']}): '{message}'" + ) + + return False + + def commit_transaction( + self, description, dryrun=False, resultType="normal", retain=True + ): + """ + Commits the transaction to EDA, and waits for the transaction to complete + + Parameters + ---------- + description: Description provided for this transaction + dryrun: Whether this commit should be 
treated as a dryrun + resultType: Don't know yet what this does + retain: Don't know yet what this does + """ + + payload = { + "description": description, + "dryrun": dryrun, + "resultType": resultType, + "retain": retain, + "crs": self.transactions, + } + + logger.info(f"Committing transaction with {len(self.transactions)} item(s)") + logger.debug(json.dumps(payload, indent=4)) + + response = self.post("core/transaction/v1", payload).json() + if "id" not in response: + raise Exception(f"Could not find transaction ID in response {response}") + + transactionId = response["id"] + + logger.info(f"Waiting for transaction with ID {transactionId} to complete") + result = self.get( + f"core/transaction/v1/details/{transactionId}?waitForComplete=true&failOnErrors=true" + ).json() + + if "code" in result: + message = f"{result['message']}" + + if "details" in message: + message = f"{message} - {result['details']}" + + errors = [] + if "errors" in result: + errors = [ + f"{x['error']['message']} {x['error']['details']}" + for x in result["errors"] + ] + + logger.error( + f"Committing transaction failed (error code {result['code']}). 
Error message: '{message} {errors}'" + ) + raise Exception("Failed to commit - see error above") + + logger.info("Commit successful") + self.transactions = [] diff --git a/src/helpers.py b/src/helpers.py new file mode 100644 index 0000000..ee33077 --- /dev/null +++ b/src/helpers.py @@ -0,0 +1,57 @@ +import os.path +import logging +import yaml + +import src.topology as topology + +from jinja2 import Environment, FileSystemLoader + +# set up jinja2 templating engine +template_loader = FileSystemLoader(searchpath="templates") +template_environment = Environment(loader=template_loader) + +# set up logging +logger = logging.getLogger(__name__) + + +def parse_topology(topology_file): + """ + Parses a topology yml file + + Parameters + ---------- + topology_file: containerlab topology file (yaml format) + + Returns + ------- + A parsed Topology file + """ + logger.info(f"Parsing topology file '{topology_file}'") + if not os.path.isfile(topology_file): + raise Exception(f"Topology file '{topology_file}' does not exist!") + + with open(topology_file, "r") as f: + try: + obj = yaml.safe_load(f) + except yaml.YAMLError as exc: + logger.critical(f"Failed to parse yaml file '{topology_file}'") + raise exc + + return topology.from_obj(obj) + + +def render_template(template_name, data): + """ + Loads a jinja template and renders it with the data provided + + Parameters + ---------- + template_name: name of the template in the 'templates' folder + data: data to be rendered in the template + + Returns + ------- + The rendered template, as str + """ + template = template_environment.get_template(template_name) + return template.render(data) diff --git a/src/integrate.py b/src/integrate.py new file mode 100644 index 0000000..058a302 --- /dev/null +++ b/src/integrate.py @@ -0,0 +1,236 @@ +import logging + +import src.helpers as helpers + +from src.subcommand import SubCommand +from src.eda import EDA + +# set up logging +logger = logging.getLogger(__name__) + + +class 
IntegrateCommand(SubCommand): + PARSER_NAME = "integrate" + PARSER_ALIASES = [PARSER_NAME, "i"] + + def run(self, args): + """ + Run the program with the arguments specified for this sub-command + + Parameters + ---------- + args: input arguments returned by the argument parser + """ + self.args = args + self.topology = helpers.parse_topology(self.args.topology_file) + self.topology.log_debug() + self.eda = EDA( + args.eda_url, + args.eda_user, + args.eda_password, + args.http_proxy, + args.https_proxy, + args.verify, + ) + + print("== Running pre-checks ==") + self.prechecks() + + print("== Creating allocation pool ==") + self.create_allocation_pool() + self.eda.commit_transaction( + "EDA Containerlab Connector: create IP-mgmt allocation pool" + ) + + print("== Creating node profiles ==") + self.create_node_profiles() + self.eda.commit_transaction("EDA Containerlab Connector: create node profiles") + # self.bootstrap_config() + + print("== Onboarding nodes ==") + self.create_bootstrap_nodes() + self.eda.commit_transaction("EDA Containerlab Connector: create nodes") + + print("== Adding system interfaces ==") + self.create_system_interfaces() + self.eda.commit_transaction( + "EDA Containerlab Connector: create system interfaces" + ) + + print("== Adding topolink interfaces ==") + self.create_topolink_interfaces() + self.eda.commit_transaction( + "EDA Containerlab Connector: create topolink interfaces" + ) + + print("== Creating topolinks ==") + self.create_topolinks() + self.eda.commit_transaction("EDA Containerlab Connector: create topolinks") + + print("Done!") + + def prechecks(self): + """ + Performs pre-checks to see if everything is reachable + """ + # check if the nodes are reachable + self.topology.check_connectivity() + + # check if EDA is reachable + if not self.eda.is_up(): + raise Exception("EDA status is not 'UP'") + + # check if we can authenticate with EDA + if not self.eda.is_authenticated(): + raise Exception( + "Could not authenticate to EDA with 
the provided credentials" + ) + + def create_allocation_pool(self): + """ + Creates an IP allocation pool for the mgmt network of the topology + """ + logger.info("Creating mgmt allocation pool") + subnet_prefix = self.topology.mgmt_ipv4_subnet + (subnet, prefix) = subnet_prefix.split("/") + parts = subnet.split(".") + gateway = f"{parts[0]}.{parts[1]}.{parts[2]}.{int(parts[3]) + 1}/{prefix}" + + data = { + "pool_name": self.topology.get_mgmt_pool_name(), + "subnet": subnet_prefix, + "gateway": gateway, + } + + pool = helpers.render_template("allocation-pool.j2", data) + logger.debug(pool) + item = self.eda.add_create_to_transaction(pool) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a mgmt allocation pool, see warning above. Exiting..." + ) + + def create_node_profiles(self): + """ + Creates node profiles for the topology + """ + logger.info("Creating node profiles") + profiles = self.topology.get_node_profiles() + logger.info(f"Discovered {len(profiles)} distinct node profile(s)") + for profile in profiles: + logger.debug(profile) + item = self.eda.add_create_to_transaction(profile) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a node profile, see warning above. Exiting..." + ) + + def bootstrap_config(self): + """ + Push the bootstrap configuration to the nodes + """ + logger.info("Pushing bootstrap config to the nodes") + self.topology.bootstrap_config() + + def create_bootstrap_nodes(self): + """ + Creates nodes for the topology + """ + logger.info("Creating nodes") + bootstrap_nodes = self.topology.get_bootstrap_nodes() + for bootstrap_node in bootstrap_nodes: + logger.debug(bootstrap_node) + item = self.eda.add_create_to_transaction(bootstrap_node) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a bootstrap node, see warning above. Exiting..." 
+ ) + + def create_system_interfaces(self): + """ + Creates the system interfaces for all nodes + """ + logger.info("Creating system interfaces") + interfaces = self.topology.get_system_interfaces() + for interface in interfaces: + logger.debug(interface) + item = self.eda.add_create_to_transaction(interface) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a system interface, see warning above. Exiting..." + ) + + def create_topolink_interfaces(self): + """ + Creates the interfaces that belong to topology links + """ + logger.info("Creating topolink interfaces") + interfaces = self.topology.get_topolink_interfaces() + for interface in interfaces: + logger.debug(interface) + item = self.eda.add_create_to_transaction(interface) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a topolink interface, see warning above. Exiting..." + ) + + def create_topolinks(self): + """ + Creates topolinks for the topology + """ + logger.info("Creating topolinks") + topolinks = self.topology.get_topolinks() + for topolink in topolinks: + logger.debug(topolink) + item = self.eda.add_create_to_transaction(topolink) + if not self.eda.is_transaction_item_valid(item): + raise Exception( + "Validation error when trying to create a topolink, see warning above. Exiting..." 
+ ) + + def create_parser(self, subparsers): + """ + Creates a subparser with arguments specific to this subcommand of the program + + Parameters + ---------- + subparsers: the subparsers object for the parent command + + Returns + ------- + An argparse subparser + """ + parser = subparsers.add_parser( + self.PARSER_NAME, + help="integrate containerlab with EDA", + aliases=self.PARSER_ALIASES, + ) + + parser.add_argument( + "--topology-file", + "-t", + type=str, + required=True, + help="the containerlab topology file", + ) + + parser.add_argument( + "--eda-url", + "-e", + type=str, + required=True, + help="the hostname or IP of your EDA deployment", + ) + + parser.add_argument( + "--eda-user", type=str, default="admin", help="the username of the EDA user" + ) + + parser.add_argument( + "--eda-password", + type=str, + default="admin", + help="the password of the EDA user", + ) + + return parser diff --git a/src/link.py b/src/link.py new file mode 100644 index 0000000..9ff03ca --- /dev/null +++ b/src/link.py @@ -0,0 +1,151 @@ +import logging + +import src.helpers as helpers + +# set up logging +logger = logging.getLogger(__name__) + + +class Link: + def __init__(self, node_1, interface_1, node_2, interface_2): + self.node_1 = node_1 + self.interface_1 = interface_1 + self.node_2 = node_2 + self.interface_2 = interface_2 + + def __repr__(self): + return ( + f"Link({self.node_1}-{self.interface_1}, {self.node_2}-{self.interface_2})" + ) + + def get_link_name(self, topology): + """ + Returns an eda-safe name for the link + """ + return f"{self.node_1.get_node_name(topology)}-{self.interface_1}-{self.node_2.get_node_name(topology)}-{self.interface_2}" + + def get_interface1_name(self): + """ + Returns the name for the interface name of endpoint 1, as specified in EDA + """ + return self.node_1.get_interface_name_for_kind(self.interface_1) + + def get_interface2_name(self): + """ + Returns the name for the interface name of endpoint 2, as specified in EDA + """ + return 
self.node_2.get_interface_name_for_kind(self.interface_2) + + def is_topolink(self): + """ + Returns True if both endpoints are supported in EDA as topology nodes, False otherwise + """ + # check that both ends of the link are supported in EDA + if self.node_1 is None or not self.node_1.is_eda_supported(): + logger.debug( + f"Link {self} is not a topolink because endpoint 1 node kind '{self.node_1.kind}' is not supported in EDA" + ) + return False + if self.node_2 is None or not self.node_2.is_eda_supported(): + logger.debug( + f"Link {self} is not a topolink because endpoint 2 node kind '{self.node_2.kind}' is not supported in EDA" + ) + return False + + return True + + def get_topolink(self, topology): + """ + Returns an EDA topolink resource that represents this link + """ + logger.info(f"Rendering topolink for {self}") + if not self.is_topolink(): + logger.warning( + f"Could not render topolink, {self} is not a topolink. Please call is_topolink() first" + ) + return None + + data = { + "link_role": "interSwitch", + "link_name": self.get_link_name(topology), + "local_node": self.node_1.get_node_name(topology), + "local_interface": self.get_interface1_name(), + "remote_node": self.node_2.get_node_name(topology), + "remote_interface": self.get_interface2_name(), + } + + return helpers.render_template("topolink.j2", data) + + +def from_obj(python_object, nodes): + """ + Parses a link from a python array of 2 endpoints + + Parameters + ---------- + python_object: the python object containing the endpoints from the input yaml file + nodes: nodes part of the topology + + Returns + ------- + The parsed Link entity + """ + logger.info(f"Parsing link with endpoints {python_object}") + if "endpoints" not in python_object: + raise Exception("The python object does not contain the key 'endpoints'") + + if len(python_object["endpoints"]) != 2: + raise Exception("The endpoint array should be an array of two objects") + + endpoint_1 = python_object["endpoints"][0] + endpoint_2 
= python_object["endpoints"][1] + + (node_name_1, interface_1) = split_endpoint(endpoint_1) + (node_name_2, interface_2) = split_endpoint(endpoint_2) + + node_1 = find_node(node_name_1, nodes) + node_2 = find_node(node_name_2, nodes) + + return Link(node_1, interface_1, node_2, interface_2) + + +def split_endpoint(endpoint): + """ + Splits and endpoint into its node name, and the interface + + Parameters + ---------- + endpoint: the name of an endpoint as found in the topology file + + Returns + ------- + A tuple of (node_name, node_interface) where node_name is the name of the node, and node_interface the interface + """ + parts = endpoint.split(":") + + if len(parts) != 2: + raise Exception( + f"Endpoint '{endpoint}' does not adhere to the format '[node]:[interface]'" + ) + + return (parts[0], parts[1]) + + +def find_node(node_name, nodes): + """ + Searches through the provided nodes array for a node with name node_name + + Parameters + ---------- + node_name: the name of the node that's being looked for + nodes: the array of Node that will be searched + + Returns: + -------- + The Node if it was found, None otherwise + """ + for node in nodes: + if node.name == node_name: + return node + + return None diff --git a/src/node.py b/src/node.py new file mode 100644 index 0000000..6475092 --- /dev/null +++ b/src/node.py @@ -0,0 +1,222 @@ +import os +import logging + +# set up logging +logger = logging.getLogger(__name__) + + +class Node: + def __init__(self, name, kind, node_type, version, mgmt_ipv4): + self.name = name + self.kind = kind + + self.node_type = node_type + if node_type is None: + node_type = self.get_default_node_type() + + self.version = version + self.mgmt_ipv4 = mgmt_ipv4 + + def __repr__(self): + return f"Node(name={self.name}, kind={self.kind}, type={self.node_type}, version={self.version}, mgmt_ipv4={self.mgmt_ipv4})" + + def ping(self): + """ + Pings the node + + Returns + ------- + True if the ping was successful, False otherwise + """ + 
logger.debug(f"Pinging {self.kind} node '{self.name}' with IP {self.mgmt_ipv4}") + param = "-n" if os.sys.platform.lower() == "win32" else "-c" + response = os.system(f"ping {param} 1 {self.mgmt_ipv4} > /dev/null 2>&1") + + if response == 0: + logger.info( + f"Ping to {self.kind} node '{self.name}' with IP {self.mgmt_ipv4} successfull" + ) + else: + logger.warning( + f"Ping to {self.kind} node '{self.name}' with IP {self.mgmt_ipv4} not successfull" + ) + + return response == 0 + + def test_ssh(self): + """ + Tests the SSH connectivity to the node. This method needs to be overwritten by nodes that support it + + Returns + ------- + True if the SSH was successful, raises exception otherwise + """ + logger.info(f"Testing SSH is not supported for {self}") + + def get_node_name(self, topology): + """ + Returns an EDA-safe name for a node + """ + return f"{topology.get_eda_safe_name()}-{self.name}" + + def get_profile_name(self, topology): + """ + Returns an EDA-safe name for a node profile + """ + raise Exception("Node not supported in EDA") + + def get_default_node_type(self): + """ + Allows to override the default node type, if no type was provided + """ + return None + + def get_platform(self): + """ + Platform name to be used in the bootstrap node resource + """ + return "UNKOWN" + + def get_node_profile(self, topology): + """ + Creates a node profile for this node kind & version. This method needs to be overwritten by nodes that support it + + Returns + ------- + the rendered node-profile jinja template + """ + logger.info(f"Node profile is not supported for {self}") + return None + + def bootstrap_config(self): + """ + Pushes the bootstrap configuration to the node. This method needs to be overwritten by nodes that support it + """ + logger.info(f"Pushing bootstrap config to the node not supported for {self}") + + def get_bootstrap_node(self, topology): + """ + Creates a bootstrap node for this node. 
This method needs to be overwritten by nodes that support it + """ + logger.info(f"Bootstrap node is not supported for {self}") + return None + + def is_eda_supported(self): + """ + Returns True if this node is supported as part of an EDA topology + """ + return False + + def get_interface_name_for_kind(self, ifname): + """ + Converts the containerlab name of an interface to the node's naming convention + + Parameters + ---------- + ifname: name of the interface as specified in the containerlab topology file + + Returns + ------- + The name of the interface as accepted by the node + """ + return ifname + + def get_system_interface_name(self, topology): + """ + Returns the name of this node's system interface, if supported + """ + logger.info(f"Getting system interface name is not supported for {self}") + return None + + def get_system_interface(self, topology): + """ + Creates a system interface for this node. This method needs to be overwritten by nodes that support it + + Parameters + ---------- + topology: the parsed Topology + + Returns + ------- + The rendered interface jinja template + """ + logger.info(f"System interface is not supported for {self}") + return None + + def get_topolink_interface_name(self, topology, ifname): + """ + Returns the name of this node's topolink with given interface + """ + return ( + f"{self.get_node_name(topology)}-{self.get_interface_name_for_kind(ifname)}" + ) + + def get_topolink_interface(self, topology, ifname, other_node): + """ + Creates a topolink interface for this node and interface. 
This method needs to be overwritten by nodes that support it + + Parameters + ---------- + topology: the parsed Topology + ifname: name of the topolink interface + other_node: node at the other end of the topolink (used for description) + + Returns + ------- + The rendered interface jinja template + """ + logger.info(f"Topolink interface is not supported for {self}") + return None + + +# import specific nodes down here to avoid circular dependencies +from src.node_srl import SRLNode # noqa: E402 + + +def from_obj(name, python_object, kinds): + """ + Parses a node from a Python object + + Parameters + ---------- + name: the name of the node + python_obj: the python object for this node parsed from the yaml input file + kinds: the python object for the kinds in the topology yaml file + + Returns + ------- + The parsed Node entity + """ + logger.info(f"Parsing node with name '{name}'") + kind = python_object["kind"] + node_type = python_object["type"] if "type" in python_object else None + + # support for legacy containerlab files + if "mgmt_ipv4" in python_object: + logger.warning( + "Property mgmt_ipv4 is deprecated, please use mgmt-ipv4 in your clab topology file" + ) + mgmt_ipv4 = python_object["mgmt_ipv4"] + else: + mgmt_ipv4 = python_object["mgmt-ipv4"] + + # check if the kind is in the kinds object + if kind not in kinds: + logger.warning( + f"Could not find kind '{kind}' for node '{name}' in the topology file" + ) + kind = None + version = None + else: + image = kinds[kind]["image"] + parts = image.split(":") + if len(parts) != 2: + logger.warning(f"Could not parse version from node image '{image}'") + version = None + else: + version = parts[1] + + if kind == "srl" or kind == "nokia_srlinux": + return SRLNode(name, kind, node_type, version, mgmt_ipv4) + + return Node(name, kind, node_type, version, mgmt_ipv4) diff --git a/src/node_srl.py b/src/node_srl.py new file mode 100644 index 0000000..ce20915 --- /dev/null +++ b/src/node_srl.py @@ -0,0 +1,294 @@ +import 
import logging
import socket
import os
import re
import tempfile
import src.helpers as helpers

from paramiko import (
    SSHClient,
    BadHostKeyException,
    AuthenticationException,
    SSHException,
    AutoAddPolicy,
)

from src.node import Node

# set up logging
logger = logging.getLogger(__name__)


class SRLNode(Node):
    # this can be made part of the command line arguments, but this is not done (yet)
    SRL_USERNAME = "admin"
    SRL_PASSWORD = "NokiaSrl1!"
    NODE_TYPE = "srlinux"
    GNMI_PORT = "57400"
    VERSION_PATH = ".system.information.version"
    YANG_PATH = (
        "https://eda-asvr/schemaprofiles/srlinux-ghcr-{version}/srlinux-{version}.zip"
    )
    SRL_IMAGE = "srlimages/srlinux-{version}-bin/srlinux.bin"
    SRL_IMAGE_MD5 = "srlimages/srlinux-{version}-bin/srlinux.bin.md5"

    def __init__(self, name, kind, node_type, version, mgmt_ipv4):
        super().__init__(name, kind, node_type, version, mgmt_ipv4)

    def test_ssh(self):
        """
        Tests the SSH connectivity to the node

        Returns
        -------
        True if the SSH was successful, raises the underlying exception otherwise
        """
        logger.debug(
            f"Testing whether SSH works for node '{self.name}' with IP {self.mgmt_ipv4}"
        )
        ssh = SSHClient()
        ssh.set_missing_host_key_policy(AutoAddPolicy())

        try:
            ssh.connect(
                self.mgmt_ipv4, username=self.SRL_USERNAME, password=self.SRL_PASSWORD
            )
            logger.info(
                f"SSH test to {self.kind} node '{self.name}' with IP {self.mgmt_ipv4} was successful"
            )
            return True
        except (
            BadHostKeyException,
            AuthenticationException,
            SSHException,
            socket.error,
        ) as e:
            logger.critical(f"Could not connect to node {self}, exception: {e}")
            raise e

    def get_default_node_type(self):
        """
        Allows to override the default node type, if no type was provided
        """
        return "ixrd3l"

    def get_platform(self):
        """
        Platform name to be used in the bootstrap node resource
        """
        # guard against a missing type so .replace does not blow up on None
        node_type = self.node_type or self.get_default_node_type()
        t = node_type.replace("ixr", "")
        return f"7220 IXR-{t.upper()}"

    def get_profile_name(self, topology):
        """
        Returns an EDA-safe name for a node profile
        """
        return f"{topology.get_eda_safe_name()}-{self.NODE_TYPE}-{self.version}"

    def is_eda_supported(self):
        """
        Returns True if this node is supported as part of an EDA topology
        """
        return True

    def get_interface_name_for_kind(self, ifname):
        """
        Converts the containerlab name of an interface to the node's naming convention

        Parameters
        ----------
        ifname: name of the interface as specified in the containerlab topology file

        Returns
        -------
        The name of the interface as accepted by the node
        """
        # e1-3 -> ethernet-1-3 (single match instead of match + search twice)
        match = re.match(r"^e([0-9])-([0-9]+)$", ifname)
        if match:
            return f"ethernet-{match.group(1)}-{match.group(2)}"

        return ifname

    def get_node_profile(self, topology):
        """
        Creates a node profile for this node kind & version

        Returns
        -------
        the rendered node-profile jinja template
        """
        logger.info(f"Rendering node profile for {self}")
        data = {
            "profile_name": self.get_profile_name(topology),
            "sw_version": self.version,
            "gnmi_port": self.GNMI_PORT,
            "operating_system": self.kind,
            "version_path": self.VERSION_PATH,
            # below evaluates to something like v24\.7\.1.*
            # (re.escape replaces the previous "\." replacement, which was an
            # invalid escape sequence in a non-raw string)
            "version_match": "v{}.*".format(re.escape(self.version)),
            "yang_path": self.YANG_PATH.format(version=self.version),
            "node_user": "admin",
            "pool_name": topology.get_mgmt_pool_name(),
            "sw_image": self.SRL_IMAGE.format(version=self.version),
            "sw_image_md5": self.SRL_IMAGE_MD5.format(version=self.version),
        }

        return helpers.render_template("node-profile.j2", data)

    def bootstrap_config(self):
        """
        Pushes the bootstrap configuration to the node

        Raises
        ------
        Exception when sourcing the config on the node reports anything on stderr;
        paramiko/socket errors when the SSH connection itself fails
        """
        logger.info(f"Pushing bootstrap config to node {self}")
        ssh = SSHClient()
        ssh.set_missing_host_key_policy(AutoAddPolicy())

        data = {"gnmi_port": self.GNMI_PORT}

        bootstrap_config = helpers.render_template("srlinux-bootstrap-config.j2", data)
        fd, path = tempfile.mkstemp()

        try:
            with os.fdopen(fd, "w") as cfg:
                cfg.write(bootstrap_config)
                cfg.flush()
                ssh.connect(
                    self.mgmt_ipv4,
                    username=self.SRL_USERNAME,
                    password=self.SRL_PASSWORD,
                )
                sftp = ssh.open_sftp()
                logger.info("Copying rendered bootstrap-config to node")
                sftp.put(path, "bootstrap-config.cfg")
                logger.info("Sourcing the bootstrap-config file")
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "source bootstrap-config.cfg"
                )
                stderr_lines = ssh_stderr.readlines()
                if len(stderr_lines) > 0:
                    logger.error("=== STDERR ===")
                    # log the actual text (previously the channel object itself
                    # was logged); the stray debug print(path) was removed too
                    logger.error("".join(stderr_lines))
                    raise Exception(
                        "Something went wrong when pushing bootstrap config to the node, see error above"
                    )
        except (
            BadHostKeyException,
            AuthenticationException,
            SSHException,
            socket.error,
        ) as e:
            logger.critical(f"Could not connect to node {self}, exception: {e}")
            raise e
        finally:
            os.remove(path)

    def get_bootstrap_node(self, topology):
        """
        Creates a bootstrap node for this node

        Returns
        -------
        the rendered bootstrap-node jinja template
        """
        logger.info(f"Creating bootstrap node for {self}")

        # Derive the node role from its name. Order matters: "borderleaf"
        # contains the substring "leaf", so the more specific names are tested
        # first (previously borderleaf/dcgw nodes were misclassified as leaf).
        # NOTE(review): the "bl" check matches anywhere in the name — confirm
        # this is the intended borderleaf shorthand.
        if "borderleaf" in self.name or "bl" in self.name:
            role_value = "borderleaf"
        elif "dcgw" in self.name:
            role_value = "dcgw"
        elif "spine" in self.name:
            role_value = "spine"
        elif "leaf" in self.name:
            role_value = "leaf"
        else:
            role_value = "leaf"
            logger.warning(f"Could not determine role of node {self}, defaulting to eda.nokia.com/role=leaf")

        data = {
            "node_name": self.get_node_name(topology),
            "topology_name": topology.get_eda_safe_name(),
            "role_value": role_value,
            "node_profile": self.get_profile_name(topology),
            "kind": self.kind,
            "platform": self.get_platform(),
            "sw_version": self.version,
            "mgmt_ip": self.mgmt_ipv4,
            "system_interface": self.get_system_interface_name(topology),
        }

        return helpers.render_template("bootstrap-node.j2", data)

    def get_system_interface_name(self, topology):
        """
        Returns the name of this node's system interface
        """
        return f"{self.get_node_name(topology)}-system0"

    def get_system_interface(self, topology):
        """
        Creates a system interface for this node

        Parameters
        ----------
        topology: the parsed Topology

        Returns
        -------
        The rendered interface jinja template
        """
        logger.info(f"Creating system interface for {self}")

        data = {
            "interface_name": self.get_system_interface_name(topology),
            "label_key": None,
            "label_value": None,
            "encap_type": "'null'",
            "node_name": self.get_node_name(topology),
            "interface": "system0",
            "description": "system interface",
        }

        return helpers.render_template("interface.j2", data)

    def get_topolink_interface_name(self, topology, ifname):
        """
        Returns the name of this node's topolink with given interface
        """
        return (
            f"{self.get_node_name(topology)}-{self.get_interface_name_for_kind(ifname)}"
        )

    def get_topolink_interface(self, topology, ifname, other_node):
        """
        Creates a topolink interface for this node and interface

        Parameters
        ----------
        topology: the parsed Topology
        ifname: name of the topolink interface
        other_node: node at the other end of the topolink (used for description)

        Returns
        -------
        The rendered interface jinja template
        """
        logger.info(f"Creating topolink interface for {self}")

        data = {
            "interface_name": self.get_topolink_interface_name(topology, ifname),
            "label_key": "eda.nokia.com/role",
            "label_value": "interSwitch",
            "encap_type": "'null'",
            "node_name": self.get_node_name(topology),
            "interface": self.get_interface_name_for_kind(ifname),
            "description": f"inter-switch link to {other_node.get_node_name(topology)}",
        }

        return helpers.render_template("interface.j2", data)

# diff --git a/src/remove.py b/src/remove.py new file mode 100644 index 0000000..b283d68 --- /dev/null +++
b/src/remove.py @@ -0,0 +1,210 @@ +import logging + +import src.helpers as helpers + +from src.subcommand import SubCommand +from src.eda import EDA + +# set up logging +logger = logging.getLogger(__name__) + + +class RemoveCommand(SubCommand): + PARSER_NAME = "remove" + PARSER_ALIASES = [PARSER_NAME, "r"] + + def run(self, args): + """ + Run the program with the arguments specified for this sub-command + + Parameters + ---------- + args: input arguments returned by the argument parser + """ + self.args = args + self.topology = helpers.parse_topology(self.args.topology_file) + self.topology.log_debug() + self.eda = EDA( + args.eda_url, + args.eda_user, + args.eda_password, + args.http_proxy, + args.https_proxy, + args.verify, + ) + + print("== Removing topolinks ==") + self.remove_topolinks() + self.eda.commit_transaction("EDA Containerlab Connector: remove topolinks") + + print("== Removing topolink interfaces ==") + self.remove_topolink_interfaces() + self.eda.commit_transaction( + "EDA Containerlab Connector: remove topolink interfaces" + ) + + print("== Removing system interfaces ==") + self.remove_system_interfaces() + self.eda.commit_transaction( + "EDA Containerlab Connector: remove system interfaces" + ) + + print("== Removing nodes ==") + self.remove_bootstrap_nodes() + self.eda.commit_transaction("EDA Containerlab Connector: remove nodes") + + print("== Removing node profiles ==") + self.remove_node_profiles() + self.eda.commit_transaction("EDA Containerlab Connector: remove node profiles") + + print("== Removing allocation pool ==") + self.remove_allocation_pool() + self.eda.commit_transaction( + "EDA Containerlab Connector: remove allocation pool" + ) + + print("Done!") + + def remove_topolinks(self): + """ + Removes the topolinks for the topology + """ + logger.info("Removing topolinks") + for link in self.topology.links: + logger.debug(link) + if not link.is_topolink(): + logger.debug("Ignoring link, not a topolink") + continue + 
self.eda.add_delete_to_transaction( + "TopoLink", link.get_link_name(self.topology) + ) + + def remove_topolink_interfaces(self): + """ + Removes the topolink interfaces of the nodes of the topology + """ + logger.info("Removing topolink interfaces") + for link in self.topology.links: + logger.debug(link) + if not link.is_topolink(): + logger.debug("Ignoring link, not a topolink") + continue + + ifname_1 = link.node_1.get_topolink_interface_name( + self.topology, link.interface_1 + ) + ifname_2 = link.node_2.get_topolink_interface_name( + self.topology, link.interface_2 + ) + + for interface in [ifname_1, ifname_2]: + self.eda.add_delete_to_transaction( + "Interface", + interface, + group=self.eda.INTERFACE_GROUP, + version=self.eda.INTERFACE_VERSION, + ) + + def remove_system_interfaces(self): + """ + Removes the system interfaces of the nodes of the topology + """ + logger.info("Removing system interfaces") + for node in self.topology.nodes: + logger.debug(node) + ifname = node.get_system_interface_name(self.topology) + if ifname is None: + logger.debug("Ignoring node, system interface not supported") + continue + self.eda.add_delete_to_transaction( + "Interface", + ifname, + group=self.eda.INTERFACE_GROUP, + version=self.eda.INTERFACE_VERSION, + ) + + def remove_bootstrap_nodes(self): + """ + Removes the toponodes for the topology + """ + logger.info("Removing bootstrapped nodes") + for node in self.topology.nodes: + logger.debug(node) + self.eda.add_delete_to_transaction( + "TopoNode", node.get_node_name(self.topology) + ) + + def remove_node_profiles(self): + """ + Removes the node profiles for the different node kinds in the topology + """ + logger.info("Removing the node profiles") + profile_names = [] + for node in self.topology.nodes: + logger.debug(node) + + if not node.is_eda_supported(): + continue + + profile_name = node.get_profile_name(self.topology) + if profile_name not in profile_names: + # avoids removing the same node profile twice + 
profile_names.append(profile_name) + logger.debug(f"Profile name: {profile_name}") + self.eda.add_delete_to_transaction("NodeProfile", profile_name) + + def remove_allocation_pool(self): + """ + Removes the allocation pool for the mgmt network of the topology + """ + logger.info("Removing mgmt allocation pool") + self.eda.add_delete_to_transaction( + "IPInSubnetAllocationPool", self.topology.get_mgmt_pool_name() + ) + + def create_parser(self, subparsers): + """ + Creates a subparser with arguments specific to this subcommand of the program + + Parameters + ---------- + subparsers: the subparsers object for the parent command + + Returns + ------- + An argparse subparser + """ + parser = subparsers.add_parser( + self.PARSER_NAME, + help="remove containerlab integration from EDA", + aliases=self.PARSER_ALIASES, + ) + + parser.add_argument( + "--topology-file", + "-t", + type=str, + required=True, + help="the containerlab topology file", + ) + + parser.add_argument( + "--eda-url", + "-e", + type=str, + required=True, + help="the hostname or IP of your EDA deployment", + ) + + parser.add_argument( + "--eda-user", type=str, default="admin", help="the username of the EDA user" + ) + + parser.add_argument( + "--eda-password", + type=str, + default="admin", + help="the password of the EDA user", + ) + + return parser diff --git a/src/subcommand.py b/src/subcommand.py new file mode 100644 index 0000000..390ceb7 --- /dev/null +++ b/src/subcommand.py @@ -0,0 +1,28 @@ +class SubCommand: + # name of the subparser of this command + PARSER_NAME = None + PARSER_ALIASES = [PARSER_NAME] + + def run(self, args): + """ + Run the program with the arguments specified for this sub-command + + Parameters + ---------- + args: input arguments returned by the argument parser + """ + raise Exception(f"Run method not implemented for subparser {args.subparser}") + + def create_parser(self, subparsers): + """ + Creates a subparser with arguments specific to this subcommand of the program + + 
Parameters + ---------- + subparsers: the subparsers object for the parent command + + Returns + ------- + An argparse subparser + """ + raise Exception("get_parser method not implemented") diff --git a/src/topology.py b/src/topology.py new file mode 100644 index 0000000..a538356 --- /dev/null +++ b/src/topology.py @@ -0,0 +1,174 @@ +import logging + +from src.node import from_obj as node_from_obj +from src.link import from_obj as link_from_obj + +# set up logging +logger = logging.getLogger(__name__) + + +class Topology: + def __init__(self, name, mgmt_ipv4_subnet, nodes, links): + self.name = name + self.mgmt_ipv4_subnet = mgmt_ipv4_subnet + self.nodes = nodes + self.links = links + + def __repr__(self): + return f"Topology(name={self.name}, mgmt_ipv4_subnet={self.mgmt_ipv4_subnet}) with {len(self.nodes)} nodes" + + def log_debug(self): + """ + Prints the topology and all nodes that belong to it to the debug logger + """ + logger.debug("=== Topology ===") + logger.debug(self) + + logger.debug("== Nodes == ") + for node in self.nodes: + logger.debug(node) + + logger.debug("== Links == ") + for link in self.links: + logger.debug(link) + + def check_connectivity(self): + """ + Checks whether all nodes are pingable, and have the SSH interface open + """ + for node in self.nodes: + node.ping() + + for node in self.nodes: + node.test_ssh() + + def get_eda_safe_name(self): + """ + Returns an EDA-safe name for the name of the topology + """ + return self.name.replace("_", "-") + + def get_mgmt_pool_name(self): + """ + Returns an EDA-safe name for the IPInSubnetAllocationPool for mgmt + """ + return f"{self.get_eda_safe_name()}-mgmt-pool" + + def get_node_profiles(self): + """ + Creates node profiles for all nodes in the topology. 
One node profile per type/sw-version is created + """ + profiles = {} + for node in self.nodes: + node_profile = node.get_node_profile(self) + if node_profile is None: + # node profile not supported (for example, linux containers that are not managed by EDA) + continue + + if f"{node.kind}-{node.version}" not in profiles: + profiles[f"{node.kind}-{node.version}"] = node_profile + + # only return the node profiles, not the keys + return profiles.values() + + def bootstrap_config(self): + """ + Pushes the bootstrap configuration to the nodes + """ + for node in self.nodes: + node.bootstrap_config() + + def get_bootstrap_nodes(self): + """ + Create nodes for the topology + """ + bootstrap_nodes = [] + for node in self.nodes: + bootstrap_node = node.get_bootstrap_node(self) + if bootstrap_node is None: + continue + + bootstrap_nodes.append(bootstrap_node) + + return bootstrap_nodes + + def get_topolinks(self): + """ + Create topolinks for the topology + """ + topolinks = [] + for link in self.links: + if link.is_topolink(): + topolinks.append(link.get_topolink(self)) + + return topolinks + + def get_system_interfaces(self): + """ + Create system interfaces for the nodes in the topology + """ + interfaces = [] + for node in self.nodes: + if not node.is_eda_supported(): + continue + + interface = node.get_system_interface(self) + if interface is not None: + interfaces.append(interface) + + return interfaces + + def get_topolink_interfaces(self): + """ + Create topolink interfaces for the links in the topology + """ + interfaces = [] + for link in self.links: + if link.is_topolink(): + interfaces.append( + link.node_1.get_topolink_interface( + self, link.interface_1, link.node_2 + ) + ) + interfaces.append( + link.node_2.get_topolink_interface( + self, link.interface_2, link.node_1 + ) + ) + + return interfaces + + +def from_obj(python_obj): + """ + Parsers a topology from a Python object + + Parameters + ---------- + python_obj: the python object parsed from the yaml 
input file + + Returns + ------- + The parsed Topology entity + """ + logger.info( + f"Parsing topology with name '{python_obj['name']}' which contains {len(python_obj['topology']['nodes'])} nodes" + ) + + name = python_obj["name"] + mgmt_ipv4_subnet = python_obj["mgmt"]["ipv4-subnet"] + nodes = [] + for node in python_obj["topology"]["nodes"]: + nodes.append( + node_from_obj( + node, + python_obj["topology"]["nodes"][node], + python_obj["topology"]["kinds"], + ) + ) + + links = [] + for link in python_obj["topology"]["links"]: + links.append(link_from_obj(link, nodes)) + + return Topology(name, mgmt_ipv4_subnet, nodes, links) diff --git a/startup-configurations/srlinux.cfg b/startup-configurations/srlinux.cfg new file mode 100644 index 0000000..46eb9a0 --- /dev/null +++ b/startup-configurations/srlinux.cfg @@ -0,0 +1,31 @@ +set / acl acl-filter cpm type ipv4 entry 261 +set / acl acl-filter cpm type ipv4 entry 261 description "Accept incoming gNMI messages when the other host initiates the TCP connection" +set / acl acl-filter cpm type ipv4 entry 261 action +set / acl acl-filter cpm type ipv4 entry 261 action accept +set / acl acl-filter cpm type ipv4 entry 261 match +set / acl acl-filter cpm type ipv4 entry 261 match ipv4 protocol tcp +set / acl acl-filter cpm type ipv4 entry 261 match transport +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port operator eq +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port value 50052 + +set / system grpc-server discovery +set / system grpc-server discovery admin-state enable +set / system grpc-server discovery rate-limit 65535 +set / system grpc-server discovery session-limit 1024 +set / system grpc-server discovery metadata-authentication true +set / system grpc-server discovery default-tls-profile true +set / system grpc-server discovery network-instance mgmt +set / system grpc-server discovery 
port 50052 +set / system grpc-server discovery services [ gnmi gnsi ] + + +set / system grpc-server mgmt +set / system grpc-server mgmt admin-state enable +set / system grpc-server mgmt rate-limit 65535 +set / system grpc-server mgmt session-limit 1024 +set / system grpc-server mgmt metadata-authentication true +set / system grpc-server mgmt tls-profile EDA +set / system grpc-server mgmt network-instance mgmt +set / system grpc-server mgmt port {{ gnmi_port }} +set / system grpc-server mgmt services [ gnmi gnoi gnsi ] \ No newline at end of file diff --git a/templates/allocation-pool.j2 b/templates/allocation-pool.j2 new file mode 100644 index 0000000..7dc8877 --- /dev/null +++ b/templates/allocation-pool.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: core.eda.nokia.com/v1 +kind: IPInSubnetAllocationPool +metadata: + name: {{ pool_name }} +spec: + segments: + - subnet: {{ subnet }} + allocations: + - name: gateway + value: {{ gateway }} diff --git a/templates/bootstrap-node.j2 b/templates/bootstrap-node.j2 new file mode 100644 index 0000000..32afff7 --- /dev/null +++ b/templates/bootstrap-node.j2 @@ -0,0 +1,24 @@ +apiVersion: core.eda.nokia.com/v1 +kind: TopoNode +metadata: + name: {{ node_name }} + labels: + eda.nokia.com/role: {{ role_value }} + eda-connector.nokia.com/topology: {{ topology_name }} + eda-connector.nokia.com/role: {{ topology_name}}-{{ role_value }} + namespace: default +spec: + macAddress: '' + serialNumber: '' + nodeProfile: {{ node_profile }} + onBoarded: false + operatingSystem: {{ kind }} + platform: {{ platform }} + version: {{ sw_version }} + productionAddress: {{ mgmt_ip }} + systemInterface: {{ system_interface }} + license: '' + component: [] + npp: + deviationsOnConnect: false + mode: normal diff --git a/templates/interface.j2 b/templates/interface.j2 new file mode 100644 index 0000000..f240dc5 --- /dev/null +++ b/templates/interface.j2 @@ -0,0 +1,23 @@ +apiVersion: interfaces.eda.nokia.com/v1alpha1 +kind: Interface +metadata: + name: {{ 
interface_name }} + namespace: default +{%- if label_value %} + labels: + {{ label_key }}: {{ label_value }} +{%- endif %} +spec: + enabled: true + encapType: {{ encap_type}} + ethernet: + stormControl: + enabled: false + lldp: true + members: + - enabled: true + interface: {{ interface }} + lacpPortPriority: 32768 + node: {{ node_name }} + type: interface + description: '{{ description }}' \ No newline at end of file diff --git a/templates/node-profile.j2 b/templates/node-profile.j2 new file mode 100644 index 0000000..f170ef4 --- /dev/null +++ b/templates/node-profile.j2 @@ -0,0 +1,23 @@ +--- +apiVersion: core.eda.nokia.com/v1 +kind: NodeProfile +metadata: + name: {{ profile_name }} +spec: + port: {{ gnmi_port }} + transport: GNMI + operatingSystem: {{ operating_system }} + version: {{ sw_version }} + versionPath: {{ version_path }} + versionMatch: {{ version_match }} + yang: {{ yang_path }} + nodeUser: {{ node_user }} + dhcp: + managementPoolv4: '{{ pool_name }}' + gnmiOptions: + annotate: false + commitConfirmed: true + configSubscription: true + images: + - image: {{ sw_image }} + imageMd5: {{ sw_image_md5 }} diff --git a/templates/node-user.j2 b/templates/node-user.j2 new file mode 100644 index 0000000..5ca53fd --- /dev/null +++ b/templates/node-user.j2 @@ -0,0 +1,18 @@ +apiVersion: core.eda.nokia.com/v1 +kind: NodeUser +metadata: + name: {{ node_user }} + labels: null + namespace: default +spec: + username: {{ username }} + password: {{ password }} + groupBindings: + - groups: + - sudo + nodeSelector: + - eda.nokia.com/role=spine + - eda.nokia.com/role=borderleaf + - eda.nokia.com/role=leaf + - eda.nokia.com/role=UNKNOWN + sshPublicKeys: [] \ No newline at end of file diff --git a/templates/srlinux-bootstrap-config.j2 b/templates/srlinux-bootstrap-config.j2 new file mode 100644 index 0000000..b400a0d --- /dev/null +++ b/templates/srlinux-bootstrap-config.j2 @@ -0,0 +1,33 @@ +enter candidate +set / acl acl-filter cpm type ipv4 entry 261 +set / acl acl-filter 
cpm type ipv4 entry 261 description "Accept incoming gNMI messages when the other host initiates the TCP connection" +set / acl acl-filter cpm type ipv4 entry 261 action +set / acl acl-filter cpm type ipv4 entry 261 action accept +set / acl acl-filter cpm type ipv4 entry 261 match +set / acl acl-filter cpm type ipv4 entry 261 match ipv4 protocol tcp +set / acl acl-filter cpm type ipv4 entry 261 match transport +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port operator eq +set / acl acl-filter cpm type ipv4 entry 261 match transport destination-port value 50052 + +set / system grpc-server discovery +set / system grpc-server discovery admin-state enable +set / system grpc-server discovery rate-limit 65535 +set / system grpc-server discovery session-limit 1024 +set / system grpc-server discovery metadata-authentication true +set / system grpc-server discovery default-tls-profile true +set / system grpc-server discovery network-instance mgmt +set / system grpc-server discovery port 50052 +set / system grpc-server discovery services [ gnmi gnsi ] + + +set / system grpc-server mgmt +set / system grpc-server mgmt admin-state enable +set / system grpc-server mgmt rate-limit 65535 +set / system grpc-server mgmt session-limit 1024 +set / system grpc-server mgmt metadata-authentication true +set / system grpc-server mgmt tls-profile EDA +set / system grpc-server mgmt network-instance mgmt +set / system grpc-server mgmt port {{ gnmi_port }} +set / system grpc-server mgmt services [ gnmi gnoi gnsi ] +commit now \ No newline at end of file diff --git a/templates/topolink.j2 b/templates/topolink.j2 new file mode 100644 index 0000000..8e82d4b --- /dev/null +++ b/templates/topolink.j2 @@ -0,0 +1,18 @@ +apiVersion: core.eda.nokia.com/v1 +kind: TopoLink +metadata: + labels: + eda.nokia.com/role: {{ link_role }} + name: {{ link_name }} + namespace: default +spec: + links: + - 
local: + interface: {{ local_interface }} + interfaceResource: {{ local_node }}-{{ local_interface }} + node: {{ local_node }} + remote: + interface: {{ remote_interface }} + interfaceResource: {{ remote_node }}-{{ remote_interface }} + node: {{ remote_node }} + type: interSwitch \ No newline at end of file