diff --git a/.github/workflows/build_upload_whl.yml b/.github/workflows/build_upload_whl.yml new file mode 100644 index 0000000..38fec87 --- /dev/null +++ b/.github/workflows/build_upload_whl.yml @@ -0,0 +1,205 @@ +name: CI Build Reusable Workflow +on: + workflow_call: + secrets: + GH_TOKEN: + description: 'GitHub token for authentication' + required: true + PYPI_TOKEN: + description: 'PyPI API token to publish package' + required: false + inputs: + UPLOAD_PACKAGE: + description: 'Should the package be uploaded to PyPI?' + required: false + default: false + type: boolean + REPOSITORY_NAME: + description: 'Repository name' + required: false + type: string + BRANCH_NAME: + description: 'Branch name to checkout' + required: true + type: string + PYTHON_VERSION: + description: 'Python version to use' + required: false + default: '3.10.11' + type: string + PUSH_TAG: + description: 'Push tag after version bump' + required: false + default: false + type: boolean + RELEASE_BUILD: + description: 'Is release build?' + required: false + default: false + type: boolean + GIT_USER: + description: 'Git user name for commit and tag' + required: true + type: string + GIT_EMAIL: + description: 'Git user email for commit and tag' + required: true + type: string + PROJECT_NAME: + description: 'Project name for tests' + required: true + type: string + SOURCE_PATH: + description: 'Path to the source code directory' + required: false + default: 'src' + type: string + RUNS_ON: + description: 'Runner type for the job' + required: false + default: 'ubuntu-latest' + type: string + +jobs: + build_whl: + permissions: + contents: write + id-token: write + environment: + name: "pypi" + url: https://pypi.org/p/${{ inputs.PROJECT_NAME }} + runs-on: ${{ inputs.RUNS_ON }} + steps: + - uses: actions/checkout@v4 + with: + fetch-tags: true + fetch-depth: 0 + path: ${{ inputs.SOURCE_PATH }} + ref: ${{ inputs.BRANCH_NAME }} + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.PYTHON_VERSION }} + cache: 'pip' + + - name: Version bumping + id: VERSION_BUMP + if: inputs.RELEASE_BUILD == true + env: + GIT_AUTHOR_NAME: ${{ inputs.GIT_USER }} + GIT_AUTHOR_EMAIL: ${{ inputs.GIT_EMAIL }} + GIT_COMMITTER_NAME: ${{ inputs.GIT_USER }} + GIT_COMMITTER_EMAIL: ${{ inputs.GIT_EMAIL }} + shell: bash + run: | + python -m pip install --upgrade pip + python -m venv bump_version + source bump_version/bin/activate + pip install python-semantic-release~=10.2 + pip install -r ${{ inputs.SOURCE_PATH }}/requirements-dev.txt + mfd-create-config-files --project-dir ./${{ inputs.SOURCE_PATH }} + cd ${{ inputs.SOURCE_PATH }} + version_after_bump=$(semantic-release version --print | tail -n 1 | tr -d '\n') + version_from_tag=$(git describe --tags --abbrev=0 | tr -d '\n' | sed 's/^v//') + echo "Version after semantic-release bump is: ${version_after_bump}" + echo "Version from tag: ${version_from_tag}" + # Only check version equality if RELEASE_BUILD is true + if [ "${{ inputs.RELEASE_BUILD }}" == "true" ]; then + if [ "$version_after_bump" == "$version_from_tag" ]; then + echo "Version would not change: version_after_bump=${version_after_bump}, version_from_tag=${version_from_tag}" + exit 1 + fi + fi + semantic-release version --no-push --no-vcs-release + cat pyproject.toml + echo "version_after_bump=v${version_after_bump}" >> $GITHUB_OUTPUT + - name: Create virtual environment for whl creation + shell: bash + run: | + python -m venv whl_creation + source whl_creation/bin/activate + pip install build==1.2.2.post1 + 
          cd ${{ inputs.SOURCE_PATH }}
          ../whl_creation/bin/python -m build --wheel --outdir ../whl_creation/dist
          ls -l ../whl_creation/dist

      - name: Determine if unit and functional tests should run
        id: test_check
        shell: bash
        run: |
          REPO_NAME="${{ inputs.PROJECT_NAME }}"
          echo "Project name extracted: $REPO_NAME"

          UNIT_TEST_DIR="${{ inputs.SOURCE_PATH }}/tests/unit/test_$(echo "${REPO_NAME}" | tr '-' '_')"
          FUNC_TEST_DIR="${{ inputs.SOURCE_PATH }}/tests/system/test_$(echo "${REPO_NAME}" | tr '-' '_')"
          if [ -d "$UNIT_TEST_DIR" ]; then
            echo "Unit tests directory exists: $UNIT_TEST_DIR"
            echo "run_unit_tests=true" >> $GITHUB_OUTPUT
          else
            echo "Unit tests directory does not exist: $UNIT_TEST_DIR"
            echo "run_unit_tests=false" >> $GITHUB_OUTPUT
          fi
          if [ -d "$FUNC_TEST_DIR" ]; then
            echo "Functional tests directory exists: $FUNC_TEST_DIR"
            echo "run_functional_tests=true" >> $GITHUB_OUTPUT
          else
            echo "Functional tests directory does not exist: $FUNC_TEST_DIR"
            echo "run_functional_tests=false" >> $GITHUB_OUTPUT
          fi

      - name: Install dependencies for tests
        if: steps.test_check.outputs.run_unit_tests == 'true' || steps.test_check.outputs.run_functional_tests == 'true'
        shell: bash
        run: |
          python -m venv test_env
          source test_env/bin/activate
          python -m pip install -r "${{ inputs.SOURCE_PATH }}/requirements.txt" -r "${{ inputs.SOURCE_PATH }}/requirements-test.txt" -r "${{ inputs.SOURCE_PATH }}/requirements-dev.txt"

      - name: Run unit tests if test directory exists
        if: steps.test_check.outputs.run_unit_tests == 'true'
        shell: bash
        run: |
          source test_env/bin/activate
          mfd-unit-tests --project-dir ${{ github.workspace }}/${{ inputs.SOURCE_PATH }}

      - name: Run functional tests if test directory exists
        if: steps.test_check.outputs.run_functional_tests == 'true'
        shell: bash
        run: |
          source test_env/bin/activate
          mfd-system-tests --project-dir ${{ github.workspace }}/${{ inputs.SOURCE_PATH }}

      - name: Publish package distributions to PyPI
        if: ${{ inputs.RELEASE_BUILD == true && inputs.UPLOAD_PACKAGE == true }}
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: 'whl_creation/dist'
          password: ${{ secrets.PYPI_TOKEN }}

      - name: Publish comment on how to build the .whl
        if: inputs.RELEASE_BUILD == false
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_TOKEN }}
          script: |
            const prNumber = context.payload.pull_request.number;
            const commentBody = "We don't publish dev .whl packages.\nTo build the .whl yourself, run 'pip install git+https://github.com/${{ inputs.REPOSITORY_NAME }}@${{ inputs.BRANCH_NAME }}'";
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body: commentBody
            });

      - name: Push git tag after version bump
        if: ${{ inputs.RELEASE_BUILD == true && inputs.PUSH_TAG == true }}
        shell: bash
        env:
          GIT_AUTHOR_NAME: ${{ inputs.GIT_USER }}
          GIT_AUTHOR_EMAIL: ${{ inputs.GIT_EMAIL }}
          GIT_COMMITTER_NAME: ${{ inputs.GIT_USER }}
          GIT_COMMITTER_EMAIL: ${{ inputs.GIT_EMAIL }}
          version_after_bump: ${{ steps.VERSION_BUMP.outputs.version_after_bump }}
        run: |
          cd ${{ inputs.SOURCE_PATH }}
          git push origin "${version_after_bump}"
\ No newline at end of file
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000..4c43daf
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,98 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit
it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL Advanced" + +on: + pull_request: + branches: [ "main" ] + push: + branches: [ "main" ] + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: actions + build-mode: none + - language: python + build-mode: none + # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. + # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Add any setup steps before running the `github/codeql-action/init` action. + # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). This is typically only required for manual builds. + # - name: Setup runtime (example) + # uses: actions/setup-example@v1 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+ + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/manual_release.yml b/.github/workflows/manual_release.yml new file mode 100644 index 0000000..022c10f --- /dev/null +++ b/.github/workflows/manual_release.yml @@ -0,0 +1,33 @@ +name: CI BUILD - RELEASE MODE +on: + workflow_dispatch: + +jobs: + build_upload_whl: + strategy: + matrix: + include: + - name: python-version-3-10 + python_version: '3.10' + push_tag: false + upload_package: false + continue-on-error: true + - name: python-version-3-13 + python_version: '3.13' + push_tag: true + upload_package: true + continue-on-error: true + uses: ./.github/workflows/build_upload_whl.yml + secrets: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} + with: + REPOSITORY_NAME: ${{ github.repository }} + BRANCH_NAME: ${{ github.ref_name }} + PYTHON_VERSION: ${{ matrix.python_version }} + PUSH_TAG: ${{ matrix.push_tag }} + RELEASE_BUILD: true + UPLOAD_PACKAGE: ${{ matrix.upload_package }} + GIT_USER: 'mfd-intel-bot' + GIT_EMAIL: 'mfd_intel_bot@intel.com' + PROJECT_NAME: 'mfd-esxi' \ No newline at end of file diff --git a/.github/workflows/pull_requests.yml b/.github/workflows/pull_requests.yml new file mode 100644 index 0000000..18b5b9f --- /dev/null +++ b/.github/workflows/pull_requests.yml @@ -0,0 +1,29 @@ +name: DEV BUILD + +on: + pull_request: + types: [opened, synchronize] + +jobs: + build_upload_whl: + strategy: + matrix: + include: + - name: python-version-3-10 + python_version: '3.10' + push_tag: false + - name: python-version-3-13 + python_version: '3.13' + push_tag: false + uses: ./.github/workflows/build_upload_whl.yml + secrets: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + with: + REPOSITORY_NAME: ${{ github.repository }} + BRANCH_NAME: ${{ github.head_ref }} + PYTHON_VERSION: ${{ matrix.python_version }} + PUSH_TAG: ${{ matrix.push_tag }} + RELEASE_BUILD: false + GIT_USER: 'mfd-intel-bot' + GIT_EMAIL: 'mfd_intel_bot@intel.com' + PROJECT_NAME: 'mfd-esxi' \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4c26cb9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,236 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/pycharm+all,python +# Edit at https://www.toptal.com/developers/gitignore?templates=pycharm+all,python + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: 
https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +coverage.json +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +pytestdebug.log + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +doc/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pythonenv* + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# profiling data +.prof + +# End of https://www.toptal.com/developers/gitignore/api/pycharm+all,python +ruff.toml \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..95dddc8 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,6 @@ +repos: +- repo: https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + additional_dependencies: ['click==8.2.1'] diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 0000000..2c515bb --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,7 @@ +# AUTHORS + +* Arkadiusz Baczek (arkadiusz.baczek@intel.com) +* Mateusz Chrominski (mateusz.chrominski@intel.com) +* Hubert Cymerys (hubert.cymerys@intel.com) +* Agnieszka Flizikowska (agnieszka.flizikowska@intel.com) +* Adrian Lasota (adrian.lasota@intel.com) \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 134f4b2..0f3a971 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -129,4 +129,4 @@ For answers to common questions about this code of conduct, see the FAQ at [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq -[translations]: https://www.contributor-covenant.org/translations +[translations]: https://www.contributor-covenant.org/translations \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f682f4e..d457db6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ### License - is licensed under the terms in [LICENSE]. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. +MFD ESXi is licensed under the terms in [LICENSE](LICENSE.md). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. ### Sign your work @@ -54,4 +54,4 @@ Then you just add a line to every git commit message: Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. +commit automatically with `git commit -s`. \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..78c3ec7 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright © 2025 Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9d839c4
--- /dev/null
+++ b/README.md
@@ -0,0 +1,26 @@
+> [!IMPORTANT]
+> This project is under development. All source code and features on the main branch are for the purpose of testing or evaluation and not production ready.
+
+# MFD ESXI
+Library to access and manipulate VMware products: ESXi, VCSA and NSX.
+
+> [!IMPORTANT]
+> This module requires `vsphere-automation-sdk` to work.\
+> Please add `vsphere-automation-sdk` to your requirements file or install it manually:
+> ```bash
+> pip install "vsphere-automation-sdk @ git+https://github.com/vmware/vsphere-automation-sdk-python@v8.0.3.0"
+> ```
+
+## API - vswitch (ESXivSwitch)
+* `set_forged_transmit(self, name: str, enable: bool = True) -> None` - set the forged-transmit policy on a portgroup
+* `change_ens_fpo_support(self, enable: bool, vds: str | None = None) -> None` - enable or disable FPO support
+* `set_mac_change_policy(self, portgroup_name: str, enable: bool = True) -> None` - set the MAC-change policy on a portgroup
+
+## OS supported
+
+* ESXi >= 7.0
+* NSX >= 3.2 (INFRA API only)
+
+## Issue reporting
+
+If you encounter any bugs or have suggestions for improvements, you're welcome to contribute directly or open an issue [here](https://github.com/intel/mfd-esxi/issues).
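+## Usage example
+
+A minimal sketch of the vswitch API listed above, distilled from `examples/simple_example.py`; the host address, credentials, uplink and portgroup names are placeholders:
+
+```python
+from mfd_connect import RPyCConnection
+from mfd_host import Host
+
+# hypothetical ESXi host address and credentials
+connection = RPyCConnection("172.31.0.50")
+host = Host(connection=connection)
+host.virtualization.initialize(ip="172.31.0.50", login="root", password="***")
+
+# create (or reconfigure) a vSwitch with one uplink and one portgroup,
+# then adjust the portgroup's VLAN and forged-transmit policy
+vswitch = host.virtualization.set_vswitch("test1", uplinks=["vmnic2"], portgroups=["pg1"])
+vswitch.set_portgroup_vlan("pg1", 82)
+vswitch.set_forged_transmit("pg1", enable=True)
+```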
\ No newline at end of file
diff --git a/configure.py b/configure.py
new file mode 100644
index 0000000..4742b65
--- /dev/null
+++ b/configure.py
@@ -0,0 +1,12 @@
+"""Configure pre-commit hooks."""
+
+import subprocess
+import sys
+
+subprocess.run([sys.executable, "-m", "pip", "install", "pre-commit"], check=True)
+print("pre-commit version:")
+subprocess.run(["pre-commit", "--version"], check=True)
+print("python version:")
+subprocess.run([sys.executable, "--version"], check=True)
+
+subprocess.run(["pre-commit", "install"], check=True)
diff --git a/examples/simple_example.py b/examples/simple_example.py
new file mode 100644
index 0000000..efee074
--- /dev/null
+++ b/examples/simple_example.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Simple example."""
+
+import logging
+from ipaddress import ip_interface
+
+from mfd_connect import RPyCConnection
+from mfd_host import Host
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.DEBUG)
+
+
+if __name__ == "__main__":
+    connection = RPyCConnection("172.31.0.50")
+    host = Host(connection=connection)
+    adapter = host.network.get_interface(interface_name="vmnic2")
+    host.virtualization.initialize(ip="172.31.0.50", login="root", password="***")
+    host.virtualization.hv.clean(keep="AT_ESXI")
+
+    print(host.virtualization.api.version)
+    print(host.virtualization.api.get_performance_metrics([adapter]))
+    print(host.virtualization.api.get_lldp_status(adapter))
+    print(host.virtualization.api.get_adapters_sriov_info([adapter]))
+    host.virtualization.api.set_adapters_sriov([adapter], 10, False)
+    print(host.virtualization.api.get_adapters_sriov_info([adapter]))
+    host.virtualization.api.set_adapters_sriov([adapter], 0, False)
+    print(host.virtualization.api.get_adapters_sriov_info([adapter]))
+    host.virtualization.api.set_adapters_sriov([adapter], 8, False)
+    print(host.virtualization.api.get_adapters_sriov_info([adapter]))
+
+    for vswitch in host.virtualization.vswitch:
+        if vswitch.name in ["test1", "ATvSwitch", "TESTvSwitch"]:
+            host.virtualization.del_vswitch(vswitch.name)
+
+    vswitch1 = host.virtualization.set_vswitch(
+        "test1", uplinks=["vmnic2", "vmnic3"], portgroups=["t1a", "t1b"], mtu=9000
+    )
+    vswitch1.set_portgroup_vlan("t1a", 82)
+    vswitch1.change_ens_fpo_support(True, "vSphereDistributedSwitch")
+    vmknic1 = host.virtualization.find_vmknic(portgroup="PGvmnic2")
+    vmknic1.add_ip(ip_interface("1.1.1.1/8"))
+    vmknic1.add_ip("2.1.1.1/8")
+    vmknic1.add_ip(ip_interface("2001:1::2/64"))
+    vmknic1.add_ip("2001:1::3/64")
+    vswitch1.reconfigure(uplinks=["vmnic2"], portgroups=["t1a"])
+    vswitch1.reconfigure(uplinks=["vmnic2", "vmnic3"], portgroups=["t1a", "t1b"], mtu=9000)
+    vmknic1.del_ip("2001:1::2/64")
+
+    vswitch = host.virtualization.set_vswitch("test1", uplinks=["vmnic2"], portgroups=["test1"])
+
+    vswitch.set_mac_change_policy(portgroup_name="test1", enable=True)
+
+    vms91 = host.virtualization.hv.prepare_vms("datastore_050_vmfs6", "Base_R91", count=1, suffix="050")
+    host.virtualization.hv.attach_network(vms91, portgroup="test1", model="sriov", adapter=adapter)
+    host.virtualization.hv.create_vms(vms91)
+
+    vms90 = host.virtualization.hv.prepare_vms("datastore_050", "Base_R90", count=4, suffix="050")
+    host.virtualization.hv.attach_network(vms90, portgroup="test1")
+    host.virtualization.hv.create_vms(vms90)
+
+    host.virtualization.hv.clean(keep="AT_ESXI")
diff --git a/mfd_esxi/__init__.py b/mfd_esxi/__init__.py
new file mode 100644
index 0000000..3fbf98d
---
/dev/null
+++ b/mfd_esxi/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Module for MFD ESXI."""
diff --git a/mfd_esxi/const.py b/mfd_esxi/const.py
new file mode 100644
index 0000000..07d1c0f
--- /dev/null
+++ b/mfd_esxi/const.py
@@ -0,0 +1,6 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Constants shared across modules."""
+
+ESXI_UPLINK_NUMBER = 16
+ESXI_UPLINK_FORMAT = "Uplink_%.2d"
diff --git a/mfd_esxi/esxi_version.py b/mfd_esxi/esxi_version.py
new file mode 100644
index 0000000..a665b12
--- /dev/null
+++ b/mfd_esxi/esxi_version.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Version of ESXi."""
+
+import re
+from packaging.version import Version
+from typing import TYPE_CHECKING
+from .exceptions import ESXiVersionException
+
+if TYPE_CHECKING:
+    from mfd_esxi.host import ESXiHypervisor
+
+
+class ESXiVersion:
+    """Class for ESXi version."""
+
+    def __init__(self, full_string: str):
+        """
+        Initialize version object.
+
+        :param full_string: output of vmware -v
+        """
+        match = re.search(r"\s+(?P<version>\d+\.\d+\.\d+)\s+build-(?P<build>\d+)", full_string)
+        if match:
+            self.version = Version(match.group("version"))
+            self.build = int(match.group("build"))
+            self.full_string = full_string
+        else:
+            raise ESXiVersionException(f"Unable to parse version: {full_string}")
+
+    @staticmethod
+    def discover(owner: "ESXiHypervisor") -> "ESXiVersion":
+        """
+        Discover the ESXi version.
+
+        :param owner: ESXi host
+        :return: version object
+        """
+        output = owner.execute_command("vmware -v").stdout
+        version = ESXiVersion(output)
+        return version
diff --git a/mfd_esxi/exceptions.py b/mfd_esxi/exceptions.py
new file mode 100644
index 0000000..41b2b10
--- /dev/null
+++ b/mfd_esxi/exceptions.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Module for exceptions."""
+import subprocess
+
+
+class ESXiVersionException(Exception):
+    """Unable to recognize version."""
+
+
+class ESXiNotFound(Exception):
+    """Unable to find the name provided."""
+
+
+class ESXiNotSupported(Exception):
+    """Not supported feature selected."""
+
+
+class ESXiNameException(Exception):
+    """Wrong name."""
+
+
+class ESXiWrongParameter(Exception):
+    """Wrong parameter supplied."""
+
+
+class ESXiRuntimeError(Exception):
+    """Error during execution."""
+
+
+class ESXiVMCopyTimeout(Exception):
+    """Timeout copying VM disk."""
+
+
+class ESXiVMNotRun(Exception):
+    """VM is not running."""
+
+
+class ESXiVFUnavailable(Exception):
+    """No VF available."""
+
+
+class ESXiAPISocketError(Exception):
+    """Unable to communicate with API."""
+
+
+class ESXiAPIInvalidLogin(Exception):
+    """Wrong credentials."""
+
+
+class UninitializedNsxConnection(Exception):
+    """Connection to NSX is not initialized."""
+
+
+class UnsupportedNsxEntity(Exception):
+    """NSX entity is not supported."""
+
+
+class MissingNsxEntity(Exception):
+    """NSX entity is missing."""
+
+
+class NsxApiCallError(Exception):
+    """NSX api call failed."""
+
+
+class NsxResourceSetupError(Exception):
+    """NSX resource setup error."""
+
+
+class NsxResourcePartialSuccessSetupError(Exception):
+    """NSX resource partial success setup error."""
+
+
+class NsxResourceRemoveError(Exception):
+    """NSX resource remove error."""
+
+
+class VswitchError(subprocess.CalledProcessError, Exception):
+    """Vswitch error."""
diff --git a/mfd_esxi/host.py b/mfd_esxi/host.py
new file mode 100644
index 0000000..094fca7
---
/dev/null
+++ b/mfd_esxi/host.py
@@ -0,0 +1,455 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""ESXi host support."""
+
+import logging
+import re
+from collections import namedtuple
+from ipaddress import (
+    IPv4Interface,
+    IPv6Interface,
+    IPv4Address,
+    IPv6Address,
+    IPv4Network,
+    IPv6Network,
+    ip_address,
+    ip_network,
+)
+from typing import TYPE_CHECKING, List, Union, Optional
+
+from mfd_common_libs import log_levels, add_logging_level
+from mfd_typing.utils import strtobool
+from mfd_typing import PCIAddress
+
+from .esxi_version import ESXiVersion
+from .exceptions import ESXiNotFound, ESXiRuntimeError
+from .host_api import ESXiHostAPI
+from .vm_mgr import ESXiVMMgr
+from .vmknic import Vmknic
+from .vswitch import ESXivSwitch
+
+if TYPE_CHECKING:
+    from mfd_connect import Connection
+    from mfd_connect.base import ConnectionCompletedProcess
+    from mfd_typing import MACAddress
+    from mfd_network_adapter.network_interface.esxi import ESXiNetworkInterface
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+IntnetCliVersion = namedtuple(typename="IntnetCliVersion", field_names=("intnet_ver", "ddk_ver"))
+
+
+class ESXiHypervisor:
+    """Class for most of ESXi functionality."""
+
+    def __init__(self, connection: "Connection"):
+        """
+        Initialize host object.
+
+        :param connection: Connection object
+        """
+        self.connection = connection
+        self.hv: ESXiVMMgr = ESXiVMMgr(self)
+
+        self.esxi_version = None
+        self.vswitch: List[ESXivSwitch] = []
+        self.vmknic: List[Vmknic] = []
+        self.mng_vmknic: Union[Vmknic, None] = None
+        self.mng_ip: Union[IPv4Interface, IPv6Interface, None] = None
+        self._ip: Optional[Union[IPv4Address, IPv6Address]] = None
+        self.api: Optional[ESXiHostAPI] = None
+
+    def execute_command(self, command: str, **kwargs) -> "ConnectionCompletedProcess":
+        """
+        Shortcut for execute command.
+
+        :param command: string with command
+        :param kwargs: parameters
+        :return: result of command
+        """
+        return self.connection.execute_command(command=command, **kwargs)
+
+    def initialize(self, ip: str, login: str, password: str) -> None:
+        """Read host configuration."""
+        self._ip: Union[IPv4Address, IPv6Address] = ip_address(ip)
+        self.api: ESXiHostAPI = ESXiHostAPI(ip=ip, login=login, password=password)
+
+        self.initialize_version()
+        self.initialize_vswitch()
+        self.initialize_vmknic()
+        self.initialize_mng()
+        self.initialize_hv()
+
+    def initialize_version(self) -> None:
+        """Read version."""
+        self.esxi_version = ESXiVersion.discover(self)
+
+    def initialize_vswitch(self) -> None:
+        """Read vSwitches."""
+        self.vswitch = ESXivSwitch.discover(self)
+
+    def initialize_vmknic(self) -> None:
+        """Read vmknic adapters."""
+        self.vmknic = Vmknic.discover(self)
+
+    def initialize_mng(self) -> None:
+        """Find management adapter and IP."""
+        self.mng_vmknic = self.find_vmknic(ip=self._ip)
+        for ip in self.mng_vmknic.ips:
+            if self._ip in ip.network:
+                self.mng_ip = ip
+                return
+        raise ESXiNotFound("Could not find management IP")
+
+    def initialize_hv(self) -> None:
+        """Read VMs on host."""
+        self.hv.initialize()
+
+    def add_vswitch(self, name: str) -> "ESXivSwitch":
+        """
+        Add vSwitch on host.
+
+        :param name: name of vSwitch
+        :return: vSwitch object
+        """
+        vswitch = ESXivSwitch.add_vswitch(self, name)
+        self.vswitch.append(vswitch)
+        return vswitch
+
+    def del_vswitch(self, name: str) -> None:
+        """
+        Delete vSwitch on host.
+
+        :param name: name of vSwitch
+        """
+        for i, vswitch in enumerate(self.vswitch):
+            if vswitch.name == name:
+                vswitch.del_vswitch()
+                self.vswitch.pop(i)
+                return
+        raise ESXiNotFound(f"Could not find vSwitch {name}")
+
+    def set_vswitch(
+        self,
+        name: str,
+        uplinks: List[str],
+        portgroups: List[str] = (),
+        mtu: int = 1500,
+        vmknic: bool = True,
+    ) -> "ESXivSwitch":
+        """
+        Set existing or create new vSwitch.
+
+        :param name: name of vSwitch
+        :param uplinks: list of uplink names
+        :param portgroups: list of portgroup names
+        :param mtu: MTU value (default 1500)
+        :param vmknic: create portgroups for vmknic adapters and add them
+        :return: vSwitch object
+        """
+        for vswitch in self.vswitch:
+            if vswitch.name == name:
+                vswitch.reconfigure(uplinks=uplinks, portgroups=portgroups, mtu=mtu, vmknic=vmknic)
+                return vswitch
+        vswitch = self.add_vswitch(name)
+        vswitch.configure(uplinks=uplinks, portgroups=portgroups, mtu=mtu, vmknic=vmknic)
+        return vswitch
+
+    def find_vswitch(self, name: str = None, uplink: str = None, portgroup: str = None) -> "ESXivSwitch":
+        """
+        Find vSwitch based on parameter.
+
+        :param name: name of vSwitch
+        :param uplink: uplink connected to vSwitch
+        :param portgroup: portgroup connected to vSwitch
+        :return: vSwitch object
+        """
+        for vswitch in self.vswitch:
+            if name == vswitch.name or uplink in vswitch.uplinks or portgroup in vswitch.portgroups:
+                return vswitch
+        raise ESXiNotFound("Could not find vSwitch")
+
+    def add_vmknic(self, portgroup: str, mtu: int = None, mac: "MACAddress" = None) -> "Vmknic":
+        """
+        Add vmknic adapter.
+
+        :param portgroup: portgroup name
+        :param mtu: MTU value
+        :param mac: MAC value
+        :return: Vmknic adapter
+        """
+        vmknic = Vmknic.add_vmknic(self, portgroup=portgroup, mtu=mtu, mac=mac)
+        self.vmknic.append(vmknic)
+        return vmknic
+
+    def del_vmknic(self, name: str = None, portgroup: str = None) -> None:
+        """
+        Delete vmknic adapter.
+
+        :param name: name of adapter
+        :param portgroup: portgroup of adapter
+        """
+        for i, vmknic in enumerate(self.vmknic):
+            if vmknic.name == name or vmknic.portgroup == portgroup:
+                vmknic.del_vmknic()
+                self.vmknic.pop(i)
+                return
+        raise ESXiNotFound("Could not find vmknic")
+
+    def find_vmknic(
+        self,
+        name: str = None,
+        portgroup: str = None,
+        ip: Union[IPv4Address, IPv6Address, str] = None,
+        net: Union[IPv4Network, IPv6Network, str] = None,
+    ) -> "Vmknic":
+        """
+        Find vmknic adapter.
+
+        :param name: name of adapter
+        :param portgroup: portgroup of adapter
+        :param ip: IP address of adapter
+        :param net: IP from the same network as adapter
+        :return: vmknic adapter
+        """
+        if isinstance(ip, str):
+            ip = ip_address(ip)
+        if isinstance(net, str):
+            net = ip_network(net, strict=False)
+        for vmknic in self.vmknic:
+            if vmknic.name == name or vmknic.portgroup == portgroup:
+                return vmknic
+            for i in vmknic.ips:
+                if ip == i.ip:
+                    return vmknic
+            if net is not None:
+                for ip in vmknic.ips:
+                    if ip in net:
+                        return vmknic
+        raise ESXiNotFound("Could not find vmknic")
+
+    def get_meminfo(self) -> dict[str, int]:
+        """
+        Get information regarding the memory of the system.
+
+        :return: dictionary represents /proc/meminfo data
+        """
+        output = self.execute_command("vsish -e get /memory/memInfo", expected_return_codes={0}).stdout
+
+        regex = re.compile(
+            r"(System heap free \(pages\):(?P<heap_free>\d+)\n\s*)?"
+ r"System memory usage \(pages\):(?P\d+)", + re.MULTILINE, + ) + match = regex.search(output) + if not match: + raise RuntimeError("Cannot get memory info for the host") + + if match.group("heap_free"): + ret = { + "heap_free": int(match.group("heap_free")), + "mem_usage": int(match.group("mem_usage")), + } + else: + ret = {"mem_usage": int(match.group("mem_usage"))} + return ret + + def find_link_partner(self, vmnic: str) -> str: + """Get link partner adapter (client) for given adapter on the same host. + + :param vmnic: vmnic name + :return: client vmnic name + :raises: ESXiRuntimeError + """ + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Finding link partner for adapter {vmnic}", + ) + output = self.execute_command("esxcfg-nics -l").stdout + output = output.splitlines() + output.pop(0) # remove header + + for base in output: + base = base.split() + if base[0] == vmnic: + break + else: + raise ESXiRuntimeError(f"Could not find adapter {vmnic}") + + # find adapter by closest PCI address + func = int(base[1][-1]) + func = func ^ 1 # change last bit + pci = f"{base[1][:-1]}{func}" + + for line in output: + line = line.split() + if line[1] == pci and line[3] == "Up": + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Link partner for adapter {vmnic} is {line[0]}", + ) + return line[0] + + # find adapter from the same driver + for line in output: + line = line.split() + if line[0] != base[0] and line[2] == base[2] and line[3] == "Up": + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Link partner for adapter {vmnic} is {line[0]}", + ) + return line[0] + + raise ESXiRuntimeError(f"Could not find link partner to adapter {vmnic}") + + def find_pf0(self, nics: List[str]) -> List[str]: + """Find base ports (PF0) of selected ports. + + :param nics: list of vmnic names + :return: list of vmnic names + """ + logger.log(level=log_levels.MODULE_DEBUG, msg=f"Finding PF0 of adapters {nics}") + output = self.execute_command("esxcfg-nics -l").stdout + output = output.splitlines() + output.pop(0) # remove header + + # Create set of PCI addresses + pci_set = set() + for line in output: + line = line.split() + if line[0] in nics: + pci_set.add(line[1].split(".")[0]) + + pf0 = [] + for line in output: + line = line.split() + p = line[1].split(".") + if p[1] == "0" and p[0] in pci_set: + pf0.append(line[0]) + + logger.log(level=log_levels.MODULE_DEBUG, msg=f"Found PF0 ports {pf0}") + return pf0 + + def get_intnetcli_version(self) -> IntnetCliVersion: + """Get version of intnetcli and associated DDK. + + :return: intnetcli version, DDK version + :raises: RuntimeError + """ + command = "esxcli software vib list | grep -i int-esx-intnetcli" + res = self.connection.execute_command(command, shell=True, expected_return_codes=None) + if res.return_code != 0: + versions = IntnetCliVersion(intnet_ver=None, ddk_ver=None) + return versions + + match = re.search(r"(?P[0-9]+)\.(?P[0-9\.]+)", res.stdout) + if match: + versions = IntnetCliVersion(intnet_ver=match.group("intnet"), ddk_ver=match.group("DDK")) + return versions + + raise ESXiRuntimeError("Unknown version of intnetcli installed.") + + def get_pci_passthrough_capable_devices(self) -> dict[PCIAddress, bool]: + """Get Dict with all devices that support PCI passthrough and current PCI passthrough status. + + return: correct passthrough status starting from ESXi 7.0 + as older versions do not present this information, + status for such devices will be presented as false. 
+ """ + result = {} + logger.log(level=log_levels.MODULE_DEBUG, msg="Get PCI passthrough capable devices") + if self.connection.execute_command("uname -r").stdout.strip()[0] < "7": + regex = r"address: (?P\d{4}:\w{2}:\w{2}\.\w+)(?:\s.*)*?\s{4}passthru capable: true" + output = self.connection.execute_command( + "esxcli hardware pci list | grep -i vmnic -B 6 -A 29", shell=True + ).stdout.lower() + match = re.findall(regex, output, re.MULTILINE) + if match: + result = {PCIAddress(data=pci): False for pci in match} + else: + regex = r"(?P\d{4}:\w{2}:\w{2}\.\w+).*(?Pfalse|true)" + output = self.connection.execute_command("esxcli hardware pci pcipassthru list").stdout + + match = re.findall(regex, output) + if match: + result = {PCIAddress(data=pci): strtobool(status) for pci, status in match} + + return result + + def get_pci_passthrough_nics(self) -> list[PCIAddress]: + """Get PCI addresses for PCI passthrough enabled NICs. + + Method does not return Virtual functions which are also PCI passthroughs. + :return: list of PCI strings + :raises ESXiRuntimeError when cannot get PCI addresses for PCI passthrough enabled NIC + """ + logger.log(level=log_levels.MODULE_DEBUG, msg="Get NICs with enabled PCI passthrough") + output = self.connection.execute_command("lspci -p").stdout + match = re.findall(r"(?P\d{4}:\w{2}:\w{2}\.\w+).*?A P .*", output) + if not match: + raise ESXiRuntimeError("Cannot get PCI addresses for PCI passthrough enabled NICs.") + return [PCIAddress(data=pci) for pci in match] + + def get_vds_id(self) -> str: + """Get Distributed vSwitch id. e.g. '64 76 73 5f 74 65 73 74-00 00 00 00 00 00 00 00'. + + :return: VDS id + :raises ESXiRuntimeError: if it cannot get VDS ID + """ + output = self.connection.execute_command("esxcli network vswitch dvs vmware list").stdout + try: + if output: + dvs_id = output.splitlines()[2].lstrip("VDS ID: ") + return dvs_id + except IndexError: + raise ESXiRuntimeError("Cannot get VDS ID.") + + def get_vm_name_for_vf_id(self, vf_id: int | str, interface: "ESXiNetworkInterface") -> str: + """ + Find name of VM associated with a given VF. 
+
+        :param vf_id: VF ID
+        :param interface: ESXi adapter
+        :return: name of VM associated with VF
+        """
+        # get list of used vfs for adapter
+        vfs = interface.virtualization.get_connected_vfs_info()
+
+        # get list of used VMs
+        cmd = "esxcli vm process list"
+        output = self.connection.execute_command(cmd).stdout
+
+        my_regex = re.compile(
+            r".*World ID:\s(?P<world_id>\d+)\n"
+            r".*Process ID:\s.*\n"
+            r".*VMX Cartel ID:\s(?P<vmx_cartel_id>\d+)\n"
+            r".*UUID:\s.*\n"
+            r".*Display Name:\s(?P<name>.*)\n"
+            r".*Config File:\s.*",
+            re.MULTILINE,
+        )
+
+        vms = [
+            {
+                "world_id": match.group("world_id"),
+                "name": match.group("name"),
+                "vmx_cartel_id": match.group("vmx_cartel_id"),
+            }
+            for match in my_regex.finditer(output)
+        ]
+
+        # find VM name corresponding to given VF
+        world_id = [vf.owner_world_id for vf in vfs if vf.vf_id == str(vf_id)]
+
+        if not world_id:
+            raise Exception(f"VF {vf_id} is not connected to any VM")
+
+        os_version = float(self.connection.get_system_info().kernel_version[:3])
+
+        return [
+            vm["name"]
+            for vm in vms
+            if ((os_version < 7.0 or os_version >= 9.0) and vm["vmx_cartel_id"] == world_id[0])
+            or ((7.0 <= os_version < 9.0) and vm["world_id"] == world_id[0])
+        ][0]
diff --git a/mfd_esxi/host_api.py b/mfd_esxi/host_api.py
new file mode 100644
index 0000000..4a392a7
--- /dev/null
+++ b/mfd_esxi/host_api.py
@@ -0,0 +1,380 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""ESXi host support for API/pyvmomi."""
+import logging
+from atexit import register as exit_register
+from OpenSSL.crypto import load_certificate, FILETYPE_PEM
+from packaging.version import Version
+from http.client import HTTPException
+from socket import error as socket_error
+from time import sleep
+from typing import List, Dict, Union, Tuple
+from pyVim import connect
+from pyVmomi import vim
+
+from mfd_common_libs import log_levels, add_logging_level
+from mfd_network_adapter.network_interface import NetworkInterface
+from .exceptions import ESXiAPIInvalidLogin, ESXiAPISocketError
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class ESXiHostAPI(object):
+    """ESXi SOAP API wrapper."""
+
+    def __init__(self, ip: str, login: str, password: str, port: int = 443):
+        """
+        Init object.
+
+        :param ip: ESXi host IP address
+        :param login: Login name
+        :param password: Password
+        :param port: Port number
+        """
+        self.__service = None
+        self.__content = None
+        self._ip = ip
+        self._login = login
+        self._password = password
+        self._port = port
+        self._fingerprint = None
+
+    def __repr__(self) -> str:
+        """Return string representation of an object.
+
+        :return: class name and IP address
+        """
+        return f"{self.__class__.__name__}('{self._ip}')"
+
+    @property
+    def _content(self) -> vim.ServiceInstance:
+        """Service content of the API connection.
+
+        :return: Service content
+        """
+        try:
+            if self.__service:
+                if self.__content.sessionManager.currentSession:
+                    return self.__content
+                else:
+                    logger.log(
+                        level=log_levels.MODULE_DEBUG,
+                        msg=f"{self._ip} the session has expired",
+                    )
+        except (HTTPException, ConnectionError):
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"{self._ip} HTTP connection error, reconnecting",
+            )
+
+        self.__content = self._reconnect()
+        return self.__content
+
+    def _connect(self) -> vim.ServiceInstance:
+        """Connect to the specified server using API.
+
+        :return: Service content
+        :raise ESXiAPIInvalidLogin: Invalid login for host
+        :raise ESXiAPISocketError: Error with connection
+        """
+        try:
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"Connecting to: {self._ip}")
+
+            self.__service = connect.SmartConnect(
+                host=self._ip,
+                user=self._login,
+                pwd=self._password,
+                port=self._port,
+                connectionPoolTimeout=-1,
+                disableSslCertValidation=True,
+            )
+            exit_register(self._disconnect)
+            return self.__service.RetrieveServiceContent()
+        except vim.fault.InvalidLogin:
+            raise ESXiAPIInvalidLogin
+        except socket_error:
+            raise ESXiAPISocketError
+
+    def _disconnect(self) -> None:
+        """Disconnect from server."""
+        if self.__service:
+            connect.Disconnect(self.__service)
+            self.__service = None
+            self.__content = None
+
+    def _reconnect(self) -> vim.ServiceInstance:
+        """Reconnect server.
+
+        :return: Service content
+        """
+        self._disconnect()
+        return self._connect()
+
+    @property
+    def version(self) -> "Version":
+        """Return version of the vSphere API.
+
+        :return: version object
+        """
+        return Version(self._content.about.apiVersion)
+
+    def get_host(self) -> "vim.HostSystem":
+        """Get host object from local content.
+
+        :return: host object
+        """
+        return self._content.viewManager.CreateContainerView(self._content.rootFolder, [vim.HostSystem], True).view[0]
+
+    def get_lldp_status(self, adapter: "NetworkInterface") -> "vim.host.PhysicalNic.LldpInfo":
+        """Get LLDP status.
+
+        :param adapter: Device Interface
+        :return: LLDP info of the interface
+        """
+        esxi_host = self.get_host()
+        network_system = esxi_host.configManager.networkSystem
+        for hint in network_system.QueryNetworkHint(device=[adapter.name]):
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"LLDP info for {hint.device} is {hint.lldpInfo}",
+            )
+            return hint.lldpInfo
+
+    def set_adapters_sriov(
+        self,
+        adapters: Union[List["NetworkInterface"], List[str]],
+        num_vf: int = 0,
+        wait: bool = True,
+    ) -> None:
+        """Update SRIOV settings.
+
+        :param adapters: list of adapters or list of pci addresses on which VFs will be set
+        :param num_vf: number of Virtual Functions
+        :param wait: wait for driver to parse new VFs
+        """
+        esxi_host = self.get_host()
+        pci_passthru_system = esxi_host.configManager.pciPassthruSystem
+        for adapter in adapters:
+            if isinstance(adapter, NetworkInterface):
+                pci = adapter.pci_address.lspci
+            else:
+                pci = adapter
+
+            config = vim.host.SriovConfig()
+            config.sriovEnabled = num_vf > 0
+            config.numVirtualFunction = num_vf
+            config.id = pci
+
+            pci_passthru_system.UpdatePassthruConfig([config])
+
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Number of Virtual Functions has been updated to {num_vf} on {pci}",
+            )
+
+        if wait:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg="Wait extra 30 seconds to initialize driver features",
+            )
+            sleep(30)
+
+    def get_adapters_sriov_info(
+        self, adapters: List["NetworkInterface"], all_ports: bool = False
+    ) -> Dict[str, Dict[str, Union[bool, int]]]:
+        """Get info about SR-IOV properties of adapter.
+ + :param adapters: NetworkInterface + :param all_ports: get all ports of adapter + :return: dict with adapters info + """ + adapters_info = {} + adapters_lspci = set() + if not all_ports: + for adapter in adapters: + adapters_lspci.add(adapter.pci_address.lspci.split(".")[0] + ".") + else: + for adapter in adapters: + adapters_lspci.add(adapter.pci_address.lspci) + + esxi_host = self.get_host() + pci_passthru_system = esxi_host.configManager.pciPassthruSystem + + for device in pci_passthru_system.pciPassthruInfo: + lspci = device.id + if not all_ports: + lspci = lspci.split(".")[0] + "." + for adapter in adapters_lspci: + if lspci == adapter: + try: + max_vfs = device.maxVirtualFunctionSupported + num_vfs = device.numVirtualFunction + req_vfs = device.numVirtualFunctionRequested + enabled = device.sriovEnabled + adapters_info[device.id] = { + "max_vfs": max_vfs, + "num_vfs": num_vfs, + "req_vfs": req_vfs, + "enabled": enabled, + } + except AttributeError: + # Adapter not supporting SR-IOV do not have maxVirtualFunctionSupported + adapters_info[device.id] = { + "max_vfs": 0, + "num_vfs": 0, + "req_vfs": 0, + "enabled": False, + } + adapters_lspci.remove(adapter) + break + return adapters_info + + @staticmethod + def get_performance_metrics_keys( + perf_manager: vim.PerformanceManager, + ) -> Dict[str, int]: + """Get all possible counters and their keys. + + :param perf_manager: performance manager + :return: all possible system counters and their keys + """ + counter_info = {} + for counter in perf_manager.perfCounter: + full_name = f"{counter.groupInfo.key}.{counter.nameInfo.key}.{counter.rollupType}" + counter_info[full_name] = counter.key + return counter_info + + @staticmethod + def get_performance_metrics_stats( + metrics: List[Tuple[vim.PerformanceManager.MetricId, str]], + esxi_host: vim.HostSystem, + perf_manager: vim.PerformanceManager, + ) -> List: + """Gather stats from metrics. + + :param metrics: metrics to gather + :param esxi_host: host instance + :param perf_manager: performance manager + :return: gathered performance stats + """ + stats = [] + for metric, name in metrics: + query = vim.PerformanceManager.QuerySpec(entity=esxi_host, metricId=[metric]) + stats.append((perf_manager.QueryPerf(querySpec=[query]), name)) + return stats + + @staticmethod + def create_performance_metrics_table( + columns: List[str], stats: List, create_chart: bool = True + ) -> Union[str, Dict]: + """Create table with performance stats. 
+ + :param columns: column names + :param stats: gathered performance stats + :param create_chart: indicates if table with metrics should be created + :return: table with performance stats to be printed + """ + # Create nested dicts with stat results + lines = {} + for stat, name in stats: + count = 0 + try: + for value in stat[0].value[0].value: + val = float(value / 100) + stamp = stat[0].sampleInfo[count].timestamp + + # Update line of chart with stat value + line = lines.get(stamp, {}) + line[name] = val + lines[stamp] = line + + count += 1 + except IndexError: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Error getting data, skipping row {name}", + ) + + if not create_chart: + return lines + + # Create output chart + line = " " * 30 + for column in columns: + line += " " * (12 - len(column)) + column + chart = [line] + for line in sorted(lines.keys()): + np = str(line) + " - " + output = f"{np:30}" + for column in columns: + value = lines[line].get(column, float("NaN")) + output += f"{value:12.2f}" + chart.append(output) + + return "\n".join(chart) + + def get_performance_metrics( + self, adapters: List["NetworkInterface"], create_chart: bool = True + ) -> Union[str, Dict]: + """Get performance data from performance manager. + + :param adapters: adapters to gather throughput + :param create_chart: indicates if table with metrics should be created + :return: table with gathered performance metrics + """ + esxi_host = self.get_host() + perf_manager = self._content.perfManager + + # Get all possible counters + counter_info = self.get_performance_metrics_keys(perf_manager) + + # Get keys of interesting counters + key_c = counter_info["cpu.usage.average"] + key_r = counter_info["net.received.average"] + key_t = counter_info["net.transmitted.average"] + + # Prepare columns and metrics + metrics = [(vim.PerformanceManager.MetricId(counterId=key_c, instance=""), "CPU")] + columns = ["CPU"] + for adapter in adapters: + name = adapter.name + "-Rx" + columns.append(name) + metrics.append( + ( + vim.PerformanceManager.MetricId(counterId=key_r, instance=adapter.name), + name, + ) + ) + name = adapter.name + "-Tx" + columns.append(name) + metrics.append( + ( + vim.PerformanceManager.MetricId(counterId=key_t, instance=adapter.name), + name, + ) + ) + + # Gather stats from metrics + stats = self.get_performance_metrics_stats(metrics=metrics, esxi_host=esxi_host, perf_manager=perf_manager) + + return self.create_performance_metrics_table(columns=columns, stats=stats, create_chart=create_chart) + + @property + def fingerprint(self) -> str: + """Get fingerprint of host certificate.""" + if self._fingerprint is None: + self._fingerprint = self.get_fingerprint() + return self._fingerprint + + def get_fingerprint(self, digest: str = "sha1") -> str: + """Get fingerprint of host certificate using digest algorithm. + + :param digest: Name of digest algorithm. + + :return: Fingerprint of host certificate. 
+ """ + cert_bytes = bytes(self.get_host().config.certificate) + cert = load_certificate(FILETYPE_PEM, cert_bytes) + fingerprint_bytes = cert.digest(digest) + return fingerprint_bytes.decode("UTF-8") diff --git a/mfd_esxi/nsx/__init__.py b/mfd_esxi/nsx/__init__.py new file mode 100644 index 0000000..4365540 --- /dev/null +++ b/mfd_esxi/nsx/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX specific logic.""" diff --git a/mfd_esxi/nsx/base.py b/mfd_esxi/nsx/base.py new file mode 100644 index 0000000..e439712 --- /dev/null +++ b/mfd_esxi/nsx/base.py @@ -0,0 +1,47 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Basic building blocks for more advanced classes.""" +from abc import abstractmethod, ABC +from typing import Optional + +from com.vmware.vapi.std.errors_client import NotFound +from vmware.vapi.bindings.struct import VapiStruct + +from .connection import NsxConnection +from .utils import api_call + + +class NsxEntity(ABC): + """Low level NSX entity.""" + + def __init__(self, name: str, connection: NsxConnection): + """ + Initialize entity instance. + + :param name: NSX entity name. + :param connection: Connection to NSX. + """ + self._connection = connection + self._name = name + + @abstractmethod + @api_call + def _get_content(self) -> VapiStruct: + """ + Get content from NSX. + + :return: Object content from NSX. + """ + + @property + def name(self) -> str: + """Get entity name.""" + return self._name + + @property + def content(self) -> Optional[VapiStruct]: + """Get content.""" + try: + return self._get_content() + except NotFound: + return None diff --git a/mfd_esxi/nsx/connection.py b/mfd_esxi/nsx/connection.py new file mode 100644 index 0000000..0aa7a94 --- /dev/null +++ b/mfd_esxi/nsx/connection.py @@ -0,0 +1,124 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Connection to NSX.""" +import requests +from packaging.version import Version, parse +from com.vmware import nsx_policy_client, nsx_client + +from vmware.vapi.bindings.stub import ApiClient, StubConfiguration +from vmware.vapi.lib import connect +from vmware.vapi.stdlib.client.factories import StubConfigurationFactory + +from ..exceptions import UninitializedNsxConnection + + +class ApiClientWrapper: + """Wrapper that make NSX SDK just a little more readable.""" + + def __init__(self, stub_config: StubConfiguration): + """ + Initialize instance. + + :param stub_config: NSX API stub configuration. + """ + self._stub_config = stub_config + self._infra_client = None + self._management_client = None + + def initialize(self) -> None: + """Initialize clients.""" + self._infra_client = ApiClient(nsx_policy_client.StubFactory(self._stub_config)) + self._management_client = ApiClient(nsx_client.StubFactory(self._stub_config)) + + @property + def policy(self) -> ApiClient: + """Get wrapped infra client.""" + return self._infra_client + + @property + def management(self) -> ApiClient: + """Get wrapped management client.""" + return self._management_client + + +class NsxConnection: + """Connection to NSX.""" + + def __init__(self, address: str, username: str, password: str, tcp_port: int = 443): + """ + Initialize instance. + + :param address:Address of NSX. + :param username: Name of user. + :param password: Password for user. + :param tcp_port: Port for https connection to NSX. 
+ """ + self._address = address + self._username = username + self._password = password + self._tcp_port = tcp_port + self._api = None + self._version = None + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self._address}')" + + @property + def api(self) -> ApiClientWrapper: + """ + Get connection to NSX. + + :return: Connection to NSX. + """ + if self._api is None: + raise UninitializedNsxConnection() + return self._api + + @property + def version(self) -> Version: + """Get NSX version.""" + return parse(self._version) + + def _connect_to_nsx(self) -> None: + session = requests.session() + session.verify = False + session.trust_env = False + requests.packages.urllib3.disable_warnings() + + nsx_url = f"https://{self._address}:{self._tcp_port}" + resp = session.post( + nsx_url + "/api/session/create", + data={"j_username": self._username, "j_password": self._password}, + ) + if resp.status_code != requests.codes.ok: + resp.raise_for_status() + + session.headers["Cookie"] = resp.headers.get("Set-Cookie") + session.headers["X-XSRF-TOKEN"] = resp.headers.get("X-XSRF-TOKEN") + + connector = connect.get_requests_connector(session=session, msg_protocol="rest", url=nsx_url) + stub_config = StubConfigurationFactory.new_runtime_configuration(connector) + self._api = ApiClientWrapper(stub_config) + self._api.initialize() + self._version = self._api.management.node.Version.get().node_version + + @classmethod + def with_connection( + cls: "NsxConnection", + address: str, + username: str, + password: str, + tcp_port: int = 443, + ) -> "NsxConnection": + """ + Initialize connection to NSX using stub factory. + + :param address:Address of NSX. + :param username: Name of user. + :param password: Password for user. + :param tcp_port: Port for https connection to NSX. + """ + instance = cls(address, username, password, tcp_port) + instance._connect_to_nsx() + return instance diff --git a/mfd_esxi/nsx/enforcement_point.py b/mfd_esxi/nsx/enforcement_point.py new file mode 100644 index 0000000..a320d90 --- /dev/null +++ b/mfd_esxi/nsx/enforcement_point.py @@ -0,0 +1,23 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX NsxEnforcementPoint.""" +from com.vmware.nsx_policy.model_client import EnforcementPoint + +from .base import NsxEntity +from .connection import NsxConnection +from .infra_site import NsxInfraSite +from .utils import api_call + + +class NsxEnforcementPoint(NsxEntity): + """NSX NsxEnforcementPoint.""" + + DEFAULT_NAME = "default" + + def __init__(self, connection: NsxConnection): + """Initialize instance.""" + super().__init__(self.DEFAULT_NAME, connection) + + @api_call + def _get_content(self) -> EnforcementPoint: + return self._connection.api.policy.infra.sites.EnforcementPoints.get(NsxInfraSite.DEFAULT_NAME, self.name) diff --git a/mfd_esxi/nsx/fabric_discovered_node.py b/mfd_esxi/nsx/fabric_discovered_node.py new file mode 100644 index 0000000..f8e3bdc --- /dev/null +++ b/mfd_esxi/nsx/fabric_discovered_node.py @@ -0,0 +1,23 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX Fabric Discovered Node.""" +from com.vmware.nsx_policy.model_client import FabricHostNode + +from .base import NsxEntity +from .utils import api_call + + +class NsxFabricDiscoveredNode(NsxEntity): + """NSX Fabric Discovered Node.""" + + @api_call + def _get_content(self) -> FabricHostNode: + # NSX API does not support get-by-name in this case. 
Manual search from all is required + + return next( + filter( + lambda fdn: self.name == fdn.display_name, + self._connection.api.management.fabric.DiscoveredNodes.list().results, + ), + None, + ) diff --git a/mfd_esxi/nsx/host_transport_node.py b/mfd_esxi/nsx/host_transport_node.py new file mode 100644 index 0000000..ad8dcac --- /dev/null +++ b/mfd_esxi/nsx/host_transport_node.py @@ -0,0 +1,286 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX Host Transport Node.""" +from time import sleep, time +from typing import List + +from mfd_esxi.exceptions import ( + NsxResourceSetupError, + NsxResourcePartialSuccessSetupError, + NsxResourceRemoveError, + MissingNsxEntity, +) + +from com.vmware.nsx_policy.model_client import ( + HostTransportNode, + StandardHostSwitchSpec, + VdsUplink, + StandardHostSwitch, + HostSwitchProfileTypeIdEntry, + StaticIpPoolSpec, + StaticIpv6PoolSpec, + TransportZoneEndPoint, + CpuCoreConfigForEnhancedNetworkingStackSwitch, + AssignedByDhcp, + TransportNodeState, +) +from com.vmware.vapi.std.errors_client import NotFound + +from .base import NsxEntity +from .enforcement_point import NsxEnforcementPoint +from .fabric_discovered_node import NsxFabricDiscoveredNode +from .infra_site import NsxInfraSite +from .utils import api_call +from ..const import ESXI_UPLINK_FORMAT, ESXI_UPLINK_NUMBER + + +class NsxHostTransportNode(NsxEntity): + """Host transport node.""" + + @api_call + def _get_content(self) -> HostTransportNode: + return self._connection.api.policy.infra.sites.enforcement_points.HostTransportNodes.get( + NsxInfraSite.DEFAULT_NAME, NsxEnforcementPoint.DEFAULT_NAME, self.name + ) + + def _patch(self, payload: HostTransportNode, timeout: int) -> str: + self._connection.api.policy.infra.sites.enforcement_points.HostTransportNodes.patch( + NsxInfraSite.DEFAULT_NAME, + NsxEnforcementPoint.DEFAULT_NAME, + self.name, + payload, + ) + + t_end = timeout + time() + + while time() < t_end: + r_state = self._connection.api.policy.infra.sites.enforcement_points.host_transport_nodes.State.get( + NsxInfraSite.DEFAULT_NAME, NsxEnforcementPoint.DEFAULT_NAME, self.name + ) + + if r_state.node_deployment_state.state == TransportNodeState.STATE_SUCCESS: + if r_state.state == TransportNodeState.STATE_SUCCESS: + return self.name + elif r_state.node_deployment_state.state == TransportNodeState.STATE_FAILED: + raise NsxResourceSetupError(r_state.to_json()) + elif r_state.node_deployment_state.state == TransportNodeState.STATE_PARTIAL_SUCCESS: + raise NsxResourcePartialSuccessSetupError(r_state.to_json()) + sleep(10) + + raise NsxResourceSetupError(f"Timeout during operation on Host Transport Node {self.name}") + + @api_call + def add( # noqa: C901 + self, + timeout: int = 600, + ) -> None: + """ + Add host transport node to NSX. Only hosts that are present in VCSA with VDS-es are supported. + + :param timeout: Maximum time add node can take to resolve. + """ + if self.content is not None: + return + + discovered_node = NsxFabricDiscoveredNode(self.name, self._connection).content + if discovered_node is None: + # Standalone ESXi hosts are not supported. 
They need to be present in discovery + raise NsxResourceSetupError("Transport node missing in discovery") + + switch_specs = StandardHostSwitchSpec(host_switches=[]) + payload = HostTransportNode( + discovered_node_id_for_create=discovered_node.external_id, + display_name=self.name, + host_switch_spec=switch_specs, + description=f"Transport Node {self.name}", + ) + + self._patch(payload=payload, timeout=timeout) + + @api_call + def add_switch( # noqa: C901 + self, + host_switch_name: str, + uplink_name: str, + transport_zone_name: str, + vds_id: str, + uplinks: int = ESXI_UPLINK_NUMBER, + ip_pool_id: str | None = None, + mode: str = StandardHostSwitch.HOST_SWITCH_MODE_STANDARD, + lcore_mapping: List[CpuCoreConfigForEnhancedNetworkingStackSwitch] | None = None, + lcores: int = 0, + timeout: int = 600, + ) -> None: + """ + Add host transport node to NSX. Only hosts that are present in VCSA with VDS-es are supported. + + :param host_switch_name: Name of host switch to set + :param uplink_name: Name of uplink profile + :param transport_zone_name: Name of transport zone + :param vds_id: ID of VDS (from VCSA). It looks like "50 03 bd df 08 0b cc d0-be 1a 5c 0e 16 87 7f a0" + :param uplinks: number of uplinks + :param ip_pool_id: ID of IP pool, for VLAN transport zone can be None + :param mode: 'STANDARD', 'ENS' or 'ENS_INTERRUPT' + :param lcore_mapping: CPU config. Will overwrite if provided alongside 'lcores' + :param lcores: Logical cores count. + :param timeout: Maximum time add node can take to resolve. + """ + payload: HostTransportNode = self.content + if payload is None: + raise MissingNsxEntity(f"Host Transport Node {self.name} is missing") + + uplink_list = [] + for i in range(1, uplinks + 1): + uplink_list.append( + VdsUplink( + uplink_name=ESXI_UPLINK_FORMAT % i, + vds_uplink_name=ESXI_UPLINK_FORMAT % i, + ) + ) + + host_switch_profile_ids = [ + HostSwitchProfileTypeIdEntry( + key=HostSwitchProfileTypeIdEntry.KEY_UPLINKHOSTSWITCHPROFILE, + value=f"/infra/host-switch-profiles/{uplink_name}", + ) + ] + + ip_assignment_spec = None + ipv6_assignment_spec = None + if ip_pool_id is None: + ip_assignment_spec = AssignedByDhcp() + else: + if "IP4" in ip_pool_id: + ip_assignment_spec = StaticIpPoolSpec(ip_pool_id=f"/infra/ip-pools/{ip_pool_id}") + else: + ipv6_assignment_spec = StaticIpv6PoolSpec(ipv6_pool_id=f"/infra/ip-pools/{ip_pool_id}") + + tz_path = f"/infra/sites/{NsxInfraSite.DEFAULT_NAME}/enforcement-points/{NsxEnforcementPoint.DEFAULT_NAME}/transport-zones/{transport_zone_name}" # noqa: E501 + transport_zone_endpoints = [TransportZoneEndPoint(transport_zone_id=tz_path)] + host_switch = StandardHostSwitch( + host_switch_id=vds_id, + host_switch_name=host_switch_name, + host_switch_type=StandardHostSwitch.HOST_SWITCH_TYPE_VDS, + uplinks=uplink_list, + host_switch_mode=mode, + host_switch_profile_ids=host_switch_profile_ids, + ip_assignment_spec=ip_assignment_spec, + ipv6_assignment_spec=ipv6_assignment_spec, + transport_zone_endpoints=transport_zone_endpoints, + ) + + if mode == "ENS": + if lcores > 0: + lcore_mapping = [CpuCoreConfigForEnhancedNetworkingStackSwitch(num_lcores=lcores, numa_node_index=0)] + + if lcore_mapping is not None: + host_switch.cpu_config = lcore_mapping + + switches = [] + if payload.host_switch_spec: + for switch in payload.host_switch_spec.convert_to(StandardHostSwitchSpec).host_switches: + if switch.host_switch_name != host_switch_name: + switches.append(switch) + switches.append(host_switch) + + switch_specs = StandardHostSwitchSpec(host_switches=switches) 
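+        # Note: the spec above rebuilds the full switch list - any previous
+        # definition of this host switch was filtered out, so the patch below
+        # replaces it rather than duplicating it.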
+        payload.host_switch_spec = switch_specs
+
+        self._patch(payload=payload, timeout=timeout)
+
+    @api_call
+    def delete_switches_return_uplink_profiles(self, timeout: int = 600) -> List[str]:
+        """
+        Delete all host switches.
+
+        :param timeout: maximum time to resolve request
+        :return: names of uplink profiles used by the deleted switches
+        """
+        payload: HostTransportNode = self.content
+        if payload is None or payload.host_switch_spec is None:
+            return []
+
+        names = []
+        for switch in payload.host_switch_spec.convert_to(StandardHostSwitchSpec).host_switches:
+            for up in switch.host_switch_profile_ids:
+                if up.key == HostSwitchProfileTypeIdEntry.KEY_UPLINKHOSTSWITCHPROFILE:
+                    names.append(up.value.split("/")[-1])
+
+        payload.host_switch_spec = StandardHostSwitchSpec(host_switches=[])
+
+        self._patch(payload=payload, timeout=timeout)
+
+        return names
+
+    @api_call
+    def delete(self, unprepare_host: bool = True, force: bool = False, timeout: int = 600) -> None:
+        """
+        Remove host transport node from NSX.
+
+        :param unprepare_host: True if NSX should be uninstalled from host.
+        :param force: Force delete the resource even if it is being used somewhere.
+        :param timeout: Maximum time removal can take to resolve.
+        """
+        if self.content is not None:  # If no content it was already removed, or never added. It is fine.
+            self._connection.api.policy.infra.sites.enforcement_points.HostTransportNodes.delete(
+                NsxInfraSite.DEFAULT_NAME,
+                NsxEnforcementPoint.DEFAULT_NAME,
+                self.name,
+                unprepare_host=unprepare_host,
+                force=force,
+            )
+
+            t_end = timeout + time()
+
+            while t_end > time():
+                try:
+                    rslv = self._connection.api.policy.infra.sites.enforcement_points.host_transport_nodes.State.get(
+                        NsxInfraSite.DEFAULT_NAME,
+                        NsxEnforcementPoint.DEFAULT_NAME,
+                        self.name,
+                    ).node_deployment_state
+                except NotFound:
+                    return
+                if rslv.state == TransportNodeState.STATE_FAILED:
+                    raise NsxResourceRemoveError(rslv.to_json())
+                sleep(10)  # poll the API every 10 seconds instead of busy-waiting
+
+            raise NsxResourceRemoveError(f"Timeout on remove Host Transport Node {self.name}")
+
+    @api_call
+    def update_lcores(
+        self,
+        host_switch_name: str,
+        lcore_mapping: List[CpuCoreConfigForEnhancedNetworkingStackSwitch] | None = None,
+        lcores: int = 0,
+        timeout: int = 600,
+    ) -> None:
+        """
+        Update host transport node.
+
+        :param host_switch_name: Name of host switch to set
+        :param lcore_mapping: CPU config. Ignored when 'lcores' > 0, which takes precedence.
+        :param lcores: Logical cores count.
+        :param timeout: Maximum time the update can take to resolve.
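+
+        Example (an illustrative sketch; node and switch names are placeholders)::
+
+            node = NsxHostTransportNode("esxi-01.example.com", connection)
+            node.update_lcores(host_switch_name="vds-ens", lcores=4)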
+        """
+        payload: HostTransportNode = self.content
+        if payload is None:
+            raise MissingNsxEntity(f"Host Transport Node {self.name} is missing")
+
+        switches = []
+        if payload.host_switch_spec:
+            for switch in payload.host_switch_spec.convert_to(StandardHostSwitchSpec).host_switches:
+                if switch.host_switch_name == host_switch_name:
+                    if switch.host_switch_mode == "ENS":
+                        if lcores > 0:
+                            lcore_mapping = [
+                                CpuCoreConfigForEnhancedNetworkingStackSwitch(num_lcores=lcores, numa_node_index=0)
+                            ]
+                        if lcore_mapping is not None:
+                            switch.cpu_config = lcore_mapping
+
+                switches.append(switch)
+
+        switch_specs = StandardHostSwitchSpec(host_switches=switches)
+        payload.host_switch_spec = switch_specs
+
+        self._patch(payload=payload, timeout=timeout)
diff --git a/mfd_esxi/nsx/infra_site.py b/mfd_esxi/nsx/infra_site.py
new file mode 100644
index 0000000..a0ff67f
--- /dev/null
+++ b/mfd_esxi/nsx/infra_site.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""NSX site."""
+from com.vmware.nsx_policy.model_client import Site
+
+from .base import NsxEntity
+from .connection import NsxConnection
+from .utils import api_call
+
+
+class NsxInfraSite(NsxEntity):
+    """NSX site."""
+
+    DEFAULT_NAME = "default"
+
+    def __init__(self, connection: NsxConnection):
+        """Initialize instance."""
+        super().__init__(self.DEFAULT_NAME, connection)
+
+    @api_call
+    def _get_content(self) -> Site:
+        return self._connection.api.policy.infra.Sites.get(self.name)
diff --git a/mfd_esxi/nsx/ip_pool.py b/mfd_esxi/nsx/ip_pool.py
new file mode 100644
index 0000000..facd1ec
--- /dev/null
+++ b/mfd_esxi/nsx/ip_pool.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""NSX IP Pool."""
+from time import sleep, time
+
+from com.vmware.nsx_policy.model_client import (
+    IpAddressPoolStaticSubnet,
+    ChildIpAddressPoolSubnet,
+    IpAddressPool,
+    IpPoolRange,
+    ChildIpAddressPool,
+    Infra,
+)
+from com.vmware.vapi.std.errors_client import InvalidRequest
+
+from ..exceptions import NsxResourceRemoveError, NsxResourceSetupError
+from .utils import api_call
+from .base import NsxEntity
+
+
+class NsxIpPool(NsxEntity):
+    """NSX IP Pool."""
+
+    @api_call
+    def _get_content(self) -> IpAddressPool:
+        return self._connection.api.policy.infra.IpPools.get(self.name)
+
+    @api_call
+    def add(self, start_range: str, end_range: str, cidr: str, timeout: int = 60) -> None:
+        """
+        Add IP Pool to NSX. Intended for Overlay transport zones only.
+
+        :param start_range: First IP address in pool.
+        :param end_range: Last IP address in pool.
+        :param cidr: CIDR.
+        :param timeout: Maximum time operation can take to resolve.
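+
+        Example (an illustrative sketch; addresses and the pool name are placeholders)::
+
+            pool = NsxIpPool("IP4-tep-pool", connection)
+            pool.add(start_range="192.168.100.10", end_range="192.168.100.50", cidr="192.168.100.0/24")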
+        """
+        if self.content is not None:
+            return
+
+        pool_range = IpPoolRange(start=start_range, end=end_range)
+        address_subnet = IpAddressPoolStaticSubnet(allocation_ranges=[pool_range], cidr=cidr, id=self.name)
+        child_subnet = ChildIpAddressPoolSubnet(
+            ip_address_pool_subnet=address_subnet,
+            resource_type=ChildIpAddressPoolSubnet.__name__,
+        )
+
+        ip_pool = IpAddressPool(
+            display_name=self.name,
+            children=[child_subnet],
+            description=f"IP Pool {self.name}",
+            resource_type=IpAddressPool.__name__,
+            id=self.name,
+        )
+        payload = ChildIpAddressPool(ip_address_pool=ip_pool, resource_type=ChildIpAddressPool.__name__)
+        infra_payload = Infra(resource_type=Infra.__name__, children=[payload])
+        t_end = timeout + time()
+        while time() < t_end:
+            try:
+                self._connection.api.policy.Infra.patch(infra_payload)
+                return
+            except InvalidRequest as e:
+                # Error details are only exposed through the exception's dict payload.
+                if e.to_dict().get("data").get("error_code") == 500045:
+                    # Pool is being removed, and not all nodes were notified. Add will retry
+                    sleep(10)
+                    continue
+                raise
+        raise NsxResourceSetupError(f"Timeout on add Ip Pool {self.name}")
+
+    @api_call
+    def delete(self, timeout: int = 60) -> None:
+        """
+        Delete IP pool.
+
+        :param timeout: Maximum time operation can take to resolve.
+        """
+        if self.content is not None:
+            payload = ChildIpAddressPool(
+                ip_address_pool=self.content,
+                resource_type=ChildIpAddressPool.__name__,
+                marked_for_delete=True,
+            )
+            infra_payload = Infra(resource_type=Infra.__name__, children=[payload])
+            self._connection.api.policy.Infra.patch(infra_payload)
+            t_end = timeout + time()
+            while time() < t_end:
+                if self.content is None:
+                    return
+                sleep(10)
+            raise NsxResourceRemoveError(f"Timeout on remove Ip Pool {self.name}")
diff --git a/mfd_esxi/nsx/segment.py b/mfd_esxi/nsx/segment.py
new file mode 100644
index 0000000..c8e801e
--- /dev/null
+++ b/mfd_esxi/nsx/segment.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""NSX Segment."""
+from typing import List
+from com.vmware.nsx_policy.model_client import Segment
+
+from .base import NsxEntity
+from .connection import NsxConnection
+from .enforcement_point import NsxEnforcementPoint
+from .infra_site import NsxInfraSite
+from .utils import api_call
+from ..exceptions import MissingNsxEntity
+
+
+class NsxSegment(NsxEntity):
+    """NSX Segment."""
+
+    @api_call
+    def _get_content(self) -> Segment:
+        return self._connection.api.policy.infra.Segments.get(self.name)
+
+    @api_call
+    def add(
+        self,
+        transport_zone_name: str,
+        vlan_ids: List[str] | None = None,
+        vlan: int | None = None,
+        overlay_id: int | None = None,
+    ) -> None:
+        """
+        Add Segment to NSX.
+
+        :param transport_zone_name: name of transport zone
+        :param vlan_ids: list of vlan numbers or ranges
+        :param vlan: vlan number
+        :param overlay_id: create custom VNI ID for overlay traffic
+        """
+        transport_zone_path = (
+            f"/infra/sites/{NsxInfraSite.DEFAULT_NAME}/enforcement-points/"
+            f"{NsxEnforcementPoint.DEFAULT_NAME}/transport-zones/{transport_zone_name}"
+        )
+
+        segment: Segment = self.content
+        if segment is None:
+            segment = Segment(
+                id=self.name,
+                display_name=self.name,
+                description=f"Segment {self.name}",
+                resource_type=Segment.__name__,
+                transport_zone_path=transport_zone_path,
+            )
+        if vlan_ids is not None:
+            segment.vlan_ids = vlan_ids
+        elif vlan is not None:
+            segment.vlan_ids = [str(vlan)]
+        else:
+            segment.vlan_ids = []
+        if overlay_id is not None:
+            segment.overlay_id = overlay_id
+
+        self._connection.api.policy.infra.Segments.patch(self.name, segment)
+
+    @api_call
+    def set_vlan(self, vlan_ids: List[str] | None = None, vlan: int | None = None) -> None:
+        """
+        Set VLAN for segment.
+
+        :param vlan_ids: list of vlan numbers or ranges
+        :param vlan: vlan number
+        """
+        segment: Segment = self.content
+        if segment is None:
+            raise MissingNsxEntity(f"Could not find segment {self.name}")
+
+        if vlan_ids is not None:
+            segment.vlan_ids = vlan_ids
+        elif vlan is not None:
+            segment.vlan_ids = [str(vlan)]
+        else:
+            segment.vlan_ids = []
+
+        self._connection.api.policy.infra.Segments.patch(self.name, segment)
+
+    @api_call
+    def delete(self) -> None:
+        """Delete Segment."""
+        if self.content is None:
+            return
+        self._connection.api.policy.infra.Segments.delete(self.name)
+
+    @staticmethod
+    @api_call
+    def list_zones(zones: List[str], connection: NsxConnection) -> List[str]:
+        """
+        Get list of all segments using selected transport zones.
+
+        :param zones: list of transport zone names
+        :param connection: Connection to NSX.
+        :return: list of all segment names used by transport zones
+        """
+        results = connection.api.policy.infra.Segments.list().results
+        segments = []
+        for result in results:
+            if result.transport_zone_path is not None:
+                tzone_name = result.transport_zone_path.split("/")[-1]
+                if tzone_name in zones:
+                    segments.append(result.id)
+        return segments
diff --git a/mfd_esxi/nsx/transport_zone.py b/mfd_esxi/nsx/transport_zone.py
new file mode 100644
index 0000000..8f13cea
--- /dev/null
+++ b/mfd_esxi/nsx/transport_zone.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""NSX Transport Zone."""
+from com.vmware.nsx_policy.model_client import PolicyTransportZone
+from ..exceptions import UnsupportedNsxEntity
+
+from .base import NsxEntity
+from .enforcement_point import NsxEnforcementPoint
+from .infra_site import NsxInfraSite
+from .utils import api_call
+
+
+class NsxTransportZone(NsxEntity):
+    """NSX Transport Zone."""
+
+    @api_call
+    def _get_content(self) -> PolicyTransportZone:
+        return self._connection.api.policy.infra.sites.enforcement_points.TransportZones.get(
+            NsxInfraSite.DEFAULT_NAME, NsxEnforcementPoint.DEFAULT_NAME, self.name
+        )
+
+    @api_call
+    def add(
+        self,
+        transport_type: str = PolicyTransportZone.TZ_TYPE_VLAN_BACKED,
+    ) -> None:
+        """
+        Add Transport Zone to NSX.
+
+        :param transport_type: Type of transport zone.
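+
+        Example (an illustrative sketch; the zone name is a placeholder)::
+
+            tz = NsxTransportZone("tz-overlay", connection)
+            tz.add(transport_type=PolicyTransportZone.TZ_TYPE_OVERLAY_BACKED)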
+ """ + if self.content is not None: + return + + policy_transport_zone = PolicyTransportZone( + id=self.name, + display_name=self.name, + description=f"Policy Transport Zone {self.name}", + resource_type=PolicyTransportZone.__name__, + tz_type=transport_type, + ) + + self._connection.api.policy.infra.sites.enforcement_points.TransportZones.patch( + NsxInfraSite.DEFAULT_NAME, + NsxEnforcementPoint.DEFAULT_NAME, + self.name, + policy_transport_zone, + ) + + @api_call + def delete(self) -> None: + """Delete Transport Zone.""" + if self.content is None: + return + self._connection.api.policy.infra.sites.enforcement_points.TransportZones.delete( + NsxInfraSite.DEFAULT_NAME, NsxEnforcementPoint.DEFAULT_NAME, self.name + ) + + @api_call + def update_forwarding_mode(self, forwarding_mode: str = PolicyTransportZone.FORWARDING_MODE_IPV4_ONLY) -> None: + """ + Update forwarding mode of the Transport Zone. + + :param forwarding_mode: New forwarding mode for the transport zone. + """ + if self.content is None: + raise ValueError("Transport Zone does not exist.") + + policy_transport_zone = self.content + policy_transport_zone.forwarding_mode = forwarding_mode + if policy_transport_zone.tz_type != PolicyTransportZone.TZ_TYPE_OVERLAY_BACKED: + # VLAN_BACKED transport zones do not support forwarding mode changes + raise UnsupportedNsxEntity("Cannot change forwarding mode for other than OVERLAY_BACKED transport zones.") + + self._connection.api.policy.infra.sites.enforcement_points.TransportZones.patch( + NsxInfraSite.DEFAULT_NAME, + NsxEnforcementPoint.DEFAULT_NAME, + self.name, + policy_transport_zone, + ) diff --git a/mfd_esxi/nsx/uplink_profile.py b/mfd_esxi/nsx/uplink_profile.py new file mode 100644 index 0000000..9947c90 --- /dev/null +++ b/mfd_esxi/nsx/uplink_profile.py @@ -0,0 +1,81 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX Uplink Profile.""" +from com.vmware.nsx_policy.model_client import ( + PolicyUplinkHostSwitchProfile, + TeamingPolicy, + Uplink, +) + +from .base import NsxEntity +from .utils import api_call +from ..const import ESXI_UPLINK_FORMAT, ESXI_UPLINK_NUMBER + + +class NsxUplinkProfile(NsxEntity): + """NSX Uplink profile.""" + + @api_call + def _get_content(self) -> PolicyUplinkHostSwitchProfile: + return self._connection.api.policy.infra.HostSwitchProfiles.get(self.name) + + @api_call + def add( + self, + uplinks: int = ESXI_UPLINK_NUMBER, + policy: str = TeamingPolicy.POLICY_LOADBALANCE_SRCID, + transport_vlan: int | None = None, + overlay_encap: str | None = None, + ) -> None: + """ + Add or replace Uplink Profile to NSX. 
+ + :param uplinks: number of uplinks + :param policy: teaming policy + :param transport_vlan: VLAN tag or None + :param overlay_encap: when using overlay option of GENEVE or VXLAN + """ + active_list = [] + for i in range(1, uplinks + 1): + uplink = Uplink(uplink_name=ESXI_UPLINK_FORMAT % i, uplink_type=Uplink.UPLINK_TYPE_PNIC) + active_list.append(uplink) + + teaming = TeamingPolicy(active_list=active_list, standby_list=[], policy=policy) + + payload: PolicyUplinkHostSwitchProfile = self.content + if payload is not None: + payload = payload.convert_to(PolicyUplinkHostSwitchProfile) + payload.teaming = teaming + payload.transport_vlan = transport_vlan + payload.overlay_encap = overlay_encap + else: + payload = PolicyUplinkHostSwitchProfile( + id=self.name, + resource_type=PolicyUplinkHostSwitchProfile.__name__, + teaming=teaming, + transport_vlan=transport_vlan, + overlay_encap=overlay_encap, + ) + + self._connection.api.policy.infra.HostSwitchProfiles.patch(self.name, payload) + + @api_call + def delete(self) -> None: + """Delete Uplink Profile.""" + if self.content is None: + return + self._connection.api.policy.infra.HostSwitchProfiles.delete(self.name) + + @api_call + def update_transport_vlan(self, transport_vlan: int) -> None: + """ + Update Uplink Profile with transport VLAN. + + :param transport_vlan: VLAN tag + """ + if self.content is None: + raise ValueError("Uplink profile does not exist.") + + payload = self.content + payload.transport_vlan = transport_vlan + self._connection.api.policy.infra.HostSwitchProfiles.patch(self.name, payload) diff --git a/mfd_esxi/nsx/utils.py b/mfd_esxi/nsx/utils.py new file mode 100644 index 0000000..f3aa4aa --- /dev/null +++ b/mfd_esxi/nsx/utils.py @@ -0,0 +1,46 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX utilities.""" +import logging + +from mfd_common_libs import add_logging_level, log_levels +from typing import Callable, Any + +from mfd_esxi.exceptions import NsxApiCallError +from com.vmware.vapi.std.errors_client import Error, Unauthorized, NotFound + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +def api_call(call: Callable) -> Callable: + """ + Mark method as NSX API call. Provide simple and unified way of handling NSX-specific errors. + + :param call: Method to wrap. + :return: Wrapped API call. 
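+
+    Example (an illustrative sketch mirroring how entity classes in this package use it)::
+
+        class NsxExample(NsxEntity):
+            @api_call
+            def _get_content(self) -> VapiStruct:
+                return self._connection.api.policy.infra.Sites.get(self.name)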
+ """ + + def inner_wrapper(*args, **kwargs) -> Any: + try: + return call(*args, **kwargs) + except Unauthorized: + try: + args[0]._connection._connect_to_nsx() + return call(*args, **kwargs) + except Error as e: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Calling {call.__name__} from {call.__module__} failed with:\n {e.to_json()}", + ) + raise NsxApiCallError() + except NotFound: + raise + except Error as e: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Calling {call.__name__} from {call.__module__} failed with:\n {e.to_json()}", + ) + raise NsxApiCallError() + + return inner_wrapper diff --git a/mfd_esxi/nsx/vni_pool.py b/mfd_esxi/nsx/vni_pool.py new file mode 100644 index 0000000..ba741e3 --- /dev/null +++ b/mfd_esxi/nsx/vni_pool.py @@ -0,0 +1,41 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""NSX VNI Pool.""" +import random +from com.vmware.nsx_policy.model_client import VniPoolConfig + +from .base import NsxEntity +from .utils import api_call + + +class NsxVniPool(NsxEntity): + """NSX Vni Pool.""" + + @api_call + def _get_content(self) -> VniPoolConfig: + return self._connection.api.policy.infra.VniPools.get(self.name) + + @api_call + def add(self) -> None: + """Add VNI pool to NSX.""" + overlay_id = random.randint(75001, 16777215) + + if self.content is not None: + return + + vni_poll_config = VniPoolConfig(start=overlay_id, end=overlay_id) + + self._connection.api.policy.infra.VniPools.patch(self.name, vni_poll_config) + + @api_call + def overlay_id(self) -> int: + """Return VNI overlay ID.""" + payload: VniPoolConfig = self.content + return payload.start + + @api_call + def delete(self) -> None: + """Delete VNI pool.""" + if self.content is None: + return + self._connection.api.policy.infra.VniPools.delete(self.name) diff --git a/mfd_esxi/vcenter/__init__.py b/mfd_esxi/vcenter/__init__.py new file mode 100644 index 0000000..3ca5a66 --- /dev/null +++ b/mfd_esxi/vcenter/__init__.py @@ -0,0 +1,4 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +"""VCenter wrapper.""" diff --git a/mfd_esxi/vcenter/cluster.py b/mfd_esxi/vcenter/cluster.py new file mode 100644 index 0000000..5af9544 --- /dev/null +++ b/mfd_esxi/vcenter/cluster.py @@ -0,0 +1,156 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +"""Cluster wrapper.""" +import logging +from packaging.version import parse as version_parse +from typing import Any, Generator, TYPE_CHECKING +from pyVmomi import vim + +from mfd_common_libs import log_levels, add_logging_level +from .host import Host +from .exceptions import VCenterResourceInUse, VCenterResourceSetupError +from .utils import get_obj_from_iter, get_first_match_from_iter + +if TYPE_CHECKING: + from .vcenter import VCenter + from .datacenter import Datacenter + + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class Cluster(object): + """Cluster wrapper.""" + + def __init__(self, name: str, datacenter: "Datacenter"): + """ + Initialize instance. + + :param name: Name of cluster. + :param datacenter: Datacenter. 
+ """ + self._name = name + self._datacenter = datacenter + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self.name}')" + + @property + def content(self) -> "vim.ClusterComputeResource": + """Get content of cluster in API.""" + return get_obj_from_iter( + self.vcenter.create_view(self._datacenter.content.hostFolder, [vim.ClusterComputeResource]), + self.name, + ) + + @property + def name(self) -> str: + """Get name of cluster.""" + return self._name + + def destroy(self) -> None: + """Remove cluster from datacenter.""" + try: + self.vcenter.wait_for_tasks([self.content.Destroy()]) + except vim.fault.ResourceInUse as e: + raise VCenterResourceInUse(self, e.msg) + except vim.fault.NotFound: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Nothing to remove. Cluster: {self.name} does not exist.", + ) + + @property + def vcenter(self) -> "VCenter": + """Get VCenter for this cluster.""" + return self._datacenter.vcenter + + @property + def hosts(self) -> Generator["Host", Any, None]: + """Get all hosts from cluster.""" + return ( + Host(host.name, self._datacenter, self) + for host in self.vcenter.create_view(self.content, [vim.HostSystem]) + ) + + def get_host_by_ip(self, ip: str) -> "Host": + """ + Get specific host from cluster. + + :param ip: Host IP address. + + :return: Specific host form cluster. + """ + return get_obj_from_iter(self.hosts, ip) + + def add_host(self, ip: str, login: str, password: str, fingerprint: str) -> "Host": + """ + Add host to cluster. + + :param ip: Host IP address. + :param login: Login to the host. + :param password: Password for the host. + :param fingerprint: Fingerprint for the host. + + :return: New host. + """ + spec = vim.host.ConnectSpec( + hostName=ip, + userName=login, + password=password, + force=True, + sslThumbprint=fingerprint, + ) + + try: + self.vcenter.wait_for_tasks([self.content.AddHost(spec=spec, asConnected=True)]) + except vim.fault.DuplicateName: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Host: {ip} already exist return existing", + ) + if get_first_match_from_iter(self.hosts, lambda h: h.name == ip) is None: + raise VCenterResourceSetupError(self) + + host = Host(ip, self._datacenter, self) + # vSphere from version 7.0.3 should be configured to change system VM location + if self.vcenter.version >= version_parse("7.0.3"): + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"vCenter is in version {self.vcenter.version}, system VM will be reconfigured", + ) + self.reconfigure_system_vm(host) + return host + + def set_evc(self, value: str = "intel-sandybridge") -> None: + """ + Set Enhanced vMotion Compatibility. + + :param value: CPU compatibility mode. + """ + mgr = self.content.EvcManager() + task = mgr.ConfigureEvc(value) + self.vcenter.wait_for_tasks([task]) + + def reconfigure_system_vm(self, host: "Host") -> None: + """ + Reconfigure host to use its local datastore for system VM. + + :param host: Host which should be reconfigured. 
+ """ + cluster_spec = vim.cluster.ConfigSpecEx() + system_vm_spec = vim.cluster.SystemVMsConfigSpec() + local_datastore_name = get_first_match_from_iter(host.datastores, lambda ds: "datastore" in ds.name).name + local_datastore_spec = get_first_match_from_iter( + self.content.datastore, lambda x: x.name == local_datastore_name + ) + + allowed_datastore_spec = vim.cluster.DatastoreUpdateSpec(operation="add", datastore=local_datastore_spec) + + system_vm_spec.allowedDatastores.append(allowed_datastore_spec) + cluster_spec.systemVMsConfig = system_vm_spec + + self.vcenter.wait_for_tasks([self.content.ReconfigureEx(spec=cluster_spec, modify=True)]) diff --git a/mfd_esxi/vcenter/datacenter.py b/mfd_esxi/vcenter/datacenter.py new file mode 100644 index 0000000..d0f0b7b --- /dev/null +++ b/mfd_esxi/vcenter/datacenter.py @@ -0,0 +1,247 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +# pylint: disable=protected-access +"""Datacenter wrapper.""" +import logging +from typing import Any, Generator, Iterable, TYPE_CHECKING + +from pyVmomi import vim +from itertools import chain +from packaging.version import parse as version_parse + +from .host import Host +from .cluster import Cluster +from .distributed_switch.dswitch import DSwitch +from ..const import ESXI_UPLINK_FORMAT, ESXI_UPLINK_NUMBER + +from .exceptions import ( + VCenterResourceInUse, + VCenterResourceMissing, + VCenterResourceSetupError, +) +from .utils import get_obj_from_iter, get_first_match_from_iter + +from mfd_common_libs import log_levels, add_logging_level + +if TYPE_CHECKING: + from .datastore import Datastore + from .vcenter import VCenter + + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class Datacenter(object): + """Datacenter wrapper.""" + + def __init__(self, name: str, vcenter: "VCenter"): + """ + Initialize instance. + + :param name: Name of datacenter. + :param vcenter: VCenter. + """ + self._name = name + self._vcenter = vcenter + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self.name}')" + + @property + def content(self) -> "vim.Datacenter": + """Content of datacenter in API.""" + return get_obj_from_iter( + self.vcenter.create_view(self.vcenter.content.rootFolder, [vim.Datacenter]), + self.name, + ) + + @property + def name(self) -> str: + """Get name of datacenter.""" + return self._name + + @property + def network_folder(self) -> "vim.Folder": + """Get network folder of datacenter.""" + return self.content.networkFolder + + def destroy(self) -> None: + """Remove datacenter from VCenter.""" + try: + self.vcenter.wait_for_tasks([self.content.Destroy()]) + except vim.fault.ResourceInUse as e: + raise VCenterResourceInUse(self, e.msg) + except VCenterResourceMissing: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Nothing to remove. Datacenter: {self.name} does not exist.", + ) + + @property + def vcenter(self) -> "VCenter": + """Get VCenter for this datacenter.""" + return self._vcenter + + @property + def dswitches(self) -> Generator["DSwitch", Any, None]: + """Get all dswitches.""" + return ( + DSwitch(ds.name, self) + for ds in self.vcenter.create_view(self.content.networkFolder, [vim.dvs.VmwareDistributedVirtualSwitch]) + ) + + def get_dswitch_by_name(self, name: str) -> "DSwitch": + """ + Get specific DSwitch from datacenter. + + :param name: Name of DSwitch. + :return: DSwitch. 
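+
+        Example (an illustrative sketch; the datacenter object and name are placeholders)::
+
+            dswitch = datacenter.get_dswitch_by_name("DSwitch-perf")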
+ """ + return get_obj_from_iter(self.dswitches, name) + + def add_dswitch( + self, + name: str, + uplinks: int = ESXI_UPLINK_NUMBER, + version: str = "6.0.0", + networkIO: bool = True, + ) -> "DSwitch": + """ + Add new DSwitch to datacenter. + + :param name: Name of DSwitch + :param uplinks: Number of uplinks + :param version: Version of DSwitch + :param networkIO: enable Network I/O + + :return: New DSwitch. + """ + version = version_parse(version) + try: + dswitch = self.get_dswitch_by_name(name) + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"DSwitch: {name} already exist return existing", + ) + return dswitch + except VCenterResourceMissing: + pass + + create_spec = vim.DistributedVirtualSwitch.CreateSpec() + config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() + + config_spec.name = name + config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + + uplink_port_names = [] + for i in range(1, uplinks + 1): + uplink_port_names.append(ESXI_UPLINK_FORMAT % i) + + config_spec.uplinkPortPolicy.uplinkPortName = uplink_port_names + + if version != version_parse("5.0.0") and version != version_parse("5.1.0"): + config_spec.lacpApiVersion = vim.dvs.VmwareDistributedVirtualSwitch.LacpApiVersion.multipleLag + + create_spec.configSpec = config_spec + create_spec.productInfo = vim.dvs.ProductSpec(version=version.base_version) + + task = self.content.networkFolder.CreateDistributedVirtualSwitch(create_spec) + self.vcenter.wait_for_tasks([task]) + dswitch = DSwitch(name, self) + dswitch.networkIO = networkIO + return dswitch + + @property + def clusters(self) -> Generator["Cluster", Any, None]: + """Gat all clusters from datacenter.""" + return ( + Cluster(cluster.name, self) + for cluster in self.vcenter.create_view(self.content.hostFolder, [vim.ClusterComputeResource]) + ) + + def get_cluster_by_name(self, name: str) -> "Cluster": + """ + Get specific cluster from datacenter. + + :param name: Name of cluster. + + :return: Cluster. + """ + return get_obj_from_iter(self.clusters, name) + + def add_cluster(self, name: str) -> "Cluster": + """ + Add new Cluster to datacenter. + + :param name: Name of Cluster. + + :return: New Cluster. + """ + try: + cluster_spec = vim.cluster.ConfigSpecEx() + + self.content.hostFolder.CreateClusterEx(name=name, spec=cluster_spec) + except vim.fault.DuplicateName: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Cluster: {name} already exist return existing", + ) + return Cluster(name, self) + + @property + def hosts(self) -> Iterable["Host"]: + """Get all hosts from datacenter.""" + hosts = ( + Host(host.name, self) + for host in self.vcenter.create_view(self.content.hostFolder, [vim.ComputeResource]) + if not isinstance(host, vim.ClusterComputeResource) + ) + + return chain(hosts, *(cluster.hosts for cluster in self.clusters)) + + def get_host_by_ip(self, ip: str) -> "Host": + """ + Get specific host from datacenter. + + :param ip: Host IP address. + + :return: Host. + """ + return get_obj_from_iter(self.hosts, ip) + + def add_host(self, ip: str, login: str, password: str, fingerprint: str) -> "Host": + """ + Add standalone host to datacenter. + + :param ip: Host IP address + :param login: Login to the host + :param password: Password for the host + :param fingerprint: Fingerprint for the host + + :return: New host. 
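+
+        Example (an illustrative sketch; credentials are placeholders, the thumbprint
+        comes from the ESXi host wrapper's fingerprint property)::
+
+            host = datacenter.add_host("10.10.10.5", "root", "secret", esxi.fingerprint)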
+ """ + spec = vim.host.ConnectSpec( + hostName=ip, + userName=login, + password=password, + force=True, + sslThumbprint=fingerprint, + ) + + try: + self.vcenter.wait_for_tasks([self.content.hostFolder.AddStandaloneHost(spec=spec, addConnected=True)]) + except vim.fault.DuplicateName: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Host: {ip} already exist return existing", + ) + if get_first_match_from_iter(self.hosts, lambda h: h.name == ip) is None: + raise VCenterResourceSetupError(f"Host@{ip}") + return Host(ip, self) + + @property + def datastores(self) -> Iterable["Datastore"]: + """Get all datastores in VCenter.""" + return chain(*(host.datastores for host in self.hosts)) diff --git a/mfd_esxi/vcenter/datastore.py b/mfd_esxi/vcenter/datastore.py new file mode 100644 index 0000000..26cb06c --- /dev/null +++ b/mfd_esxi/vcenter/datastore.py @@ -0,0 +1,78 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Datastore wrapper.""" +import logging +from typing import Any, Generator, TYPE_CHECKING +from pyVmomi import vim + +from mfd_common_libs import log_levels, add_logging_level +from .virtual_machine import VirtualMachine +from .utils import get_obj_from_iter, MiB +from .exceptions import VCenterResourceMissing + +if TYPE_CHECKING: + from .host import Host + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class Datastore(object): + """Datastore wrapper.""" + + def __init__(self, name: str, host: "Host"): + """ + Initialize instance. + + :param name: Name of datastore. + :param host: Host parent of datastore. + """ + self._name = name + self._host = host + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self.name}')" + + @property + def content(self) -> "vim.Datastore": + """Get content of datastore in API.""" + # pylint: disable=protected-access + for datastore in self._host.content.datastore: + if datastore.name == self.name: + return datastore + raise VCenterResourceMissing(self) + + @property + def name(self) -> str: + """Get name of datastore.""" + return self._name + + @property + def host(self) -> "Host": + """Get host parent of datastore.""" + return self._host + + @property + def capacity(self) -> int: + """Get capacity of datastore in MB.""" + return self.content.info.vmfs.capacity / MiB + + @property + def free_space(self) -> int: + """Get free space in datastore in MB.""" + return self.content.info.freeSpace / MiB + + @property + def vms(self) -> Generator["VirtualMachine", Any, None]: + """Get all VMs for datastore.""" + return (VirtualMachine(vm.name, self._host) for vm in self.content.vm) + + def get_vm_by_name(self, name: str) -> "VirtualMachine": + """Get specific VM from datastore. + + :param name: Name of VM. + + :return: Virtual machine. 
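+
+        Example (an illustrative sketch; the VM name is a placeholder)::
+
+            vm = datastore.get_vm_by_name("Base_VM_1")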
+ """ + return get_obj_from_iter(self.vms, name) diff --git a/mfd_esxi/vcenter/distributed_switch/__init__.py b/mfd_esxi/vcenter/distributed_switch/__init__.py new file mode 100644 index 0000000..7b79535 --- /dev/null +++ b/mfd_esxi/vcenter/distributed_switch/__init__.py @@ -0,0 +1,4 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +"""Distributed switch wrapper.""" diff --git a/mfd_esxi/vcenter/distributed_switch/dswitch.py b/mfd_esxi/vcenter/distributed_switch/dswitch.py new file mode 100644 index 0000000..dd7ea3d --- /dev/null +++ b/mfd_esxi/vcenter/distributed_switch/dswitch.py @@ -0,0 +1,393 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""DSwitch wrapper.""" +import logging +from pyVmomi import vim +from typing import Iterator, Optional, Union, Any, Generator, List, TYPE_CHECKING + +from mfd_common_libs import log_levels, add_logging_level + +from .uplink import DSUplink +from .portgroup import DSPortgroup, DSP_EARLY_BINDING +from ..exceptions import VCenterResourceInUse, VCenterResourceMissing +from ..utils import get_obj_from_iter +from ...const import ESXI_UPLINK_FORMAT + +if TYPE_CHECKING: + from ..datacenter import Datacenter + from ..host import Host + from ..vcenter import VCenter + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class DSwitch(object): + """DSwitch wrapper.""" + + _MTU_LOOKUP = {"default": 1500, "4k": 4074, "9k": 9000} + + def __init__(self, name: str, datacenter: "Datacenter"): + """ + Initialize instance. + + :param name: Name of dswitch. + :param datacenter: Datacenter. + """ + self._name = name + self._datacenter = datacenter + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self.name}')" + + @property + def content(self) -> vim.dvs.VmwareDistributedVirtualSwitch: + """Get content of DS in API.""" + return get_obj_from_iter( + self._datacenter.vcenter.create_view( + self._datacenter.network_folder, + [vim.dvs.VmwareDistributedVirtualSwitch], + ), + self.name, + ) + + @property + def name(self) -> str: + """Get name of DSwitch.""" + return self._name + + @property + def uuid(self) -> str: + """Get UUID of DSwitch.""" + return self.content.uuid + + def destroy(self) -> None: + """Remove DSwitch from datacenter.""" + logger.log(level=log_levels.MODULE_DEBUG, msg=f"Destroying DSwitch: {self.name}") + try: + for pg in self.portgroups: + pg.destroy() + for uplink in self.uplinks: + uplink.del_all_nics() + for host in self.hosts: + self.remove_host(host) + self.vcenter.wait_for_tasks([self.content.Destroy()]) + logger.log(level=log_levels.MODULE_DEBUG, msg=f"DSwitch: {self.name} destroyed") + except vim.fault.ResourceInUse as e: + raise VCenterResourceInUse(self, e.msg) + except VCenterResourceMissing: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Nothing to remove. DSwitch: {self.name} does not exist.", + ) + + @property + def vcenter(self) -> "VCenter": + """Get VCenter for this dswitch.""" + return self._datacenter.vcenter + + @property + def mtu(self) -> int: + """MTU value from vSwitch.""" + return self.content.config.maxMtu + + @mtu.setter + def mtu(self, value: Union[str, int]) -> None: + """ + Set MTU value for DSwitch. + + :param value: MTU value. 
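+
+        Example (an illustrative sketch; accepts a _MTU_LOOKUP key or a number)::
+
+            dswitch.mtu = "9k"  # mapped to 9000
+            dswitch.mtu = 1500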
+ """ + logger.log(level=log_levels.MODULE_DEBUG, msg=f"Set MTU {value} on {self.name}") + ds_spec = self.get_ds_config_spec() + max_mtu = self._MTU_LOOKUP.get(value) if value in self._MTU_LOOKUP.keys() else int(value) + ds_spec.maxMtu = max_mtu + self.vcenter.wait_for_tasks([self.content.ReconfigureDvs_Task(ds_spec)]) + + def discovery_protocol_type(self, name: str) -> None: + """ + Set Discovery Protocol type on DSwitch. + + :param name: Name of DSwitch. + """ + dvswitch = self._datacenter.get_dswitch_by_name(name) + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"DSwitch: {name} already exist return existing: Dswitch:{dvswitch}", + ) + protocol_config = vim.host.LinkDiscoveryProtocolConfig() + protocol_config.protocol = vim.host.LinkDiscoveryProtocolConfig.ProtocolType.lldp + self._enable_link_discovery_advertise(protocol_config) + + def _enable_link_discovery_advertise(self, protocol_config: vim.host.LinkDiscoveryProtocolConfig) -> None: + """ + Enable Link Discovery Protocol advertising settings on a Distributed vSwitch. + + :param protocol_config: Configuration specifying the selected Link Discovery Protocol to use for this switch. + """ + protocol_config.operation = vim.host.LinkDiscoveryProtocolConfig.OperationType.listen + + config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec( + configVersion=self.content.config.configVersion, + linkDiscoveryProtocolConfig=protocol_config, + ) + self.vcenter.wait_for_tasks([self.content.ReconfigureDvs_Task(config_spec)]) + + @property + def portgroups(self) -> Generator["DSPortgroup", Any, None]: + """Get all portgroups from DS.""" + return ( + DSPortgroup(pg.name, self) + for pg in self.content.portgroup + if isinstance(pg, vim.dvs.DistributedVirtualPortgroup) and len(pg.tag) == 0 + ) + + def get_portgroup(self, name: str) -> "DSPortgroup": + """ + Get specific portgroup from DS. + + :param name: Name of portgroup. + + :return: Portgroup. + """ + return get_obj_from_iter(self.portgroups, name) + + def add_portgroup(self, name: str, num_ports: int = 8, port_binding: str = DSP_EARLY_BINDING) -> "DSPortgroup": + """ + Add new portgroup to DS. + + :param name: Name of portgroup. + :param num_ports: Number of ports. + :param port_binding: Type of port binding early binding/late binding/ephemeral. + + :return: Portgroup. + """ + dsp = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + dsp.name = name + dsp.numPorts = num_ports + dsp.type = port_binding + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"New portgroup: {name} for DSwitch {self.name} spec\n{dsp}", + ) + try: + self.vcenter.wait_for_tasks([self.content.AddDVPortgroup_Task([dsp])]) + except vim.fault.DuplicateName: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Portgroup: {name} already exist return existing.", + ) + return DSPortgroup(name, self) + + @property + def uplinks(self) -> Generator["DSUplink", Any, None]: + """Get all uplinks.""" + return ( + DSUplink(name, nr, self) for nr, name in enumerate(self.content.config.uplinkPortPolicy.uplinkPortName) + ) + + def get_uplink(self, name: str) -> "DSUplink": + """ + Get specific uplink from DS. + + :param name: Name of uplink. + + :return: Uplink. 
+ """ + return get_obj_from_iter(self.uplinks, name) + + @property + def hosts(self) -> Generator["Host", Any, None]: + """Get all assigned hosts to DS.""" + return ( + host + for host in self._datacenter.hosts + for dvs_host in self.content.config.host + if host.name == dvs_host.config.host.name + ) + + def get_host(self, name: str) -> Optional["Host"]: + """ + Get specific host from DS. + + :param name: Name of host. + + :return: Host. + :raise ObjectNotFoundInIter: Host was not found + """ + return get_obj_from_iter(self.hosts, name) + + def assign_host(self, host: "Host") -> None: + """ + Assign to DSwitch new host. + + :param host: Host that should be assigned to DS. + """ + try: + self._remove_add_host(host, vim.ConfigSpecOperation.add) + except vim.fault.AlreadyExists: + pass + + def remove_host(self, host: "Host") -> None: + """ + Remove from DSwitch host. + + :param host: Host that should be removed from DS. + """ + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Remove host: {host} from DVSwitch: {self.name}", + ) + self._remove_add_host(host, vim.ConfigSpecOperation.remove) + + def _remove_add_host(self, host: "Host", operation: str) -> None: + """ + Remove or add host to DSwitch. + + :param host: Host. + :param operation: Operation type (add/remove). + """ + ds_spec = self.get_ds_config_spec() + ds_spec.host = [self.get_ds_host_config_spec(host, operation)] + self.vcenter.wait_for_tasks([self.content.ReconfigureDvs_Task(ds_spec)]) + + def get_ds_config_spec(self) -> vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec: + """ + Return DSwitch config spec. + + :return: DSwitch config spec. + """ + ds_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() + ds_spec.configVersion = self.content.config.configVersion + return ds_spec + + @staticmethod + def get_ds_host_config_spec(host: "Host", operation: str) -> vim.dvs.HostMember.ConfigSpec: + """ + Return DSwitch host config spec. + + :param host: Host. + :param operation: Operation type (add/remove). + + :return: Host member config spec. + """ + host_spec = vim.dvs.HostMember.ConfigSpec() + host_spec.operation = operation + host_spec.host = host.content + return host_spec + + @property + def networkIO(self) -> bool: + """Get Network I/O Control.""" + return self.content.config.networkResourceManagementEnabled + + @networkIO.setter + def networkIO(self, value: bool) -> None: + """Set Network I/O Control. + + :param value: enable/disable Network I/O Control. + """ + self.content.EnableNetworkResourceManagement(value) + + def set_active_standby(self, active: List[str], standby: List[str]) -> None: + """ + Set active and standby uplinks. + + :param active: Active nics. + :param standby: Standby nics. 
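+
+        Example (an illustrative sketch; note that uplink names are derived from list
+        lengths using ESXI_UPLINK_FORMAT, not from the NIC names themselves)::
+
+            dswitch.set_active_standby(active=["vmnic0", "vmnic1"], standby=["vmnic2"])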
+        """
+        ds_spec = self.get_ds_config_spec()
+        ds_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+        ds_spec.defaultPortConfig.uplinkTeamingPolicy = (
+            vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
+        )
+        ds_spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder = (
+            vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy()
+        )
+
+        active_uplinks = []
+        for i in range(1, len(active) + 1):
+            active_uplinks.append(ESXI_UPLINK_FORMAT % i)
+        standby_uplinks = []
+        for i in range(len(active) + 1, len(active + standby) + 1):
+            standby_uplinks.append(ESXI_UPLINK_FORMAT % i)
+
+        ds_spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort = active_uplinks
+        ds_spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.standbyUplinkPort = standby_uplinks
+        self.vcenter.wait_for_tasks([self.content.ReconfigureDvs_Task(ds_spec)])
+
+    def add_lag(
+        self,
+        name: str,
+        uplinks_no: int,
+        lacp_mode: str = "passive",
+        lb_algorithm: str = "srcDestIpTcpUdpPortVlan",
+    ) -> None:
+        """
+        Create link aggregation portgroup.
+
+        :param name: Name of LAG.
+        :param uplinks_no: Number of uplinks.
+        :param lacp_mode: LACP protocol mode, "active" or "passive".
+        :param lb_algorithm: Load balancing algorithm used on LAG.
+        """
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"[API] Adding LAG {name}")
+        lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec()
+        lacp_spec.lacpGroupConfig = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig()
+        lacp_spec.lacpGroupConfig.ipfix = vim.dvs.VmwareDistributedVirtualSwitch.LagIpfixConfig()
+        lacp_spec.lacpGroupConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.LagVlanConfig()
+        lacp_spec.lacpGroupConfig.name = name
+        lacp_spec.lacpGroupConfig.uplinkNum = uplinks_no
+        lacp_spec.lacpGroupConfig.mode = lacp_mode
+        lacp_spec.lacpGroupConfig.loadbalanceAlgorithm = lb_algorithm
+        lacp_spec.operation = "add"
+
+        self.content.UpdateDVSLacpGroupConfig_Task([lacp_spec])
+
+    def remove_lag(self, name: str) -> None:
+        """
+        Remove link aggregation portgroup.
+
+        :param name: Name of LAG.
+        """
+        lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec()
+        lacp_spec.lacpGroupConfig = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig()
+        lacp_object = get_obj_from_iter(self.content.config.lacpGroupConfig, name=name)
+        lacp_spec.lacpGroupConfig.key = lacp_object.key
+        lacp_spec.operation = "remove"
+        self.content.UpdateDVSLacpGroupConfig_Task([lacp_spec])
+
+    def update_lag_uplinks(self, host: "Host", adapters_names: Iterator[str], lag_port_keys: Iterator[int]) -> None:
+        """
+        Update uplink in link aggregation portgroup.
+
+        :param host: Host whose proxy switch uplinks should be updated.
+        :param adapters_names: Names of uplink NICs.
+        :param lag_port_keys: LAG port keys.
+        """
+        logger.log(
+            level=log_levels.MODULE_DEBUG,
+            msg=f"[API] Adding {adapters_names} as uplinks to LAG",
+        )
+
+        # Key of the auto-created uplink portgroup (its name contains "Uplinks")
+        uplink_pg_name = next((pg.key for pg in self.content.portgroup if "Uplinks" in pg.name), None)
+
+        config = vim.host.NetworkConfig()
+        proxy_switch_config = vim.host.HostProxySwitch.Config()
+        proxy_switch_config.uuid = self.uuid
+        proxy_switch_config.changeOperation = "edit"
+        proxy_switch_config.spec = vim.host.HostProxySwitch.Specification()
+        proxy_switch_config.spec.backing = vim.dvs.HostMember.PnicBacking()
+        proxy_switch_config.spec.backing.pnicSpec = []
+        for name, lag_port in zip(adapters_names, lag_port_keys):
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"[API] Add {name} to LAG")
+            host_pnic_spec = vim.dvs.HostMember.PnicSpec()
+            host_pnic_spec.pnicDevice = name
+            host_pnic_spec.uplinkPortKey = lag_port
+            host_pnic_spec.uplinkPortgroupKey = uplink_pg_name
+            proxy_switch_config.spec.backing.pnicSpec.append(host_pnic_spec)
+        config.proxySwitch = [proxy_switch_config]
+        host.update_network_backing(config)
diff --git a/mfd_esxi/vcenter/distributed_switch/portgroup.py b/mfd_esxi/vcenter/distributed_switch/portgroup.py
new file mode 100644
index 0000000..f9a6af3
--- /dev/null
+++ b/mfd_esxi/vcenter/distributed_switch/portgroup.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""DSPortgroup wrapper."""
+import logging
+from typing import List, Union, Dict, Tuple
+
+from pyVmomi import vim
+from ipaddress import IPv4Network, IPv6Network
+from typing import TYPE_CHECKING
+
+from ..virtual_adapter import VirtualAdapter
+
+from mfd_esxi.vcenter.exceptions import (
+    VCenterResourceInUse,
+    VCenterDSPortgroupMissingHostMember,
+    VCenterResourceMissing,
+)
+from time import time, sleep
+from mfd_common_libs import log_levels, add_logging_level
+
+if TYPE_CHECKING:
+    from .dswitch import DSwitch
+    from .uplink import DSUplink
+    from ..host import Host
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+DSP_EARLY_BINDING = "earlyBinding"
+DSP_LATE_BINDING = "lateBinding"
+DSP_EPHEMERAL = "ephemeral"
+
+
+class DSPortgroup(object):
+    """DSPortgroup wrapper."""
+
+    def __init__(self, name: str, dswitch: "DSwitch"):
+        """
+        Initialize instance.
+
+        :param name: Name of portgroup.
+        :param dswitch: DSwitch.
+        """
+        self._name = name
+        self._dswitch = dswitch
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def content(self) -> vim.dvs.DistributedVirtualPortgroup:
+        """Get content of DSPortgroup in API."""
+        for pg in self._dswitch.content.portgroup:
+            if pg.name == self._name and isinstance(pg, vim.dvs.DistributedVirtualPortgroup):
+                return pg
+        raise VCenterResourceMissing(self)
+
+    @property
+    def name(self) -> str:
+        """Name for DSPortgroup."""
+        return self._name
+
+    def destroy(self) -> None:
+        """Remove DSPortgroup from DSwitch."""
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"Removing portgroup: {self.name}")
+        try:
+            for host in self._dswitch.hosts:
+                for virtual_adapter in self.get_virtual_adapters(host):
+                    virtual_adapter.destroy()
+            self._dswitch.vcenter.wait_for_tasks([self.content.Destroy()])
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"Portgroup {self.name} destroyed")
+        except vim.fault.ResourceInUse as e:
+            raise VCenterResourceInUse(self, e.msg)
+        except vim.fault.NotFound:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Nothing to remove. Portgroup: {self.name} does not exist.",
+            )
+
+    def get_virtual_adapters(self, host: "Host") -> list[VirtualAdapter]:
+        """
+        Get all virtual adapters from the DSwitch created on a specific host.
+
+        :param host: Host where virtual adapters were created.
+
+        :return: List of virtual adapters.
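+
+        Example (an illustrative sketch, mirroring the cleanup done in destroy())::
+
+            for adapter in portgroup.get_virtual_adapters(host):
+                adapter.destroy()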
+        """
+        virtual_adapters = []
+        for virtual_nic in host.content.config.network.vnic:
+            dvp = virtual_nic.spec.distributedVirtualPort
+            if dvp and dvp.portgroupKey == self.content.key:
+                virtual_adapters.append(VirtualAdapter(virtual_nic.device, host))
+        return virtual_adapters
+
+    def get_virtual_adapter_by_name(self, host: "Host", name: str) -> VirtualAdapter:
+        """
+        Get virtual adapter from DS created on specified host.
+
+        :param host: Host where virtual adapter was created.
+        :param name: Name of virtual adapter.
+
+        :return: Virtual adapter.
+        """
+        for virtual_adapter in self.get_virtual_adapters(host):
+            if virtual_adapter.name == name:
+                return virtual_adapter
+        raise RuntimeError(f"Virtual adapter {name} does not exist.")
+
+    def add_virtual_adapter(  # noqa: C901
+        self,
+        host: "Host",
+        mtu: Union[str, int] = "default",
+        ip_ver: str = "4",
+        ip: str = None,
+        mask: str = None,
+    ) -> VirtualAdapter:
+        """
+        Add new virtual adapter to portgroup. If ip and mask are None, use DHCP.
+
+        :param host: Host to which the adapter is added.
+        :param mtu: MTU size for virtual adapter.
+        :param ip_ver: IP version 4|6.
+        :param ip: IP address.
+        :param mask: Netmask for IP.
+
+        :return: New Virtual adapter.
+        """
+        ip_ver = int(ip_ver)
+        ip_config = vim.host.IpConfig()
+        ipv6_config = vim.host.IpConfig.IpV6AddressConfiguration()
+
+        if ip and mask:
+            # strict=False: ip is a host address, not a network address
+            ip_net = (
+                IPv4Network(f"{ip}/{mask}", strict=False) if ip_ver == 4 else IPv6Network(f"{ip}/{mask}", strict=False)
+            )
+        if ip_ver == 4 and mask:
+            ip_config.dhcp = False
+            ip_config.ipAddress = ip
+            ip_config.subnetMask = f"{ip_net.netmask}"
+        elif ip_ver == 6 and mask:
+            ip_config.ipV6Config = ipv6_config
+            ipv6_config.autoConfigurationEnabled = False
+            ipv6_config.dhcpV6Enabled = False
+            ipv6_address = vim.host.IpConfig.IpV6Address()
+            ipv6_address.ipAddress = ip
+            ipv6_address.prefixLength = int(mask)
+            ipv6_config.ipV6Address = [ipv6_address]
+        elif not ip and not mask:
+            if ip_ver == 4:
+                ip_config.dhcp = True
+            elif ip_ver == 6:
+                ip_config.dhcp = False
+                ip_config.ipV6Config = ipv6_config
+                ipv6_config.autoConfigurationEnabled = True
+                ipv6_config.dhcpV6Enabled = False
+        else:
+            raise RuntimeError("Unknown config: please set both IP and netmask or neither.")
+
+        port_connection = vim.dvs.PortConnection()
+        port_connection.portgroupKey = self.content.key
+        port_connection.switchUuid = self.content.config.distributedVirtualSwitch.uuid
+
+        virtual_nic_spec = vim.host.VirtualNic.Specification()
+        virtual_nic_spec.ip = ip_config
+        virtual_nic_spec.mtu = VirtualAdapter.get_mtu(mtu)
+        virtual_nic_spec.distributedVirtualPort = port_connection
+
+        for host_member in self._dswitch.content.config.host:
+            if host_member.config.host.name == host.name:
+                name = host.content.configManager.networkSystem.AddVirtualNic("", virtual_nic_spec)
+                adapter = self.get_virtual_adapter_by_name(host, name)
+                start = time()
+                while time() - start < 60:
+                    if adapter.ip:
+                        return self.get_virtual_adapter_by_name(host, name)
+                    logger.log(level=log_levels.MODULE_DEBUG, msg="Adapter has not been assigned an IP yet")
+                    sleep(5)
+                raise RuntimeError("Adapter has not been assigned an IP")
+        raise VCenterDSPortgroupMissingHostMember()
+
+    @property
+    def uplinks(self) -> Dict[str, List["DSUplink"]]:
+        """
+        Get all uplinks assigned to portgroup.
+
+        Sample output:
+            {'active': [DSUplink('Uplink 0'), DSUplink('Uplink 1')],
+             'standby': [DSUplink('Uplink 2'), DSUplink('Uplink 3')]}
+        """
+        uplink_port_order = self.content.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder
+        return {
+            "active": [
+                uplink for uplink in self._dswitch.uplinks if uplink.name in uplink_port_order.activeUplinkPort
+            ],
+            "standby": [
+                uplink for uplink in self._dswitch.uplinks if uplink.name in uplink_port_order.standbyUplinkPort
+            ],
+        }
+
+    @uplinks.setter
+    def uplinks(self, value: Dict[str, List["DSUplink"]]) -> None:
+        """
+        Set uplinks for the portgroup.
+
+        Sample input:
+            {'active': [DSUplink('Uplink 0'), DSUplink('Uplink 1')],
+             'standby': [DSUplink('Uplink 2'), DSUplink('Uplink 3')]}
+
+        :param value: Dict of uplink lists keyed by "active" and "standby".
+        """
+        dsp_config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+        dsp_config.configVersion = self.content.config.configVersion
+
+        dsp_config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+
+        dsp_config.defaultPortConfig.uplinkTeamingPolicy = (
+            vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
+        )
+
+        dsp_config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder = (
+            vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy()
+        )
+
+        active = value.get("active", [])
+        standby = value.get("standby", [])
+        uplink_port_order = dsp_config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder
+
+        uplink_port_order.activeUplinkPort = [uplink.name for uplink in active]
+        uplink_port_order.standbyUplinkPort = [uplink.name for uplink in standby]
+        self._dswitch.vcenter.wait_for_tasks([self.content.ReconfigureDVPortgroup_Task(dsp_config)])
+
+    @property
+    def vlan(self) -> Union[int, List[vim.NumericRange]]:
+        """VLAN setting on portgroup."""
+        return self.content.config.defaultPortConfig.vlan.vlanId
+
+    @vlan.setter
+    def vlan(self, value: Union[int, str, List[Tuple[int, int]]]) -> None:
+        """
+        Set VLAN on portgroup. If value is int or str, set a VLAN tag; if it is a list of (start, end) tuples, set trunking VLAN ranges.
+
+        :param value: VLAN.
+        """
+        dsp_config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+        dsp_config.configVersion = self.content.config.configVersion
+        dsp_config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+        if isinstance(value, (list, tuple)):
+            dsp_config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
+            vlan_id = [vim.NumericRange(start=int(start), end=int(end)) for start, end in value]
+            dsp_config.defaultPortConfig.vlan.vlanId = vlan_id
+        else:
+            dsp_config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+            dsp_config.defaultPortConfig.vlan.vlanId = int(value)
+        self._dswitch.vcenter.wait_for_tasks([self.content.ReconfigureDVPortgroup_Task(dsp_config)])
+
+    def set_vlan(self, value: Union[int, str, List[Tuple[int, int]]]) -> None:
+        """
+        Set VLAN on portgroup. Method created to keep compatibility between standard portgroup and DS portgroup.
+
+        :param value: VLAN to set.
+        """
+        if value == 4095:
+            self.vlan = [(0, 4094)]
+        else:
+            self.vlan = value
+
+    def set_forged_transmit(self, status: bool) -> None:
+        """
+        Set Forged transmit parameter on Distributed portgroup.
+
+        :param status: Desired status of DS Forged transmits parameter, True - Accept, False - Deny.
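+
+        Example (sketch): pg.set_forged_transmit(True) switches the policy to Accept.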
+        """
+        dsp_config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+        dsp_config.configVersion = self.content.config.configVersion
+        dsp_config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+        dsp_config.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+        dsp_config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy()
+        dsp_config.defaultPortConfig.securityPolicy.forgedTransmits.value = status
+
+        self._dswitch.vcenter.wait_for_tasks([self.content.ReconfigureDVPortgroup_Task(dsp_config)])
diff --git a/mfd_esxi/vcenter/distributed_switch/uplink.py b/mfd_esxi/vcenter/distributed_switch/uplink.py
new file mode 100644
index 0000000..a3c3e05
--- /dev/null
+++ b/mfd_esxi/vcenter/distributed_switch/uplink.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""DSUplink wrapper."""
+import logging
+from typing import Dict, Union, Optional, TYPE_CHECKING
+from pyVmomi import vim
+from time import sleep
+
+from mfd_common_libs import log_levels, add_logging_level
+from ..exceptions import VCenterDistributedSwitchUplinkRemovalFailed
+
+if TYPE_CHECKING:
+    from .dswitch import DSwitch
+    from ..host import Host
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class DSUplink(object):
+    """DSUplink wrapper."""
+
+    def __init__(self, name: str, number: int, dswitch: "DSwitch"):
+        """
+        Initialize instance.
+
+        :param name: Name of uplink.
+        :param number: Number of uplink.
+        :param dswitch: Distributed Switch.
+        """
+        self._name = name
+        self._number = number
+        self._dswitch = dswitch
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def name(self) -> str:
+        """Get name for DSUplink."""
+        return self._name
+
+    @property
+    def nics(self) -> Dict[str, Dict[str, Union[str, "Host"]]]:
+        """Get all nics from uplink."""
+        nics = {}
+        for host in self._dswitch.content.config.host:
+            port_key = int(host.uplinkPortKey[0])
+            for nic in host.config.backing.pnicSpec:
+                if int(nic.uplinkPortKey) - port_key == self._number:
+                    name = host.config.host.name
+                    nics[name] = {
+                        "nic": nic.pnicDevice,
+                        "host": self._dswitch.get_host(name),
+                    }
+        return nics
+
+    def add_nic(self, host: "Host", nic: str) -> None:
+        """
+        Assign new nic from host to uplink.
+
+        :param host: Host from which we take the NIC.
+        :param nic: Name of NIC, e.g. vmnic1.
+        """
+        self._set_nic_repeat(host, nic)
+
+    def del_nic(self, host: "Host") -> None:
+        """
+        Remove the nic assigned from host to this uplink.
+
+        :param host: Host from which we remove the NIC.
+        """
+        self._set_nic_repeat(host, None)
+
+    def del_all_nics(self) -> None:
+        """Remove all nics from this uplink."""
+        for host in self._dswitch.hosts:
+            self.del_nic(host)
+
+    def _set_nic_repeat(self, host: "Host", nic: Optional[str]) -> None:
+        """
+        Set host nic for uplink, retrying in case of exception.
+
+        :param host: Host.
+        :param nic: Name of nic to set or None to remove.
+        """
+        for attempt in range(1, 6):
+            try:
+                self._set_nic(host, nic)
+                return
+            except vim.fault.ConcurrentAccess:
+                logger.log(
+                    level=log_levels.MODULE_DEBUG,
+                    msg=f"Cannot complete operation due to concurrent operation. Sleeping {attempt} seconds.",
+                )
+                sleep(attempt)
+            except vim.fault.DvsOperationBulkFault as ex:
+                if nic is None:
+                    logger.log(level=log_levels.MODULE_DEBUG, msg=ex)
+                    raise VCenterDistributedSwitchUplinkRemovalFailed()
+                else:
+                    raise ex
+
+        self._set_nic(host, nic)
+
+    def _set_nic(self, host: "Host", nic: Optional[str]) -> None:
+        """
+        Set host nic for uplink.
+
+        :param host: Host.
+        :param nic: Name of nic to set or None to remove.
+        """
+        logger.log(
+            level=log_levels.MODULE_DEBUG,
+            msg=f"Setup NIC: {nic} {host} on uplink {self.name} for {self._dswitch.name}",
+        )
+        for ds_host in self._dswitch.content.config.host:
+            if ds_host.config.host.name == host.name:
+                ds_spec = self._dswitch.get_ds_config_spec()
+                logger.log(
+                    level=log_levels.MODULE_DEBUG,
+                    msg=f"DSwitch {self.name} old spec\n{ds_spec}",
+                )
+                host_spec = self._dswitch.get_ds_host_config_spec(host, vim.ConfigSpecOperation.edit)
+
+                host_spec.backing = ds_host.config.backing  # vim.dvs.HostMember.PnicBacking()
+                if nic:
+                    for nr, ps in enumerate(host_spec.backing.pnicSpec):
+                        if str(ps.uplinkPortKey) == str(self._number):
+                            del host_spec.backing.pnicSpec[nr]
+                            break
+                    nic_spec = vim.dvs.HostMember.PnicSpec()
+                    nic_spec.pnicDevice = nic
+                    nic_spec.uplinkPortKey = ds_host.uplinkPortKey[self._number]
+                    host_spec.backing.pnicSpec.append(nic_spec)
+                else:
+                    for nr, ps in enumerate(host_spec.backing.pnicSpec):
+                        if str(ps.uplinkPortKey) == str(self._number):
+                            del host_spec.backing.pnicSpec[nr]
+                            break
+                    else:
+                        return
+
+                ds_spec.host = [host_spec]
+                self._dswitch.vcenter.wait_for_tasks([self._dswitch.content.ReconfigureDvs_Task(ds_spec)])
+                break
diff --git a/mfd_esxi/vcenter/exceptions.py b/mfd_esxi/vcenter/exceptions.py
new file mode 100644
index 0000000..c08731d
--- /dev/null
+++ b/mfd_esxi/vcenter/exceptions.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+"""VCenter specific exceptions."""
+from typing import Any
+
+
+class VCenterResourceInUse(Exception):
+    """Resource is in use."""
+
+    def __init__(self, resource: Any, message: str):
+        """
+        Initialize instance.
+
+        :param resource: Resource.
+        :param message: Exception message.
+        """
+        super().__init__(f"{resource}: {message}")
+
+
+class VCenterResourceMissing(Exception):
+    """Resource is missing."""
+
+    def __init__(self, resource: Any):
+        """
+        Initialize instance.
+
+        :param resource: Name of resource.
+        """
+        super().__init__(resource)
+
+
+class VCenterDSPortgroupMissingHostMember(Exception):
+    """VCenter Distributed Switch Portgroup is missing Host member."""
+
+
+class VCenterDistributedSwitchUplinkRemovalFailed(Exception):
+    """VCenter Distributed Switch Uplink removal has failed."""
+
+
+class VCenterResourceSetupError(Exception):
+    """Resource setup failed."""
+
+    def __init__(self, resource: Any):
+        """
+        Initialize instance.
+
+        :param resource: Name of resource.
+        """
+        super().__init__(resource)
+
+
+class VCenterInvalidLogin(Exception):
+    """Invalid VCenter login used."""
+
+
+class VCenterSocketError(Exception):
+    """VCenter socket error."""
diff --git a/mfd_esxi/vcenter/host.py b/mfd_esxi/vcenter/host.py
new file mode 100644
index 0000000..3651680
--- /dev/null
+++ b/mfd_esxi/vcenter/host.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Host wrapper."""
+import logging
+from typing import Optional, Any, Generator, TYPE_CHECKING
+from pyVmomi import vim
+
+from mfd_common_libs import log_levels, add_logging_level
+from .virtual_machine import VirtualMachine
+from .virtual_switch.vswitch import VSwitch
+from .datastore import Datastore
+from .exceptions import VCenterResourceInUse
+from .utils import get_obj_from_iter
+
+if TYPE_CHECKING:
+    from .cluster import Cluster
+    from .datacenter import Datacenter
+    from .vcenter import VCenter
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class Host(object):
+    """Host wrapper."""
+
+    def __init__(self, name: str, datacenter: "Datacenter", cluster: Optional["Cluster"] = None):
+        """
+        Initialize instance.
+
+        :param name: Name of host.
+        :param datacenter: Datacenter.
+        :param cluster: Cluster.
+        """
+        self._name = name
+        self._datacenter = datacenter
+        self._cluster = cluster
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def content(self) -> "vim.HostSystem":
+        """Get content of host in API."""
+        return get_obj_from_iter(
+            self.vcenter.create_view(self._datacenter.content.hostFolder, [vim.HostSystem], True),
+            self.name,
+        )
+
+    @property
+    def name(self) -> str:
+        """Get name of host."""
+        return self._name
+
+    def destroy(self) -> None:
+        """Remove host from datacenter or cluster."""
+        try:
+            if not self._cluster:
+                self.vcenter.wait_for_tasks([self.content.parent.Destroy()])
+            else:
+                raise VCenterResourceInUse(self, "Can't remove host connected to cluster.")
+        except vim.fault.ResourceInUse as e:
+            raise VCenterResourceInUse(self, e.msg)
+        except vim.fault.NotFound:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Nothing to remove. Host: {self.name} does not exist.",
+            )
+
+    @property
+    def vcenter(self) -> "VCenter":
+        """Get VCenter for this host."""
+        return self._datacenter.vcenter
+
+    @property
+    def datastores(self) -> Generator["Datastore", Any, None]:
+        """Get all datastores from host."""
+        return (Datastore(datastore.name, self) for datastore in self.content.datastore)
+
+    @property
+    def datacenter(self) -> "Datacenter":
+        """Get host datacenter."""
+        return self._datacenter
+
+    def get_datastore_by_name(self, name: str) -> "Datastore":
+        """
+        Get specific datastore from VCenter.
+
+        :param name: Name of datastore.
+
+        :return: Datastore.
+        """
+        return get_obj_from_iter(self.datastores, name)
+
+    @property
+    def vswitches(self) -> Generator["VSwitch", Any, None]:
+        """Get all vSwitches from host."""
+        return (
+            VSwitch(vs.name, self)
+            for vs in self.content.config.network.vswitch
+            if isinstance(vs, vim.host.VirtualSwitch)
+        )
+
+    def get_vswitch_by_name(self, name: str) -> "VSwitch":
+        """
+        Get specific vSwitch from host.
+
+        :param name: Name of vSwitch.
+
+        :return: vSwitch.
+        """
+        return get_obj_from_iter(self.vswitches, name)
+
+    def add_vswitch(self, name: str, mtu: int = 1500, ports: int = 64) -> "VSwitch":
+        """
+        Add new vSwitch to host.
+
+        :param name: Name of vSwitch.
+        :param mtu: MTU size.
+        :param ports: Number of ports in vSwitch.
+
+        :return: New vSwitch.
+        """
+        spec = vim.host.VirtualSwitch.Specification()
+        spec.numPorts = ports
+        spec.mtu = mtu
+        try:
+            self.content.configManager.networkSystem.AddVirtualSwitch(name, spec)
+        except vim.fault.AlreadyExists:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"VSwitch: {name} already exist return existing",
+            )
+        return VSwitch(name, self)
+
+    @property
+    def vms(self) -> Generator["VirtualMachine", Any, None]:
+        """Get all VMs for host."""
+        return (VirtualMachine(vm.name, self) for vm in self.vcenter.create_view(self.content, [vim.VirtualMachine]))
+
+    def get_vm(self, name: str) -> "VirtualMachine":
+        """
+        Get specific VM from host.
+
+        :param name: Name of VM.
+
+        :return: Virtual machine.
+        """
+        return get_obj_from_iter(self.vms, name)
+
+    def update_network_backing(self, config: "vim.host.NetworkConfig") -> None:
+        """
+        Update host network backing.
+
+        :param config: Host network backing configuration.
+        """
+        self.content.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
+
+    def get_connection_state(self) -> str:
+        """
+        Get connection state of the host added to the Datacenter.
+
+        :return: Connection state of the host.
+        """
+        return str(self.content.runtime.connectionState)
diff --git a/mfd_esxi/vcenter/utils.py b/mfd_esxi/vcenter/utils.py
new file mode 100644
index 0000000..f3ba9ad
--- /dev/null
+++ b/mfd_esxi/vcenter/utils.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+"""Various utilities."""
+
+from typing import TypeVar, Iterable, Optional, Callable
+
+from mfd_esxi.vcenter.exceptions import VCenterResourceMissing
+
+T = TypeVar("T")
+
+MiB = 1024 * 1024
+
+
+def get_obj_from_iter(iter_obj: Iterable[T], name: str, raise_if_missing: bool = True) -> Optional[T]:
+    """
+    Get object from iterable object by name.
+
+    :param iter_obj: Iterable object.
+    :param name: Name for the object.
+    :param raise_if_missing: If true exception will be raised when object is not found.
+
+    :return: Object.
+    :raise VCenterResourceMissing: Exception when object was not found.
+    """
+    # Materialize first: iter_obj may be a generator, which would already be
+    # exhausted when building the error message below.
+    objects = list(iter_obj)
+    for obj in objects:
+        if obj.name == name:
+            return obj
+    if raise_if_missing:
+        raise VCenterResourceMissing(f"{name} in:{objects}")
+    else:
+        return None
+
+
+def get_first_match_from_iter(
+    iter_obj: Iterable[T], predicate: Callable[[T], bool] = lambda o: True, default: Optional[T] = None
+) -> Optional[T]:
+    """
+    Get object from iterable object by predicate.
+
+    :param iter_obj: Iterable object.
+    :param predicate: Predicate applied to iterable object.
+    :param default: Default value returned if no match will occur.
+
+    :return: First predicate match from iterable object.
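+
+    Example (sketch, with a hypothetical predicate):
+        host = get_first_match_from_iter(vcenter.hosts, lambda h: h.name == "10.10.10.10")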
+ """ + return next(filter(predicate, iter_obj), default) diff --git a/mfd_esxi/vcenter/vcenter.py b/mfd_esxi/vcenter/vcenter.py new file mode 100644 index 0000000..b458032 --- /dev/null +++ b/mfd_esxi/vcenter/vcenter.py @@ -0,0 +1,251 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""VCenter wrapper.""" +import logging +from itertools import chain +from http.client import HTTPException +from socket import error as socket_error +from atexit import register as exit_register +from typing import List, Union, Any, Generator, Iterable, Optional, Type, TYPE_CHECKING +from pyVim import connect as pyvmomi_connect +from pyVmomi import vim +from pyVmomi import vmodl +from packaging.version import parse as version_parse, Version + +from mfd_common_libs import log_levels, add_logging_level +from .datacenter import Datacenter +from .utils import get_obj_from_iter +from .exceptions import VCenterInvalidLogin, VCenterSocketError + +if TYPE_CHECKING: + from .cluster import Cluster + from .host import Host + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class VCenter: + """VCenter wrapper.""" + + def __init__(self, ip: str, login: str, password: str, port: int = 443): + """ + Initialize instance. + + :param ip: VCenter IP address. + :param login: Login name. + :param password: Password. + :param port: Port number. + """ + self.__service = None + self._content = None + self._ip = ip + self._login = login + self._password = password + self._port = port + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self._ip}')" + + @property + def content(self) -> "vim.ServiceInstance": + """Get content of VCenter in API.""" + try: + if self.__service: + if self._content.sessionManager.currentSession: + return self._content + else: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"{self._ip} the session has expired", + ) + except (HTTPException, ConnectionError): + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"{self._ip} HTTP connection error, reconnecting", + ) + + self._content = self._reconnect() + return self._content + + def wait_for_tasks(self, tasks: List["vim.Task"]) -> None: # noqa: C901 + """ + Wait for task to end. + + :param tasks: List of task to process. + """ + task_list = [str(task) for task in tasks] + if task_list: + # Create filter + obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task) for task in tasks] + property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, pathSet=[], all=True) + filter_spec = vmodl.query.PropertyCollector.FilterSpec() + filter_spec.objectSet = obj_specs + filter_spec.propSet = [property_spec] + pcfilter = self.content.propertyCollector.CreateFilter(filter_spec, True) + try: + version, state = None, None + # Loop looking for updates till the state moves to a completed state. 
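+                # Each WaitForUpdates call blocks until a watched task reports
+                # a property change; finished tasks are then pruned from
+                # task_list until none remain.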
+ while len(task_list): + update = self.content.propertyCollector.WaitForUpdates(version) + for filter_set in update.filterSet: + for obj_set in filter_set.objectSet: + task = obj_set.obj + for change in obj_set.changeSet: + if change.name == "info": + state = change.val.state + elif change.name == "info.state": + state = change.val + else: + continue + + if not str(task) in task_list: + continue + + if state == vim.TaskInfo.State.success: + # Remove task from taskList + task_list.remove(str(task)) + elif state == vim.TaskInfo.State.error: + raise task.info.error + # Move to next version + version = update.version + finally: + if pcfilter: + pcfilter.Destroy() + + @property + def datacenters(self) -> Generator["Datacenter", Any, None]: + """Get all datacenters.""" + return (Datacenter(dc.name, self) for dc in self.create_view(self.content.rootFolder, [vim.Datacenter])) + + def get_datacenter_by_name(self, name: str) -> "Datacenter": + """Get specific datacenter from VCenter. + + :param name: Name of datacenter. + + :return: Specific datacenter. + """ + return get_obj_from_iter(self.datacenters, name) + + def add_datacenter(self, name: str) -> "Datacenter": + """ + Add new datacenter. + + :param name: Name of datacenter + + :return: New datacenter. + """ + try: + self.content.rootFolder.CreateDatacenter(name) + except vim.fault.DuplicateName: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Datacenter: {name} already exist return existing", + ) + return Datacenter(name, self) + + @property + def clusters(self) -> Iterable["Cluster"]: + """Get all clusters in VCenter.""" + return chain(*(dc.clusters for dc in self.datacenters)) + + def get_cluster_by_name(self, name: str) -> "Cluster": + """ + Get specific cluster from VCenter. + + :param name: Name of cluster. + + :return: Specific cluster. + """ + return get_obj_from_iter(self.clusters, name) + + @property + def hosts(self) -> Iterable["Host"]: + """Get all hosts from VCenter.""" + return chain(*(dc.hosts for dc in self.datacenters)) + + def get_host_by_ip(self, ip: str) -> "Host": + """ + Get specific host from VCenter. + + :param ip: IP of host. + + :return: Specific host. + """ + return get_obj_from_iter(self.hosts, ip) + + def create_view( + self, + folder: Union[ + "vim.Folder", + "vim.Datacenter", + "vim.ClusterComputeResource", + "vim.HostSystem", + ], + types: Optional[List[Type["vim.ManagedEntity"]]], + recursive: bool = False, + ) -> List[ + Union[ + "vim.dvs.VmwareDistributedVirtualSwitch", + "vim.Datacenter", + "vim.ClusterComputeResource", + "vim.HostSystem", + "vim.VirtualMachine", + ] + ]: + """ + Create a ContainerView managed object for this session. + + :param folder: A reference to an instance of a Folder, Datacenter, Resource, HostSystem. + :param types: An optional list of managed entity types. + :param recursive: Recursive search. + + :return: Container view. + """ + return self.content.viewManager.CreateContainerView(folder, types, recursive).view + + def _connect(self) -> "vim.ServiceInstance": + """ + Connect to the specified server using API. + + :return: Service content. 
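+
+        Note: the connection is created with disableSslCertValidation=True,
+        so self-signed vCenter certificates are accepted.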
+ """ + try: + logger.log(level=log_levels.MODULE_DEBUG, msg=f"Connecting to: {self._ip}") + + self.__service = pyvmomi_connect.SmartConnect( + host=self._ip, + user=self._login, + pwd=self._password, + port=self._port, + connectionPoolTimeout=-1, + disableSslCertValidation=True, + ) + exit_register(self._disconnect) + return self.__service.RetrieveServiceContent() + except vim.fault.InvalidLogin: + raise VCenterInvalidLogin + except socket_error: + raise VCenterSocketError + + def _disconnect(self) -> None: + """Disconnect from server.""" + if self.__service: + pyvmomi_connect.Disconnect(self.__service) + self.__service = None + self._content = None + + def _reconnect(self) -> "vim.ServiceInstance": + """ + Reconnect to server. + + :return: Service content. + """ + self._disconnect() + return self._connect() + + @property + def version(self) -> Version: + """Get version of vSphere.""" + return version_parse(self._content.about.apiVersion) diff --git a/mfd_esxi/vcenter/virtual_adapter.py b/mfd_esxi/vcenter/virtual_adapter.py new file mode 100644 index 0000000..81cb848 --- /dev/null +++ b/mfd_esxi/vcenter/virtual_adapter.py @@ -0,0 +1,231 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +# pylint: disable=protected-access +"""VirtualAdapter wrapper.""" +import logging +from pyVmomi import vim +from typing import Union, TYPE_CHECKING + +from mfd_common_libs import log_levels, add_logging_level +from .exceptions import VCenterResourceMissing, VCenterResourceInUse + +if TYPE_CHECKING: + from .host import Host + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class VirtualAdapter(object): + """VirtualAdapter wrapper.""" + + _MTU_LOOKUP = {"default": 1500, "4k": 4074, "9k": 9000} + + def __init__(self, name: str, host: "Host"): + """ + Initialize instance. + + :param name: Virtual adapter name. + :param host: Host. + """ + self._name = name + self._host = host + self.eth = name + self.portgroup = None + + def __repr__(self): + """Get string representation.""" + return f"{self.__class__.__name__}('{self.name}') in {self._host}" + + def set_vlan(self, vlan_id: int) -> None: + """ + Set vlan on portgroup associated with the Virtual Adapter. + + :param vlan_id: vlan number + """ + self.portgroup.set_vlan(vlan_id) + + @property + def content(self) -> "vim.host.VirtualNic": + """Get content of VirtualNetworkAdapter.""" + for virtual_nic in self._host.content.config.network.vnic: + if virtual_nic.device == self.name: + return virtual_nic + raise VCenterResourceMissing(self) + + @property + def name(self) -> str: + """Get name for VirtualAdapter.""" + return self._name + + def destroy(self) -> None: + """Destroy vnic.""" + try: + self._host.content.configManager.networkSystem.RemoveVirtualNic(self.name) + except vim.fault.ResourceInUse as e: + raise VCenterResourceInUse(self, e.msg) + except vim.fault.NotFound: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Nothing to remove. 
Virtual Adapter: {self.name} does not exist.",
+            )
+
+    @property
+    def mac(self) -> str:
+        """MAC value for virtual adapter."""
+        return self.content.spec.mac
+
+    @property
+    def ip(self) -> str:
+        """Get IP value for virtual adapter."""
+        if self.content.spec.ip.ipAddress and not self.content.spec.ip.ipAddress.startswith("169."):
+            return self.content.spec.ip.ipAddress
+        elif self.content.spec.ip.ipV6Config.ipV6Address:
+            ips = self.content.spec.ip.ipV6Config.ipV6Address
+            for ipv6 in ips:
+                if not ipv6.ipAddress.lower().startswith("fe80"):
+                    return ipv6.ipAddress
+            return ""
+        else:
+            return ""
+
+    @property
+    def mask(self) -> Union[int, str]:
+        """Get netmask for IPv4 or prefixLength for IPv6 for virtual adapter."""
+        if self.content.spec.ip.ipAddress and not self.content.spec.ip.ipAddress.startswith("169."):
+            return self.content.spec.ip.subnetMask
+        elif self.content.spec.ip.ipV6Config.ipV6Address:
+            ips = self.content.spec.ip.ipV6Config.ipV6Address
+            for ipv6 in ips:
+                if not ipv6.ipAddress.lower().startswith("fe80"):
+                    return ipv6.prefixLength
+            return ""
+        else:
+            return ""
+
+    @property
+    def mtu(self) -> int:
+        """Get MTU value for virtual adapter."""
+        return self.content.spec.mtu
+
+    @mtu.setter
+    def mtu(self, value: Union[int, str]) -> None:
+        """
+        Set MTU value for virtual adapter.
+
+        :param value: MTU value.
+        """
+        spec = self.content.spec
+        spec.mtu = self._MTU_LOOKUP.get(value) if value in self._MTU_LOOKUP.keys() else int(value)
+        self._host.content.configManager.networkSystem.UpdateVirtualNic(self.name, spec)
+
+    @property
+    def tso(self) -> bool:
+        """
+        Get TSO value for virtual adapter.
+
+        :rtype: bool
+        """
+        return self.content.spec.tsoEnabled
+
+    @tso.setter
+    def tso(self, value: bool) -> None:
+        """
+        Set TSO value for virtual adapter.
+
+        :param value: TSO value True or False.
+        """
+        spec = self.content.spec
+        spec.tsoEnabled = value
+        self._host.content.configManager.networkSystem.UpdateVirtualNic(self.name, spec)
+
+    @property
+    def vmotion(self) -> bool:
+        """Get vmotion value for virtual adapter."""
+        return self._get_property(vim.host.VirtualNicManager.NicType.vmotion)
+
+    @vmotion.setter
+    def vmotion(self, value: bool) -> None:
+        """
+        Set vmotion for virtual adapter.
+
+        :param value: True or False.
+        """
+        self._set_property(vim.host.VirtualNicManager.NicType.vmotion, value)
+
+    @property
+    def management(self) -> bool:
+        """Get management value for virtual adapter."""
+        return self._get_property(vim.host.VirtualNicManager.NicType.management)
+
+    @management.setter
+    def management(self, value: bool) -> None:
+        """
+        Set management for virtual adapter.
+
+        :param value: True or False.
+        """
+        self._set_property(vim.host.VirtualNicManager.NicType.management, value)
+
+    @property
+    def vsan(self) -> bool:
+        """Get vsan value for virtual adapter."""
+        return self._get_property(vim.host.VirtualNicManager.NicType.vsan)
+
+    @vsan.setter
+    def vsan(self, value: bool) -> None:
+        """
+        Set vsan for virtual adapter.
+
+        :param value: True or False.
+        """
+        self._set_property(vim.host.VirtualNicManager.NicType.vsan, value)
+
+    @property
+    def provisioning(self) -> bool:
+        """Get provisioning value for virtual adapter."""
+        return self._get_property(vim.host.VirtualNicManager.NicType.vSphereProvisioning)
+
+    @provisioning.setter
+    def provisioning(self, value: bool) -> None:
+        """Set provisioning for virtual adapter.
+
+        :param value: True or False.
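+
+        Example (sketch): adapter.provisioning = True enables vSphere
+        Provisioning traffic on this adapter.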
+        """
+        self._set_property(vim.host.VirtualNicManager.NicType.vSphereProvisioning, value)
+
+    def _get_property(self, nic_type: vim.host.VirtualNicManager.NicType) -> bool:
+        """
+        Get property value from virtual adapter.
+
+        :param nic_type: Type of property that we want to get.
+
+        :return: True if enabled, otherwise False.
+        """
+        nic_type = str(nic_type)
+        query = self._host.content.configManager.virtualNicManager.QueryNetConfig(nic_type)
+        vnic = f"{nic_type}.key-vim.host.VirtualNic-{self.name}"
+        return vnic in set(query.selectedVnic)
+
+    def _set_property(self, nic_type: vim.host.VirtualNicManager.NicType, value: bool) -> None:
+        """
+        Set the property value for virtual adapter.
+
+        :param nic_type: Type of property that we want to set.
+        :param value: True or False.
+        """
+        if value:
+            self._host.content.configManager.virtualNicManager.SelectVnic(str(nic_type), self.name)
+        else:
+            self._host.content.configManager.virtualNicManager.DeselectVnic(str(nic_type), self.name)
+
+    @classmethod
+    def get_mtu(cls: "VirtualAdapter", mtu: Union[str, int]) -> int:
+        """
+        Get MTU as int. Legacy method, to be deprecated.
+
+        :param mtu: MTU.
+
+        :return: MTU.
+        """
+        return cls._MTU_LOOKUP.get(mtu) if mtu in cls._MTU_LOOKUP.keys() else int(mtu)
diff --git a/mfd_esxi/vcenter/virtual_machine.py b/mfd_esxi/vcenter/virtual_machine.py
new file mode 100644
index 0000000..f254b4b
--- /dev/null
+++ b/mfd_esxi/vcenter/virtual_machine.py
@@ -0,0 +1,292 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""VirtualMachine wrapper."""
+import logging
+from typing import Optional, Any, Generator, Union, TYPE_CHECKING
+from pyVmomi import vim
+from time import sleep
+
+from mfd_common_libs import log_levels, add_logging_level
+from .utils import get_obj_from_iter
+from .distributed_switch.portgroup import DSPortgroup
+from .virtual_switch.portgroup import VSPortgroup
+
+if TYPE_CHECKING:
+    from .datastore import Datastore
+    from .host import Host
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+INTERVAL_TIMEOUT = 5
+INTERVAL_POWER_OFF = 30
+
+HEARTBEAT_TIMEOUT = 300
+POWER_STATE_TIMEOUT = 60
+
+
+class VirtualMachine(object):
+    """VirtualMachine wrapper."""
+
+    def __init__(self, name: str, host: "Host"):
+        """
+        Initialize instance.
+
+        :param name: Name of VM.
+        :param host: Host.
+        """
+        self._name = name
+        self._host = host
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def content(self) -> "vim.VirtualMachine":
+        """Get content of VM in API."""
+        return get_obj_from_iter(
+            self._host.vcenter.create_view(self._host.content, [vim.VirtualMachine]),
+            self.name,
+        )
+
+    @property
+    def name(self) -> str:
+        """Get name of VM."""
+        return self._name
+
+    def unregister(self) -> None:
+        """Unregister VM from host."""
+        self.power_off()
+        self.content.Unregister()
+
+    @property
+    def power_state(self) -> "vim.VirtualMachine.PowerState":
+        """Get power state for virtual machine."""
+        return self.content.runtime.powerState
+
+    def power_off(self, wait: bool = True) -> Optional["vim.Task"]:
+        """
+        Power off virtual machine.
+
+        :param wait: If true method will wait for powered off.
+
+        :return: Task if operation is in progress otherwise None.
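+
+        Example (sketch): task = vm.power_off(wait=False) starts the power-off
+        and returns the task without waiting.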
+        """
+        if self.power_state == vim.VirtualMachine.PowerState.poweredOn:
+            task = self.content.PowerOff()
+            if not wait:
+                return task
+            self._host.vcenter.wait_for_tasks([task])
+
+    def power_on(self, wait: bool = True) -> Optional["vim.Task"]:
+        """
+        Power on virtual machine.
+
+        :param wait: If true method will wait for powered on.
+
+        :return: Task if operation is in progress otherwise None.
+        """
+        if self.power_state == vim.VirtualMachine.PowerState.poweredOff:
+            task = self.content.PowerOn()
+            if not wait:
+                return task
+            self._host.vcenter.wait_for_tasks([task])
+
+    def restart(self, wait: bool = True) -> Optional["vim.Task"]:
+        """
+        Restart virtual machine.
+
+        :param wait: If true method will wait for restart.
+
+        :return: Task if operation is in progress otherwise None.
+        """
+        if self.power_state == vim.VirtualMachine.PowerState.poweredOff:
+            return self.power_on(wait)
+        task = self.content.Reset()
+        if not wait:
+            return task
+        self._host.vcenter.wait_for_tasks([task])
+
+    def shutdown(self) -> bool:
+        """
+        Shutdown guest.
+
+        :return: True if shutdown success otherwise False.
+        """
+        if self._wait_for_heartbeat():
+            self.content.ShutdownGuest()
+            return self._wait_for_power_state(vim.VirtualMachine.PowerState.poweredOff)
+        return False
+
+    def reboot(self) -> bool:
+        """
+        Reboot guest.
+
+        :return: True if reboot success otherwise False.
+        """
+        if self._wait_for_heartbeat():
+            self.content.RebootGuest()
+            sleep(INTERVAL_POWER_OFF)
+            return self._wait_for_heartbeat()
+        return False
+
+    def relocate(
+        self,
+        datastore: "Datastore",
+        priority: "vim.VirtualMachine.MovePriority" = vim.VirtualMachine.MovePriority.defaultPriority,
+        wait: bool = True,
+    ) -> vim.Task:
+        """
+        Relocate a virtual machine to a specific host and datastore.
+
+        :param datastore: Datastore to which all VM disks will be moved.
+        :param priority: The task priority (defaultPriority, highPriority, lowPriority).
+        :param wait: If true method will wait for migrate.
+
+        :return: Task.
+        """
+        relocate_spec = vim.vm.RelocateSpec()
+        relocate_spec.host = datastore.host.content
+        relocate_spec.pool = datastore.host.content.parent.resourcePool
+        relocate_spec.datastore = datastore.content
+        task = self.content.Relocate(spec=relocate_spec, priority=priority)
+        if wait:
+            self._host.vcenter.wait_for_tasks([task])
+        return task
+
+    @property
+    def network_adapters(
+        self,
+    ) -> Generator[vim.vm.device.VirtualEthernetCard, Any, None]:
+        """Get all adapters attached to VM."""
+        return (
+            adapter
+            for adapter in self.content.config.hardware.device
+            if isinstance(adapter, vim.vm.device.VirtualEthernetCard)
+        )
+
+    @property
+    def sriov_adapters(
+        self,
+    ) -> Generator[vim.vm.device.VirtualSriovEthernetCard, Any, None]:
+        """Get all SR-IOV adapters attached to VM."""
+        return (
+            adapter
+            for adapter in self.content.config.hardware.device
+            if isinstance(adapter, vim.vm.device.VirtualSriovEthernetCard)
+        )
+
+    def add_vmxnet3_adapter(self, portgroup: Union["DSPortgroup", "VSPortgroup"]) -> None:
+        """
+        Add new vmxnet3 adapter to VM.
+
+        :param portgroup: Portgroup where adapter should be assigned.
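+
+        Example (sketch): vm.add_vmxnet3_adapter(pg) hot-adds a vmxnet3 NIC
+        backed by the given standard or distributed portgroup.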
+ """ + nic = vim.vm.device.VirtualDeviceSpec() + nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + nic.device = vim.vm.device.VirtualVmxnet3() + + if isinstance(portgroup, VSPortgroup): + nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + nic.device.backing.network = portgroup.content + nic.device.backing.deviceName = portgroup.name + elif isinstance(portgroup, DSPortgroup): + nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + nic.device.backing.port = vim.dvs.PortConnection() + nic.device.backing.port.portgroupKey = portgroup.content.key + nic.device.backing.port.switchUuid = portgroup.content.config.distributedVirtualSwitch.uuid + + nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() + nic.device.connectable.startConnected = True + nic.device.connectable.allowGuestControl = True + + config = vim.vm.ConfigSpec(deviceChange=[nic]) + self._host.vcenter.wait_for_tasks([self.content.ReconfigVM_Task(config)]) + + def add_sriov_adapter(self, portgroup: Union["DSPortgroup", "VSPortgroup"], adapter_name: str) -> None: + """ + Add new sriov adapter to VM. + + :param portgroup: Portgroup where adapter should be assigned. + :param adapter_name: Adapter name. + """ + nic = vim.vm.device.VirtualDeviceSpec() + nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + nic.device = vim.vm.device.VirtualSriovEthernetCard() + + if isinstance(portgroup, VSPortgroup): + nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + nic.device.backing.network = portgroup.content + nic.device.backing.deviceName = portgroup.name + elif isinstance(portgroup, DSPortgroup): + nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + nic.device.backing.port = vim.dvs.PortConnection() + nic.device.backing.port.portgroupKey = portgroup.content.key + nic.device.backing.port.switchUuid = portgroup.content.config.distributedVirtualSwitch.uuid + + nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() + nic.device.connectable.startConnected = True + + nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo() + nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo() + pnic = [x for x in self.content.summary.runtime.host.config.network.pnic if x.device == adapter_name] + nic.device.sriovBacking.physicalFunctionBacking.id = pnic[0].pci + nic.device.allowGuestOSMtuChange = True + + config = vim.vm.ConfigSpec(deviceChange=[nic]) + self._host.vcenter.wait_for_tasks([self.content.ReconfigVM_Task(config)]) + + def remove_adapter(self, adapter: vim.vm.device.VirtualEthernetCard) -> None: + """ + Remove adapter from VM. + + :param adapter: Adapter to remove. + """ + nic = vim.vm.device.VirtualDeviceSpec() + nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove + nic.device = adapter + + config = vim.vm.ConfigSpec(deviceChange=[nic]) + self._host.vcenter.wait_for_tasks([self.content.ReconfigVM_Task(config)]) + + def _wait_for_heartbeat(self, timeout: int = HEARTBEAT_TIMEOUT, interval: int = INTERVAL_TIMEOUT) -> bool: + """ + Wait for os guest heartbeat. + + :param timeout: Timeout for heartbeat. + :param interval: Interval between checks. + + :return: True if guest is running otherwise False. 
+        """
+        heartbeat_time = 0
+        while heartbeat_time < timeout:
+            if self.content.guestHeartbeatStatus == vim.ManagedEntity.Status.green:
+                break
+            sleep(interval)
+            heartbeat_time += interval
+        return self.content.guestHeartbeatStatus == vim.ManagedEntity.Status.green
+
+    def _wait_for_power_state(
+        self,
+        state: vim.VirtualMachine.PowerState,
+        timeout: int = POWER_STATE_TIMEOUT,
+        interval: int = INTERVAL_TIMEOUT,
+    ) -> bool:
+        """
+        Wait for power state.
+
+        :param state: Power state.
+        :param timeout: Timeout for power state.
+        :param interval: Interval between checks.
+
+        :return: True if machine is in expected power state.
+        """
+        power_state_time = 0
+        while power_state_time < timeout:
+            if self.power_state == state:
+                break
+            sleep(interval)
+            power_state_time += interval
+        return self.power_state == state
diff --git a/mfd_esxi/vcenter/virtual_switch/__init__.py b/mfd_esxi/vcenter/virtual_switch/__init__.py
new file mode 100644
index 0000000..1a1813c
--- /dev/null
+++ b/mfd_esxi/vcenter/virtual_switch/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+# pylint: disable=protected-access
+"""Virtual Switch wrapper."""
diff --git a/mfd_esxi/vcenter/virtual_switch/portgroup.py b/mfd_esxi/vcenter/virtual_switch/portgroup.py
new file mode 100644
index 0000000..84476ae
--- /dev/null
+++ b/mfd_esxi/vcenter/virtual_switch/portgroup.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""VSPortgroup wrapper."""
+import logging
+from typing import Any, Generator, Optional, TYPE_CHECKING
+from pyVmomi import vim
+
+from mfd_common_libs import log_levels, add_logging_level
+from ..utils import get_obj_from_iter
+from ..exceptions import VCenterResourceInUse
+from ..virtual_adapter import VirtualAdapter
+
+if TYPE_CHECKING:
+    from ..host import Host
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class VSPortgroup(object):
+    """VSPortgroup wrapper."""
+
+    def __init__(self, name: str, host: "Host"):
+        """
+        Initialize instance.
+
+        :param name: Name of portgroup.
+        :param host: Host.
+        """
+        self._name = name
+        self._host = host
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def name(self) -> str:
+        """Name for portgroup."""
+        return self._name
+
+    def destroy(self) -> None:
+        """Destroy VSPortgroup and remove virtual adapter connected to the portgroup."""
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"Removing portgroup: {self.name}")
+        try:
+            for virtual_adapter in self.virtual_adapters:
+                virtual_adapter.destroy()
+            self._host.content.configManager.networkSystem.RemovePortGroup(self.name)
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"Portgroup {self.name} destroyed")
+        except vim.fault.ResourceInUse as e:
+            raise VCenterResourceInUse(self, e.msg)
+        except vim.fault.NotFound:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Nothing to remove. Portgroup: {self.name} does not exist.",
+            )
+
+    @property
+    def virtual_adapters(self) -> Generator["VirtualAdapter", Any, None]:
+        """
+        Get all virtual adapters from vSwitch.
+
+        :return: Generator with all adapters.
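+
+        Example (sketch): list(pg.virtual_adapters) materializes the vmk
+        adapters currently attached to this portgroup.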
+        """
+        return (
+            VirtualAdapter(virtual_nic.device, self._host)
+            for virtual_nic in self._host.content.config.network.vnic
+            if virtual_nic.portgroup == self.name
+        )
+
+    def get_virtual_adapter_by_name(self, name: str) -> "VirtualAdapter":
+        """
+        Get specific virtual adapter from vSwitch.
+
+        :param name: Name of virtual adapter.
+
+        :return: Virtual adapter.
+        """
+        return get_obj_from_iter(self.virtual_adapters, name)
+
+    def add_virtual_adapter(
+        self, mtu: int = 1500, ip: Optional[str] = None, mask: Optional[str] = None
+    ) -> "VirtualAdapter":
+        """
+        Add new virtual adapter to portgroup. If ip and mask are None, use DHCP.
+
+        :param mtu: MTU size for virtual adapter.
+        :param ip: IP address.
+        :param mask: Netmask for IP.
+
+        :return: Newly added virtual adapter.
+        """
+        ip_config = vim.host.IpConfig()
+
+        if ip and mask:
+            ip_config.dhcp = False
+            ip_config.ipAddress = ip
+            ip_config.subnetMask = mask
+        elif not ip and not mask:
+            ip_config.dhcp = True
+        else:
+            raise RuntimeError("Unknown config: please set both IP and netmask or neither.")
+
+        virtual_nic_spec = vim.host.VirtualNic.Specification()
+        virtual_nic_spec.ip = ip_config
+        virtual_nic_spec.mtu = VirtualAdapter.get_mtu(mtu)
+
+        name = self._host.content.configManager.networkSystem.AddVirtualNic(self.name, virtual_nic_spec)
+        return self.get_virtual_adapter_by_name(name)
diff --git a/mfd_esxi/vcenter/virtual_switch/vswitch.py b/mfd_esxi/vcenter/virtual_switch/vswitch.py
new file mode 100644
index 0000000..0341892
--- /dev/null
+++ b/mfd_esxi/vcenter/virtual_switch/vswitch.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""VSwitch wrapper."""
+import logging
+from typing import Union, Any, Generator, Dict, Set, TYPE_CHECKING
+from pyVmomi import vim
+
+from mfd_common_libs import log_levels, add_logging_level
+from ..utils import get_obj_from_iter
+from ..exceptions import VCenterResourceMissing, VCenterResourceInUse
+from ..virtual_switch.portgroup import VSPortgroup
+
+if TYPE_CHECKING:
+    from ..host import Host
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class VSwitch(object):
+    """VSwitch wrapper."""
+
+    _MTU_LOOKUP = {"default": 1500, "4k": 4074, "9k": 9000}
+
+    def __init__(self, name: str, host: "Host"):
+        """
+        Initialize instance.
+
+        :param name: Name of VSwitch.
+        :param host: Host.
+        """
+        self._name = name
+        self._host = host
+
+    def __repr__(self):
+        """Get string representation."""
+        return f"{self.__class__.__name__}('{self.name}')"
+
+    @property
+    def name(self) -> str:
+        """Name of vSwitch."""
+        return self._name
+
+    @property
+    def content(self) -> vim.host.VirtualSwitch:
+        """Content of vSwitch in API."""
+        for vs in self._host.content.config.network.vswitch:
+            if vs.name == self._name and isinstance(vs, vim.host.VirtualSwitch):
+                return vs
+        raise VCenterResourceMissing(self)
+
+    def destroy(self) -> None:
+        """Remove vSwitch from host."""
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"Removing VSwitch: {self.name}")
+        try:
+            for pg in self.portgroups:
+                pg.destroy()
+            self._host.content.configManager.networkSystem.RemoveVirtualSwitch(self.name)
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"VSwitch {self.name} destroyed")
+        except vim.fault.ResourceInUse as e:
+            raise VCenterResourceInUse(self, e.msg)
+        except vim.fault.NotFound:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Nothing to remove. vSwitch: {self.name} does not exist.",
+            )
+
+    @property
+    def mtu(self) -> int:
+        """Get MTU value from vSwitch."""
+        return self.content.mtu
+
+    @mtu.setter
+    def mtu(self, value: Union[int, str]) -> None:
+        """
+        Set MTU value for vSwitch.
+
+        :param value: MTU value.
+        """
+        spec = self.content.spec
+        spec.mtu = self._MTU_LOOKUP.get(value) if value in self._MTU_LOOKUP.keys() else int(value)
+        self._host.content.configManager.networkSystem.UpdateVirtualSwitch(self.name, spec)
+
+    @property
+    def portgroups(self) -> Generator["VSPortgroup", Any, None]:
+        """Get all portgroups from vSwitch."""
+        return (
+            VSPortgroup(pg.spec.name, self._host)
+            for pg in self._host.content.config.network.portgroup
+            if pg.spec.vswitchName == self.name
+        )
+
+    def get_portgroup_by_name(self, name: str) -> "VSPortgroup":
+        """
+        Get specific portgroup from vSwitch.
+
+        :param name: Name of portgroup.
+
+        :return: Portgroup object.
+        """
+        return get_obj_from_iter(self.portgroups, name)
+
+    def add_portgroup(self, name: str) -> "VSPortgroup":
+        """
+        Add new portgroup to vSwitch.
+
+        :param name: Name for new portgroup.
+
+        :return: New portgroup.
+        """
+        logger.log(
+            level=log_levels.MODULE_DEBUG,
+            msg=f"Adding portgroup: {name} to VSwitch {self.name}",
+        )
+        spec = vim.host.PortGroup.Specification()
+        spec.name = name
+        spec.vswitchName = self.name
+
+        policy = vim.host.NetworkPolicy.SecurityPolicy()
+        policy.allowPromiscuous = True
+        policy.forgedTransmits = True
+        policy.macChanges = False
+
+        spec.policy = vim.host.NetworkPolicy(security=policy)
+        logger.log(
+            level=log_levels.MODULE_DEBUG,
+            msg=f"New portgroup: {name} for VSwitch {self.name} spec\n{spec}",
+        )
+        try:
+            self._host.content.configManager.networkSystem.AddPortGroup(portgrp=spec)
+        except vim.fault.AlreadyExists:
+            logger.log(
+                level=log_levels.MODULE_DEBUG,
+                msg=f"Portgroup: {name} already exist return existing.",
+            )
+        return VSPortgroup(name, self._host)
+
+    @property
+    def nics(self) -> Dict[str, Set[str]]:
+        """Get all nics assigned to vSwitch grouped by active, standby, unused."""
+        spec = self.content.spec
+        if spec.bridge:
+            nics = {
+                "active": set(spec.policy.nicTeaming.nicOrder.activeNic),
+                "standby": set(spec.policy.nicTeaming.nicOrder.standbyNic),
+            }
+            nics["unused"] = set(spec.bridge.nicDevice) - nics["active"] - nics["standby"]
+            return nics
+        return {"active": set(), "standby": set(), "unused": set()}
+
+    @nics.setter
+    def nics(self, value: Dict[str, Set[str]]) -> None:
+        """
+        Set nics to vSwitch.
+
+        :param value: Dict of NIC name sets keyed by "active", "standby" and "unused"; missing keys default to empty.
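+
+        Example (sketch):
+            vswitch.nics = {"active": {"vmnic0"}, "standby": {"vmnic1"}}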
+        """
+        logger.log(
+            level=log_levels.MODULE_DEBUG,
+            msg=f"Set NIC {value} on VSwitch: {self.name}",
+        )
+        new_nics = {"unused": set(), "active": set(), "standby": set()}
+        new_nics.update(value)
+        all_nics = new_nics["active"] | new_nics["standby"] | new_nics["unused"]
+
+        spec = self.content.spec
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"VSwitch {self.name} old spec\n{spec}")
+
+        if not spec.bridge and all_nics:
+            spec.bridge = vim.host.VirtualSwitch.BondBridge()
+
+        # Only touch the bridge backing when there are NICs; otherwise drop it
+        # entirely (avoids dereferencing a missing bridge).
+        if all_nics:
+            spec.bridge.nicDevice = list(all_nics)
+        else:
+            spec.bridge = None
+
+        spec.policy.nicTeaming.nicOrder.activeNic = list(new_nics["active"])
+        spec.policy.nicTeaming.nicOrder.standbyNic = list(new_nics["standby"])
+        logger.log(level=log_levels.MODULE_DEBUG, msg=f"VSwitch {self.name} new spec\n{spec}")
+
+        try:
+            self._host.content.configManager.networkSystem.UpdateVirtualSwitch(self.name, spec)
+        except vim.fault.ResourceInUse as e:
+            raise VCenterResourceInUse(self, e.msg)
diff --git a/mfd_esxi/vm_base.py b/mfd_esxi/vm_base.py
new file mode 100644
index 0000000..919f475
--- /dev/null
+++ b/mfd_esxi/vm_base.py
@@ -0,0 +1,241 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Virtual Machine."""
+
+import logging
+import re
+from ipaddress import IPv4Address, IPv6Address, ip_address
+from time import time, sleep
+from typing import List, Union, TYPE_CHECKING
+
+from mfd_common_libs import log_levels, add_logging_level
+from .exceptions import ESXiNotFound, ESXiWrongParameter, ESXiRuntimeError, ESXiVMNotRun
+
+if TYPE_CHECKING:
+    from mfd_esxi.host import ESXiHypervisor
+    from mfd_connect import RPyCConnection
+    from mfd_connect.base import ConnectionCompletedProcess
+
+logger = logging.getLogger(__name__)
+add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG)
+
+
+class ESXiVMBase:
+    """Class for VM handling."""
+
+    def __init__(self, owner: "ESXiHypervisor"):
+        """Init VM fields."""
+        self.owner: "ESXiHypervisor" = owner
+        self.id: Union[int, None] = None
+        self.name: Union[str, None] = None
+        self.datastore: Union[str, None] = None
+        self.folder: Union[str, None] = None
+        self.ip: Union[IPv4Address, IPv6Address, None] = None
+        self.connection: Union["RPyCConnection", None] = None
+
+    def initialize(self, output: str) -> None:
+        """Initialize VM based on vim-cmd vmsvc/getallvms output.
+
+        :param output: line of output
+        """
+        regex = re.search(
+            r"(?P<id>\d+)\s*(?P<name>.+)\s+\[(?P<datastore>.+)]\s(?P<folder>.+)/.+\.vmx\s+.+\s+vmx-\d+",
+            output,
+        )
+        if not regex:
+            raise ESXiNotFound("Could not find information about virtual machine")
+        self.id = int(regex.group("id"))
+        self.name = regex.group("name").strip()
+        self.datastore = regex.group("datastore").strip()
+        self.folder = regex.group("folder").strip()
+
+    @staticmethod
+    def discover(owner: "ESXiHypervisor") -> List["ESXiVMBase"]:
+        """
+        Discover all VMs on host.
+
+        :param owner: ESXi host
+        :return: list of VMs
+        """
+        output = owner.execute_command("vim-cmd vmsvc/getallvms", expected_return_codes={0}).stdout
+        vms = []
+
+        for line in output.splitlines():
+            if line and line[0].isnumeric() and "vCLS" not in line:
+                vm = ESXiVMBase(owner)
+                vm.initialize(line)
+                vms.append(vm)
+        return vms
+
+    def execute_command(self, command: str, **kwargs) -> "ConnectionCompletedProcess":
+        """
+        Shortcut for execute command.
+ + :param command: string with command + :param kwargs: parameters + :return: result of command + """ + return self.connection.execute_command(command=command, **kwargs) + + def register(self, file: str) -> None: + """Register VM. + + :param file: path to vmx file + """ + if self.id is None: + command = f"vim-cmd solo/registervm {file}" + _id = self.owner.execute_command(command, expected_return_codes={0}).stdout + self.id = int(_id) + else: + logger.log(level=log_levels.MODULE_DEBUG, msg=f"VM {self.name} already registered") + + def unregister(self) -> None: + """Unregister VM.""" + if self.id is not None: + command = f"vim-cmd vmsvc/unregister {self.id}" + self.owner.execute_command(command, expected_return_codes={0}) + self.id = None + else: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"VM {self.name} already unregistered", + ) + + def reload(self) -> None: + """Reload vmx file with VM configuration.""" + if self.id is not None: + command = f"vim-cmd vmsvc/reload {self.id}" + self.owner.execute_command(command, expected_return_codes={0}) + return + raise ESXiRuntimeError("Could not reload configuration of VM that has not been registered") + + def start(self) -> None: + """Start VM.""" + command = f"vim-cmd vmsvc/power.on {self.id}" + result = self.owner.execute_command(command, expected_return_codes={0, 1}) + if result.return_code == 0: + return + + if "The attempted operation cannot be performed in the current state" in result.stderr: + logger.log(level=log_levels.MODULE_DEBUG, msg=f"VM {self.name} already started") + return + elif "InsufficientMemoryResourcesFault" in result.stderr: + raise ESXiVMNotRun("Host does not have sufficient memory pool to power on requested VM") + + raise ESXiRuntimeError(f"Command: {command} rc: {result.return_code} output: {result.stderr}") + + def stop(self) -> None: + """Stop VM.""" + command = f"vim-cmd vmsvc/power.off {self.id}" + result = self.owner.execute_command(command, expected_return_codes={0, 1}) + if result.return_code == 0: + return + + if "The attempted operation cannot be performed in the current state" in result.stderr: + logger.log(level=log_levels.MODULE_DEBUG, msg=f"VM {self.name} already stopped") + return + raise ESXiRuntimeError(f"Command: {command} rc: {result.return_code} output: {result.stderr}") + + def shutdown(self, wait: bool = True, timeout: int = 300) -> None: + """Shutdown Guest OS. 
+ + :param wait: wait for VM to stop + :param timeout: time to wait + """ + command = f"vim-cmd vmsvc/power.shutdown {self.id}" + result = self.owner.execute_command(command, expected_return_codes={0, 1}) + if result.return_code == 0: + if wait: + self.wait_for_state("off", timeout=timeout) + return + + if "The attempted operation cannot be performed in the current state" in result.stderr: + logger.log(level=log_levels.MODULE_DEBUG, msg=f"VM {self.name} already stopped") + return + raise ESXiRuntimeError(f"Command: {command} rc: {result.return_code} output: {result.stderr}") + + def reboot(self) -> None: + """Reboot VM.""" + command = f"vim-cmd vmsvc/power.reboot {self.id}" + result = self.owner.execute_command(command, expected_return_codes={0, 1}) + if result.return_code == 0: + return + + if "The attempted operation cannot be performed in the current state" in result.stderr: + return self.start() + raise ESXiRuntimeError(f"Command: {command} rc: {result.return_code} output: {result.stderr}") + + def get_state(self) -> str: + """Get power state of VM.""" + command = f"vim-cmd vmsvc/power.getstate {self.id}" + output = self.owner.execute_command(command, expected_return_codes={0}).stdout + if "Powered on" in output: + return "on" + if "Powered off" in output: + return "off" + raise ESXiRuntimeError(f"Unexpected VM state: {output}") + + def wait_for_state(self, state: str, timeout: int = 60) -> None: + """Wait for desired state. + + :param state: state on or off + :param timeout: time to wait + """ + state = state.lower() + if state not in ["on", "off"]: + raise ESXiWrongParameter(f"Wrong parameter provided: state = {state}") + + start = time() + while time() < start + timeout: + if state == self.get_state(): + return + sleep(5) + raise ESXiRuntimeError(f"Timeout waiting for VM state: {state}") + + def get_guest_mng_ip(self, timeout: int = 300) -> Union[IPv4Address, IPv6Address]: + """Get management ip address for vm. + + :param timeout: time to get ip address from vm + :return: mng IP for virtual machine + """ + ip = self.wait_for_mng_ip(timeout) + + if ip is True: + # VM is running but is unable to provide the mng ip probably because hang + raise ESXiVMNotRun("Unable to get mng ip for vm.") + if ip is False: + # VM crashed during power on, try one reboot + self.stop() + self.start() + ip = self.wait_for_mng_ip(timeout) + if isinstance(ip, bool): + raise ESXiVMNotRun("Unable to get mng ip for vm.") + + self.ip = ip + return ip + + def wait_for_mng_ip(self, timeout: int = 300) -> Union[IPv4Address, IPv6Address, bool]: + """Wait timeout seconds for mng ip. 
+ + :param timeout: seconds to wait + :return: IP of VM or running state if not found + """ + state = False + start = time() + while time() - start < timeout: + command = f"vim-cmd vmsvc/get.guest {self.id}" + result = self.owner.execute_command(command, expected_return_codes=None) + state = 'guestState = "running"' in result.stdout + if result.return_code != 0: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Command {command} ended with code error: {result.return_code}", + ) + else: + pattern = re.compile("ipAddress =.{2}([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)") + ips = [ip for ip in re.findall(pattern, result.stdout) if ip_address(ip) in self.owner.mng_ip.network] + if ips: + # We assume that vm have only one mng ip + return ip_address(ips[0]) + sleep(5) + return state diff --git a/mfd_esxi/vm_gold.py b/mfd_esxi/vm_gold.py new file mode 100644 index 0000000..2049a41 --- /dev/null +++ b/mfd_esxi/vm_gold.py @@ -0,0 +1,403 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Virtual Machine: gold and created out of gold.""" + +import logging +import math +import os +import re +from textwrap import dedent +from time import sleep, time +from typing import TYPE_CHECKING + +from mfd_common_libs import log_levels, add_logging_level +from packaging.version import Version +from mfd_connect.local import LocalConnection +from mfd_connect.util.rpc_copy_utils import copy +from .exceptions import ( + ESXiNotFound, + ESXiWrongParameter, + ESXiVMCopyTimeout, + ESXiVFUnavailable, +) +from .vm_base import ESXiVMBase + +if TYPE_CHECKING: + from mfd_esxi.host import ESXiHypervisor + from mfd_network_adapter.network_interface.esxi import ESXiNetworkInterface + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + +ESXI_VM_ADAPTER_TYPES = ["vmxnet", "vmxnet3", "e1000", "e1000e", "vlance"] + +vmx_template = dedent( + """\ + .encoding = "UTF-8" + RemoteDisplay.maxConnections = "-1" + bios.bootRetry.delay = "10" + config.version = "8" + ehci.present = "TRUE" + ethernet0.addressType = "generated" + ethernet0.present = "TRUE" + ethernet0.uptCompatibility = "TRUE" + ethernet0.virtualDev = "vmxnet3" + ethernet0.wakeOnPcktRcv = "FALSE" + floppy0.present = "FALSE" + hpet0.present = "TRUE" + mks.enable3d = "TRUE" + pciBridge0.present = "TRUE" + pciBridge4.functions = "8" + pciBridge4.present = "TRUE" + pciBridge4.virtualDev = "pcieRootPort" + pciBridge5.functions = "8" + pciBridge5.present = "TRUE" + pciBridge5.virtualDev = "pcieRootPort" + pciBridge6.functions = "8" + pciBridge6.present = "TRUE" + pciBridge6.virtualDev = "pcieRootPort" + pciBridge7.functions = "8" + pciBridge7.present = "TRUE" + pciBridge7.virtualDev = "pcieRootPort" + powerType.powerOff = "default" + powerType.reset = "default" + powerType.suspend = "soft" + sched.cpu.affinity = "all" + sched.cpu.latencySensitivity = "normal" + sched.cpu.min = "0" + sched.cpu.shares = "normal" + sched.cpu.units = "mhz" + sched.mem.pin = "TRUE" + sched.mem.shares = "normal" + sched.scsi0:0.shares = "normal" + sched.scsi0:0.throughputCap = "off" + scsi0.present = "TRUE" + scsi0:0.deviceType = "scsi-hardDisk" + scsi0:0.present = "TRUE" + svga.autodetect = "TRUE" + svga.present = "TRUE" + toolScripts.afterPowerOn = "TRUE" + toolScripts.afterResume = "TRUE" + toolScripts.beforePowerOff = "TRUE" + toolScripts.beforeSuspend = "TRUE" + tools.syncTime = "FALSE" + tools.upgrade.policy = "manual" + usb.present = "TRUE" + virtualHW.version = "17" + vmci0.present = "TRUE" +""" +) + 
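+# Illustrative note: ESXiVM.write_vmx() below writes vmx_template verbatim and then
+# appends per-VM keys, so a rendered .vmx ends with fragments such as (example values):
+#
+#   displayName = "Base_GOLD_VM001_perf"
+#   numvcpus = "4"
+#   memSize = "2048"
+#   ethernet0.networkName = "VM Network"
+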
+vmotion_template = dedent(
+    """\
+    featMask.vm.cpuid.AES = "Val:1"
+    featMask.vm.cpuid.AVX = "Val:1"
+    featMask.vm.cpuid.CMPXCHG16B = "Val:1"
+    featMask.vm.cpuid.DS = "Val:1"
+    featMask.vm.cpuid.FAMILY = "Val:6"
+    featMask.vm.cpuid.FCMD = "Val:1"
+    featMask.vm.cpuid.IBPB = "Val:1"
+    featMask.vm.cpuid.IBRS = "Val:1"
+    featMask.vm.cpuid.Intel = "Val:1"
+    featMask.vm.cpuid.LAHF64 = "Val:1"
+    featMask.vm.cpuid.LM = "Val:1"
+    featMask.vm.cpuid.MDCLEAR = "Val:1"
+    featMask.vm.cpuid.MODEL = "Val:0x2d"
+    featMask.vm.cpuid.MWAIT = "Val:1"
+    featMask.vm.cpuid.NUMLEVELS = "Val:0xd"
+    featMask.vm.cpuid.NUM_EXT_LEVELS = "Val:0x80000008"
+    featMask.vm.cpuid.NX = "Val:1"
+    featMask.vm.cpuid.PCID = "Val:1"
+    featMask.vm.cpuid.PCLMULQDQ = "Val:1"
+    featMask.vm.cpuid.POPCNT = "Val:1"
+    featMask.vm.cpuid.RDTSCP = "Val:1"
+    featMask.vm.cpuid.SS = "Val:1"
+    featMask.vm.cpuid.SSBD = "Val:1"
+    featMask.vm.cpuid.SSE3 = "Val:1"
+    featMask.vm.cpuid.SSE41 = "Val:1"
+    featMask.vm.cpuid.SSE42 = "Val:1"
+    featMask.vm.cpuid.SSSE3 = "Val:1"
+    featMask.vm.cpuid.STEPPING = "Val:2"
+    featMask.vm.cpuid.STIBP = "Val:1"
+    featMask.vm.cpuid.VMX = "Val:1"
+    featMask.vm.cpuid.XCR0_MASTER_SSE = "Val:1"
+    featMask.vm.cpuid.XCR0_MASTER_YMM_H = "Val:1"
+    featMask.vm.cpuid.XSAVE = "Val:1"
+    featMask.vm.hv.capable = "Val:1"
+    featMask.vm.vt.realmode = "Val:1"
+    featureCompat.vm.completeMasks = "TRUE"
+"""
+)
+
+
+class ESXiVMGold:
+    """Class for discovering Gold image of VM."""
+
+    def __init__(self, owner: "ESXiHypervisor", datastore: str, name: str):
+        """
+        Initialize Gold image.
+
+        :param owner: ESXi host
+        :param datastore: datastore
+        :param name: name of image (folder and files)
+        """
+        self.owner = owner
+        self.datastore = datastore
+        self.name = name
+        self.firmware = None
+        self.guestOS = None
+        self.scsi_dev = None
+        self.primary_vmdk = None
+        self.parent_vmdk = None
+        self.primary_flat = None
+        self.parent_flat = None
+
+    def initialize(self) -> None:
+        """Initialize GOLD image based on VMX and VMDK files."""
+        path = f"/vmfs/volumes/{self.datastore}/{self.name}"
+        output = self.owner.execute_command(f"cat {path}/*.vmx", shell=True).stdout
+
+        regex = re.search(r"firmware\s*=\s*(\"efi\")", output)
+        self.firmware = "efi" if regex else ""
+
+        regex = re.search(r"guestOS\s*=\s*(\")(?P<name>.*)(\")", output)
+        self.guestOS = regex.group("name") if regex else ""
+
+        regex = re.search(r"scsi0\.virtualDev\s*=\s*(\")(?P<name>.*)(\")", output)
+        if not regex:
+            raise ESXiNotFound("Cannot fetch disk device from base image")
+        self.scsi_dev = regex.group("name")
+
+        regex = re.search(r"scsi0:0\.fileName\s*=\s*(\")(?P<name>.*)(\")", output)
+        if not regex:
+            raise ESXiNotFound("Cannot fetch disk file name from base image")
+        self.primary_vmdk = regex.group("name")
+
+        output = self.owner.execute_command(f"cat {path}/{self.primary_vmdk}").stdout
+
+        regex = re.search(r"parentFileNameHint\s*=\s*(\")(?P<name>.*)(\")", output)
+        if not regex:
+            raise ESXiNotFound("Cannot fetch parent disk file name from base image")
+        self.parent_vmdk = regex.group("name")
+
+        regex = re.search(r"SPARSE\s*(\")(?P<name>.*)(\")", output)
+        if not regex:
+            raise ESXiNotFound("Cannot fetch disk image file name from base image")
+        self.primary_flat = regex.group("name")
+
+        output = self.owner.execute_command(f"cat {path}/{self.parent_vmdk}").stdout
+
+        regex = re.search(r"VMFS\s*(\")(?P<name>.*)(\")", output)
+        if not regex:
+            raise ESXiNotFound("Cannot fetch disk image file name from base image")
+        self.parent_flat = regex.group("name")
+
+
+class ESXiVM(ESXiVMBase):
+    """Class for handling 
VMs created out of golden image.""" + + def __init__( + self, + gold: "ESXiVMGold", + name: str, + mng: str, + tag: str = "", + datastore: str = None, + cpu: int = 4, + mem: int = 2048, + iommu: bool = False, + vmotion: bool = False, + ): + """Initialize VM created out of Gold image. + + :param gold: gold image object + :param name: name of new VM + :param mng: portgroup for management network + :param tag: tag string + :param datastore: datastore to create file on + :param cpu: number of cpus + :param mem: amount of memory + :param iommu: enable IOMMU + :param vmotion: prepare machine for vMotion + """ + super().__init__(owner=gold.owner) + self.gold = gold + self.datastore = datastore if datastore is not None else gold.datastore + self.folder = name + self.name = name + self.tag = tag + self.mng = mng + self.cpu = cpu + self.mem = mem + self.iommu = iommu + self.vmotion = vmotion + self.ethernet = [] + self.pciPassthru = [] + + def write_vmx(self) -> None: + """Write VMX file to VM folder.""" + with open(os.path.join(os.getcwd(), f"{self.name}.vmx"), mode="w", newline="\n") as file: + lines = vmx_template.splitlines() + lines = [line + "\n" for line in lines] + file.writelines(lines) + if self.vmotion: + lines = vmotion_template.splitlines() + lines = [line + "\n" for line in lines] + file.writelines(lines) + + file.write(f'displayName = "{self.name}"\n') + file.write(f'nvram = "{self.name}.nvram"\n') + + file.write(f'ethernet0.networkName = "{self.mng}"\n') + if self.iommu: + cpu = math.ceil(self.cpu / 2) * 2 + file.write(f'cpuid.coresPerSocket = "{int(cpu / 2)}"\n') + file.write(f'numvcpus = "{self.cpu}"\n') + file.write('vvtd.enable = "TRUE"\n') + else: + file.write(f'cpuid.coresPerSocket = "{self.cpu}"\n') + file.write(f'numvcpus = "{self.cpu}"\n') + file.write(f'memSize = "{self.mem}"\n') + file.write(f'sched.mem.min = "{self.mem}"\n') + file.write(f'sched.mem.minSize = "{self.mem}"\n') + + if self.gold.firmware: + file.write('firmware = "efi"\n') + file.write(f'guestOS = "{self.gold.guestOS}"\n') + file.write(f'scsi0.virtualDev = "{self.gold.scsi_dev}"\n') + file.write(f'scsi0:0.fileName = "{self.gold.primary_vmdk}"\n') + + for nr, data in enumerate(self.ethernet): + for k, v in data.items(): + file.write(f'ethernet{nr + 1}.{k} = "{v}"\n') + + for nr, data in enumerate(self.pciPassthru): + for k, v in data.items(): + file.write(f'pciPassthru{nr}.{k} = "{v}"\n') + + copy( + src_conn=LocalConnection(), + dst_conn=self.owner.connection, + source=os.path.join(os.getcwd(), file.name), + target=f"/vmfs/volumes/{self.datastore}/{self.name}/{self.name}.vmx", + ) + os.remove(f"{self.name}.vmx") + + def create(self, register: bool = True, start: bool = True) -> None: + """Create VM files. 
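+
+        Example (a minimal sketch; assumes ``vm`` was prepared from a discovered gold
+        image, e.g. via ``ESXiVMMgr.prepare_vms``):
+
+        >>> vm.create(register=True, start=True)
+        >>> ip = vm.get_guest_mng_ip(timeout=300)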
+
+        :param register: register VM
+        :param start: start VM after creation
+        """
+        vm_folder = f"/vmfs/volumes/{self.datastore}/{self.name}"
+        gold_folder = f"/vmfs/volumes/{self.gold.datastore}/{self.gold.name}"
+
+        self.owner.execute_command(f"rm -rf {vm_folder}")
+        self.owner.execute_command(f"mkdir -p {vm_folder}")
+        self.owner.execute_command(f"cp {gold_folder}/{self.gold.primary_flat} {vm_folder}")
+        self.owner.execute_command(f"cp {gold_folder}/{self.gold.primary_vmdk} {vm_folder}")
+        if self.vmotion:
+            src = self.owner.execute_command(f"ls -l {gold_folder}/{self.gold.parent_flat}").stdout
+            src_len = int(src.split()[4])
+
+            self.owner.connection.start_process(f"cp {gold_folder}/{self.gold.parent_flat} {vm_folder}")
+
+            start_time = time()
+            while True:
+                sleep(15)
+                dst = self.owner.execute_command(f"ls -l {vm_folder}/{self.gold.parent_flat}").stdout
+                dst_len = int(dst.split()[4])
+                if src_len == dst_len:
+                    break
+
+                if time() > start_time + 900:
+                    raise ESXiVMCopyTimeout("Copying of VM disk file took too long")
+
+            self.owner.execute_command(f"cp {gold_folder}/{self.gold.parent_vmdk} {vm_folder}")
+        else:
+            self.owner.execute_command(f"ln -s {gold_folder}/{self.gold.parent_flat} {vm_folder}")
+            self.owner.execute_command(f"ln -s {gold_folder}/{self.gold.parent_vmdk} {vm_folder}")
+
+        self.write_vmx()
+
+        file = f"/vmfs/volumes/{self.datastore}/{self.folder}/{self.name}.vmx"
+        if register:
+            self.register(file)
+        if start:
+            self.start()
+
+    def attach_network(
+        self,
+        portgroup: str,
+        model: str = "vmxnet3",
+        rss: bool = False,
+        pf: "ESXiNetworkInterface" = None,
+    ) -> None:
+        """Attach network adapter to VM.
+
+        :param portgroup: portgroup name
+        :param model: type of adapter: sriov|ptp|vmxnet|vmxnet3|e1000|e1000e|vlance
+        :param rss: enable RSS on VMXNET3 adapter
+        :param pf: PF of SR-IOV interface
+        """
+        if model == "sriov":
+            pci_address = pf.pci_address.lspci_short
+            bus, dev, fun = re.split("[.:]+", pci_address)
+            pci = f"{int(bus, 16):04d}:{int(dev, 16):02d}.{int(fun, 16):02d}"
+            add_adapter = {
+                "networkName": portgroup,
+                "pfId": pci,
+                "deviceId": "0",
+                "vendorId": "0",
+                "systemId": "BYPASS",
+                "id": pci,
+                "allowMTUChange": "TRUE",
+                "present": "True",
+            }
+            self.pciPassthru.append(add_adapter)
+        elif model == "ptp":
+            output = self.owner.execute_command(
+                f"esxcli network sriovnic vf list -n {pf.name} | grep false | head -n 1",
+                shell=True,
+            ).stdout
+            pattern = r"(?P<domain>[a-f0-9]+):(?P<bus>[a-f0-9]+):(?P<slot>[a-f0-9]+).(?P<func>\d)"
+            match = re.search(pattern=pattern, string=output)
+            if not match:
+                raise ESXiVFUnavailable(f"No VF from {pf.name} available for testing")
+            is_8_0_3_or_newer = self.owner.esxi_version.version >= Version("8.0.3")
+            # ESXi 8.0u3 displays in hex - no need to convert for lspci command which needs PCI in hex format
+            bus = match.group("bus") if is_8_0_3_or_newer else f'{int(match.group("bus")):0{2}x}'
+            slot = match.group("slot") if is_8_0_3_or_newer else f'{int(match.group("slot")):0{2}x}'
+            func = match.group("func")
+            output = self.owner.execute_command(f"lspci -p | grep :{bus}:{slot}.{func}", shell=True).stdout
+            dev_ven = output.split()[1].split(":")
+            # .vmx file still needs PCI address in decimal format, need to convert to decimal for ESXi 8.0u3
+            if is_8_0_3_or_newer:
+                pci_pass_id = (
+                    f'{int(match.group("domain"), 16):0{5}}:{int(match.group("bus"), 16):0{3}}:'
+                    f'{int(match.group("slot"), 16):0{2}}.{int(match.group("func"), 16):0{1}}'
+                )
+            else:
+                pci_pass_id = match.group(0)
+            add_adapter = {
+                "enablePTP": "TRUE",
+                "systemId": 
"BYPASS", + "deviceId": f"0x{dev_ven[1]}", + "vendorId": f"0x{dev_ven[0]}", + "id": f"{pci_pass_id}", + "present": "True", + } + self.pciPassthru.append(add_adapter) + elif model in ESXI_VM_ADAPTER_TYPES: + add_adapter = { + "virtualDev": model, + "networkName": portgroup, + "present": "TRUE", + } + # needed for RSS, 4 is max queues for RSS engine, 3 is secondary queues + if rss: + rss_settings = {"pNicFeatures": "4", "ctxPerDev": "3"} + add_adapter.update(rss_settings) + self.ethernet.append(add_adapter) + else: + raise ESXiWrongParameter(f"Wrong parameter {model} provided for adapter type") diff --git a/mfd_esxi/vm_mgr.py b/mfd_esxi/vm_mgr.py new file mode 100644 index 0000000..b4125f2 --- /dev/null +++ b/mfd_esxi/vm_mgr.py @@ -0,0 +1,217 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Virtual Machine Manager.""" + +import logging +import re +from itertools import cycle +from typing import Generator, List, Union, TYPE_CHECKING + +from mfd_common_libs import log_levels, add_logging_level +from .vm_base import ESXiVMBase +from .vm_gold import ESXiVMGold, ESXiVM + +if TYPE_CHECKING: + from .host import ESXiHypervisor + from mfd_network_adapter.network_interface.esxi import ESXiNetworkInterface + +logger = logging.getLogger(__name__) +add_logging_level(level_name="MODULE_DEBUG", level_value=log_levels.MODULE_DEBUG) + + +class ESXiVMMgr: + """Class for VM manager.""" + + def __init__(self, owner: "ESXiHypervisor"): + """Initialize VM manager. + + :param owner: ESXi host + """ + self.owner = owner + self.vm: List[ESXiVMBase] = [] + self.gold: List[ESXiVMGold] = [] + + def initialize(self) -> None: + """Initialize VM list.""" + self.vm = ESXiVMBase.discover(self.owner) + + def refresh_ids(self) -> None: + """Refresh IDs of VMs.""" + new_vms = ESXiVMBase.discover(self.owner) + for vm in self.vm: + for nvm in new_vms: + if vm.name == nvm.name: + vm.id = nvm.id + break + + def clean(self, keep: str = "") -> None: + """Clean VMs on host based on regex. + + :param keep: if not empty - regex to match VM names to keep + """ + new = [] + for vm in self.vm: + if keep and re.search(keep, vm.name): + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Keeping VM id: {vm.id} name: {vm.name}", + ) + new.append(vm) + else: + logger.log( + level=log_levels.MODULE_DEBUG, + msg=f"Removing VM id: {vm.id} name: {vm.name}", + ) + if vm.connection: + vm.connection.disconnect() + vm.stop() + vm.unregister() + self.vm = new + + def remove_old_images(self, datastore: str) -> None: + """Remove stale VM images from local datastore. + + :param datastore: datastore to remove images + """ + + def get_all_vm_images() -> Generator[tuple, None, None]: + """Get all images present on local datastore as well as their indexes. 
+
+            :return: generator of images present on local datastore as well as their indexes
+            """
+            regexp = r"Base_[0-9A-Z]+_VM(?P<guest_index>[0-9]{3})_[0-9]+"
+
+            for entry in self.owner.connection.modules().os.scandir(f"/vmfs/volumes/{datastore}"):
+                match = re.search(regexp, entry.name)
+                if entry.is_dir() and match:
+                    yield entry.name, int(match.group("guest_index"))
+
+        for entry in filter(lambda img_info: img_info[1] > 3, get_all_vm_images()):
+            image = entry[0]
+            logger.log(level=log_levels.MODULE_DEBUG, msg=f"Image {image} will be removed")
+            command = f"rm -rf /vmfs/volumes/{datastore}/{image}"
+            self.owner.execute_command(command)
+
+    def prepare_vms(
+        self,
+        gold_datastore: str,
+        gold_name: str,
+        count: int,
+        suffix: str,
+        mng: str = "VM Network",
+        tag: str = "",
+        datastore: str = None,
+        cpu: int = 4,
+        mem: int = 2048,
+        iommu: bool = False,
+        vmotion: bool = False,
+    ) -> List["ESXiVM"]:
+        """Prepare VMs based on Gold image.
+
+        :param gold_datastore: gold datastore name
+        :param gold_name: name of gold image
+        :param count: number of VMs to prepare
+        :param suffix: suffix to add to every name on host
+        :param mng: name of management network
+        :param tag: tag string
+        :param datastore: destination datastore name
+        :param cpu: number of vCPU on VM
+        :param mem: amount of memory for VM
+        :param iommu: enable IOMMU
+        :param vmotion: prepare for vMotion
+        :return: list of prepared VM objects
+        """
+        for gold in self.gold:
+            if gold.name == gold_name:
+                break
+        else:
+            gold = ESXiVMGold(owner=self.owner, datastore=gold_datastore, name=gold_name)
+            gold.initialize()
+            self.gold.append(gold)
+
+        max_value = 0
+        for vm in self.vm:
+            if vm.name.startswith(f"{gold.name}_VM"):
+                value = vm.name[len(gold.name) + 3 : len(gold.name) + 6]
+                try:
+                    value = int(value)
+                except ValueError:
+                    continue
+                max_value = max(max_value, value)
+
+        vms = []
+        for i in range(count):
+            vms.append(
+                ESXiVM(
+                    gold=gold,
+                    name=f"{gold.name}_VM{i + max_value + 1:03}_{suffix}",
+                    mng=mng,
+                    tag=tag,
+                    datastore=datastore,
+                    cpu=cpu,
+                    mem=mem,
+                    iommu=iommu,
+                    vmotion=vmotion,
+                )
+            )
+        return vms
+
+    @staticmethod
+    def attach_network(
+        vms: List[ESXiVM],
+        portgroup: Union[str, List[str]],
+        model: str = "vmxnet3",
+        rss: bool = False,
+        adapter: Union["ESXiNetworkInterface", List["ESXiNetworkInterface"]] = None,
+    ) -> None:
+        """Attach network adapter to VMs.
+
+        :param vms: list of VMs
+        :param portgroup: portgroup name
+        :param model: type of adapter: sriov|vmxnet|vmxnet3|e1000|e1000e|vlance
+        :param rss: enable RSS on VMXNET3 adapter
+        :param adapter: PF adapter (or list of adapters) for SR-IOV interfaces
+        """
+        if isinstance(portgroup, List):
+            pg = cycle(portgroup)
+        else:
+            pg = cycle([portgroup])
+        if isinstance(adapter, List):
+            ad = cycle(adapter)
+        else:
+            ad = cycle([adapter])
+
+        for vm in vms:
+            vm.attach_network(portgroup=next(pg), model=model, rss=rss, pf=next(ad))
+
+    def create_vms(self, vms: List["ESXiVM"], register: bool = True, start: bool = True) -> None:
+        """Create VM files.
+
+        :param vms: list of VMs
+        :param register: register VMs
+        :param start: start VMs after creation
+        """
+        for vm in vms:
+            vm.create(register=register, start=start)
+            self.vm.append(vm)
+
+    @staticmethod
+    def wait_for_start_vms(vms: List["ESXiVM"], timeout: int = 300) -> None:
+        """Wait for VMs to start and create connection.
+
+        :param vms: list of VMs
+        :param timeout: time to wait for VM to start
+        """
+        for vm in vms:
+            vm.get_guest_mng_ip(timeout=timeout)
+
+    def find_vms(self, gold: str = None) -> List["ESXiVM"]:
+        """Find VMs based on criteria. 
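+
+        Example (illustrative; ``mgr`` is an initialized ``ESXiVMMgr``):
+
+        >>> vms = mgr.find_vms(gold="Base_0A1B")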
+
+        :param gold: gold image name
+        :return: list of matching VMs
+        """
+        vms = []
+        for vm in self.vm:
+            if isinstance(vm, ESXiVM):
+                if vm.gold.name == gold:
+                    vms.append(vm)
+        return vms
diff --git a/mfd_esxi/vmknic.py b/mfd_esxi/vmknic.py
new file mode 100644
index 0000000..9181c6e
--- /dev/null
+++ b/mfd_esxi/vmknic.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Support for vmkernel adapters."""
+
+import re
+from typing import List, Union, TYPE_CHECKING
+from ipaddress import IPv4Interface, IPv6Interface, ip_interface
+
+from mfd_typing import MACAddress
+from .exceptions import ESXiNotFound, ESXiNotSupported
+
+if TYPE_CHECKING:
+    from mfd_esxi.host import ESXiHypervisor
+
+
+class Vmknic:
+    """Class for vmkernel adapters."""
+
+    def __init__(self, owner: "ESXiHypervisor", name: str):
+        """
+        Initialize vmknic adapter.
+
+        :param owner: ESXi host
+        :param name: name of vmknic adapter
+        """
+        self.owner = owner
+        self.name = name
+        self.portgroup: Union[str, None] = None
+        self.ips: List[Union[IPv4Interface, IPv6Interface]] = []
+        self.mac: Union[MACAddress, None] = None
+        self.mtu: int = 1500
+
+    def initialize(self, output: str) -> None:
+        """
+        Initialize vmknic adapter based on esxcfg-vmknic -l output.
+
+        :param output: output of esxcfg-vmknic -l
+        """
+        found = False
+        self.ips = []
+        for line in output.splitlines():
+            if line.startswith(f"{self.name} "):
+                pattern = (
+                    rf"^{self.name}\s+(?P<portgroup>.+)\s+IPv(?P<family>\d)\s+(?P<ip>\S+)\s+(?P<netmask>\S+)\s+"
+                    r"(?P<broadcast>\S+)?\s+(?P<mac>\S{2}:\S{2}:\S{2}:\S{2}:\S{2}:\S{2})\s+(?P<mtu>\d+)"
+                )
+                match = re.search(pattern, line)
+                if match:
+                    self.portgroup = match.group("portgroup").strip()
+                    if match.group("ip") != "N/A" and match.group("netmask") != "N/A":
+                        self.ips.append(ip_interface(f"{match.group('ip')}/{match.group('netmask')}"))
+                    self.mac = MACAddress(match.group("mac"))
+                    self.mtu = int(match.group("mtu"))
+                    found = True
+        if not found:
+            raise ESXiNotFound(f"Unable to find vmknic with name {self.name}")
+
+    def refresh(self) -> None:
+        """Refresh settings of vmknic adapter."""
+        output = self.owner.execute_command("esxcfg-vmknic -l").stdout
+        self.initialize(output)
+
+    @staticmethod
+    def discover(owner: "ESXiHypervisor") -> List["Vmknic"]:
+        """
+        Discover all vmknic adapters on host.
+
+        :param owner: ESXi host
+        :return: list of vmknic adapters
+        """
+        output = owner.execute_command("esxcfg-vmknic -l").stdout
+        vmknic = []
+        for line in output.splitlines():
+            if line.startswith("Interface") or any(vmk_type in line for vmk_type in ["vxlan", "hyperbus"]):
+                continue
+            fields = line.split()
+            if fields:
+                vmknic.append(fields[0])
+
+        vmknic = list(set(vmknic))  # remove duplicates
+        objects = []
+        for vmk in vmknic:
+            adapter = Vmknic(owner, vmk)
+            adapter.initialize(output)
+            objects.append(adapter)
+
+        return objects
+
+    @staticmethod
+    def add_vmknic(
+        owner: "ESXiHypervisor",
+        portgroup: str,
+        mtu: int = None,
+        mac: "MACAddress" = None,
+    ) -> "Vmknic":
+        """
+        Create vmknic adapter. 
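+
+        Example (a minimal sketch; ``host`` is an ``ESXiHypervisor`` and the
+        portgroup name is illustrative):
+
+        >>> vmk = Vmknic.add_vmknic(owner=host, portgroup="PGvmnic4", mtu=9000)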
+
+        :param owner: ESXi host
+        :param portgroup: portgroup
+        :param mtu: MTU value
+        :param mac: MAC address of adapter (optional)
+        :return: created vmknic adapter object
+        """
+        command = f"esxcli network ip interface add -p {portgroup}"
+        if mtu is not None:
+            command += f" -m {mtu}"
+        if mac is not None:
+            command += f" -M {mac}"
+        owner.execute_command(command)
+        output = owner.execute_command("esxcfg-vmknic -l").stdout
+        for line in output.splitlines():
+            if f" {portgroup} " in line:
+                name = line.split()[0]
+                vmknic = Vmknic(owner, name)
+                vmknic.initialize(output)
+                return vmknic
+        raise ESXiNotFound("Could not find created vmknic")
+
+    def del_vmknic(self) -> None:
+        """Delete vmknic adapter."""
+        command = f"esxcli network ip interface remove -p {self.portgroup}"
+        self.owner.execute_command(command)
+
+    def set_mtu(self, mtu: int) -> None:
+        """
+        Set MTU for vmknic adapter.
+
+        :param mtu: MTU value
+        """
+        command = f"esxcli network ip interface set -i {self.name} -m {mtu}"
+        self.owner.execute_command(command)
+        self.mtu = mtu
+
+    def set_vlan(self, vlan: int) -> None:
+        """
+        Set VLAN of vmknic adapter.
+
+        :param vlan: VLAN number (0 - no vlan, 4095 - all vlans)
+        """
+        command = f"esxcli network vswitch standard portgroup set -p {self.portgroup} -v {vlan}"
+        self.owner.execute_command(command)
+
+    def add_ip(self, ip: Union["IPv4Interface", "IPv6Interface", str]) -> None:
+        """
+        Set IPv4 or add IPv6.
+
+        :param ip: IPv4 or IPv6
+        """
+        if isinstance(ip, str):
+            ip = ip_interface(ip)
+        if ip.version == 4:
+            command = f"esxcli network ip interface ipv4 set -i {self.name} -I {ip.ip} -N {ip.netmask} -t static"
+        elif ip.version == 6:
+            command = f"esxcli network ip interface ipv6 address add -i {self.name} -I {ip}"
+        else:
+            raise ESXiNotSupported(f"Unknown ip version {ip.version}")
+        self.owner.execute_command(command)
+        if ip.version == 4:
+            for i in self.ips:
+                if i.version == 4:
+                    self.ips.remove(i)
+                    break
+        self.ips.append(ip)
+
+    def del_ip(self, ip: Union["IPv4Interface", "IPv6Interface", str]) -> None:
+        """
+        Delete IPv6 address.
+
+        :param ip: IPv6 address to remove
+        """
+        if isinstance(ip, str):
+            ip = ip_interface(ip)
+        if ip.version == 4:
+            raise ESXiNotSupported("Unable to remove IPv4 address")
+        elif ip.version == 6:
+            command = f"esxcli network ip interface ipv6 address remove -i {self.name} -I {ip}"
+        else:
+            raise ESXiNotSupported(f"Unknown ip version {ip.version}")
+        self.owner.execute_command(command)
+        self.ips.remove(ip)
diff --git a/mfd_esxi/vswitch.py b/mfd_esxi/vswitch.py
new file mode 100644
index 0000000..b9585e5
--- /dev/null
+++ b/mfd_esxi/vswitch.py
@@ -0,0 +1,355 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+"""Support for standard vSwitch."""
+
+import re
+from typing import List, TYPE_CHECKING
+from .exceptions import ESXiNotFound, ESXiNameException, VswitchError
+
+if TYPE_CHECKING:
+    from .host import ESXiHypervisor
+
+ESXI_VSWITCH_NAME_MAX_LEN = 16
+ESXI_PORTGROUP_NAME_MAX_LEN = 39
+ESXI_PORTGROUP_VMKNIC = "PG"
+
+
+class ESXivSwitch:
+    """Class for standard vSwitch."""
+
+    def __init__(self, owner: "ESXiHypervisor", name: str):
+        """
+        Initialize vSwitch.
+
+        :param owner: ESXi host
+        :param name: name of vSwitch
+        """
+        self.owner = owner
+        self.name = name
+        self.mtu = 1500
+        self.uplinks = []
+        self.portgroups = []
+
+    @staticmethod
+    def _find_name(line: str) -> str:
+        """Find name of vSwitch in output. 
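+
+        Illustrative behaviour: for a row starting with ``vSwitch0`` both candidates
+        resolve to ``vSwitch0``; the longer candidate wins when a name overflows the
+        16-character column (whitespace split) or contains spaces (fixed-width slice).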
+ + :param line: line of text from esxcfg-vswitch-l + """ + m1 = line[0:ESXI_VSWITCH_NAME_MAX_LEN].strip() + m2 = line.split()[0] + return m1 if len(m1) > len(m2) else m2 + + def initialize(self, output: str) -> None: + """ + Initialize vSwitch based on esxcfg-vswitch -l output. + + :param output: output of esxcfg-vswitch -l + """ + self.portgroups = [] + + lines = output.splitlines() + for line in range(len(lines)): + if lines[line].startswith("Switch Name"): + nr = line + 1 + if self.name == ESXivSwitch._find_name(lines[nr]): + fields = lines[nr].split() + if "vmnic" in fields[-1]: + self.mtu = int(fields[-2]) + self.uplinks = fields[-1].split(",") + else: + self.mtu = int(fields[-1]) + break + continue + else: + raise ESXiNotFound(f"Could not find vSwitch {self.name}") + + capture = False + for line in range(nr, len(lines)): + if lines[line].startswith("Switch Name"): + break + if lines[line].startswith(" PortGroup Name"): + capture = True + continue + if capture: + name = lines[line][2 : 2 + ESXI_PORTGROUP_NAME_MAX_LEN].strip() + if len(name) > 0: + self.portgroups.append(name) + else: + capture = False + + def refresh(self) -> None: + """Refresh setting of vSwitch.""" + output = self.owner.execute_command("esxcfg-vswitch -l").stdout + self.initialize(output) + + @staticmethod + def discover(owner: "ESXiHypervisor") -> List["ESXivSwitch"]: + """ + Discover all vSwitches on host. + + :param owner: ESXi host + :return: list of vSwitches + """ + output = owner.execute_command("esxcfg-vswitch -l").stdout + vswitches = [] + + capture_vswitch = False + for line in output.splitlines(): + if capture_vswitch: + name = ESXivSwitch._find_name(line) + vswitch = ESXivSwitch(owner, name) + vswitch.initialize(output) + vswitches.append(vswitch) + capture_vswitch = False + continue + if line.startswith("Switch Name"): + capture_vswitch = True + continue + + return vswitches + + @staticmethod + def add_vswitch(owner: "ESXiHypervisor", name: str) -> "ESXivSwitch": + """ + Create vSwitch. + + :param owner: ESXi host + :param name: name of vSwitch + :return: vSwitch object + """ + if re.match(r"^[A-Za-z0-9_]+$", name): + owner.execute_command(f"esxcli network vswitch standard add -v {name}") + return ESXivSwitch(owner, name) + raise ESXiNameException("Switch name should contain only letters, digits and underscore") + + def del_vswitch(self) -> None: + """Delete vSwitch.""" + for portgroup in self.portgroups: + for vmknic in self.owner.vmknic: + if vmknic.portgroup == portgroup: + self.owner.del_vmknic(portgroup=portgroup) + break + self.owner.execute_command(f"esxcli network vswitch standard remove -v {self.name}") + + def set_mtu(self, mtu: int = 1500) -> None: + """ + Change MTU. + + :param mtu: MTU value + """ + command = f"esxcli network vswitch standard set -m {mtu} -v {self.name}" + self.owner.execute_command(command) + self.mtu = mtu + + def add_uplink(self, name: str) -> None: + """ + Add uplink. + + :param name: vmnic name + """ + if name not in self.uplinks: + command = f"esxcli network vswitch standard uplink add -u {name} -v {self.name}" + self.owner.execute_command(command) + self.uplinks.append(name) + + def del_uplink(self, name: str) -> None: + """ + Remove uplink. + + :param name: name of uplink + """ + command = f"esxcli network vswitch standard uplink remove -u {name} -v {self.name}" + self.owner.execute_command(command) + self.uplinks.remove(name) + + def add_portgroup(self, name: str) -> None: + """ + Create portgroup. 
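+
+        Example (a minimal sketch; the portgroup name is illustrative):
+
+        >>> vswitch.add_portgroup("ATNetwork")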
+ + :param name: name of portgroup + """ + if name not in self.portgroups: + if re.match(r"^[A-Za-z0-9_]+$", name): + command = f"esxcli network vswitch standard portgroup add -p {name} -v {self.name}" + self.owner.execute_command(command) + self.portgroups.append(name) + return + raise ESXiNameException("Portgroup name should contain only letters, digits and underscore") + + def del_portgroup(self, name: str) -> None: + """ + Remove portgroup. + + :param name: name of portgroup + """ + for vmknic in self.owner.vmknic: + if vmknic.portgroup == name: + self.owner.del_vmknic(portgroup=name) + break + command = f"esxcli network vswitch standard portgroup remove -p {name} -v {self.name}" + self.owner.execute_command(command) + self.portgroups.remove(name) + + def set_portgroup_vlan(self, name: str, vlan: int = 0) -> None: + """ + Set VLAN of portgroup. + + :param name: name of portgroup + :param vlan: VLAN number (0 - no vlan, 4095 - all vlans) + """ + command = f"esxcli network vswitch standard portgroup set -v {vlan} -p {name}" + self.owner.execute_command(command) + + def set_portgroup_uplinks(self, name: str, uplinks: List[str]) -> None: + """ + Set uplinks of portgroup. + + :param name: name of porgroup + :param uplinks: list of uplink names + """ + links = ",".join(uplinks) + command = f"esxcli network vswitch standard portgroup policy failover set -a {links} -s '' -p {name}" + self.owner.execute_command(command) + + def reconfigure( + self, + uplinks: List[str], + portgroups: List[str] = (), + mtu: int = 1500, + vmknic: bool = True, + ) -> None: + """ + Reconfigure vSwitch, create/remove uplinks and portgroups. + + Create vmknic adapters, set MTU. + Recover policy of vSwitch and all portgroups. + + :param uplinks: list of uplink names + :param portgroups: list of portgroup names + :param mtu: MTU value (default 1500) + :param vmknic: create portgroups for vmknic adapters and add them + """ + for uplink in self.uplinks.copy(): + if uplink not in uplinks: + self.del_uplink(uplink) + + for uplink in uplinks: + if uplink not in self.uplinks: + self.add_uplink(uplink) + + self.restore_vswitch_default() + + for portgroup in self.portgroups.copy(): + if vmknic and portgroup.startswith(f"{ESXI_PORTGROUP_VMKNIC}vmnic"): + vmnic = portgroup[len(ESXI_PORTGROUP_VMKNIC) :] + if vmnic not in uplinks: + self.del_portgroup(portgroup) + elif portgroup not in portgroups: + self.del_portgroup(portgroup) + + self.restore_portgroups_default() + + self.configure(uplinks=uplinks, portgroups=portgroups, mtu=mtu, vmknic=vmknic) + + def configure( # noqa: C901 + self, + uplinks: List[str], + portgroups: List[str] = (), + mtu: int = 1500, + vmknic: bool = True, + ) -> None: + """ + Configure freshly created vSwitch, create uplinks and portgroups, set MTU. 
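+
+        Example (a minimal sketch; uplink and portgroup names are illustrative):
+
+        >>> vswitch.configure(uplinks=["vmnic4", "vmnic5"], portgroups=["ATNetwork"], mtu=9000)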
+ + :param uplinks: list of uplink names + :param portgroups: list of portgroup names + :param mtu: MTU value (default 1500) + :param vmknic: create portgroups for vmknic adapters and add them + """ + for uplink in uplinks: + if uplink not in self.uplinks: + self.add_uplink(uplink) + + for portgroup in portgroups: + if portgroup not in self.portgroups: + self.add_portgroup(portgroup) + + for uplink in uplinks: + portgroup = f"{ESXI_PORTGROUP_VMKNIC}{uplink}" + if vmknic and portgroup not in self.portgroups: + self.add_portgroup(portgroup) + self.set_portgroup_uplinks(portgroup, [uplink]) + self.owner.add_vmknic(portgroup=portgroup, mtu=mtu) + + if self.mtu != mtu: + self.set_mtu(mtu) + + for vmknic in self.owner.vmknic: + if vmknic.portgroup in self.portgroups: + if vmknic.mtu != mtu: + vmknic.set_mtu(mtu) + + def restore_vswitch_default(self) -> None: + """Restore default vSwitch policy.""" + uplinks = ",".join(self.uplinks) + command = ( + f"esxcli network vswitch standard policy failover set " + f"-v {self.name} -a {uplinks} " + f"-b true -f link -l portid -n true -s ''" + ) + self.owner.execute_command(command) + command = f"esxcli network vswitch standard policy security set " f"-f false -m false -p false -v {self.name}" + self.owner.execute_command(command) + command = f"esxcli network vswitch standard policy shaping set -e false -v {self.name}" + self.owner.execute_command(command) + + def restore_portgroups_default(self) -> None: + """Restore default policy of portgroups.""" + for portgroup in self.portgroups: + self.set_portgroup_vlan(portgroup, 0) + command = f"esxcli network vswitch standard portgroup policy failover set -u -p {portgroup}" + self.owner.execute_command(command) + if portgroup.startswith(f"{ESXI_PORTGROUP_VMKNIC}vmnic"): + vmnic = portgroup[len(ESXI_PORTGROUP_VMKNIC) :] + self.set_portgroup_uplinks(portgroup, [vmnic]) + command = f"esxcli network vswitch standard portgroup policy security set -u -p {portgroup}" + self.owner.execute_command(command) + command = f"esxcli network vswitch standard portgroup policy shaping set -u -p {portgroup}" + self.owner.execute_command(command) + + def set_forged_transmit(self, name: str, enable: bool = True) -> None: + """ + Set forged transmit policy on portgroup. + + :param name: Name of portgroup + :param enable: Status of forged transmit parameter, Allow = True / Disallow = False + """ + command = f"esxcli network vswitch standard portgroup policy security set -p {name} -f {str(enable)}" + self.owner.execute_command(command) + + def change_ens_fpo_support(self, enable: bool, vds: str | None = None) -> None: + """ + Enable or disable FPO support. + + When vds is provided change support on given vds, otherwise change settings globally. + + :param enable: True - enable FPO support, False - disable FPO support + :param vds: portset name, retrieved with discover method + """ + states = {True: "enable", False: "disable"} + cmd = f"nsxdp-cli ens fpo set --{states[enable]}" + if vds: + cmd += f" -dvs {vds}" + + self.owner.execute_command(cmd) + + def set_mac_change_policy(self, portgroup_name: str, enabled: bool = False) -> None: + """ + Set MAC change policy on portgroup. 
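+
+        Example (illustrative):
+
+        >>> vswitch.set_mac_change_policy("ATNetwork", enabled=False)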
+ + :param portgroup_name: Name of portgroup + :param enabled: Status of mac change, Allow= True / Disallow = False + """ + command = f"esxcli network vswitch standard portgroup policy security set -p {portgroup_name} -m {enabled}" + self.owner.execute_command(command, custom_exception=VswitchError) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..1e6782a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +requires = [ + "setuptools>=80.4.0", + "wheel" +] +build-backend = "setuptools.build_meta" + +[tool.setuptools.dynamic] +dependencies = { file = ["requirements.txt"] } + +[project] +name = "mfd-esxi" +description = "Module for accessing and manipulating VMware products: ESXi, VCSA and NSX" +requires-python = ">=3.10, <3.14" +version = "3.1.0" +dynamic = ["dependencies"] +license-files = ["LICENSE.md", "AUTHORS.md"] +readme = {file = "README.md", content-type = "text/markdown"} + +[project.urls] +Homepage = "https://github.com/intel/mfd" +Repository = "https://github.com/intel/mfd-esxi" +Issues = "https://github.com/intel/mfd-esxi/issues" +Changelog = "https://github.com/intel/mfd-esxi/blob/main/CHANGELOG.md" + +[tool.setuptools.packages.find] +exclude = ["examples", "tests*", "sphinx-doc"] + +[tool.black] +line-length = 119 +exclude = ''' +( + /( + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | tests + | examples + )/ + | setup.py +) +''' + +[tool.coverage.run] +source_pkgs = ["mfd_esxi"] + +[tool.coverage.report] +exclude_also = ["if TYPE_CHECKING:"] \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..e8b5872 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,12 @@ +-r requirements-test.txt # Ensure tests dependencies are automatically included + +pydocstyle ~= 6.3.0 +flake8 +flake8-annotations +flake8-builtins +flake8-docstrings +black ~= 25.1.0 +flake8-black ~= 0.3.6 +click ~= 8.2.1 + +mfd-code-quality >= 1.2.0, < 2 \ No newline at end of file diff --git a/requirements-docs.txt b/requirements-docs.txt new file mode 100644 index 0000000..3012976 --- /dev/null +++ b/requirements-docs.txt @@ -0,0 +1,3 @@ +-r requirements.txt +sphinx +sphinx_rtd_theme_github_versions \ No newline at end of file diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..fa5e893 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,10 @@ +-r requirements.txt # Ensure tests dependencies are automatically included + +# Put dependencies required for testing the module here +pytest ~= 8.4 +pytest-mock ~= 3.14 +coverage ~= 7.3.0 + +vsphere-automation-sdk @ git+https://github.com/vmware/vsphere-automation-sdk-python@v8.0.3.0 + +mfd_typing diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..92901cb --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +pyVmomi >= 8.0.3.0.1 +packaging +mfd_connect >= 7.12.0 +mfd_common_libs>=1.11.0 +mfd_network_adapter >= 14.0.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..d61bfbe --- /dev/null +++ b/setup.cfg @@ -0,0 +1,56 @@ +[flake8] +max-line-length = 119 +exclude = + .venv + venv + examples + tests + run_tests.py +show-source = True +max-complexity = 10 +ignore = + # Docstrings in magic methods + D105 + # Blank line after docstring section header in docstrings + D412 + # Blank line after a section in docstrings + D413 + # Missing type annotation for self in method + ANN101 + # Missing type annotation for 
*args/**kwargs + ANN002 + ANN003 + # Missing return type annotation for special method + ANN204 + # Dynamically typed expressions (typing.Any) are disallowed + ANN401 + # Whitespace before ':' + E203 + # Line break before binary operator + W503 +per-file-ignores = + # F401 Module imported but unused + */__init__.py:F401 + # Missing docstring in public module + # Missing docstring in public class + # Missing docstring in public method + # Missing docstring in public function + # Missing docstring in public package + # Missing type annotation for function argument + # Missing type annotation for cls in classmethod + # Missing return type annotation for public function + # Missing return type annotation for protected function + # Missing return type annotation for secret function + # Missing return type annotation for staticmethod + # Missing return type annotation for classmethod + tests/*:D100,D101,D102,D103,D104,ANN001,ANN102,ANN201,ANN202,ANN203,ANN205,ANN206 + # Missing docstring in public package + tests/__init__.py:D104 + # Missing docstring in public package + tests/*/__init__.py:D104 + +[semantic_release] +version_variable = mfd_esxi/__version__.py:__version__ +version_source = tag +commit_parser = ci_scripts.ci_auto_versioning.commit_parser.parse_commit_message + diff --git a/sphinx-doc/Makefile b/sphinx-doc/Makefile new file mode 100644 index 0000000..58a7c43 --- /dev/null +++ b/sphinx-doc/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = MFD-Esxi +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/sphinx-doc/README.md b/sphinx-doc/README.md new file mode 100644 index 0000000..825e8ba --- /dev/null +++ b/sphinx-doc/README.md @@ -0,0 +1,13 @@ +# MFD-ESXI SPHINX DOCUMENTATION + +## HOW TO GENERATE DOCS +### 1. Download or use system embedded Python in version at least 3.7 +### 2. Create venv +- Create Python venv from MFD-Esxi requirements for Sphinx (`/requirements-docs.txt`) +- Link how to do this: `https://python.land/virtual-environments/virtualenv` +### 3. In Activated venv go to MFD-Esxi directory `/sphinx-doc` +### 4. Run command: +```shell +$ python generate_docs.py +``` +### 5. Open `/sphinx-doc/build/html/index.html` in Web browser to read documentation \ No newline at end of file diff --git a/sphinx-doc/conf.py b/sphinx-doc/conf.py new file mode 100644 index 0000000..039f2e0 --- /dev/null +++ b/sphinx-doc/conf.py @@ -0,0 +1,166 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +# MFD-Esxi documentation build configuration file, created by +# sphinx-quickstart on Fri Nov 24 14:52:29 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +"""Configure file for sphinx docs.""" + + +import os +import sys + +sys.path.append(os.path.abspath("..")) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "sphinx.ext.inheritance_diagram", +] + +autodoc_default_flags = ["members", "undoc-members", "private-members", "inherited-members", "show-inheritance"] +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ".rst" + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = "MFD-ESXi" +project_copyright = """Copyright (C) 2025 Intel Corporation +SPDX-License-Identifier: MIT""" +copyright = project_copyright # noqa +author = "Intel Corporation" + +# The full version, including alpha/beta/rc tags. +release = "" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = "en" + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +todo_include_todos = False + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "sphinx_rtd_theme_github_versions" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +html_theme_options = { + "logo_only": False, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, + "vcs_pageview_mode": "", + # Toc options + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "mfd-esxi-doc" + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "mfd-esxi.tex", "MFD-Esxi Documentation", "author", "manual"), +] + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "mfd-esxi", "MFD-Esxi Documentation", [author], 1)] + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "MFD-Esxi", + "MFD-Esxi Documentation", + author, + "MFD-Esxi", + "One line description of project.", + "Miscellaneous", + ), +] diff --git a/sphinx-doc/generate_docs.py b/sphinx-doc/generate_docs.py new file mode 100644 index 0000000..47b807f --- /dev/null +++ b/sphinx-doc/generate_docs.py @@ -0,0 +1,18 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +"""Generate sphinx docs.""" + +import os +import shutil +import logging + +from sphinx.ext import apidoc +from sphinx.cmd import build + + +apidoc.main(["-e", "-o", "mfd_esxi", os.path.join("..", "mfd_esxi")]) + +build.main(["-b", "html", ".", "build/html"]) + +logging.info("Cleaning folders from build process...") +shutil.rmtree("mfd_esxi") diff --git a/sphinx-doc/genindex.rst b/sphinx-doc/genindex.rst new file mode 100644 index 0000000..a50680d --- /dev/null +++ b/sphinx-doc/genindex.rst @@ -0,0 +1,4 @@ +.. This file is a placeholder and will be replaced + +Index +##### \ No newline at end of file diff --git a/sphinx-doc/index.rst b/sphinx-doc/index.rst new file mode 100644 index 0000000..630c2a2 --- /dev/null +++ b/sphinx-doc/index.rst @@ -0,0 +1,21 @@ +Welcome to MFD-Esxi's documentation! +====================================== + +.. toctree:: + :caption: Home + + Documentation Home + + +.. toctree:: + :caption: Main Documentation + :maxdepth: 4 + + MFD-Esxi Documentation + + +.. toctree:: + :caption: Appendix + + Python Module Index + Index \ No newline at end of file diff --git a/sphinx-doc/py-modindex.rst b/sphinx-doc/py-modindex.rst new file mode 100644 index 0000000..4d1ecb4 --- /dev/null +++ b/sphinx-doc/py-modindex.rst @@ -0,0 +1,4 @@ +.. 
This file is a placeholder and will be replaced + +Python Module Index +##### \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..8ef2fad --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..8ef2fad --- /dev/null +++ b/tests/unit/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT diff --git a/tests/unit/test_mfd_esxi/__init__.py b/tests/unit/test_mfd_esxi/__init__.py new file mode 100644 index 0000000..8ef2fad --- /dev/null +++ b/tests/unit/test_mfd_esxi/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT diff --git a/tests/unit/test_mfd_esxi/conftest.py b/tests/unit/test_mfd_esxi/conftest.py new file mode 100644 index 0000000..e276e5d --- /dev/null +++ b/tests/unit/test_mfd_esxi/conftest.py @@ -0,0 +1,35 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +from .fixtures import ( + host, + host_esxcfg_vswitch_1, + host_esxcfg_vswitch_2, + host_esxcfg_vswitch_3, + host_esxcfg_vmknic_1, + host_esxcfg_vmknic_2, + host_esxcfg_vmknic_3, + host_esxcfg_nics_1, + host_esxcfg_nics_2, + host_gold_vmx, + host_gold_vmx_ptp_old_esxi, + host_gold_vmx_ptp_new_esxi, + host_gold_vmx_ptp_new_esxi_hex, + host_getallvms, + host_api, + host_api_with_cert, + host_api_with_vf_info_and_interfaces, + vcenter, + datacenter, + cluster, + cluster_host, + standalone_host, + datastore, + virtual_adapter, + virtual_machine, + dswitch, + dsportgroup, + dsuplink, + vswitch, + vsportgroup, + vcenter_named_entities, +) diff --git a/tests/unit/test_mfd_esxi/fixtures.py b/tests/unit/test_mfd_esxi/fixtures.py new file mode 100644 index 0000000..28ecbf4 --- /dev/null +++ b/tests/unit/test_mfd_esxi/fixtures.py @@ -0,0 +1,677 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +from ipaddress import ip_address +from textwrap import dedent + +import pytest +from mfd_connect import RPyCConnection +from mfd_connect.base import ConnectionCompletedProcess +from mfd_network_adapter.network_interface import NetworkInterface +from mfd_typing import OSName +from mfd_typing.network_interface import InterfaceInfo +from mfd_typing.pci_address import PCIAddress +from pyVmomi import vim + +from mfd_esxi.host import ESXiHypervisor +from mfd_esxi.host_api import ESXiHostAPI +from mfd_esxi.vcenter.cluster import Cluster +from mfd_esxi.vcenter.datacenter import Datacenter +from mfd_esxi.vcenter.datastore import Datastore +from mfd_esxi.vcenter.distributed_switch.dswitch import DSwitch +from mfd_esxi.vcenter.distributed_switch.portgroup import DSPortgroup +from mfd_esxi.vcenter.distributed_switch.uplink import DSUplink +from mfd_esxi.vcenter.host import Host +from mfd_esxi.vcenter.vcenter import VCenter +from mfd_esxi.vcenter.virtual_adapter import VirtualAdapter +from mfd_esxi.vcenter.virtual_machine import VirtualMachine +from mfd_esxi.vcenter.virtual_switch.portgroup import VSPortgroup +from mfd_esxi.vcenter.virtual_switch.vswitch import VSwitch + + +@pytest.fixture() +def host(mocker): + connection = mocker.create_autospec(RPyCConnection) + connection.get_os_name.return_value = OSName.ESXI + host = ESXiHypervisor(connection=connection) + host._ip = ip_address("172.31.0.82") + host.connection.execute_command.return_value = ConnectionCompletedProcess(return_code=0, 
args="command", stdout="") + return host + + +@pytest.fixture() +def host_esxcfg_vswitch_1(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vswitch_1 + ) + return host + + +@pytest.fixture() +def host_esxcfg_vswitch_2(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vswitch_2 + ) + return host + + +@pytest.fixture() +def host_esxcfg_vswitch_3(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vswitch_3 + ) + return host + + +@pytest.fixture() +def host_esxcfg_vmknic_1(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vmknic_1 + ) + return host + + +@pytest.fixture() +def host_esxcfg_vmknic_2(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vmknic_2 + ) + return host + + +@pytest.fixture() +def host_esxcfg_vmknic_3(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_vmknic_3 + ) + return host + + +@pytest.fixture() +def host_esxcfg_nics_1(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_nics_1 + ) + return host + + +@pytest.fixture() +def host_esxcfg_nics_2(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=esxcfg_nics_2 + ) + return host + + +@pytest.fixture() +def host_gold_vmx(host): + host.connection.execute_command.side_effect = [ + ConnectionCompletedProcess(return_code=0, args="command", stdout=base_vmx), + ConnectionCompletedProcess(return_code=0, args="command", stdout=primary_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=parent_vmdk), + ] + return host + + +@pytest.fixture() +def host_gold_vmx_ptp_old_esxi(host): + host.connection.execute_command.side_effect = [ + ConnectionCompletedProcess(return_code=0, args="command", stdout=base_vmx), + ConnectionCompletedProcess(return_code=0, args="command", stdout=primary_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=parent_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=" 0 false 00000:050:01.0 -"), + ConnectionCompletedProcess( + return_code=0, + args="command", + stdout="0000:32:01.0 8086:1889 8086:0000 255/ / @ P pciPassthru", + ), + ] + return host + + +@pytest.fixture() +def host_gold_vmx_ptp_new_esxi(host): + host.connection.execute_command.side_effect = [ + ConnectionCompletedProcess(return_code=0, args="command", stdout=base_vmx), + ConnectionCompletedProcess(return_code=0, args="command", stdout=primary_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=parent_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=" 6 false 0000:32:01.0 -"), + ConnectionCompletedProcess( + return_code=0, + args="command", + stdout="0000:32:01.0 8086:1889 8086:0000 255/ / @ P pciPassthru", + ), + ] + return host + + +@pytest.fixture() +def host_gold_vmx_ptp_new_esxi_hex(host): + host.connection.execute_command.side_effect = [ + ConnectionCompletedProcess(return_code=0, args="command", stdout=base_vmx), + ConnectionCompletedProcess(return_code=0, args="command", stdout=primary_vmdk), + 
ConnectionCompletedProcess(return_code=0, args="command", stdout=parent_vmdk), + ConnectionCompletedProcess(return_code=0, args="command", stdout=" 6 false 0000:b1:01.0 -"), + ConnectionCompletedProcess( + return_code=0, + args="command", + stdout="0000:b1:01.0 8086:1889 8086:0000 255/ / @ P pciPassthru", + ), + ] + return host + + +@pytest.fixture() +def host_getallvms(host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=getallvms + ) + return host + + +@pytest.fixture() +def host_api(mocker, monkeypatch): + host_api = ESXiHostAPI("172.31.0.56", "root", "secret") + host_api_content = mocker.create_autospec(vim.ServiceInstanceContent) + host_api._ESXiHostAPI__content = host_api_content + host_api._ESXiHostAPI__service = True + return host_api + + +@pytest.fixture() +def host_api_with_cert(mocker, host_api): + host_api._ESXiHostAPI__content.rootFolder = object() + host_api._ESXiHostAPI__content.sessionManager = mocker.create_autospec(vim.SessionManager) + host_api._ESXiHostAPI__content.sessionManager.currentSession = True + host_api._service = True + fake_host_config = mocker.create_autospec(vim.host.ConfigInfo) + fake_host_config.certificate = hostapi_cert_bytes + + fake_host = mocker.create_autospec(vim.HostSystem) + fake_host.config = fake_host_config + + fake_view = mocker.create_autospec(vim.view.ContainerView) + fake_view.view = [fake_host] + + host_api._ESXiHostAPI__content.viewManager = mocker.create_autospec(vim.view.ViewManager) + host_api._ESXiHostAPI__content.viewManager.CreateContainerView = lambda x, y, z: fake_view + + return host_api + + +@pytest.fixture() +def vcenter(): + vcenter = VCenter("172.31.12.144", "user", "secret") + return vcenter + + +@pytest.fixture() +def datacenter(vcenter): + datacenter = Datacenter("PY-Datacenter", vcenter) + return datacenter + + +@pytest.fixture() +def cluster(datacenter): + cluster = Cluster("PY-Cluster", datacenter) + return cluster + + +@pytest.fixture() +def standalone_host(datacenter): + host = Host("PY-StandaloneHost", datacenter) + return host + + +@pytest.fixture() +def cluster_host(datacenter, cluster): + host = Host("PY-ClusterHost", datacenter, cluster) + return host + + +@pytest.fixture() +def datastore(standalone_host): + datastore = Datastore("PY-Datastore", standalone_host) + return datastore + + +@pytest.fixture() +def virtual_adapter(standalone_host): + virtual_adapter = VirtualAdapter("PY-VirtualAdapter", standalone_host) + return virtual_adapter + + +@pytest.fixture() +def virtual_machine(standalone_host): + virtual_machine = VirtualMachine("PY-VirtualMachine", standalone_host) + return virtual_machine + + +@pytest.fixture() +def dswitch(datacenter): + dswitch = DSwitch("PY-DSwitch", datacenter) + return dswitch + + +@pytest.fixture() +def dsportgroup(dswitch): + dsportgroup = DSPortgroup("PY-DSPortgroup", dswitch) + return dsportgroup + + +@pytest.fixture() +def dsuplink(dswitch): + dsuplink = DSUplink("PY-DSUplink", 12, dswitch) + return dsuplink + + +@pytest.fixture() +def vswitch(standalone_host): + vswitch = VSwitch("PY-VSwitch", standalone_host) + return vswitch + + +@pytest.fixture() +def vsportgroup(vswitch): + vsportgroup = VSPortgroup("PY-VSPortgroup", vswitch) + return vsportgroup + + +@pytest.fixture() +def vcenter_named_entities(): + class DummyNamedThing: + def __init__(self, name): + self._name = name + + def __repr__(self): + return self._name + + @property + def name(self): + return self._name + + names = ("Named-1", "Named-2", 
"Named-3") + return [DummyNamedThing(n) for n in names] + + +@pytest.fixture() +def host_api(mocker, monkeypatch): + host_api = ESXiHostAPI("172.31.0.56", "root", "secret") + host_api_content = mocker.create_autospec(vim.ServiceInstanceContent) + host_api._ESXiHostAPI__content = host_api_content + host_api._ESXiHostAPI__service = True + return host_api + + +@pytest.fixture() +def host_api_with_cert(mocker, host_api): + host_api._ESXiHostAPI__content.rootFolder = object() + host_api._ESXiHostAPI__content.sessionManager = mocker.create_autospec(vim.SessionManager) + host_api._ESXiHostAPI__content.sessionManager.currentSession = True + host_api._service = True + fake_host_config = mocker.create_autospec(vim.host.ConfigInfo) + fake_host_config.certificate = hostapi_cert_bytes + + fake_host = mocker.create_autospec(vim.HostSystem) + fake_host.config = fake_host_config + + fake_view = mocker.create_autospec(vim.view.ContainerView) + fake_view.view = [fake_host] + + host_api._ESXiHostAPI__content.viewManager = mocker.create_autospec(vim.view.ViewManager) + host_api._ESXiHostAPI__content.viewManager.CreateContainerView = lambda x, y, z: fake_view + + return host_api + + +@pytest.fixture() +def host_api_with_vf_info_and_interfaces(mocker, host_api): + pci_addresses = [PCIAddress(0, 0, 0, 0), PCIAddress(0, 0, 0, 1)] + names = ["vmnic4", "vmnic5"] + _connection = mocker.create_autospec(RPyCConnection) + _connection.get_os_name.return_value = OSName.ESXI + + interfaces = [] + for pci_address, name in zip(pci_addresses, names): + interfaces.append( + NetworkInterface( + connection=_connection, + interface_info=InterfaceInfo(pci_address=pci_address, name=name), + ) + ) + + host_api._ESXiHostAPI__content.rootFolder = object() + host_api._ESXiHostAPI__content.sessionManager = mocker.create_autospec(vim.SessionManager) + host_api._ESXiHostAPI__content.sessionManager.currentSession = True + host_api._service = True + + fake_pci_passthru_info_list = [] + vfs_info = [ + { + "maxVirtualFunctionSupported": 128, + "numVirtualFunction": 8, + "numVirtualFunctionRequested": 8, + "sriovEnabled": True, + }, + { + "maxVirtualFunctionSupported": 128, + "numVirtualFunction": 0, + "numVirtualFunctionRequested": 0, + "sriovEnabled": False, + }, + ] + + for pci_address, vf_info in zip(pci_addresses, vfs_info): + pci_passthru_info = mocker.create_autospec(vim.host.SriovInfo) + pci_passthru_info.id = pci_address.lspci + pci_passthru_info.maxVirtualFunctionSupported = vf_info["maxVirtualFunctionSupported"] + pci_passthru_info.numVirtualFunction = vf_info["numVirtualFunction"] + pci_passthru_info.numVirtualFunctionRequested = vf_info["numVirtualFunctionRequested"] + pci_passthru_info.sriovEnabled = vf_info["sriovEnabled"] + + fake_pci_passthru_info_list.append(pci_passthru_info) + + other_pci_passthru_info = mocker.create_autospec(vim.host.PciPassthruInfo) + other_pci_address = PCIAddress(1, 1, 1, 2) + other_pci_passthru_info.id = other_pci_address.lspci + fake_pci_passthru_info_list.append(other_pci_passthru_info) + + fake_pci_passthru_system = mocker.create_autospec(vim.host.PciPassthruSystem) + fake_pci_passthru_system.pciPassthruInfo = fake_pci_passthru_info_list + + fake_config_manager = mocker.create_autospec(vim.host.ConfigManager) + fake_config_manager.pciPassthruSystem = fake_pci_passthru_system + + fake_host = mocker.create_autospec(vim.HostSystem) + fake_host.configManager = fake_config_manager + + fake_view = mocker.create_autospec(vim.view.ContainerView) + fake_view.view = [fake_host] + + 
host_api._ESXiHostAPI__content.viewManager = mocker.create_autospec(vim.view.ViewManager) + host_api._ESXiHostAPI__content.viewManager.CreateContainerView = lambda x, y, z: fake_view + + return host_api, interfaces + + +esxcfg_vswitch_1 = dedent( + """\ + Switch Name Num Ports Used Ports Configured Ports MTU Uplinks + vSwitch0 6690 5 128 1500 vmnic0 + + PortGroup Name VLAN ID Used Ports Uplinks + ATmng 0 0 vmnic0 + VM Network 0 1 vmnic0 + Management Network 0 1 vmnic0 + + DVS Name Num Ports Used Ports Configured Ports MTU Uplinks + DSwitch_063LongName 6690 4 512 9000 vmnic3 + + DVPort ID In Use Client + 0 1 vmnic3 + 9 1 vmk1 +""" +) + +esxcfg_vswitch_2 = dedent( + """\ + Switch Name Num Ports Used Ports Configured Ports MTU Uplinks + ATvSwitchLongName 8570 8 128 1500 vmnic10 + + PortGroup Name VLAN ID Used Ports Uplinks + ATNetwork 0 4 vmnic10 + ATvmnic10 0 1 vmnic10 + + Switch Name Num Ports Used Ports Configured Ports MTU Uplinks + vSwitch0 8570 6 128 1500 vmnic4 + + PortGroup Name VLAN ID Used Ports Uplinks + ATmng 0 1 vmnic4 + VM Network 0 1 vmnic4 + Management Network 0 1 vmnic4 +""" +) + +esxcfg_vswitch_3 = dedent( + """\ + DVS Name Num Ports Used Ports Configured Ports MTU Uplinks + dvSwitch 256 3 256 1500 vmnic9,vmnic8 + + DVPort ID In Use Client + 0 1 vmnic8 + 1 1 vmnic9 +""" +) + +esxcfg_vmknic_1 = dedent( + """\ + Interface Port Group/DVPort/Opaque Network IP Family IP Address Netmask Broadcast MAC Address MTU TSO MSS Enabled Type NetStack + vmk0 Management Network IPv4 172.31.0.82 255.255.0.0 172.31.255.255 48:df:37:aa:bb:cc 1500 65535 true DHCP defaultTcpipStack + vmk0 Management Network IPv6 fe80::4adf:37ff:fe07:1f14 64 48:df:37:aa:bb:cc 1500 65535 true STATIC, PREFERRED defaultTcpipStack + vmk1 ATvmnic10 IPv4 1.1.1.1 255.0.0.0 1.255.255.255 00:50:56:aa:bb:cc 1500 65535 true STATIC defaultTcpipStack + vmk1 ATvmnic10 IPv6 fe80::250:56ff:fe66:5642 64 00:50:56:aa:bb:cc 1500 65535 true STATIC, PREFERRED defaultTcpipStack +""" +) + +esxcfg_vmknic_2 = dedent( + """\ + Interface Port Group/DVPort/Opaque Network IP Family IP Address Netmask Broadcast MAC Address MTU TSO MSS Enabled Type NetStack + vmk0 Management Network IPv4 172.31.0.82 255.255.0.0 172.31.255.255 48:df:37:aa:bb:cc 1500 65535 true DHCP defaultTcpipStack + vmk0 Management Network IPv6 fe80::4adf:37ff:fe07:1f14 64 48:df:37:aa:bb:cc 1500 65535 true STATIC, PREFERRED defaultTcpipStack + vmk1 ATvmnic10 IPv4 1.1.1.1 255.0.0.0 1.255.255.255 00:50:56:aa:bb:cc 1500 65535 true STATIC defaultTcpipStack + vmk1 ATvmnic10 IPv6 fe80::250:56ff:fe66:5642 64 00:50:56:aa:bb:cc 1500 65535 true STATIC, PREFERRED defaultTcpipStack + vmk2 PGvmnic0 IPv4 N/A N/A N/A 00:50:56:aa:bb:cc 9000 65535 true NONE defaultTcpipStack + vmk2 PGvmnic0 IPv6 fe80::250:56ff:fe63:a0ef 64 00:50:56:aa:bb:cc 9000 65535 true STATIC, PREFERRED defaultTcpipStack +""" +) + +esxcfg_vmknic_3 = dedent( + """\ + Interface Port Group/DVPort/Opaque Network IP Family IP Address Netmask Broadcast MAC Address MTU TSO MSS Enabled Type NetStack + vmk0 Management Network IPv4 172.31.0.103 255.255.0.0 172.31.255.255 b0:7b:25:aa:bb:cc 1500 65535 true DHCP defaultTcpipStack + vmk0 Management Network IPv6 fe80::b27b:25ff:fede:7484 64 b0:7b:25:aa:bb:cc 1500 65535 true STATIC, PREFERRED defaultTcpipStack + vmk1 16 IPv4 15.1.1.1 255.0.0.0 15.255.255.255 00:50:aa:bb:cc:5e 1500 65535 true STATIC defaultTcpipStack + vmk1 16 IPv6 fe80::250:56ff:fe6f:1b5e 64 00:50:aa:bb:cc:5e 1500 65535 true STATIC, PREFERRED defaultTcpipStack + vmk1 16 IPv6 3001:15::1:1:1 64 00:50:aa:bb:cc:5e 1500 65535 
true STATIC, PREFERRED defaultTcpipStack + vmk10 2764417a-a5e8-4ae5-b5f8-b5c163648066 IPv4 14.1.1.1 255.0.0.0 14.255.255.255 00:50:aa:bb:cc:4c 1700 65535 true STATIC vxlan + vmk10 2764417a-a5e8-4ae5-b5f8-b5c163648066 IPv6 fe80::250:56ff:fe6b:7a4c 64 00:50:aa:bb:cc:4c 1700 65535 true STATIC, PREFERRED vxlan + vmk10 2764417a-a5e8-4ae5-b5f8-b5c163648066 IPv6 3001:14::1:1:1 64 00:50:aa:bb:cc:4c 1700 65535 true STATIC, PREFERRED vxlan + vmk11 c1e2b8b8-8fed-4e7e-a87f-3d438d99ee0c IPv4 11.1.1.1 255.0.0.0 11.255.255.255 00:50:aa:bb:cc:a8 1700 65535 true STATIC vxlan + vmk11 c1e2b8b8-8fed-4e7e-a87f-3d438d99ee0c IPv6 fe80::250:56ff:fe62:fda8 64 00:50:aa:bb:cc:a8 1700 65535 true STATIC, PREFERRED vxlan + vmk11 c1e2b8b8-8fed-4e7e-a87f-3d438d99ee0c IPv6 3001:11::1:1:1 64 00:50:aa:bb:cc:a8 1700 65535 true STATIC, PREFERRED vxlan + vmk12 90e27848-f8d1-4df0-ace1-23d78ce5d85d IPv4 1.1.1.1 255.0.0.0 1.255.255.255 00:50:aa:bb:cc:bd 1700 65535 true STATIC vxlan + vmk12 90e27848-f8d1-4df0-ace1-23d78ce5d85d IPv6 fe80::250:56ff:fe64:28bd 64 00:50:aa:bb:cc:bd 1700 65535 true STATIC, PREFERRED vxlan + vmk12 90e27848-f8d1-4df0-ace1-23d78ce5d85d IPv6 3001:1::1:1:1 64 00:50:aa:bb:cc:bd 1700 65535 true STATIC, PREFERRED vxlan + vmk50 b8200bd5-c046-40f8-8caa-86647c65dda6 IPv4 8.1.1.1 255.0.0.0 8.255.255.255 00:50:aa:bb:cc:30 1700 65535 true STATIC hyperbus +""" +) + +esxcfg_nics_1 = dedent( + """\ + Name PCI Driver Link Speed Duplex MAC Address MTU Description + vmnic0 0000:4b:00.0 icen Up 25000Mbps Full 68:05:ca:aa:bb:cc 1500 Intel(R) Ethernet Controller E810-C for SFP + vmnic1 0000:4b:00.1 icen Up 25000Mbps Full 68:05:ca:aa:bb:cd 1500 Intel(R) Ethernet Controller E810-C for SFP + vmnic10 0000:ca:00.0 i40en Up 10000Mbps Full 3c:fd:aa:bb:cc:e0 1500 Intel(R) Ethernet Controller X710 for 10GbE SFP+ + vmnic11 0000:ca:00.1 i40en Up 10000Mbps Full 3c:fd:aa:bb:cc:e1 1500 Intel(R) Ethernet Controller X710 for 10GbE SFP+ + vmnic12 0000:ca:00.2 i40en Up 10000Mbps Full 3c:fd:aa:bb:cc:e2 1500 Intel(R) Ethernet Controller X710 for 10GbE SFP+ + vmnic13 0000:ca:00.3 i40en Up 10000Mbps Full 3c:fd:aa:bb:cc:e3 1500 Intel(R) Ethernet Controller X710 for 10GbE SFP+ + vmnic14 0000:98:00.0 bnxtnet Down 0Mbps Half 84:16:aa:bb:cc:f0 1500 Broadcom BCM57416 NetXtreme-E 10GBASE-T RDMA Ethernet Controller + vmnic15 0000:98:00.1 bnxtnet Down 0Mbps Half 84:16:aa:bb:cc:f1 1500 Broadcom BCM57416 NetXtreme-E 10GBASE-T RDMA Ethernet Controller + vmnic2 0000:4b:00.2 icen Up 25000Mbps Full 68:05:aa:bb:cc:c2 1500 Intel(R) Ethernet Controller E810-C for SFP + vmnic3 0000:4b:00.3 icen Up 25000Mbps Full 68:05:aa:bb:cc:c3 1500 Intel(R) Ethernet Controller E810-C for SFP + vmnic4 0000:31:00.0 igbn Up 1000Mbps Full 48:df:37:aa:bb:cc 1500 Intel(R) I350 Gigabit Network Connection + vmnic5 0000:31:00.1 igbn Down 0Mbps Half 48:df:37:aa:bb:cd 1500 Intel(R) I350 Gigabit Network Connection + vmnic6 0000:31:00.2 igbn Down 0Mbps Half 48:df:37:aa:bb:ce 1500 Intel(R) I350 Gigabit Network Connection + vmnic7 0000:31:00.3 igbn Down 0Mbps Half 48:df:37:aa:bb:cf 1500 Intel(R) I350 Gigabit Network Connection + vmnic8 0000:b1:00.0 ixgben Up 10000Mbps Full 90:e2:aa:bb:cc:34 1500 Intel(R) 82599 10 Gigabit Dual Port Network Connection + vmnic9 0000:b1:00.1 ixgben Up 10000Mbps Full 90:e2:aa:bb:cc:35 1500 Intel(R) 82599 10 Gigabit Dual Port Network Connection +""" +) + +esxcfg_nics_2 = dedent( + """\ + Name PCI Driver Link Speed Duplex MAC Address MTU Description + vmnic0 0000:e1:00.0 ntg3 Up 1000Mbps Full 70:b5:aa:bb:cc:ce 1500 Broadcom Corporation NetXtreme BCM5720 Gigabit 
Ethernet + vmnic1 0000:e1:00.1 ntg3 Down 0Mbps Half 70:b5:aa:bb:cc:cf 1500 Broadcom Corporation NetXtreme BCM5720 Gigabit Ethernet + vmnic2 0000:24:00.0 icen Down 100000Mbps Full b4:96:aa:bb:cc:f8 1500 Intel(R) Ethernet Controller E810-C for QSFP + vmnic3 0000:21:00.0 icen Up 100000Mbps Full b4:96:aa:bb:cc:fc 1500 Intel(R) Ethernet Controller E810-C for QSFP +""" +) + +base_vmx = dedent( + """\ + .encoding = "UTF-8" + config.version = "8" + cpuid.coresPerSocket = "4" + displayName = "Base_R91" + ethernet0.networkName = "ATmng" + ethernet0.pciSlotNumber = "160" + ethernet0.present = "TRUE" + ethernet0.virtualDev = "vmxnet3" + firmware = "efi" + floppy0.present = "FALSE" + guestOS = "other-64" + hpet0.present = "TRUE" + memSize = "2048" + messageBus.tunnelEnabled = "FALSE" + mks.enable3d = "TRUE" + numvcpus = "4" + pciBridge0.pciSlotNumber = "17" + pciBridge0.present = "TRUE" + pciBridge4.functions = "8" + pciBridge4.pciSlotNumber = "21" + pciBridge4.present = "TRUE" + pciBridge4.virtualDev = "pcieRootPort" + pciBridge5.functions = "8" + pciBridge5.pciSlotNumber = "22" + pciBridge5.present = "TRUE" + pciBridge5.virtualDev = "pcieRootPort" + pciBridge6.functions = "8" + pciBridge6.pciSlotNumber = "23" + pciBridge6.present = "TRUE" + pciBridge6.virtualDev = "pcieRootPort" + pciBridge7.functions = "8" + pciBridge7.pciSlotNumber = "24" + pciBridge7.present = "TRUE" + pciBridge7.virtualDev = "pcieRootPort" + replay.supported = "false" + sata0.pciSlotNumber = "33" + sata0.present = "TRUE" + sched.cpu.affinity = "all" + sched.cpu.latencySensitivity = "normal" + sched.cpu.min = "0" + sched.cpu.shares = "normal" + sched.cpu.units = "mhz" + sched.mem.min = "2048" + sched.mem.minSize = "2048" + sched.mem.pin = "TRUE" + sched.mem.shares = "normal" + sched.scsi0:0.shares = "normal" + sched.scsi0:0.throughputCap = "off" + scsi0.pciSlotNumber = "160" + scsi0.present = "TRUE" + scsi0.virtualDev = "lsisas1068" + scsi0:0.deviceType = "scsi-hardDisk" + scsi0:0.fileName = "Base_R91-000001.vmdk" + scsi0:0.present = "TRUE" + softPowerOff = "TRUE" + svga.present = "TRUE" + svga.vramSize = "8388608" + toolScripts.afterPowerOn = "TRUE" + toolScripts.afterResume = "TRUE" + toolScripts.beforePowerOff = "TRUE" + toolScripts.beforeSuspend = "TRUE" + tools.guest.desktop.autolock = "FALSE" + tools.syncTime = "FALSE" + tools.upgrade.policy = "manual" + virtualHW.productCompatibility = "hosted" + virtualHW.version = "13" + vmci.filter.enable = "true" + vmci0.pciSlotNumber = "32" + vmci0.present = "TRUE" + vmotion.checkpointFBSize = "8388608" +""" +) + +primary_vmdk = dedent( + """\ + # Disk DescriptorFile + version=1 + CID=3729b687 + parentCID=3729b687 + createType="seSparse" + parentFileNameHint="Base_R91.vmdk" + # Extent description + RW 41943040 SESPARSE "Base_R91-000001-sesparse.vmdk" + + # The Disk Data Base + #DDB + + ddb.encoding = "UTF-8" + ddb.grain = "8" + ddb.longContentID = "f6371048ed90dd02a7e9ded6fffffffe" +""" +) + +parent_vmdk = dedent( + """\ + # Disk DescriptorFile + version=1 + CID=3729b687 + parentCID=ffffffff + createType="vmfs" + + # Extent description + RW 41943040 VMFS "Base_R91-flat.vmdk" + + # The Disk Data Base + #DDB + + ddb.adapterType = "ide" + ddb.deletable = "true" + ddb.encoding = "UTF-8" + ddb.geometry.cylinders = "41610" + ddb.geometry.heads = "16" + ddb.geometry.sectors = "63" + ddb.longContentID = "f6371048ed90dd02a7e9ded6fffffffe" + ddb.thinProvisioned = "1" + ddb.uuid = "60 00 C2 9f 9a b0 9b 9c-13 55 b8 11 fd 2c 92 9b" + ddb.virtualHWVersion = "6" +""" +) + +getallvms = dedent( + """\ 
+Vmid Name File Guest OS Version Annotation +1 AT_ESXI_050 [datastore_050] AT_ESXI/AT_ESXI.vmx ubuntu64Guest vmx-11 +24 Test Test [Test] [Test] [datastore_050_vmfs6] Test/Test.vmx otherGuest64 vmx-17 +""" +) + +hostapi_cert_bytes = list( + dedent( + """\ +-----BEGIN CERTIFICATE----- +MIIDeDCCAmCgAwIBAgIULSI68CT+a61Fzi5UifInXs8SwgcwDQYJKoZIhvcNAQEL +BQAwZzELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM +DVNhbiBGcmFuY2lzY28xEzARBgNVBAoMCk15IENvbXBhbnkxFjAUBgNVBAMMDW15 +Y29tcGFueS5jb20wHhcNMjUwNzA5MjEzMTA0WhcNMjYwNzA5MjEzMTA0WjBnMQsw +CQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZy +YW5jaXNjbzETMBEGA1UECgwKTXkgQ29tcGFueTEWMBQGA1UEAwwNbXljb21wYW55 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZyFhfQZxmJgHnB +5IgQQNHFRIRG0fcIIOscmQdsPAsFEoABAVWZMDBllVbyrzRm5yH08edL7d/bR2LV +OjsKTO6dD77hAEcXLU6D0byF4GLunky0XYfA+8kdF9RUUZLJY/Q4aNe2rswdB7eB +zSO0I4bOBIeOb5DfOK/rMYUHJzWHNOYUUf2w4H9p06wKAnX22gnUKIuDMOZ9D56Y +E62W1LMkVOgD5mqDN+oOxSR40M03gHSEk01H3biJJjgbvKD0VLEcJTyO7cD1TLPe +AlhyNGIW885IKzIBXi0zSwRD+qK6sJAHock2WkEh1fGzJW4K1hMsy0NuzzWzNWsj +OvXCfm8CAwEAAaMcMBowGAYDVR0RBBEwD4INbXljb21wYW55LmNvbTANBgkqhkiG +9w0BAQsFAAOCAQEAuqOaW3JONXZaN7DRrj7mzJON1Mviqi+sBag3yYs1YYL4/qxd +sukwbnSvLD6rGW8w9Ez/6K16dkLo4lMy3IsOMoecMrohDnDvtYxmcPmDknUjvPON +Bk5DAaaC7paIT0zcZ/UzZbd5MbJWPhggmcFGUVTl2ftsVb1jVm5O/sMaV785Y9Cd ++tEjfxfFmJ3WnInjElHTa16ZJreRPxGnUfBLonr7GUflMe+15C3CVJXgBxUUCvR1 +ygm1smzjqu67KzXYAEibj4HBvlEtpOequkcAp6oD1L22OLXq4LH9DRkr2V2WKi3y +zwtSfd09AbWPe53xxdYlvsniRi1vaB3El+Zn7Q== +-----END CERTIFICATE----- + """ + ).encode("UTF-8") +) diff --git a/tests/unit/test_mfd_esxi/test_esxi_version.py b/tests/unit/test_mfd_esxi/test_esxi_version.py new file mode 100644 index 0000000..5ef5b0a --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_esxi_version.py @@ -0,0 +1,37 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +import pytest +from packaging.version import Version + +from mfd_esxi.esxi_version import ESXiVersion, ESXiVersionException +from mfd_connect.base import ConnectionCompletedProcess + + +class TestESXiVersion: + output1 = "VMware ESXi 8.0.0 build-20513097" + output2 = "VMware ESXi 7.0.3 build-19193900" + output3 = "VMware ESXi 7.0.1 build-16850804" + output4 = "VMware ESXi 7.0 GA" + + def test_version_1(self): + version = ESXiVersion(self.output1) + assert version.version == Version("8.0.0") + assert version.build == 20513097 + + def test_version_2(self): + version = ESXiVersion(self.output2) + assert version.version == Version("7.0.3") + assert version.build == 19193900 + + def test_discover(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=self.output3 + ) + version = ESXiVersion.discover(host) + assert version.version == Version("7.0.1") + assert version.build == 16850804 + + def test_version_assert(self): + with pytest.raises(ESXiVersionException): + ESXiVersion(self.output4) diff --git a/tests/unit/test_mfd_esxi/test_host.py b/tests/unit/test_mfd_esxi/test_host.py new file mode 100644 index 0000000..3dfdd03 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_host.py @@ -0,0 +1,396 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +from textwrap import dedent + +import pytest +from packaging.version import Version +from ipaddress import ip_interface +from unittest.mock import MagicMock + +from mfd_connect.base import ConnectionCompletedProcess +from mfd_network_adapter.network_interface.feature.virtualization.data_structures import ( + VFInfo, 
+) +from mfd_esxi.exceptions import ESXiRuntimeError +from mfd_esxi.host import IntnetCliVersion +from mfd_connect import RPyCConnection +from mfd_typing import OSName, PCIAddress +from mfd_network_adapter.network_interface.esxi import ESXiNetworkInterface +from mfd_typing.network_interface import InterfaceInfo + + +class TestESXiHypervisor: + @pytest.fixture() + def interface(self, mocker): + pci_address = PCIAddress(0, 1, 0, 1) + name = "vmnic1" + _connection = mocker.create_autospec(RPyCConnection) + _connection.get_os_name.return_value = OSName.ESXI + + interface = ESXiNetworkInterface( + connection=_connection, + interface_info=InterfaceInfo(pci_address=pci_address, name=name), + ) + mocker.stopall() + return interface + + def test_initialize_version(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="VMware ESXi 8.0.0 build-20513097" + ) + host.initialize_version() + assert host.esxi_version.version == Version("8.0.0") + assert host.esxi_version.build == 20513097 + + def test_initialize_vswitch(self, host_esxcfg_vswitch_2): + host_esxcfg_vswitch_2.initialize_vswitch() + assert len(host_esxcfg_vswitch_2.vswitch) == 2 + + def test_initialize_vmknic(self, host_esxcfg_vmknic_1): + host_esxcfg_vmknic_1.initialize_vmknic() + assert len(host_esxcfg_vmknic_1.vmknic) == 2 + + def test_initialize_mng(self, host_esxcfg_vmknic_1): + host_esxcfg_vmknic_1.initialize_vmknic() + host_esxcfg_vmknic_1.initialize_mng() + assert host_esxcfg_vmknic_1.mng_vmknic.name == "vmk0" + assert host_esxcfg_vmknic_1.mng_ip == ip_interface("172.31.0.82/16") + + def test_add_vswitch(self, host): + host.add_vswitch("test") + assert len(host.vswitch) == 1 + assert host.vswitch[0].name == "test" + + def test_del_vswitch(self, host_esxcfg_vswitch_2): + host_esxcfg_vswitch_2.initialize_vswitch() + host_esxcfg_vswitch_2.del_vswitch("ATvSwitchLongName") + assert len(host_esxcfg_vswitch_2.vswitch) == 1 + assert host_esxcfg_vswitch_2.vswitch[0].name == "vSwitch0" + + def test_set_vswitch(self, host): + vswitch = host.add_vswitch("test") + host.set_vswitch( + name=vswitch.name, + uplinks=["vmnic4", "vmnic5"], + portgroups=["t1", "t2"], + vmknic=False, + ) + assert "vmnic4" in vswitch.uplinks + assert "vmnic5" in vswitch.uplinks + assert "t1" in vswitch.portgroups + assert "t2" in vswitch.portgroups + + def test_set_vswitch_create(self, host): + host.set_vswitch( + name="test", + uplinks=["vmnic4", "vmnic5"], + portgroups=["t1", "t2"], + vmknic=False, + ) + vswitch = host.vswitch[0] + assert "vmnic4" in vswitch.uplinks + assert "vmnic5" in vswitch.uplinks + assert "t1" in vswitch.portgroups + assert "t2" in vswitch.portgroups + + def test_find_vswitch(self, host_esxcfg_vswitch_2): + host_esxcfg_vswitch_2.initialize_vswitch() + vswitch = host_esxcfg_vswitch_2.find_vswitch(name="vSwitch0") + assert vswitch.name == "vSwitch0" + vswitch = host_esxcfg_vswitch_2.find_vswitch(uplink="vmnic10") + assert vswitch.name == "ATvSwitchLongName" + vswitch = host_esxcfg_vswitch_2.find_vswitch(portgroup="ATmng") + assert vswitch.name == "vSwitch0" + + def test_add_vmknic(self, host_esxcfg_vmknic_2): + vmknic = host_esxcfg_vmknic_2.add_vmknic("PGvmnic0") + assert len(host_esxcfg_vmknic_2.vmknic) == 1 + assert vmknic.name == "vmk2" + assert vmknic.portgroup == "PGvmnic0" + + def test_del_vmknic(self, host_esxcfg_vmknic_1): + host_esxcfg_vmknic_1.initialize_vmknic() + assert len(host_esxcfg_vmknic_1.vmknic) == 2 + host_esxcfg_vmknic_1.del_vmknic(portgroup="ATvmnic10") + assert 
len(host_esxcfg_vmknic_1.vmknic) == 1 + host_esxcfg_vmknic_1.del_vmknic(name="vmk0") + assert len(host_esxcfg_vmknic_1.vmknic) == 0 + + def test_find_vmknic(self, host_esxcfg_vmknic_2): + host_esxcfg_vmknic_2.add_vmknic("PGvmnic0") + host_esxcfg_vmknic_2.add_vmknic("ATvmnic10") + vmknic = host_esxcfg_vmknic_2.find_vmknic(name="vmk2") + assert vmknic.name == "vmk2" + vmknic = host_esxcfg_vmknic_2.find_vmknic(portgroup="ATvmnic10") + assert vmknic.name == "vmk1" + vmknic = host_esxcfg_vmknic_2.find_vmknic(ip="1.1.1.1") + assert vmknic.name == "vmk1" + vmknic = host_esxcfg_vmknic_2.find_vmknic(net="1.0.0.0/8") + assert vmknic.name == "vmk1" + vmknic = host_esxcfg_vmknic_2.find_vmknic(net="fe80::0/64") + assert vmknic.name == "vmk2" + + def test_find_link_partner1(self, host_esxcfg_nics_1): + lp = host_esxcfg_nics_1.find_link_partner("vmnic2") + assert lp == "vmnic3" + + def test_find_link_partner2(self, host_esxcfg_nics_2): + lp = host_esxcfg_nics_2.find_link_partner("vmnic2") + assert lp == "vmnic3" + + def test_find_link_partner3(self, host_esxcfg_nics_2): + with pytest.raises(ESXiRuntimeError): + host_esxcfg_nics_2.find_link_partner("vmnic3") + + def test_find_pf0(self, host_esxcfg_nics_1): + pf0 = host_esxcfg_nics_1.find_pf0(["vmnic3", "vmnic13", "vmnic8"]) + assert len(pf0) == 3 + assert "vmnic0" in pf0 + assert "vmnic10" in pf0 + assert "vmnic8" in pf0 + + def test_get_meminfo(self, host): + output = """Memory information { + System memory usage (pages):191681 + Number of NUMA nodes:2 + Number of memory nodes:3 + Number of memory tiers:1 + First valid MPN:1 + Last valid MPN:17301503 + Max valid MPN:274877906944 + Max support RAM (in MB):33585088 + } + """ + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + pattern_dict = {"mem_usage": 191681} + dict_out = host.get_meminfo() + assert pattern_dict == dict_out + + def test_get_intnetcli_version_success(self, host): + output = """int-esx-intnetcli 700.1.8.1.0-15843807 INT PartnerSupported 2021-11-24""" # noqa + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + expected_res = IntnetCliVersion(intnet_ver="1.8.1.0", ddk_ver="700") + assert expected_res == host.get_intnetcli_version() + + def test_get_intnetcli_version_bad_output(self, host): + output = """int-esx-intnetcli bad_version-15843807 INT PartnerSupported 2021-11-24""" # noqa + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + with pytest.raises(ESXiRuntimeError, match="Unknown version of intnetcli installed."): + host.get_intnetcli_version() + + def test_get_intnetcli_version_missing_out(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, args="command", stdout="" + ) + expected_res = IntnetCliVersion(intnet_ver=None, ddk_ver=None) + assert expected_res == host.get_intnetcli_version() + + def test_get_pci_passthrough_capable_devices_success(self, host): + # < 7.0 case + output = dedent( + """\ + 0000:af:00.0 + address: 0000:af:00.0 + segment: 0x0000 + bus: 0xaf + slot: 0x00 + function: 0x0 + vmkernel name: vmnic4 + passthru capable: true + parent device: pci 0:174:0:0 + dependent device: pci 0:175:0:2 + reset method: function reset + fpt sharable: true + + 0000:af:00.3 + address: 0000:af:00.3 + segment: 0x0000 + passthru capable: true""" + ) + host.connection.execute_command.return_value = 
ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + expected_res = { + PCIAddress(data="0000:af:00.0"): False, + PCIAddress(data="0000:af:00.3"): False, + } + assert expected_res == host.get_pci_passthrough_capable_devices() + + def test_get_pci_passthrough_capable_devices_success_higher_7_0(self, host): + # >= 7.0 case + output = dedent( + """\ + Device ID Enabled + ------------ ------- + 0000:02:00.0 true + 0000:18:00.0 false + """ + ) + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + expected_res = { + PCIAddress(data="0000:02:00.0"): True, + PCIAddress(data="0000:18:00.0"): False, + } + assert expected_res == host.get_pci_passthrough_capable_devices() + + def test_get_pci_passthrough_nics_success(self, host): + output = dedent( + """\ + 0000:af:00.0 8086:1572 8086:0004 255/ / A P pciPassthru vmnic8 + 0000:af:00.1 8086:1572 8086:0000 255/ / A P pciPassthru vmnic9 + 0000:af:00.2 8086:1572 8086:0000 255/ / A V i40en vmnic10 + 0000:5e:09.0 8086:1889 8086:0000 255/ / @ P pciPassthru PF_0.94.1_VF_0 + """ + ) + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + assert [ + PCIAddress(data="0000:af:00.0"), + PCIAddress(data="0000:af:00.1"), + ] == host.get_pci_passthrough_nics() + + def test_get_pci_passthrough_nics_success_esxi_8(self, host): + # case >= 8.0 + output = dedent( + """\ + 0000:af:00.0 8086:1572 8086:0008 11/ / A V i40en + 0000:af:00.1 8086:1572 8086:0000 11/ / A P pciPassthru + 0000:af:02.0 8086:154c 8086:0000 255/ / @ P pciPassthru + """ + ) + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + assert [PCIAddress(data="0000:af:00.1")] == host.get_pci_passthrough_nics() + + def test_get_pci_passthrough_nics_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Invalid output" + ) + with pytest.raises( + ESXiRuntimeError, + match="Cannot get PCI addresses for PCI passthrough enabled NICs.", + ): + host.get_pci_passthrough_nics() + + def test_get_vds_id_success(self, host): + output = dedent( + """\ + DSwitch_078 + Name: DSwitch_078 + VDS ID: 50 1e 81 57 81 ea ba d3-94 e9 0f 7b c9 1b da 81 + Class: cswitch + Num Ports: 3456 + Used Ports: 4 + Configured Ports: 512 + MTU: 1500 + CDP Status: listen + Beacon Timeout: -1 + Uplinks: vmnic4 + VMware Branded: true + DVPort: + Client: vmnic4 + DVPortgroup ID: dvportgroup-117200 + In Use: true + Port ID: 0 + + Client: Base_R83_VM001_078.eth1 + DVPortgroup ID: dvportgroup-117201 + In Use: true + Port ID: 2""" + ) + expected_output = "50 1e 81 57 81 ea ba d3-94 e9 0f 7b c9 1b da 81" + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output + ) + assert expected_output == host.get_vds_id() + + def test_get_vds_id_wrong(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Invalid_output" + ) + with pytest.raises(ESXiRuntimeError, match="Cannot get VDS ID."): + host.get_vds_id() + + def test_get_vm_name_by_vf(self, host, interface, mocker): + vfs = [ + VFInfo( + vf_id="0", + pci_address=PCIAddress(domain=0, bus=75, slot=17, func=0), + owner_world_id="2169609", + ), + VFInfo( + vf_id="1", + pci_address=PCIAddress(domain=0, bus=75, slot=17, func=1), + 
owner_world_id="2169642", + ), + VFInfo( + vf_id="2", + pci_address=PCIAddress(domain=0, bus=75, slot=17, func=2), + owner_world_id="2169798", + ), + VFInfo( + vf_id="3", + pci_address=PCIAddress(domain=0, bus=75, slot=17, func=3), + owner_world_id="2169831", + ), + ] + output = dedent( + """AT_ESXI_145 + World ID: 2101442 + Process ID: 0 + VMX Cartel ID: 2101441 + UUID: 56 4d a9 ac 1c ee 5d d7-f1 ae a5 95 f8 3d 2a 2d + Display Name: AT_ESXI_145 + Config File: /vmfs/volumes/5b645dbf-d27a664c-e76a-1402ec67d6e6/AT_ESXI/AT_ESXI.vmx + VM2_145 + World ID: 2169831 + Process ID: 0 + VMX Cartel ID: 2169642 + UUID: 56 4d 76 ff f9 9b 86 df-e9 9a e3 10 ac e1 69 80 + Display Name: VM2_145 + Config File: /vmfs/volumes/5b645dbf-d27a664c-e76a-1402ec67d6e6/VM2_145/Base_S12SP3.vmx + VM1_145 + World ID: 2169832 + Process ID: 0 + VMX Cartel ID: 2169831 + UUID: 56 4d 07 6d 65 0a 82 54-7c d5 90 c5 f7 89 6a a2 + Display Name: VM1_145 + Config File: /vmfs/volumes/5b645dbf-d27a664c-e76a-1402ec67d6e6/VM1_145/Base_S12SP3.vmx + """ + ) + + vf_id = 3 # 'Owner World ID': '2169831' + results = { + "6.5.0": "VM1_145", # 'VMX Cartel ID': '2169831' + "6.7.0": "VM1_145", # 'VMX Cartel ID': '2169831' + "7.0.0": "VM2_145", # 'World ID': '2169831' + "7.0.2": "VM2_145", # 'World ID': '2169831' + "9.0.0": "VM1_145", # 'VMX Cartel ID': '2169831' + } + + mocker.patch.object(interface.virtualization, "get_connected_vfs_info", return_value=vfs) + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout=output, stderr="stderr" + ) + host.connection.get_system_info.side_effect = [ + MagicMock(kernel_version="6.5.0"), + MagicMock(kernel_version="6.7.0"), + MagicMock(kernel_version="7.0.0"), + MagicMock(kernel_version="7.0.2"), + MagicMock(kernel_version="9.0.0"), + ] + assert host.get_vm_name_for_vf_id(vf_id=vf_id, interface=interface) == results["6.5.0"] + assert host.get_vm_name_for_vf_id(vf_id=vf_id, interface=interface) == results["6.7.0"] + assert host.get_vm_name_for_vf_id(vf_id=vf_id, interface=interface) == results["7.0.0"] + assert host.get_vm_name_for_vf_id(vf_id=vf_id, interface=interface) == results["7.0.2"] + assert host.get_vm_name_for_vf_id(vf_id=vf_id, interface=interface) == results["9.0.0"] diff --git a/tests/unit/test_mfd_esxi/test_host_api.py b/tests/unit/test_mfd_esxi/test_host_api.py new file mode 100644 index 0000000..94840db --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_host_api.py @@ -0,0 +1,37 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestESXiHostApi: + def test_fingerprint(self, host_api_with_cert): + assert host_api_with_cert.fingerprint == "FE:32:B8:57:D5:6D:75:FC:1E:75:F6:97:2D:7F:27:A0:79:55:22:01" + + def test_get_adapters_sriov_info_all_ports(self, host_api_with_vf_info_and_interfaces): + assert host_api_with_vf_info_and_interfaces[0].get_adapters_sriov_info( + host_api_with_vf_info_and_interfaces[1], all_ports=True + ) == { + "0000:00:00.0": { + "enabled": True, + "max_vfs": 128, + "num_vfs": 8, + "req_vfs": 8, + }, + "0000:00:00.1": { + "enabled": False, + "max_vfs": 128, + "num_vfs": 0, + "req_vfs": 0, + }, + } + + def test_get_adapters_sriov_info_single_port(self, host_api_with_vf_info_and_interfaces): + assert host_api_with_vf_info_and_interfaces[0].get_adapters_sriov_info( + host_api_with_vf_info_and_interfaces[1], all_ports=False + ) == { + "0000:00:00.0": { + "enabled": True, + "max_vfs": 128, + "num_vfs": 8, + "req_vfs": 8, + } + } diff --git a/tests/unit/test_mfd_esxi/test_nsx/__init__.py 
b/tests/unit/test_mfd_esxi/test_nsx/__init__.py new file mode 100644 index 0000000..8ef2fad --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_nsx/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT diff --git a/tests/unit/test_mfd_esxi/test_nsx/test_host_transport_node.py b/tests/unit/test_mfd_esxi/test_nsx/test_host_transport_node.py new file mode 100644 index 0000000..0a73c9c --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_nsx/test_host_transport_node.py @@ -0,0 +1,67 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +import pytest + +from mfd_esxi.exceptions import MissingNsxEntity +from mfd_esxi.nsx.connection import NsxConnection +from mfd_esxi.nsx.host_transport_node import NsxHostTransportNode + + +class TestHostTransportNode: + @pytest.fixture + def connection(self, mocker): + connection = mocker.create_autospec(NsxConnection) + yield connection + mocker.stopall() + + @pytest.fixture + def host_transport_node(self, connection, mocker): + host_tn = NsxHostTransportNode(name="test_transport_node", connection=connection) + host_tn._patch = mocker.Mock() + + yield host_tn + mocker.stopall() + + def test_add_switch_with_uplink_param(self, host_transport_node, mocker): + + vds_id = "test" + mock_standard_host_switch = mocker.patch("mfd_esxi.nsx.host_transport_node.StandardHostSwitch", autospec=True) + host_transport_node.add_switch( + host_switch_name="test_sw", + uplink_name="uplink", + transport_zone_name="test_tz", + vds_id=vds_id, + uplinks=4, + ip_pool_id="IPV6pool", + ) + mock_standard_host_switch.assert_called_once() + args, kwargs = mock_standard_host_switch.call_args + assert kwargs["host_switch_name"] == "test_sw" + assert len(kwargs["uplinks"]) == 4 + + def test_add_switch_without_uplink_param(self, host_transport_node, mocker): + # test whether Exception is not raised when uplink param is not provided + mock_standard_host_switch = mocker.patch("mfd_esxi.nsx.host_transport_node.StandardHostSwitch", autospec=True) + host_transport_node.add_switch( + host_switch_name="test_sw", + uplink_name="uplink", + transport_zone_name="test_tz", + vds_id="test", + ip_pool_id="IPV4pool", + ) + mock_standard_host_switch.assert_called_once() + args, kwargs = mock_standard_host_switch.call_args + from mfd_esxi.const import ESXI_UPLINK_NUMBER + + assert len(kwargs["uplinks"]) == ESXI_UPLINK_NUMBER + + def test_add_switch_no_payload(self, host_transport_node, mocker): + host_transport_node._get_content = mocker.Mock(return_value=None) + with pytest.raises(MissingNsxEntity): + host_transport_node.add_switch( + host_switch_name="test_sw", + uplink_name="uplink", + transport_zone_name="test_tz", + vds_id="test", + ip_pool_id=None, + ) diff --git a/tests/unit/test_mfd_esxi/test_nsx/test_transport_zone.py b/tests/unit/test_mfd_esxi/test_nsx/test_transport_zone.py new file mode 100644 index 0000000..cd66d35 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_nsx/test_transport_zone.py @@ -0,0 +1,46 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +import pytest + +from com.vmware.nsx_policy.model_client import PolicyTransportZone +from mfd_esxi.exceptions import UnsupportedNsxEntity +from mfd_esxi.nsx.connection import NsxConnection +from mfd_esxi.nsx.transport_zone import NsxTransportZone + + +class TestTransportZone: + @pytest.fixture + def connection(self, mocker): + connection = mocker.create_autospec(NsxConnection) + yield connection + mocker.stopall() + + @pytest.fixture + def 
transport_zone(self, connection, mocker): + tz = NsxTransportZone(name="test_transport_zone", connection=connection) + tz._patch = mocker.Mock() + + yield tz + mocker.stopall() + + def test_updates_forwarding_mode_when_transport_zone_exists(self, mocker, transport_zone): + transport_zone._get_content = mocker.Mock() + transport_zone._get_content.return_value.tz_type = PolicyTransportZone.TZ_TYPE_OVERLAY_BACKED + + transport_zone.update_forwarding_mode("NEW_MODE") + + transport_zone._connection.api.policy.infra.sites.enforcement_points.TransportZones.patch.assert_called_once() # noqa: E501 + + def test_raises_value_error_when_transport_zone_does_not_exist(self, mocker, transport_zone): + transport_zone._get_content = mocker.Mock() + transport_zone._get_content.return_value = None + + with pytest.raises(ValueError, match="Transport Zone does not exist."): + transport_zone.update_forwarding_mode("ANY_MODE") + + def test_raises_exception_when_transport_zone_is_vlan_backed(self, mocker, transport_zone): + transport_zone._get_content = mocker.Mock() + transport_zone._get_content.return_value.tz_type = PolicyTransportZone.TZ_TYPE_VLAN_BACKED + + with pytest.raises(UnsupportedNsxEntity): + transport_zone.update_forwarding_mode("ANY_MODE") diff --git a/tests/unit/test_mfd_esxi/test_nsx/test_uplink_profile.py b/tests/unit/test_mfd_esxi/test_nsx/test_uplink_profile.py new file mode 100644 index 0000000..a03fc26 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_nsx/test_uplink_profile.py @@ -0,0 +1,36 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +import pytest + +from mfd_esxi.nsx.connection import NsxConnection +from mfd_esxi.nsx.uplink_profile import NsxUplinkProfile + + +class TestUplinkProfile: + @pytest.fixture + def connection(self, mocker): + connection = mocker.create_autospec(NsxConnection) + yield connection + mocker.stopall() + + @pytest.fixture + def uplink_profile(self, connection, mocker): + profile = NsxUplinkProfile(name="test_uplink_profile", connection=connection) + profile._patch = mocker.Mock() + + yield profile + mocker.stopall() + + def test_updates_vlan_when_uplink_profile_exists(self, mocker, uplink_profile): + uplink_profile._get_content = mocker.Mock() + + uplink_profile.update_transport_vlan(transport_vlan=101) + + uplink_profile._connection.api.policy.infra.HostSwitchProfiles.patch.assert_called_once() # noqa: E501 + + def test_raises_value_error_when_uplink_profile_does_not_exist(self, mocker, uplink_profile): + uplink_profile._get_content = mocker.Mock() + uplink_profile._get_content.return_value = None + + with pytest.raises(ValueError, match="Uplink profile does not exist."): + uplink_profile.update_transport_vlan(transport_vlan=101) diff --git a/tests/unit/test_mfd_esxi/test_vcenter/__init__.py b/tests/unit/test_mfd_esxi/test_vcenter/__init__.py new file mode 100644 index 0000000..8ef2fad --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_cluster.py b/tests/unit/test_mfd_esxi/test_vcenter/test_cluster.py new file mode 100644 index 0000000..162f7c8 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_cluster.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestCluster: + def test_repr(self, cluster): + assert f"{cluster}" == 
"Cluster('PY-Cluster')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_datacenter.py b/tests/unit/test_mfd_esxi/test_vcenter/test_datacenter.py new file mode 100644 index 0000000..0098ca2 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_datacenter.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestDatacenter: + def test_repr(self, datacenter): + assert f"{datacenter}" == "Datacenter('PY-Datacenter')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_datastore.py b/tests/unit/test_mfd_esxi/test_vcenter/test_datastore.py new file mode 100644 index 0000000..1c2e2a8 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_datastore.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestDatastore: + def test_repr(self, datastore): + assert f"{datastore}" == "Datastore('PY-Datastore')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_dsportgroup.py b/tests/unit/test_mfd_esxi/test_vcenter/test_dsportgroup.py new file mode 100644 index 0000000..8f92fd3 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_dsportgroup.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestDSPortgroup: + def test_repr(self, dsportgroup): + assert f"{dsportgroup}" == "DSPortgroup('PY-DSPortgroup')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_dsuplink.py b/tests/unit/test_mfd_esxi/test_vcenter/test_dsuplink.py new file mode 100644 index 0000000..cb61f48 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_dsuplink.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestDSUplink: + def test_repr(self, dsuplink): + assert f"{dsuplink}" == "DSUplink('PY-DSUplink')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_dswitch.py b/tests/unit/test_mfd_esxi/test_vcenter/test_dswitch.py new file mode 100644 index 0000000..3e45609 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_dswitch.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestDSwitch: + def test_repr(self, dswitch): + assert f"{dswitch}" == "DSwitch('PY-DSwitch')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_host.py b/tests/unit/test_mfd_esxi/test_vcenter/test_host.py new file mode 100644 index 0000000..bbd2e8f --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_host.py @@ -0,0 +1,8 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestHost: + def test_repr(self, standalone_host, cluster_host): + assert f"{standalone_host}" == "Host('PY-StandaloneHost')" + assert f"{cluster_host}" == "Host('PY-ClusterHost')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_utils.py b/tests/unit/test_mfd_esxi/test_vcenter/test_utils.py new file mode 100644 index 0000000..b8b94d8 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_utils.py @@ -0,0 +1,43 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT +import pytest + +from mfd_esxi.vcenter.exceptions import VCenterResourceMissing +from mfd_esxi.vcenter.utils import get_obj_from_iter, get_first_match_from_iter + + +def test_get_obj_from_iter_match(vcenter_named_entities): + match = get_obj_from_iter(vcenter_named_entities, "Named-2") + assert match is not None + assert match.name == "Named-2" + + +def test_get_obj_from_iter_safe_miss(vcenter_named_entities): + match = 
get_obj_from_iter(vcenter_named_entities, "unNamed-2", raise_if_missing=False) + assert match is None + + +def test_get_obj_from_iter_unsafe_miss(vcenter_named_entities): + with pytest.raises(VCenterResourceMissing) as exc_info: + get_obj_from_iter(vcenter_named_entities, "unNamed-2", raise_if_missing=True) + assert f"{exc_info.value}" == "unNamed-2 in:[Named-1, Named-2, Named-3]" + + +def test_get_first_match_from_iter_match(vcenter_named_entities): + match = get_first_match_from_iter(vcenter_named_entities) + assert match.name == "Named-1" + + +def test_get_first_match_from_iter_predicate_match(vcenter_named_entities): + match = get_first_match_from_iter(vcenter_named_entities, lambda o: o.name == "Named-3") + assert match.name == "Named-3" + + +def test_get_first_match_from_iter_miss(vcenter_named_entities): + match = get_first_match_from_iter(vcenter_named_entities, lambda o: False) + assert match is None + + +def test_get_first_match_from_iter_default_miss(vcenter_named_entities): + match = get_first_match_from_iter(vcenter_named_entities, lambda o: False, "Missed") + assert match == "Missed" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_vcenter.py b/tests/unit/test_mfd_esxi/test_vcenter/test_vcenter.py new file mode 100644 index 0000000..d84a522 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_vcenter.py @@ -0,0 +1,13 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestVCenter: + def test_repr(self, vcenter): + assert f"{vcenter}" == "VCenter('172.31.12.144')" + + def test_initialization(self, vcenter): + assert vcenter._ip == "172.31.12.144" + assert vcenter._login == "user" + assert vcenter._password == "secret" + assert vcenter._port == 443 diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_adapter.py b/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_adapter.py new file mode 100644 index 0000000..55c9757 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_adapter.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestVirtualAdapter: + def test_repr(self, virtual_adapter): + assert f"{virtual_adapter}" == "VirtualAdapter('PY-VirtualAdapter') in Host('PY-StandaloneHost')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_machine.py b/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_machine.py new file mode 100644 index 0000000..d4ea61f --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_virtual_machine.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestVirtualMachine: + def test_repr(self, virtual_machine): + assert f"{virtual_machine}" == "VirtualMachine('PY-VirtualMachine')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_vsportgroup.py b/tests/unit/test_mfd_esxi/test_vcenter/test_vsportgroup.py new file mode 100644 index 0000000..9c4bb6d --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_vsportgroup.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + + +class TestVSPortgroup: + def test_repr(self, vsportgroup): + assert f"{vsportgroup}" == "VSPortgroup('PY-VSPortgroup')" diff --git a/tests/unit/test_mfd_esxi/test_vcenter/test_vswitch.py b/tests/unit/test_mfd_esxi/test_vcenter/test_vswitch.py new file mode 100644 index 0000000..3c830d4 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vcenter/test_vswitch.py @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT 
+ + +class TestVSwitch: + def test_repr(self, vswitch): + assert f"{vswitch}" == "VSwitch('PY-VSwitch')" diff --git a/tests/unit/test_mfd_esxi/test_vm_base.py b/tests/unit/test_mfd_esxi/test_vm_base.py new file mode 100644 index 0000000..28cf030 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vm_base.py @@ -0,0 +1,255 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +import pytest +from ipaddress import ip_interface, ip_address + +from mfd_connect.base import ConnectionCompletedProcess +from .fixtures import getallvms +from mfd_esxi.vm_base import ESXiVMBase +from mfd_esxi.exceptions import ESXiRuntimeError, ESXiVMNotRun, ESXiWrongParameter + + +class TestESXiVMBase: + def test_initialize1(self): + vm = ESXiVMBase(None) + lines = getallvms.splitlines() + vm.initialize(lines[1]) + assert vm.id == 1 + assert vm.name == "AT_ESXI_050" + assert vm.datastore == "datastore_050" + assert vm.folder == "AT_ESXI" + + def test_initialize2(self): + vm = ESXiVMBase(None) + lines = getallvms.splitlines() + vm.initialize(lines[2]) + assert vm.id == 24 + assert vm.name == "Test Test [Test] [Test]" + assert vm.datastore == "datastore_050_vmfs6" + assert vm.folder == "Test" + + def test_discover(self, host_getallvms): + vms = ESXiVMBase.discover(host_getallvms) + assert len(vms) == 2 + + def test_register(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="5" + ) + vm = ESXiVMBase(host) + vm.register("path") + assert vm.id == 5 + vm.register("path") + assert vm.id == 5 + + def test_unregister(self, host): + vm = ESXiVMBase(host) + vm.id = 5 + vm.unregister() + assert vm.id is None + vm.unregister() + assert vm.id is None + + def test_reload(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.reload() + vm.id = None + with pytest.raises(ESXiRuntimeError): + vm.reload() + + def test_start(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.start() + + def test_start_already_started(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, + args="command", + stdout="", + stderr="The attempted operation cannot be performed in the current state", + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.start() + + def test_start_insufficient_memory(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, + args="command", + stdout="", + stderr="InsufficientMemoryResourcesFault", + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiVMNotRun): + vm.start() + + def test_start_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, args="command", stdout="", stderr="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiRuntimeError): + vm.start() + + def test_stop(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.stop() + + def test_stop_already_stopped(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, + args="command", + stdout="", + stderr="The attempted operation cannot be performed in the current state", + ) + 
vm = ESXiVMBase(host) + vm.id = 5 + vm.stop() + + def test_stop_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, args="command", stdout="", stderr="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiRuntimeError): + vm.stop() + + def test_shutdown(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Powered off" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.shutdown(wait=True, timeout=10) + + def test_shutdown_already_stopped(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, + args="command", + stdout="", + stderr="The attempted operation cannot be performed in the current state", + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.shutdown() + + def test_shutdown_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, args="command", stdout="", stderr="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiRuntimeError): + vm.shutdown() + + def test_reboot(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.reboot() + + def test_reboot_already_stopped(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, + args="command", + stdout="", + stderr="The attempted operation cannot be performed in the current state", + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.reboot() + + def test_reboot_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=1, args="command", stdout="", stderr="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiRuntimeError): + vm.reboot() + + def test_getstate_off(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Powered off" + ) + vm = ESXiVMBase(host) + vm.id = 5 + assert vm.get_state() == "off" + + def test_getstate_on(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Powered on" + ) + vm = ESXiVMBase(host) + vm.id = 5 + assert vm.get_state() == "on" + + def test_getstate_error(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="" + ) + vm = ESXiVMBase(host) + vm.id = 5 + with pytest.raises(ESXiRuntimeError): + vm.get_state() + + def test_wait_for_state_off(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Powered off" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.wait_for_state("off") + + def test_wait_for_state_on(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="Powered on" + ) + vm = ESXiVMBase(host) + vm.id = 5 + vm.wait_for_state("on") + + def test_wait_for_state_wrong_parameter(self, host): + vm = ESXiVMBase(host) + with pytest.raises(ESXiWrongParameter): + vm.wait_for_state(state="") + + def test_get_guest_mng_ip(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, + args="command", + stdout='guestState = "running"\nipAddress = "1.1.10.1"', + ) + host.mng_ip = ip_interface("1.1.1.1/8") + vm = 
ESXiVMBase(host) + vm.id = 5 + assert vm.get_guest_mng_ip() == ip_address("1.1.10.1") + + def test_wait_for_mng_ip(self, host): + host.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, + args="command", + stdout='guestState = "running"\nipAddress = "1.1.10.1"', + ) + host.mng_ip = ip_interface("1.1.1.1/8") + vm = ESXiVMBase(host) + vm.id = 5 + assert vm.wait_for_mng_ip() == ip_address("1.1.10.1") diff --git a/tests/unit/test_mfd_esxi/test_vm_gold.py b/tests/unit/test_mfd_esxi/test_vm_gold.py new file mode 100644 index 0000000..45d7783 --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vm_gold.py @@ -0,0 +1,101 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +from unittest.mock import patch, Mock +from mfd_esxi.vm_gold import ESXiVMGold, ESXiVM +from packaging.version import Version + + +class TestESXiVMGold: + def test_initialize(self, host_gold_vmx): + gold = ESXiVMGold(host_gold_vmx, "datastore_050", "Base_R91") + gold.initialize() + assert gold.datastore == "datastore_050" + assert gold.name == "Base_R91" + assert gold.firmware == "efi" + assert gold.guestOS == "other-64" + assert gold.scsi_dev == "lsisas1068" + assert gold.primary_vmdk == "Base_R91-000001.vmdk" + assert gold.primary_flat == "Base_R91-000001-sesparse.vmdk" + assert gold.parent_vmdk == "Base_R91.vmdk" + assert gold.parent_flat == "Base_R91-flat.vmdk" + + +class TestESXiVM: + @patch("mfd_esxi.vm_gold.copy", Mock()) + @patch("mfd_esxi.vm_gold.LocalConnection", Mock()) + def test_write_vmx(self, host_gold_vmx): + gold = ESXiVMGold(host_gold_vmx, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + vm.write_vmx() + + def test_attach_network_vmxnet(self, host_gold_vmx): + gold = ESXiVMGold(host_gold_vmx, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + vm.attach_network("test1") + vm.attach_network("test2", rss=True) + assert len(vm.ethernet) == 2 + + def test_attach_network_sriov(self, host_gold_vmx, mocker): + gold = ESXiVMGold(host_gold_vmx, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + adapter = mocker + adapter.pci_address = mocker + adapter.pci_address.lspci_short = "1:2.3" + vm.attach_network("test1", model="sriov", pf=adapter) + assert len(vm.ethernet) == 0 + assert len(vm.pciPassthru) == 1 + + def test_attach_network_ptp_old_esxi(self, host_gold_vmx_ptp_old_esxi, mocker): + gold = ESXiVMGold(host_gold_vmx_ptp_old_esxi, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + host_gold_vmx_ptp_old_esxi.esxi_version = mocker + host_gold_vmx_ptp_old_esxi.esxi_version.version = Version("8.0.2") + adapter = mocker + adapter.name = "vmnic4" + vm.attach_network("test1", model="ptp", pf=adapter) + host_gold_vmx_ptp_old_esxi.connection.execute_command.assert_called_with( + command="lspci -p | grep :32:01.0", shell=True + ) + assert len(vm.ethernet) == 0 + assert len(vm.pciPassthru) == 1 + if len(vm.pciPassthru) == 1: + assert vm.pciPassthru[0]["id"] == "00000:050:01.0" + + def test_attach_network_ptp_new_esxi(self, host_gold_vmx_ptp_new_esxi, mocker): + gold = ESXiVMGold(host_gold_vmx_ptp_new_esxi, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + host_gold_vmx_ptp_new_esxi.esxi_version = mocker + host_gold_vmx_ptp_new_esxi.esxi_version.version = Version("8.0.3") + adapter = mocker + adapter.name = "vmnic4" + 
vm.attach_network("test1", model="ptp", pf=adapter) + host_gold_vmx_ptp_new_esxi.connection.execute_command.assert_called_with( + command="lspci -p | grep :32:01.0", shell=True + ) + assert len(vm.ethernet) == 0 + assert len(vm.pciPassthru) == 1 + if len(vm.pciPassthru) == 1: + assert vm.pciPassthru[0]["id"] == "00000:050:01.0" + + def test_attach_network_ptp_new_esxi_hex(self, host_gold_vmx_ptp_new_esxi_hex, mocker): + gold = ESXiVMGold(host_gold_vmx_ptp_new_esxi_hex, "datastore_050", "Base_R91") + gold.initialize() + vm = ESXiVM(gold=gold, name="test", mng="test") + host_gold_vmx_ptp_new_esxi_hex.esxi_version = mocker + host_gold_vmx_ptp_new_esxi_hex.esxi_version.version = Version("8.0.3") + adapter = mocker + adapter.name = "vmnic4" + vm.attach_network("test1", model="ptp", pf=adapter) + host_gold_vmx_ptp_new_esxi_hex.connection.execute_command.assert_called_with( + command="lspci -p | grep :b1:01.0", shell=True + ) + assert len(vm.ethernet) == 0 + assert len(vm.pciPassthru) == 1 + if len(vm.pciPassthru) == 1: + assert vm.pciPassthru[0]["id"] == "00000:177:01.0" diff --git a/tests/unit/test_mfd_esxi/test_vm_mgr.py b/tests/unit/test_mfd_esxi/test_vm_mgr.py new file mode 100644 index 0000000..c7e8b5c --- /dev/null +++ b/tests/unit/test_mfd_esxi/test_vm_mgr.py @@ -0,0 +1,68 @@ +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: MIT + +from unittest.mock import patch, Mock + +from mfd_connect.base import ConnectionCompletedProcess +from mfd_esxi.vm_mgr import ESXiVMMgr + + +class TestESXiVMMgr: + def test_initialize(self, host_getallvms): + hv = ESXiVMMgr(host_getallvms) + hv.initialize() + assert len(hv.vm) == 2 + + def test_clean_all(self, host_getallvms): + hv = ESXiVMMgr(host_getallvms) + hv.initialize() + hv.clean() + assert len(hv.vm) == 0 + + def test_clean_keep(self, host_getallvms): + hv = ESXiVMMgr(host_getallvms) + hv.initialize() + hv.clean(keep="AT_ESXI") + assert len(hv.vm) == 1 + assert hv.vm[0].name == "AT_ESXI_050" + + def test_prepare_vms(self, host_gold_vmx): + hv = ESXiVMMgr(host_gold_vmx) + vms = hv.prepare_vms("datastore_050", "Base_R91", count=3, suffix="test") + assert hv.gold[0].name == "Base_R91" + assert len(vms) == 3 + + def test_attach_network(self, host_gold_vmx): + hv = ESXiVMMgr(host_gold_vmx) + vms = hv.prepare_vms("datastore_050", "Base_R91", count=3, suffix="test") + hv.attach_network(vms, "test") + assert vms[0].ethernet[0]["networkName"] == "test" + assert vms[1].ethernet[0]["networkName"] == "test" + assert vms[2].ethernet[0]["networkName"] == "test" + + @patch("mfd_esxi.vm_gold.copy", Mock()) + @patch("mfd_esxi.vm_gold.LocalConnection", Mock()) + def test_create_vms(self, host_gold_vmx): + hv = ESXiVMMgr(host_gold_vmx) + vms = hv.prepare_vms("datastore_050", "Base_R91", count=3, suffix="test") + host_gold_vmx.connection.execute_command.side_effect = None + host_gold_vmx.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="5" + ) + hv.create_vms(vms) + assert len(hv.vm) == 3 + + @patch("mfd_esxi.vm_gold.copy", Mock()) + @patch("mfd_esxi.vm_gold.LocalConnection", Mock()) + def test_find_vms(self, host_gold_vmx): + hv = ESXiVMMgr(host_gold_vmx) + vms = hv.prepare_vms("datastore_050", "Base_R91", count=3, suffix="test") + host_gold_vmx.connection.execute_command.side_effect = None + host_gold_vmx.connection.execute_command.return_value = ConnectionCompletedProcess( + return_code=0, args="command", stdout="5" + ) + hv.create_vms(vms) + found = hv.find_vms("Base_R91") + assert 
+        assert len(found) == 3
+        found = hv.find_vms("Base_R90")
+        assert len(found) == 0
diff --git a/tests/unit/test_mfd_esxi/test_vmknic.py b/tests/unit/test_mfd_esxi/test_vmknic.py
new file mode 100644
index 0000000..d9a865f
--- /dev/null
+++ b/tests/unit/test_mfd_esxi/test_vmknic.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+from ipaddress import ip_interface
+
+from .fixtures import esxcfg_vmknic_1
+from mfd_typing import MACAddress
+from mfd_esxi.vmknic import Vmknic
+
+
+class TestVmknic:
+    def test_initialize(self, host):
+        vmknic = Vmknic(host, "vmk1")
+        vmknic.initialize(esxcfg_vmknic_1)
+        assert vmknic.mtu == 1500
+        assert vmknic.name == "vmk1"
+        assert vmknic.portgroup == "ATvmnic10"
+        assert vmknic.mac == MACAddress("00:50:56:aa:bb:cc")
+
+    def test_refresh(self, host_esxcfg_vmknic_2):
+        vmknic = Vmknic(host_esxcfg_vmknic_2, "vmk2")
+        vmknic.refresh()
+        assert str(vmknic.mac) == "00:50:56:aa:bb:cc"
+        assert vmknic.ips[0] == ip_interface("fe80::250:56ff:fe63:a0ef/64")
+
+    def test_discover(self, host_esxcfg_vmknic_1):
+        vmknics = Vmknic.discover(host_esxcfg_vmknic_1)
+        assert len(vmknics) == 2
+
+    def test_add_vmknic(self, host_esxcfg_vmknic_2):
+        vmknic = Vmknic.add_vmknic(host_esxcfg_vmknic_2, "PGvmnic0")
+        assert vmknic.name == "vmk2"
+        assert vmknic.portgroup == "PGvmnic0"
+        assert vmknic.mtu == 9000
+        assert len(vmknic.ips) == 1
+        assert ip_interface("fe80::250:56ff:fe63:a0ef/64") in vmknic.ips
+
+    def test_del_vmknic(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        # Smoke test: deleting the vmknic should not raise.
+        vmknic.del_vmknic()
+
+    def test_set_mtu(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        vmknic.set_mtu(5000)
+        assert vmknic.mtu == 5000
+
+    def test_set_vlan(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        # Smoke test: setting the VLAN should not raise.
+        vmknic.set_vlan(500)
+
+    def test_add_ipv4(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        vmknic.add_ip("1.1.1.1/8")
+        assert len(vmknic.ips) == 1
+        assert vmknic.ips[0] == ip_interface("1.1.1.1/8")
+
+    def test_add_ipv4_multiple(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        vmknic.add_ip("1.1.1.1/8")
+        vmknic.add_ip("2.1.1.1/8")
+        # A second IPv4 address replaces the first one.
+        assert len(vmknic.ips) == 1
+        assert vmknic.ips[0] == ip_interface("2.1.1.1/8")
+
+    def test_add_ipv6(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        vmknic.add_ip("2001:1::1/64")
+        assert len(vmknic.ips) == 1
+        assert vmknic.ips[0] == ip_interface("2001:1::1/64")
+
+    def test_add_ipv6_multiple(self, host):
+        vmknic = Vmknic(host, "vmk2")
+        vmknic.add_ip("2001:1::1/64")
+        vmknic.add_ip("2001:1::2/64")
+        # IPv6 addresses accumulate instead of replacing each other.
+        assert len(vmknic.ips) == 2
+
+    def test_discover_vxlan_vmk(self, host_esxcfg_vmknic_3):
+        vmknics = Vmknic.discover(host_esxcfg_vmknic_3)
+        assert len(vmknics) == 2
diff --git a/tests/unit/test_mfd_esxi/test_vswitch.py b/tests/unit/test_mfd_esxi/test_vswitch.py
new file mode 100644
index 0000000..3ef20b2
--- /dev/null
+++ b/tests/unit/test_mfd_esxi/test_vswitch.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+import pytest
+
+from mfd_connect.base import ConnectionCompletedProcess
+
+from mfd_esxi.exceptions import VswitchError
+from mfd_esxi.vswitch import ESXivSwitch
+from .fixtures import esxcfg_vswitch_1, esxcfg_vswitch_2, esxcfg_vmknic_1
+
+
+class TestESXivSwitch:
+    def test_initialize(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.initialize(esxcfg_vswitch_1)
+        assert vswitch.mtu == 1500
+        assert vswitch.uplinks == ["vmnic0"]
+        assert "ATmng" in vswitch.portgroups
+        assert "VM Network" in vswitch.portgroups
+        assert "Management Network" in vswitch.portgroups
+
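+    # refresh() re-reads the vswitch configuration from the host and re-parses it.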
+    def test_refresh(self, host_esxcfg_vswitch_2):
+        vswitch = ESXivSwitch(host_esxcfg_vswitch_2, "ATvSwitchLongName")
+        vswitch.refresh()
+        assert vswitch.mtu == 1500
+        assert vswitch.uplinks == ["vmnic10"]
+        assert "ATNetwork" in vswitch.portgroups
+        assert "ATvmnic10" in vswitch.portgroups
+
+    def test_discover(self, host_esxcfg_vswitch_1):
+        vswitches = ESXivSwitch.discover(host_esxcfg_vswitch_1)
+        assert len(vswitches) == 1
+
+    def test_add_vswitch(self, host):
+        vswitch = ESXivSwitch.add_vswitch(host, "test")
+        assert isinstance(vswitch, ESXivSwitch)
+        assert vswitch.name == "test"
+
+    def test_del_vswitch(self, host):
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=esxcfg_vswitch_2
+        )
+        host.initialize_vswitch()
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=esxcfg_vmknic_1
+        )
+        host.initialize_vmknic()
+
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=""
+        )
+        assert len(host.vswitch) == 2
+        assert len(host.vmknic) == 2
+        for vswitch in host.vswitch:
+            if vswitch.name == "ATvSwitchLongName":
+                vswitch.del_vswitch()
+                # Deleting the vswitch removes the vmknic attached to it.
+                assert len(host.vmknic) == 1
+                break
+        else:
+            pytest.fail("vswitch ATvSwitchLongName not found")
+
+    def test_set_mtu(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.set_mtu(5000)
+        assert vswitch.mtu == 5000
+
+    def test_add_uplink(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.add_uplink("vmnic99")
+        assert "vmnic99" in vswitch.uplinks
+
+    def test_del_uplink(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.add_uplink("vmnic99")
+        vswitch.del_uplink("vmnic99")
+        assert len(vswitch.uplinks) == 0
+
+    def test_add_portgroup(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.add_portgroup("PGtest")
+        assert "PGtest" in vswitch.portgroups
+
+    def test_del_portgroup(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.add_portgroup("PGtest")
+        vswitch.del_portgroup("PGtest")
+        assert len(vswitch.portgroups) == 0
+
+    def test_del_portgroup_vmknic(self, host):
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=esxcfg_vswitch_2
+        )
+        host.initialize_vswitch()
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=esxcfg_vmknic_1
+        )
+        host.initialize_vmknic()
+
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=""
+        )
+        assert len(host.vswitch) == 2
+        assert len(host.vmknic) == 2
+
+        for vswitch in host.vswitch:
+            if vswitch.name == "ATvSwitchLongName":
+                vswitch.del_portgroup("ATvmnic10")
+                # Deleting the portgroup removes the vmknic attached to it.
+                assert len(host.vmknic) == 1
+                break
+        else:
+            pytest.fail("vswitch ATvSwitchLongName not found")
+
+    def test_set_vlan(self, host):
+        vswitch = ESXivSwitch(host, "ATvSwitchLongName")
+        vswitch.initialize(esxcfg_vswitch_2)
+        # Smoke test: setting the portgroup VLAN should not raise.
+        vswitch.set_portgroup_vlan("ATNetwork", 10)
+
+    def test_set_portgroup_uplinks(self, host):
+        vswitch = ESXivSwitch(host, "ATvSwitchLongName")
+        vswitch.initialize(esxcfg_vswitch_2)
+        # Smoke test: assigning portgroup uplinks should not raise.
+        vswitch.set_portgroup_uplinks("ATNetwork", ["vmnic10"])
+
+    def test_configure(self, host):
+        vswitch = ESXivSwitch(host, "ATvSwitch")
+        vswitch.configure(uplinks=["vmnic4", "vmnic5"], portgroups=["t1", "t2"], vmknic=False)
+        assert "vmnic4" in vswitch.uplinks
+        assert "vmnic5" in vswitch.uplinks
+        assert "t1" in vswitch.portgroups
+        assert "t2" in vswitch.portgroups
+
+    def test_reconfigure(self, host):
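+        # reconfigure() should replace uplinks and portgroups and apply the
+        # new MTU on an already-initialized vswitch.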
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        vswitch.initialize(esxcfg_vswitch_1)
+        vswitch.reconfigure(
+            uplinks=["vmnic4", "vmnic5"],
+            portgroups=["t1", "t2"],
+            mtu=9000,
+            vmknic=False,
+        )
+        assert "vmnic4" in vswitch.uplinks
+        assert "vmnic5" in vswitch.uplinks
+        assert "t1" in vswitch.portgroups
+        assert "t2" in vswitch.portgroups
+        assert vswitch.mtu == 9000
+
+    def test_set_forged_transmit(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(
+            return_code=0, args="command", stdout=""
+        )
+        vswitch.set_forged_transmit("portgroup-name", True)
+        host.connection.execute_command.assert_called_once_with(
+            "esxcli network vswitch standard portgroup policy security set -p portgroup-name -f True"
+        )
+
+    def test_change_ens_fpo_support(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(return_code=0, args="", stdout="")
+        vswitch.change_ens_fpo_support(True)
+        host.connection.execute_command.assert_called_once_with("nsxdp-cli ens fpo set --enable")
+
+    def test_change_ens_fpo_support_provided_vds(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(return_code=0, args="", stdout="")
+        vswitch.change_ens_fpo_support(True, "vSphereDistributedSwitch")
+        host.connection.execute_command.assert_called_once_with(
+            "nsxdp-cli ens fpo set --enable -dvs vSphereDistributedSwitch"
+        )
+
+    def test_set_mac_change_policy_enabled(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        portgroup = "test1"
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(return_code=0, args="", stdout="..")
+
+        vswitch.set_mac_change_policy(portgroup_name=portgroup, enabled=True)
+
+        assert host.connection.execute_command.call_count == 1
+        host.connection.execute_command.assert_called_with(
+            command=f"esxcli network vswitch standard portgroup policy security set -p {portgroup} -m True",
+            custom_exception=VswitchError,
+        )
+
+    def test_set_mac_change_policy_disabled(self, host):
+        vswitch = ESXivSwitch(host, "vSwitch0")
+        portgroup = "test1"
+        host.connection.execute_command.return_value = ConnectionCompletedProcess(return_code=0, args="", stdout="..")
+        vswitch.set_mac_change_policy(portgroup_name=portgroup, enabled=False)
+
+        assert host.connection.execute_command.call_count == 1
+        host.connection.execute_command.assert_called_with(
+            command=f"esxcli network vswitch standard portgroup policy security set -p {portgroup} -m False",
+            custom_exception=VswitchError,
+        )