diff --git a/.github/actions/Dockerfile b/.github/actions/Dockerfile
new file mode 100644
index 0000000..250d118
--- /dev/null
+++ b/.github/actions/Dockerfile
@@ -0,0 +1,22 @@
+FROM ubuntu:20.04
+
+# Build-time only: avoid interactive prompts without baking the var into the runtime env
+ARG DEBIAN_FRONTEND=noninteractive
+
+# Install Terraform, Packer, and Ansible (clean apt lists in the same layer)
+RUN apt-get update && \
+    apt-get install -y curl unzip git bash ansible gnupg && \
+    curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg && \
+    echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com focal main" > /etc/apt/sources.list.d/hashicorp.list && \
+    apt-get update && \
+    apt-get install -y terraform packer xorriso && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy the entrypoint script into the container
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+#EXPOSE 8826 if using http_directory
+
+# Set the entrypoint of the Docker container to be the entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/.github/actions/action.yaml b/.github/actions/action.yaml
new file mode 100644
index 0000000..c9f5131
--- /dev/null
+++ b/.github/actions/action.yaml
@@ -0,0 +1,21 @@
+name: "Build Infrastructure"
+description: "Build VM image using Packer with vSphere"
+
+inputs:
+  vcenter_user:
+    description: "vCenter username"
+    required: true
+  vcenter_password:
+    description: "vCenter password"
+    required: true
+  vcenter_server:
+    description: "vCenter server address"
+    required: true
+
+runs:
+  using: "docker"
+  image: "Dockerfile"
+  env:
+    VCENTER_USER: ${{ inputs.vcenter_user }}
+    VCENTER_PASSWORD: ${{ inputs.vcenter_password }}
+    VCENTER_SERVER: ${{ inputs.vcenter_server }}
diff --git a/.github/actions/entrypoint.sh b/.github/actions/entrypoint.sh
new file mode 100644
index 0000000..39efb83
--- /dev/null
+++ b/.github/actions/entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -euo pipefail
+
+# Print info
+echo "[INFO] Starting Packer build..."
+
+# Required env check
+: "${VCENTER_USER:?VCENTER_USER not set}"
+: "${VCENTER_PASSWORD:?VCENTER_PASSWORD not set}"
+: "${VCENTER_SERVER:?VCENTER_SERVER not set}"
+
+# Export as Packer vars (PKR_VAR_* is the prefix Packer actually recognizes;
+# the JSON template also reads the raw VCENTER_* vars via {{ env }}).
+export PKR_VAR_vcenter_user="$VCENTER_USER"
+export PKR_VAR_vcenter_password="$VCENTER_PASSWORD"
+export PKR_VAR_vcenter_server="$VCENTER_SERVER"
+
+# Optional debug
+echo "[INFO] Using vCenter: $VCENTER_SERVER"
+
+packer plugins install github.com/hashicorp/vsphere
+
+# Move into packer directory if not already
+cd "${PACKER_DIR:-./packer}"
+pwd
+ls -al ./
+
+# Validate template
+packer fmt -check -diff .
+packer validate centos9.json
+
+echo "validated no error"
+
+# Build image
+packer build -force centos9.json
+
+echo "[SUCCESS] Packer build complete."
+
+# Now, let's run Terraform to provision the VM (from the terraform directory)
+echo "[INFO] Starting Terraform provisioning..."
+cd "${TERRAFORM_DIR:-../terraform}"
+
+# Run Terraform init and apply
+terraform init
+terraform apply -auto-approve
+
+echo "[SUCCESS] Terraform apply complete."
diff --git a/.github/workflows/build_inf.yml b/.github/workflows/build_inf.yml
new file mode 100644
index 0000000..c6b746d
--- /dev/null
+++ b/.github/workflows/build_inf.yml
@@ -0,0 +1,47 @@
+name: Build Infrastructure
+
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  packer-build:
+    #runs-on: ubuntu-latest # Can be changed based on your needs
+    runs-on: self-hosted
+
+    container:
+      image: ghcr.io/catthehacker/ubuntu:act-latest
+      # volumes:
+      #   - ansible:/ansible
+      #   - packer:/packer
+      #   - terraform:/terraform
+      #   - vagrant:/vagrant
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Set up Docker Build Environment
+        run: |
+          echo "Setting up Docker environment for Packer build"
+
+      - name: Set environment variables from GitHub Secrets
+        run: |
+          echo "VCENTER_USER=${{ secrets.VCENTER_USER }}" >> $GITHUB_ENV
+          echo "VCENTER_PASSWORD=${{ secrets.VCENTER_PASSWORD }}" >> $GITHUB_ENV
+          echo "VCENTER_SERVER=${{ secrets.VCENTER_SERVER }}" >> $GITHUB_ENV
+
+      - name: Run Packer Build
+        uses: ./.github/actions # Reference custom action (Docker container)
+        with:
+          vcenter_user: ${{ secrets.VCENTER_USER }}
+          vcenter_password: ${{ secrets.VCENTER_PASSWORD }}
+          vcenter_server: ${{ secrets.VCENTER_SERVER }}
+
diff --git a/.github/workflows/terra_ci.yml b/.github/workflows/terra_ci.yml
new file mode 100644
index 0000000..db6787e
--- /dev/null
+++ b/.github/workflows/terra_ci.yml
@@ -0,0 +1,53 @@
+name: Terraform CI/CD Pipeline
+
+on:
+  push:
+    branches:
+      - main
+  workflow_run:
+    workflows: [Build Infrastructure]
+    types: [completed]
+
+jobs:
+
+  terraform:
+    #runs-on: ubuntu-latest
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
+    runs-on: self-hosted
+
+    container:
+      image: ghcr.io/catthehacker/ubuntu:act-latest
+
+    # Run every `run` step from the terraform/ directory, where the *.tf files live
+    defaults:
+      run:
+        working-directory: terraform
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set environment variables from GitHub Secrets
+        run: |
+          echo "VCENTER_USER=${{ secrets.VCENTER_USER }}" >> $GITHUB_ENV
+          echo "VCENTER_PASSWORD=${{ secrets.VCENTER_PASSWORD }}" >> $GITHUB_ENV
+          echo "VCENTER_SERVER=${{ secrets.VCENTER_SERVER }}" >> $GITHUB_ENV
+
+      - name: Set up Terraform
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 'latest'
+
+      - run: pwd
+
+      - name: Initialize Terraform
+        run: |
+          terraform init
+
+      - name: Terraform Plan
+        run: |
+          terraform plan
+
+      - name: Apply Terraform Configuration
+        run: |
+          terraform apply -auto-approve
diff --git a/.gitignore b/.gitignore
index 2faf43d..f7a4bc7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
 # Local .terraform directories
 **/.terraform/*
-
+.env
 # .tfstate files
 *.tfstate
 *.tfstate.*
@@ -15,6 +15,7 @@ crash.*.log
 # to change depending on the environment.
 *.tfvars
 *.tfvars.json
+.secrets
 
 # Ignore override files as they are usually used to override resources locally and so
 # are not checked in
diff --git a/ansible/gather_facts.yml b/ansible/gather_facts.yml
new file mode 100644
index 0000000..e61245f
--- /dev/null
+++ b/ansible/gather_facts.yml
@@ -0,0 +1,4 @@
+- hosts: all
+  tasks:
+    - debug:
+        var: ansible_facts
\ No newline at end of file
diff --git a/ansible/inventory/inventory.ini b/ansible/inventory/inventory.ini
new file mode 100644
index 0000000..ed7428c
--- /dev/null
+++ b/ansible/inventory/inventory.ini
@@ -0,0 +1,2 @@
+[centos9]
+centos9.local # The VM hostname or IP address
diff --git a/ansible/optimize_cdn.yml b/ansible/optimize_cdn.yml
new file mode 100644
index 0000000..cb43cc7
--- /dev/null
+++ b/ansible/optimize_cdn.yml
@@ -0,0 +1,391 @@
+# ---
+# - hosts: localhost
+#   become: yes
+#   tasks:
+#     - name: Ensure IP forwarding is enabled
+#       ansible.posix.sysctl:
+#         name: net.ipv4.ip_forward
+#         value: "1"
+#         state: present
+#         sysctl_set: yes
+#         reload: yes
+---
+# cdn_cache_optimization.yml
+# Ansible playbook for optimizing servers as CDN caching nodes with SSD detection and configuration
+
+- name: CDN Cache Server Optimization with SSD Detection and Configuration
+  hosts: cdn_cache_servers
+  become: yes
+  vars:
+    cache_mount_base: /var/cache/cdn
+    fs_type: xfs
+    cache_user: cdn-cache
+    cache_group: cdn-cache
+    swap_size_mb: 4096
+    nginx_worker_processes: auto
+    nginx_worker_connections: 65535
+    tcp_max_syn_backlog: 65536
+    somaxconn: 65536
+    tcp_max_tw_buckets: 1440000
+    tcp_fin_timeout: 15
+    tcp_keepalive_time: 300
+    tcp_keepalive_probes: 5
+    tcp_keepalive_intvl: 15
+    net_core_netdev_max_backlog: 300000
+    net_core_somaxconn: 65535
+
+  tasks:
+    # System updates and essential packages
+    - name: Update apt cache
+      apt:
+        update_cache: yes
+        cache_valid_time: 3600
+      when: ansible_os_family == "Debian"
+
+    - name: Install essential packages
+      package:
+        name:
+          - htop
+          - iotop
+          - sysstat
+          - nload
+          - net-tools
+          - iftop
+          - bmon
+          - tcpdump
+          - dstat
+          - xfsprogs
+          - nvme-cli
+          - nginx
+          - varnish
+          - prometheus-node-exporter
+          - parted
+          # NOTE: lsblk is not a package; the lsblk binary ships with util-linux below
+          - util-linux
+        state: present
+
+    # Kernel parameter tuning
+    - name: Set kernel parameters for CDN cache performance
+      sysctl:
+        name: "{{ item.key }}"
+        value: "{{ item.value }}"
+        state: present
+        reload: yes
+      loop:
+        # Networking tuning
+        - { key: "net.core.somaxconn", value: "{{ somaxconn }}" }
+        - { key: "net.core.netdev_max_backlog", value: "{{ net_core_netdev_max_backlog }}" }
+        - { key: "net.ipv4.tcp_max_syn_backlog", value: "{{ tcp_max_syn_backlog }}" }
+        - { key: "net.ipv4.tcp_fin_timeout", value: "{{ tcp_fin_timeout }}" }
+        - { key: "net.ipv4.tcp_keepalive_time", value: "{{ tcp_keepalive_time }}" }
+        - { key: "net.ipv4.tcp_keepalive_probes", value: "{{ tcp_keepalive_probes }}" }
+        - { key: "net.ipv4.tcp_keepalive_intvl", value: "{{ tcp_keepalive_intvl }}" }
+        - { key: "net.ipv4.tcp_max_tw_buckets", value: "{{ tcp_max_tw_buckets }}" }
+        - { key: "net.ipv4.ip_local_port_range", value: "1024 65535" }
+        - { key: "net.ipv4.tcp_slow_start_after_idle", value: "0" }
+        - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" }
+        - { key: "net.ipv4.tcp_wmem", value: "4096 65536 16777216" }
+        # VM Tuning
+        - { key: "vm.swappiness", value: "10" }
+        - { key: "vm.dirty_ratio", value: "5" }
+        - { key: "vm.dirty_background_ratio", value: "2" }
+        - { key: "vm.vfs_cache_pressure", value: "50" }
+        - { key: "fs.file-max", value: "2097152" }
+        - { key: "fs.nr_open", value: "2097152" }
+        - { key: "fs.inotify.max_user_watches", value: "524288" }
+
+    # Identify all SSDs on the server
+    - name: Initialize SSD devices list
+      set_fact:
+        ssd_devices: []
+
+    - name: Identify all SSD devices using Ansible facts
+      set_fact:
+        ssd_devices: "{{ ssd_devices + ['/dev/' + item] }}"
+      loop: "{{ ansible_devices.keys() | list }}"
+      when:
+        - ansible_devices[item].rotational is defined
+        - ansible_devices[item].rotational == "0" # fact is the string "0"/"1"; a bare `not` never matches
+        - item | regex_search('^(s|v|xv)d[a-z]|nvme[0-9]n[0-9]')
+
+    - name: Display detected SSD devices
+      debug:
+        msg: "Detected SSD devices: {{ ssd_devices }}"
+
+    - name: Fail if no SSDs detected
+      fail:
+        msg: "No SSD devices detected on this server. CDN caching requires SSD storage."
+      when: ssd_devices | length == 0
+
+    # Create CDN cache user and group for permissions
+    - name: Create CDN cache user and group
+      user:
+        name: "{{ cache_user }}"
+        group: "{{ cache_group }}"
+        system: yes
+        create_home: no
+        state: present
+
+    # Process each SSD device
+    - name: Process and configure each SSD device
+      block:
+        - name: Create mount directory for SSD
+          file:
+            path: "{{ cache_mount_base }}/ssd{{ idx }}"
+            state: directory
+            mode: '0755'
+          loop: "{{ range(0, ssd_devices | length) | list }}"
+          loop_control:
+            loop_var: idx
+
+        - name: Create partition on SSD
+          parted:
+            device: "{{ item }}"
+            number: 1
+            state: present
+            fs_type: "{{ fs_type }}"
+          loop: "{{ ssd_devices }}"
+          ignore_errors: yes # Some devices might already be partitioned
+
+        - name: Get partition name for each SSD
+          shell: lsblk -no NAME "{{ item }}" | grep -v "$(basename {{ item }})" | head -1 # basename strips the directory part, e.g. basename /dev/sda -> sda
+          register: ssd_partitions
+          loop: "{{ ssd_devices }}"
+          changed_when: false
+
+        - name: Create partition list
+          set_fact:
+            ssd_partitions_list: "{{ ssd_partitions.results | map(attribute='stdout') | map('regex_replace', '^', '/dev/') | list }}" # the regex_replace map prepends /dev/ to each partition name
+
+        - name: Format each SSD partition with XFS
+          filesystem:
+            fstype: "{{ fs_type }}"
+            dev: "{{ item }}"
+            opts: "-L CDN_CACHE_{{ idx }}"
+          loop: "{{ ssd_partitions_list }}"
+          loop_control:
+            index_var: idx
+          when: item | length > 0
+
+        - name: Mount each SSD with optimized parameters
+          mount:
+            path: "{{ cache_mount_base }}/ssd{{ idx }}"
+            src: "{{ item }}"
+            fstype: "{{ fs_type }}"
+            opts: "noatime,nodiratime,discard" # nobarrier was removed from XFS in kernel 4.19+; mounting with it fails
+            state: mounted
+          loop: "{{ ssd_partitions_list }}"
+          loop_control:
+            index_var: idx
+          when: item | length > 0
+
+        - name: Set permissions on each cache directory
+          file:
+            path: "{{ cache_mount_base }}/ssd{{ idx }}"
+            owner: "{{ cache_user }}"
+            group: "{{ cache_group }}"
+            mode: '0755'
+            state: directory
+          loop: "{{ range(0, ssd_devices | length) | list }}"
+          loop_control:
+            loop_var: idx
+          when: ssd_devices | length > 0
+
+    # Configure swapfile on first SSD for performance if needed
+    - name: Configure swapfile on first SSD
+      block:
+        - name: Create a swapfile on first SSD
+          command: dd if=/dev/zero of={{ cache_mount_base }}/ssd0/swapfile bs=1M count={{ swap_size_mb }}
+          args:
+            creates: "{{ cache_mount_base }}/ssd0/swapfile"
+
+        - name: Set swapfile permissions
+          file:
+            path: "{{ cache_mount_base }}/ssd0/swapfile"
+            owner: root
+            group: root
+            mode: '0600'
+
+        - name: Format swapfile
+          command: mkswap {{ cache_mount_base }}/ssd0/swapfile
+
+        - name: Enable swapfile
+          command: swapon {{ cache_mount_base }}/ssd0/swapfile
+          ignore_errors: yes
+
+        - name: Add swapfile to fstab
+          lineinfile:
+            path: /etc/fstab
+            line: "{{ cache_mount_base }}/ssd0/swapfile none swap sw 0 0"
+            state: present
+      when: ssd_devices | length > 0
+
+    # IO scheduler optimization for all SSDs
+    - name: Set IO scheduler for all SSDs
+      shell: echo none > /sys/block/{{ item | basename }}/queue/scheduler
+      loop: "{{ ssd_devices }}"
+      ignore_errors: yes
+      when: ssd_devices | length > 0
+
+    - name: Set read ahead for all SSDs
+      shell: echo 256 > /sys/block/{{ item | basename }}/queue/read_ahead_kb
+      loop: "{{ ssd_devices }}"
+      ignore_errors: yes
+      when: ssd_devices | length > 0
+
+    # Configure Nginx for CDN caching
+    - name: Create Nginx cache directories on each SSD
+      file:
+        path: "{{ cache_mount_base }}/ssd{{ idx }}/nginx_cache"
+        state: directory
+        owner: "{{ cache_user }}"
+        group: "{{ cache_group }}"
+        mode: '0755'
+      loop: "{{ range(0, ssd_devices | length) | list }}"
+      loop_control:
+        loop_var: idx
+      when: ssd_devices | length > 0
+      notify: restart nginx
+
+    - name: Configure Nginx for CDN caching
+      template:
+        src: templates/nginx.conf.j2
+        dest: /etc/nginx/nginx.conf
+        owner: root
+        group: root
+        mode: '0644'
+      vars:
+        ssd_cache_dirs: "{{ range(0, ssd_devices | length) | map('regex_replace', '^(.*)$', cache_mount_base + '/ssd\\1/nginx_cache') | list }}"
+      notify: restart nginx
+
+    - name: Configure Nginx default site
+      template:
+        src: templates/default.conf.j2
+        dest: /etc/nginx/conf.d/default.conf
+        owner: root
+        group: root
+        mode: '0644'
+      vars:
+        ssd_cache_dirs: "{{ range(0, ssd_devices | length) | map('regex_replace', '^(.*)$', cache_mount_base + '/ssd\\1/nginx_cache') | list }}"
+      notify: restart nginx
+
+    # Configure Varnish as additional caching layer
+    - name: Create Varnish storage directories on each SSD
+      file:
+        path: "{{ cache_mount_base }}/ssd{{ idx }}/varnish"
+        state: directory
+        owner: varnish
+        group: varnish
+        mode: '0755'
+      loop: "{{ range(0, ssd_devices | length) | list }}"
+      loop_control:
+        loop_var: idx
+      when: ssd_devices | length > 0
+      notify: restart varnish
+
+    - name: Configure Varnish Cache
+      template:
+        src: templates/varnish.vcl.j2
+        dest: /etc/varnish/default.vcl
+        owner: root
+        group: root
+        mode: '0644'
+      notify: restart varnish
+
+    - name: Configure Varnish service
+      template:
+        src: templates/varnish.service.j2
+        dest: /etc/systemd/system/varnish.service
+        owner: root
+        group: root
+        mode: '0644'
+      vars:
+        varnish_storage_args: "{{ range(0, ssd_devices | length) | map('regex_replace', '^(.*)$', '-s malloc,1G -s file,' + cache_mount_base + '/ssd\\1/varnish/cache:10G') | join(' ') }}"
+      notify: reload systemd
+
+    # System resource limits for cache performance
+    - name: Set system resource limits
+      pam_limits:
+        domain: '*'
+        limit_type: "{{ item.limit_type }}"
+        limit_item: "{{ item.limit_item }}"
+        value: "{{ item.value }}"
+      loop:
+        - { limit_type: '-', limit_item: 'nofile', value: '1048576' }
+        - { limit_type: '-', limit_item: 'nproc', value: '65535' }
+        - { limit_type: '-', limit_item: 'memlock', value: 'unlimited' }
+        - { limit_type: '-', limit_item: 'core', value: 'unlimited' }
+
+    # Setup monitoring
+    - name: Configure Prometheus Node Exporter
+      template:
+        src: templates/node_exporter.service.j2
+        dest: /etc/systemd/system/node_exporter.service
+        owner: root
+        group: root
+        mode: '0644'
+      notify: reload systemd
+
+    # Enable and start services
+    - name: Enable and start required services
+      systemd:
+        name: "{{ item }}"
+        state: started
+        enabled: yes
+      loop:
+        - nginx
+        - varnish
+        - prometheus-node-exporter
+
+  handlers:
+    - name: restart nginx
+      systemd:
+        name: nginx
+        state: restarted
+
+    - name: restart varnish
+      systemd:
+        name: varnish
+        state: restarted
+
+    - name: reload systemd
+      systemd:
+        daemon_reload: yes
+
+# Here's an example nginx.conf.j2 template with SSD cache support
+
+# templates/nginx.conf.j2
+# worker_processes {{ nginx_worker_processes }};
+# worker_rlimit_nofile 1048576;
+# pid /run/nginx.pid;
+#
+# events {
+#     worker_connections {{ nginx_worker_connections }};
+#     multi_accept on;
+#     use epoll;
+# }
+#
+# http {
+#     # Basic settings
+#     sendfile on;
+#     tcp_nopush on;
+#     tcp_nodelay on;
+#     keepalive_timeout 65;
+#     types_hash_max_size 2048;
+#     server_tokens off;
+#
+#     # Cache setup for SSDs
+#     {% for cache_dir in ssd_cache_dirs %}
+#     proxy_cache_path {{ cache_dir }}
+#                      levels=1:2
+#                      keys_zone=cache_{{ loop.index }}:100m
+#                      inactive=24h
+#                      max_size=10g;
+#     {% endfor %}
+#
+#     # Other nginx configs...
+# }
+
+# Templates for other configuration files would also need to be updated to
+# work with multiple SSD devices. Each template should dynamically use the
+# detected SSDs to distribute cache data appropriately.
\ No newline at end of file
diff --git a/packer/centos9.json b/packer/centos9.json
new file mode 100644
index 0000000..b91ebbb
--- /dev/null
+++ b/packer/centos9.json
@@ -0,0 +1,79 @@
+{
+  "builders": [
+    {
+      "type": "vsphere-iso",
+      "vcenter_server": "{{ user `vcenter_server` }}",
+      "username": "{{ user `vcenter_user` }}",
+      "password": "{{ user `vcenter_password` }}",
+      "insecure_connection": "true",
+
+      "datacenter": "office",
+      "host": "192.168.10.20",
+      "datastore": "datastore1",
+
+      "network_adapters": [
+        {
+          "network": "vlan16",
+          "network_card": "vmxnet3"
+        }
+      ],
+
+      "folder": "Templates",
+      "vm_name": "centos9-template",
+      "convert_to_template": true,
+
+      "guest_os_type": "centos8_64Guest",
+
+      "CPUs": 4,
+      "RAM": 4096,
+      "disk_controller_type": "pvscsi",
+      "storage": [
+        {
+          "disk_size": "30720",
+          "disk_thin_provisioned": true,
+          "disk_controller_index": 0
+        }
+      ],
+
+      "iso_paths": [
+        "[esxi02-datastore2] ISO/CentOS-Stream-9-20240520.0-x86_64-boot.iso"
+      ],
+
+      "ssh_username": "packer",
+      "ssh_password": "packer",
+      "ssh_timeout": "30m",
+      "http_directory": ".",
+      "http_port_min": "8826",
+      "http_port_max": "8826",
+
+
+      "cd_files": [
+        "./scripts/centos9/kickstart.cfg"
+      ],
+
+      "boot_command": [
+        "",
+        "linux inst.text inst.ks=cdrom:/kickstart.cfg "
+      ]
+    }
+  ],
+  "provisioners": [
+    {
+      "type": "shell",
+      "execute_command": "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'",
+      "inline": [
+        "dnf update -y",
+        "dnf install -y epel-release",
+        "dnf install -y vim curl wget net-tools",
+        "useradd -m ansible && echo 'ansible:ansible' | chpasswd",
+        "echo 'ansible ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers"
+      ]
+    }
+  ],
+  "variables": {
+    "vcenter_user": "{{ env `VCENTER_USER` }}",
+    "vcenter_password": "{{ env `VCENTER_PASSWORD` }}",
+    "vcenter_server": "{{ env `VCENTER_SERVER` }}"
+  }
+}
\ No newline at end of file
diff --git a/packer/scripts/centos9/kickstart.cfg b/packer/scripts/centos9/kickstart.cfg
new file mode 100644
index 0000000..7ef93f7
--- /dev/null
+++ b/packer/scripts/centos9/kickstart.cfg
@@ -0,0 +1,37 @@
+#version=RHEL9
+url --url=http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
+repo --name=baseos --baseurl=http://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
+repo --name=appstream --baseurl=http://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/
+repo --name=crb --baseurl=http://mirror.stream.centos.org/9-stream/CRB/x86_64/os/
+#cdrom
+#text
+skipx
+firstboot --disable
+lang en_US
+keyboard us
+network --bootproto=dhcp --activate --onboot=on
+rootpw packer
+firewall --enabled
+# authconfig was removed from RHEL 9 kickstart; SHA-512 shadow passwords are the default
+services --enabled=sshd,chronyd --disabled=cloud-init,cloud-init-local,cloud-config,cloud-final
+selinux --enforcing
+timezone UTC --utc
+bootloader --location=mbr
+zerombr
+reboot
+clearpart --all --initlabel
+part /boot/efi --fstype="efi" --size=600 --fsoptions="umask=0077,shortname=winnt"
+part /boot --fstype=ext4 --size=1024
+part / --fstype=xfs --grow --size=27400
+eula --agreed
+
+%packages
+@^minimal-environment
+@standard
+%end
+
+%post
+useradd packer
+echo "packer:packer" | chpasswd
+echo "packer ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
+%end
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000..b0935a7
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,14 @@
+provider "vagrant" {}
+
+resource "vagrant_vm" "centos9" {
+  box       = "centos/9" # Vagrant box for CentOS 9
+  memory    = 4096       # 4GB RAM
+  cpus      = 4          # 4 CPUs
+  disk_size = 30720      # Disk size in MB (30GB)
+  disk_type = "thin"     # Thin provisioning
+
+  # Terraform has no built-in "ansible" provisioner; run the playbook via local-exec
+  provisioner "local-exec" {
+    command = "ansible-playbook -i ansible/inventory/hosts ansible/playbook.yml"
+  }
+}
diff --git a/terraform/output.tf b/terraform/output.tf
new file mode 100644
index 0000000..9cd12ce
--- /dev/null
+++ b/terraform/output.tf
@@ -0,0 +1,3 @@
+output "vm_ip" {
+  value = vagrant_vm.centos9.ipv4_address
+}
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000..101c292
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,15 @@
+variable "vcenter_user" {
+  description = "vCenter user"
+  type        = string
+}
+
+variable "vcenter_password" {
+  description = "vCenter password"
+  type        = string
+  sensitive   = true
+}
+
+variable "vcenter_server" {
+  description = "vCenter server address"
+  type        = string
+}
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
new file mode 100644
index 0000000..b0935a7
--- /dev/null
+++ b/vagrant/Vagrantfile
@@ -0,0 +1,14 @@
+# Vagrant equivalent of the Terraform definition: CentOS 9, 4 CPUs, 4GB RAM
+Vagrant.configure("2") do |config|
+  config.vm.box = "centos/9"
+
+  config.vm.provider "virtualbox" do |vb|
+    vb.memory = 4096
+    vb.cpus   = 4
+  end
+
+  config.vm.provision "ansible" do |ansible|
+    ansible.playbook       = "ansible/playbook.yml"
+    ansible.inventory_path = "ansible/inventory/hosts"
+  end
+end