enable_ansible_debug: false # set value to true for verbose output from ansible
nsx_t_installer: nsx-t-gen # Set to name of installer or env or any value so resources can be identified
nsx_t_version: 2.1
# vCenter details to deploy the Mgmt OVAs (Mgr, Edge, Controller)
vcenter_host: vcsa-01.corp.local.io # EDIT - this is for deployment of the ovas for the mgmt plane
vcenter_usr: administrator@vsphere.local # EDIT - this is for deployment of the ovas for the mgmt plane
vcenter_pwd: 39adladfasdfl! # EDIT - this is for deployment of the ovas for the mgmt plane
vcenter_datacenter: Datacenter # EDIT
vcenter_datastore: Datastore # EDIT
vcenter_cluster: Cluster # EDIT
vcenter_manager: vcenter-1 # EDIT
vcenter_rp: # EDIT - resource pool where mgmt plane vms would get deployed
# OVA general network settings
ntpservers: 10.13.12.2 # EDIT
mgmt_portgroup: 'VM Network' # EDIT
dnsserver: 10.13.12.2 # EDIT
dnsdomain: corp.local.io # EDIT
defaultgateway: 10.13.12.1 # EDIT
netmask: 255.255.255.0 # EDIT
# Host a Webserver to serve the ova images and ovftool
# Please edit the endpoint and the file names
nsx_image_webserver: http://10.85.24.5:9080
# Download NSX-T 2.1 bits from
# https://my.vmware.com/group/vmware/details?downloadGroup=NSX-T-210&productId=673
nsx_mgr_ova: nsx-unified-appliance-2.1.0.0.0.7395503.ova
nsx_controller_ova: nsx-controller-2.1.0.0.0.7395493.ova
nsx_edge_ova: nsx-edge-2.1.0.0.0.7395502.ova
# Ensure ovftool is minimally v4.2
# https://my.vmware.com/group/vmware/details?productId=614&downloadGroup=OVFTOOL420#
ovftool_image: VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle
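# For reference (an assumption about the layout, not pipeline-verified): the jobs are expected to
# fetch each artifact by appending the file name to the endpoint, e.g.:
# http://10.85.24.5:9080/nsx-unified-appliance-2.1.0.0.0.7395503.ova
# http://10.85.24.5:9080/VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle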
# The ESXi hosts can be added as transport nodes in two ways:
# a) specify the ESXi hosts individually - this is checked first
# b) or use compute_manager_configs to add the hosts under a specific vCenter as transport nodes
# Specify the password of the ESXi hosts that should be used for NSX-T
esxi_hosts_root_pwd: rootPasswd # EDIT - Root password for the ESXi hosts
# Specify the FQDN and IP (and password, if it differs) for each ESXi host that should be used for NSX-T
# List of the individual hosts to be used
esxi_hosts_config: |
esxi_hosts:
- name: esxi-host1.corp.local.io
ip: 10.13.12.10
root_pwd: rootPasswd
# - name: esxi-host2.corp.local.io
# ip: 10.13.12.11
# root_pwd: rootPasswd
# Alternative: use a separate compute manager to specify the ESXi hosts
# If filled in, change the esxi_hosts_config section above to be empty
# Sample empty config:
# esxi_hosts_config: # Provide space after colon
# Handles multiple vCenters and compute clusters for the ESXi hosts
# Used in place of esxi_hosts_config (which must be emptied, as esxi_hosts_config is checked first)
compute_manager_configs: |
compute_managers:
- vcenter_name: vcenter-01
vcenter_host: vcenter-01.corp.local
vcenter_usr: administrator@vsphere.local
vcenter_pwd: VMWare1!
# Multiple clusters under same vcenter can be specified
clusters:
- vcenter_cluster: Cluster1
overlay_profile_mtu: 1600 # Min 1600
overlay_profile_vlan: EDIT_ME # VLAN ID for the TEP/Overlay network
# Specify an unused vmnic on esxi host to be used for nsx-t
# can be multiple vmnics separated by comma
uplink_vmnics: vmnic1 # vmnic1,vmnic2...
# - vcenter_cluster: Cluster2
# overlay_profile_mtu: 1600 # Min 1600
# overlay_profile_vlan: EDIT_ME # VLAN ID for the TEP/Overlay network
# # Specify an unused vmnic on esxi host to be used for nsx-t
# # can be multiple vmnics separated by comma
# uplink_vmnics: vmnic1 # vmnic1,vmnic2...
# Multiple vcenters can be specified
# - vcenter_name: vcenter-02
# vcenter_host: vcenter-02.corp.local
# vcenter_usr: administrator@vsphere.local
# vcenter_pwd: VMWare2!
# clusters:
# - vcenter_cluster: Cluster4
# overlay_profile_mtu: 1600 # Min 1600
# overlay_profile_vlan: EDIT_ME # VLAN ID for the TEP/Overlay network
# uplink_vmnics: vmnic1 # vmnic1,vmnic2...
# Using a separate vCenter to add Edges
# Edge Specific vCenter settings
# If the Edges are going to use the same vCenter as the Mgr, then don't set any of the following properties
edge_vcenter_host: # EDIT - If filled, then Edges would use this separate vcenter
edge_vcenter_usr: # EDIT - Use Edge specific vCenter
edge_vcenter_pwd: # EDIT - Use Edge specific vCenter
edge_vcenter_datacenter: # EDIT - Use Edge specific vCenter
edge_vcenter_datastore: # EDIT - Use Edge specific vCenter
edge_vcenter_cluster: # EDIT - Use Edge specific vCenter
edge_vcenter_rp: # EDIT - Use Edge specific vCenter
edge_ntpservers: # EDIT - Use Edge specific vCenter
edge_mgmt_portgroup: # EDIT - Use Edge specific vCenter
edge_dnsserver: # EDIT - Use Edge specific vCenter
edge_dnsdomain: # EDIT - Use Edge specific vCenter
edge_defaultgateway: # EDIT - Use Edge specific vCenter
edge_netmask: # EDIT - Use Edge specific vCenter
# Edit the following parameters
nsx_t_manager_host_name: nsx-t-mgr.corp.local.io # Set as FQDN, will be used also as certificate common name
nsx_t_manager_vm_name: 'NSX-T Mgr' # Can have spaces
nsx_t_manager_ip: 10.13.12.50
nsx_t_manager_admin_user: admin
nsx_t_manager_admin_pwd: VMware1! # Min 8 chars: upper, lower, number, special character
nsx_t_manager_root_pwd: VMware1! # Min 8 chars: upper, lower, number, special character
# The following properties deploy the controllers to the same cluster/resource pool
nsx_t_controller_host_prefix: nsx-t-ctl # Without spaces; generated controller hosts would be nsx-t-ctl-1.corp.local.io,...
nsx_t_controller_vm_name_prefix: 'NSX-T Controller' # Generated controller VM name would be "NSX-T Controller-1"
nsx_t_controller_ips: 10.13.12.51,10.13.12.52,10.13.12.53 # Should be 1 or 3 ips to maintain quorum for Controller Cluster
nsx_t_controller_root_pwd: VMware1! # Min 8 chars: upper, lower, number, special character
nsx_t_controller_cluster_pwd: VMware1! # Min 8 chars: upper, lower, number, special character
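# Since 1 controller also satisfies the 1-or-3 quorum rule above, a minimal non-HA lab
# variant (illustrative only) would be:
# nsx_t_controller_ips: 10.13.12.51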
# Controllers would be deployed to the same Mgmt Cluster
# as specified by the vcenter_host & vcenter_cluster
# Using a separate cluster per controller is NOT RECOMMENDED
nsx_t_controllers_config:
# CAUTION: Using different clusters for the controllers requires complex anti-affinity rules setup
# If you really want to spread the controllers across different clusters,
# remove the previous line (the empty nsx_t_controllers_config) and
# uncomment the following nsx_t_controllers_config section, editing it to specify the cluster and resource pool per controller
# nsx_t_controllers_config: |
# controllers:
# host_prefix: nsx-t-ctl # EDIT_ME
# vm_name_prefix: NSX-T-Controller # EDIT_ME
# root_pwd: VMware1! # EDIT_ME
# cluster_pwd: VMware1! # EDIT_ME
# # Should be 1 or 3 members
# members:
# # EDIT ME to specify the correct cluster,
# # resource pool, datastore per controller member
# - ip: 10.13.12.51 # EDIT_ME
# cluster: Cluster1 # EDIT_ME
# resource_pool: nsx_rp1 # EDIT_ME
# datastore: store1 # EDIT_ME
# - ip: 10.13.12.52 # EDIT_ME
# cluster: Cluster2 # EDIT_ME
# resource_pool: nsx_rp2 # EDIT_ME
# datastore: store2 # EDIT_ME
# - ip: 10.13.12.53 # EDIT_ME
# cluster: Cluster3 # EDIT_ME
# resource_pool: nsx_rp3 # EDIT_ME
# datastore: store3 # EDIT_ME
nsx_t_edge_host_prefix: nsx-t-edge # Without spaces, generated edge would be nsx-t-edge-1.corp.local.io,...
nsx_t_edge_vm_name_prefix: 'NSX-T Edge' # Generated edge host name would be "NSX-T Edge-1"
nsx_t_edge_ips: 10.13.12.54,10.13.12.55 # comma separated ips, requires min 2 for HA
nsx_t_edge_root_pwd: VMware1!
nsx_t_edge_portgroup_ext: EDIT_ME_vlan-name-for-uplink # For external routing
nsx_t_edge_portgroup_transport: EDIT_ME_vlan-name-for-TEP # For TEP/overlay
# If the OVA deployment succeeded but controller membership failed or the edges didn't join for any reason,
# enable a rerun of the controller configuration
rerun_configure_controllers: false # set to true to rerun the controller configuration
# (as part of the base OVA install job) even though the OVA deployment succeeded
# Edge network interfaces
# Network1 and Network4 are for mgmt and not used for uplink
# Network2 is for external uplink
# Network3 is for overlay
# Change only if necessary
nsx_t_edge_overlay_interface: fp-eth1 # Wired to Network3
nsx_t_edge_uplink_interface: fp-eth0 # Wired to Network2
# Tunnel endpoint (TEP) network IP pool - adjust pool_end based on the number of members drawing from the TEP pool
nsx_t_tep_pool_name: tep-ip-pool
nsx_t_tep_pool_cidr: 192.168.213.0/24
nsx_t_tep_pool_gateway: 192.168.213.1
nsx_t_tep_pool_start: 192.168.213.10
nsx_t_tep_pool_end: 192.168.213.200
#nsx_t_tep_pool_nameserver: 192.168.213.2 # Not required
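# Illustrative sizing check for the pool above: .10 through .200 yields 191 addresses.
# Each transport node (ESXi host or edge) typically consumes one TEP IP per overlay uplink vmnic,
# so, for example, 20 hosts plus 2 edges with a single uplink vmnic each would use only 22 addresses.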
# Memory reservation is turned ON by default with the NSX-T OVAs.
# This means deploying an edge or a mgr reserves its full memory,
# which can lead to memory constraints.
# If nsx_t_keep_reservation is true, reservation stays ON - recommended for production setups.
# If nsx_t_keep_reservation is false, reservation is turned OFF - recommended for POCs and smaller setups.
nsx_t_keep_reservation: true # for Prod setup
# Default Sizing of NSX-T VMs
# Manager Sizing
# small : 2 vCPU, 8GB RAM
# medium : 4 vCPU, 16GB RAM - default with OVA
# large : 8 vCPU, 32GB RAM
# Controller Sizing
# default: 4 vCPU, 16GB RAM
# Edge Instance Sizing
# small : 2 vCPU, 4GB RAM - Too small - don't use
# medium : 4 vCPU, 8GB RAM - default with OVA
# large : 8 vCPU, 16GB RAM
# Controller size is fixed at 4 vCPU and 16 GB RAM
# Change the default deployment size for the edge and mgr below
# The size of the edge determines the size and number of loadbalancers it can host,
# so choose the edge size based on requirements
nsx_t_mgr_deploy_size: small # Recommended for real barebones demo, smallest setup
#nsx_t_edge_deploy_size: medium # Recommended for POCs, smaller setup (# of lbrs very limited)
nsx_t_edge_deploy_size: large # Recommended when 4 small lbrs are required
# Finer control over memory and vCPU
# Not exposed; support removed
# nsx_t_sizing_spec: |
# nsx_t_sizing:
# mgr:
# cpu: 2
# memory: 8192 # in MB, needs to be multiples of 1024
# controller:
# # Controller would be resized from 4vCPU/16GB to 2vCPU/8GB
# cpu: 2
# memory: 8192 # in MB, needs to be multiples of 1024
# edge:
# cpu: 2
# memory: 8192 # in MB, needs to be multiples of 1024
nsx_t_overlay_hostswitch: hostswitch2
nsx_t_vlan_hostswitch: hostswitch1
# For the Edge external uplink
# Check with the network admin whether it's tagged or untagged
nsx_t_transport_vlan: 0
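# For example (hypothetical VLAN ID): if the uplink portgroup expects tagged traffic on VLAN 120, set
# nsx_t_transport_vlan: 120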
nsx_t_vlan_transport_zone: vlan-tz
nsx_t_overlay_transport_zone: overlay-tz
nsx_t_pas_ncp_cluster_tag: pas1
nsx_t_edge_cluster: 'Edge Cluster'
# For outbound uplink connection used by Edge
nsx_t_single_uplink_profile_name: "single-uplink-profile"
nsx_t_single_uplink_profile_mtu: 1600 # Min 1600
nsx_t_single_uplink_profile_vlan: 0 # Default
# For internal overlay connection used by Esxi hosts
nsx_t_overlay_profile_name: "host-overlay-profile"
nsx_t_overlay_profile_mtu: 1600 # Min 1600
nsx_t_overlay_profile_vlan: EDIT_ME # VLAN ID for the TEP/Overlay network
# Specify an unused vmnic on esxi host to be used for nsx-t
# can be multiple vmnics separated by comma
nsx_t_esxi_vmnics: vmnic1 # vmnic1,vmnic2...
# Configs for the T0 Router (only one per run), T1 Routers, logical switches and tags...
# Make sure the ncp/cluster tag matches the one defined at the top level.
# Expects at least 2 edge instances (installed above) to be used for HA
nsx_t_t0router_spec: |
t0_router:
name: DefaultT0Router
ha_mode: 'ACTIVE_STANDBY'
# Specify the edges to be used for hosting the T0Router instance
edge_indexes:
# Index starts from 1 -> denoting nsx-t-edge-01
primary: 1 # Index for primary edge to be used
secondary: 2 # Index for secondary edge to be used
vip: 10.13.12.103/29 # T0 router VIP - make sure this range does not overlap with the external VIP ranges
ip1: 10.13.12.101/29 # T0 router uplink port - make sure this range does not overlap with the external VIP ranges
ip2: 10.13.12.102/29 # T0 router uplink port - make sure this range does not overlap with the external VIP ranges
vlan_uplink: 0
static_route:
next_hop: 10.13.12.1
network: 0.0.0.0/0
admin_distance: 1
tags:
ncp/cluster: 'pas1' # Should match the top level ncp/cluster tag value
ncp/shared_resource: 'true' # required for PKS
testtag: 'testpas1'
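# Worked example for the /29 addressing above: ip1 10.13.12.101, ip2 10.13.12.102 and the
# vip 10.13.12.103 all fall within the 8-address block 10.13.12.96/29 (.96-.103), so the two
# uplink ports and the VIP share one subnet while the external pools and NAT IPs stay outside it.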
# T1 Logical Routers with associated logical switches
# Add additional T1 routers and switches, or comment out unnecessary ones, as needed
# Can have 3 different setups:
# 1: One shared mgmt T1 Router and infra logical switch for both PKS & PAS
# 2: One mgmt T1 Router and infra logical switch for either PKS or PAS;
# comment out the T1 router that is not required
# 3: Separate mgmt T1 Router and infra logical switch for each of PKS and PAS;
# add an additional T1Router-Mgmt2 with its own infra logical switch as needed
# Name the routers, logical switches and cidrs differently to avoid conflicts
nsx_t_t1router_logical_switches_spec: |
t1_routers:
# Add additional T1 Routers or collapse switches into same T1 Router as needed
# Comment out the following T1 Routers if there is no PAS
- name: T1-Router-PAS-Infra
# No need for stateful routing
switches:
- name: PAS-Infra
logical_switch_gw: 192.168.1.1 # Last octet should be 1 rather than 0
subnet_mask: 24
- name: T1-Router-PAS-ERT
# This T1 would run ERT, where the GoRouter requires an LBR in front, which requires stateful routing;
# so pin it to the edges
pinned_to_edges: true # Pin to the edges
switches:
- name: PAS-ERT
logical_switch_gw: 192.168.2.1 # Last octet should be 1 rather than 0
subnet_mask: 24
- name: T1-Router-PAS-Services
# No need for stateful routing
switches:
- name: PAS-Services
logical_switch_gw: 192.168.3.1 # Last octet should be 1 rather than 0
subnet_mask: 24
# - name: T1-Router-PAS-DynamicServices
# # No need for stateful routing
# switches:
# - name: PAS-DynamicServices
# logical_switch_gw: 192.168.4.1 # Last octet should be 1 rather than 0
# subnet_mask: 24
# # Comment out the following T1 Routers if there is no PKS
# - name: T1-Router-PKS-Infra
# # No need for stateful routing
# switches:
# - name: PKS-Infra
# logical_switch_gw: 172.23.1.1 # Last octet should be 1 rather than 0
# subnet_mask: 24
#
# - name: T1Router-PKS-Services
# # No need for stateful routing
# switches:
# - name: PKS-Services
# logical_switch_gw: 172.23.2.1 # Last octet should be 1 rather than 0
# subnet_mask: 24
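# Each logical_switch_gw/subnet_mask pair above implies the switch's network: e.g. gateway 192.168.1.1
# with mask 24 gives PAS-Infra the network 192.168.1.0/24, which is the same cidr the SNAT rules in
# nsx_t_nat_rules_spec further below use as their source_network.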
# Make sure the ncp/cluster tag matches the one defined on the T0 Router
# Additionally, the ncp/ha tag should be set for the HA Spoof Guard profile
nsx_t_ha_switching_profile_spec: |
ha_switching_profiles:
- name: HASwitchingProfile
tags:
ncp/cluster: 'pas1' # Should match the top level ncp/cluster tag value
ncp/ha: 'true' # Required for HA
testtag: 'testpas1'
# Make sure the ncp/cluster tag matches the one defined on the T0 Router
# Add additional container ip blocks as needed
nsx_t_container_ip_block_spec: |
container_ip_blocks:
- name: container-ip-block
cidr: 192.168.128.0/17
# Specify tags with PAS 2.0 and NSX Tile 2.1.0
# Tags not needed for PAS 2.1 and NSX Tile 2.1.3
tags:
ncp/cluster: 'pas1' # Should match the top level ncp/cluster tag value
testtag: 'testpas1'
# - name: node-container-ip-block-pks-without-tag
# cidr: 172.24.0.0/14
# # No tags for this block
# - name: pod-container-ip-block-pks-with-tag
# cidr: 172.28.0.0/14
# tags:
# ncp/shared_resource: 'true'
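# Rough capacity math for the block above: 192.168.128.0/17 spans 32768 addresses; assuming NCP
# carves one /24 subnet per org/namespace (its typical default), that allows up to 128 container subnets.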
# Make sure the ncp/cluster tag matches the one defined on the T0 Router for PAS
# Make sure the ncp/shared_resource tag is set to true for PKS
# Additionally, the ncp/external tag should be set for an external-facing IP pool
# Add additional external IP pools as needed
# Change the cidr, gateway, nameserver, dns_domain as needed
# The cidr, gateway, and start/end IPs should be reachable via static or BGP routing through the T0 router
# Leave some IPs reserved for Ops Mgr, PKS controller, LBR VIPs for GoRouter, SSH Proxy etc.
# Refer to `docs/static-routing-setup.md`
nsx_t_external_ip_pool_spec: |
external_ip_pools:
- name: snat-vip-pool-for-pas
cidr: 10.100.0.0/24 # Should be a /24 or other valid cidr, matching the externally exposed uplink
gateway: 10.100.0.1
start: 10.100.0.31 # Should not include the gateway or overlap with the T0 router uplink IPs; reserve some for Ops Mgr, LB VIPs for GoRouter, SSH Proxy
end: 10.100.0.200 # Should not include the gateway or overlap with the T0 router uplink IPs
# Specify tags with PAS 2.0 and NSX Tile 2.1.0
# Tags not needed for PAS 2.1 and NSX Tile 2.1.3
tags:
ncp/cluster: 'pas1' # Should match the top level ncp/cluster tag value for PAS
ncp/external: 'true' # Required for external facing ips
testtag: 'testpas1'
- name: snat-vip-pool-with-no-tags
cidr: 10.200.0.0/24 # Should be a /24 or other valid cidr, matching the externally exposed uplink
gateway: 10.200.0.1
start: 10.200.0.31 # Should not include the gateway or overlap with the T0 router uplink IPs
end: 10.200.0.200 # Should not include the gateway
- name: snat-vip-pool-for-pks
cidr: 10.201.0.0/24 # Should be a /24 or other valid cidr, matching the externally exposed uplink
gateway: 10.201.0.1
start: 10.201.0.31 # Should not include the gateway or overlap with the T0 router uplink IPs; reserve some for the PKS Controller, Harbor
end: 10.201.0.200 # Should not include the gateway
tags:
ncp/external: 'true' # Required for external facing ips
ncp/shared_resource: 'true' # Required for PKS
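# Illustrative reservation layout for the first pool above: with the gateway at 10.100.0.1 and the
# allocatable range starting at .31, addresses 10.100.0.2-10.100.0.30 remain free for manually
# assigned IPs such as the Ops Mgr DNAT address and the GoRouter/SSH Proxy LBR VIPs.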
# Specify NAT rules
# Provide a matching dnat and snat rule pair for specific VMs that need to be exposed (like Ops Mgr), using their IPs for destination_network and translated_network
# Provide snat rules for outbound traffic from either a container network or a VM by specifying the source_network (cidr) and the translated network IP
# Ingress into Ops Mgr: external IP of Ops Mgr -> DNAT -> translated into the internal IP of Ops Mgr
# Egress from Ops Mgr: internal IP of Ops Mgr -> SNAT -> translated into the external IP of Ops Mgr
# Ingress into the PKS API Controller: external IP of the controller -> DNAT -> translated into the internal IP of the controller
# Egress from the PKS API Controller: internal IP of the controller -> SNAT -> translated into the external IP of the controller
# Egress from SomeNetwork: cidr of the network -> SNAT -> translated into some external IP
nsx_t_nat_rules_spec: |
nat_rules:
# Sample entry for allowing inbound to PAS Ops manager - ingress
- t0_router: DefaultT0Router
nat_type: dnat
destination_network: 10.13.12.2 # External IP address for PAS opsmanager
translated_network: 192.168.1.5 # Internal IP of PAS Ops manager
rule_priority: 1024 # Higher priority
# Sample entry for allowing outbound from PAS Ops Mgr to external - egress
- t0_router: DefaultT0Router
nat_type: snat
source_network: 192.168.1.5 # Internal IP of PAS opsmanager
translated_network: 10.13.12.2 # External IP address for PAS opsmanager
rule_priority: 1024 # Higher priority
# Sample entry for PAS Infra network SNAT - egress
- t0_router: DefaultT0Router
nat_type: snat
source_network: 192.168.1.0/24 # PAS Infra network cidr
translated_network: 10.13.12.3 # SNAT External Address for PAS networks
rule_priority: 8000 # Lower priority
#destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
# Sample entry for PAS ERT network SNAT - egress
- t0_router: DefaultT0Router
nat_type: snat
source_network: 192.168.2.0/24 # PAS ERT network cidr
translated_network: 10.13.12.3 # SNAT External Address for PAS networks
rule_priority: 8000 # Lower priority
#destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
# Sample entry for PAS Services network SNAT - egress
- t0_router: DefaultT0Router
nat_type: snat
source_network: 192.168.3.0/24 # PAS Services network cidr
translated_network: 10.13.12.3 # SNAT External Address for PAS networks
rule_priority: 8001 # Lower priority
#destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
# Sample entry for PAS Dynamic Services network SNAT - egress
- t0_router: DefaultT0Router
nat_type: snat
source_network: 192.168.4.0/24 # PAS Dynamic Services network cidr
translated_network: 10.13.12.3 # SNAT External Address for PAS networks
rule_priority: 8001 # Lower priority
#destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
# # Sample entry for PKS PKS-Infra network - egress
# - t0_router: DefaultT0Router
# nat_type: snat
# source_network: 172.23.1.0/24 # PKS Infra network cidr
# translated_network: 10.13.12.6 # SNAT External Address for PKS networks
# rule_priority: 8001 # Lower priority
# #destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
#
# # Sample entry for PKS PKS-Clusters network - egress
# - t0_router: DefaultT0Router
# nat_type: snat
# source_network: 172.23.2.0/24 # PKS Services network cidr
# translated_network: 10.13.12.6 # SNAT External Address for PKS networks
# rule_priority: 8001 # Lower priority
# #destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
#
# # Sample entry for allowing inbound to PKS Ops manager
# - t0_router: DefaultT0Router
# nat_type: dnat
# destination_network: 10.13.12.5 # External IP address for PKS opsmanager
# translated_network: 172.23.1.5 # Internal IP of PKS Ops manager
# rule_priority: 1024 # Higher priority
# # Sample entry for allowing outbound from PKS Ops Mgr to external
# - t0_router: DefaultT0Router
# nat_type: snat
# source_network: 172.23.1.5 # Internal IP of PKS opsmanager
# translated_network: 10.13.12.5 # External IP address for PKS opsmanager
# rule_priority: 1024 # Higher priority
#
# # Sample entry for allowing inbound to PKS Controller
# # No need to create this if using nsx-t-ci-pipeline
# - t0_router: DefaultT0Router
# nat_type: dnat
# destination_network: 10.13.12.7 # External IP address for PKS controller
# translated_network: 172.23.2.4 # Internal IP of PKS Ops Controller
# rule_priority: 1024 # Higher priority
# #destination_network: 101.101.101.1 # Optional - allow specific network to reach out to
# # Sample entry for allowing outbound from PKS Controller to external
# - t0_router: DefaultT0Router
# nat_type: snat
# source_network: 172.23.2.4 # Internal IP of PKS controller
# translated_network: 10.13.12.7 # External IP address for PKS controller
# rule_priority: 1024 # Higher priority
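# Note on the priorities above: NSX-T evaluates NAT rules with lower rule_priority values first,
# so the VM-specific DNAT/SNAT pairs at 1024 match before the broad network SNATs at 8000/8001.
# For example, egress from 192.168.1.5 hits the 1024 SNAT (translated to 10.13.12.2) rather than
# the 192.168.1.0/24 rule (translated to 10.13.12.3).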
nsx_t_csr_request_spec: |
csr_request:
# common_name is not required - nsx_t_manager_host_name would be used
org_name: Company # EDIT
org_unit: net-integ # EDIT
country: US # EDIT
state: CA # EDIT
city: SF # EDIT
key_size: 2048 # Valid values: 2048 or 3072
algorithm: RSA # Valid values: RSA or DSA
# LBR definition
# By default, all the server pools use Layer 4 TCP passthrough:
# no SSL termination or handling; uses the default TCP active monitor & Auto Map for the virtual server.
# Auto Map mode uses the LB interface IP and an ephemeral port.
# In scenarios where both the clients and the pool members are attached to the same Logical Router,
# SNAT (Auto Map or IP List) must be used.
# LBR Sizing guide
# LB Size | Virtual Servers | Pool Members
# small | 10 | 30
# medium | 100 | 300
# large | 1000 | 3000
# No. of LBs per edge, based on the size of the edge
# Edge Size | Small LBs | Medium LBs | Large LBs
# small | 0 | 0 | 0
# medium | 1 | 0 | 0 # Recommended for running only PAS or PKS
# large | 4 | 1 | 1 # Recommended for running PAS + PKS
# Bare metal - not handled here
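# Worked example against the tables above: with nsx_t_edge_deploy_size: large, each edge can host
# up to 4 small LBRs; the single small PAS-ERT-LBR defined further below would use 1 of those 4 slots,
# supporting up to 10 virtual servers and 30 pool members.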
# Create active monitors for the LBR security group integration; passive or default monitors cannot be used
nsx_t_monitor_spec: |
active_monitors:
# Sample entry for creating Active Monitors for PAS ERT
- name: gorouter-monitor
type: LbHttpMonitor
monitor_port: 8080
path: /health # Http monitor requires path
- name: diego-brain-monitor
type: LbTcpMonitor
monitor_port: 2222
- name: mysql-proxy-monitor
type: LbTcpMonitor
monitor_port: 1936
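# The monitor names above are referenced by the `monitor:` fields in nsx_t_lbr_spec further below.
# An additional monitor would follow the same shape (illustrative name and port):
# - name: my-app-monitor
#   type: LbTcpMonitor
#   monitor_port: 8443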
# Create Security groups
# These would later be linked with the PAS GoRouters or other groups as needed
nsx_t_nsgroup_spec: |
nsgroups:
# Sample entries for creating NSGroups for PAS
- name: PAS1-GoRouter-NSG
tags:
pas-foundation: PAS1 # Tag to associate with the NSGroup
- name: PAS1-TCPRouter-NSG
tags:
pas-foundation: PAS1 # Tag to associate with the NSGroup
- name: PAS1-SSHProxy-NSG
tags:
pas-foundation: PAS1 # Tag to associate with the NSGroup
- name: PAS1-MySQLProxy-NSG
tags:
pas-foundation: PAS1 # Tag to associate with the NSGroup
- name: PAS2-GoRouter-NSG
tags:
pas-foundation: PAS2 # Tag to associate with the NSGroup
## Allows the user to specify the NS Group association -> in that case, the member details won't be used.
## Also, if an NS Group is used, specify the active monitor name; otherwise it defaults to the built-in TCP monitor.
## Additionally, the max pool member size (max_members) needs to be specified when using NSG/dynamic membership.
## The maximum number of pool members for the SMALL load balancer service form factor is 30,
## so ensure the total pool members do not exceed the supported config for a given LBR size.
## The default max members per pool is 4, if not specified.
nsx_t_lbr_spec: |
loadbalancers:
# Sample entry for creating LBR for PAS ERT
- name: PAS-ERT-LBR
t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
size: small # Allowed sizes: small, medium, large
virtual_servers:
- name: goRouter443 # Unique name that signifies function being exposed
vip: 10.13.12.150 # Exposed VIP for LBR to listen on
port: 443
# If nsgroup is defined, then static members entry will be ignored
nsgroup: 'PAS1-GoRouter-NSG'
monitor: gorouter-monitor
max_members: 5
# Ignored... as nsgroup is defined above
members:
- ip: 192.168.2.31 # Internal ip of GoRouter instance 1
port: 443
- ip: 192.168.2.32 # Internal ip of GoRouter instance 2
port: 443
- name: goRouter80
vip: 10.13.12.150
port: 80
# If nsgroup is defined, then static members will be ignored
nsgroup: 'PAS1-GoRouter-NSG'
monitor: gorouter-monitor
max_members: 5
# Ignored... as nsgroup is defined above
members:
- ip: 192.168.2.31 # Internal ip of GoRouter instance 1
port: 80
- ip: 192.168.2.32 # Internal ip of GoRouter instance 2
port: 80
- name: sshProxy # SSH Proxy exposed to outside
vip: 10.13.12.151
port: 2222 # Port 2222 for ssh
# If nsgroup is defined, then static members will be ignored
nsgroup: 'PAS1-SSHProxy-NSG'
monitor: diego-brain-monitor
max_members: 2
# Ignored... as nsgroup is defined above
members:
- ip: 192.168.2.41 # Internal ip of Diego Brain where ssh proxy runs
port: 2222
- name: mySQLProxy # Mysql Proxy exposed to outside
vip: 10.13.12.153
port: 1936 # Port 1936 for mysql proxy
# If nsgroup is defined, then static members will be ignored
nsgroup: 'PAS1-MySQLProxy-NSG'
monitor: mysql-proxy-monitor
max_members: 2
# Ignored... as nsgroup is defined above
members:
- ip: 192.168.2.45 # Internal ip of Mysql proxy
port: 1936
- name: sampleTcpRouter # Sample for TCP Router that uses a port range
vip: 10.13.12.152
port: 10000-10500 # Uses a port range rather than a fixed port
# no nsgroup defined, so use static members and default tcp monitoring
members:
- ip: 192.168.2.42 # Internal ip of TcpRouter instance 1
port: 10000-10500
- ip: 192.168.2.43 # Internal ip of TcpRouter instance 2
port: 10000-10500
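# Sanity check for the small LBR above: the pools total 5 + 5 + 2 + 2 + 2 = 16 members
# (max_members where set, otherwise the static member count), safely under the 30-member
# limit of the small load balancer form factor.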