Showing with 1,323 additions and 144 deletions.
  1. +13 −0 .fixtures.yml
  2. +2 −0 .gitignore
  3. +1 −0 .ruby-version
  4. +2 −0 README.md
  5. +80 −0 documentation/docker_examples.md
  6. +108 −0 documentation/upgrade.md
  7. +3 −3 functions/{target_host.pp → target_name.pp}
  8. +25 −0 manifests/setup/master.pp
  9. +1 −1 metadata.json
  10. +6 −6 plans/unit/configure.pp
  11. +21 −17 plans/unit/install.pp
  12. +119 −117 plans/upgrade.pp
  13. +1 −0 spec/docker/.dockerignore
  14. +43 −0 spec/docker/Dockerfile
  15. +10 −0 spec/docker/Dockerfile_bolt
  16. +129 −0 spec/docker/extra-large-ha/docker-compose.yaml
  17. +17 −0 spec/docker/extra-large-ha/inventory.yaml
  18. +12 −0 spec/docker/extra-large-ha/params.json
  19. +9 −0 spec/docker/extra-large-ha/upgrade_params.json
  20. +78 −0 spec/docker/extra-large/docker-compose.yaml
  21. +15 −0 spec/docker/extra-large/inventory.yaml
  22. +9 −0 spec/docker/extra-large/params.json
  23. +7 −0 spec/docker/extra-large/upgrade_params.json
  24. +86 −0 spec/docker/large-ha/docker-compose.yaml
  25. +15 −0 spec/docker/large-ha/inventory.yaml
  26. +9 −0 spec/docker/large-ha/params.json
  27. +8 −0 spec/docker/large-ha/upgrade_params.json
  28. +59 −0 spec/docker/large/docker-compose.yaml
  29. +14 −0 spec/docker/large/inventory.yaml
  30. +8 −0 spec/docker/large/params.json
  31. +7 −0 spec/docker/large/upgrade_params.json
  32. +8 −0 spec/docker/live_audit.service
  33. +20 −0 spec/docker/live_audit.sh
  34. +28 −0 spec/docker/provision.sh
  35. +66 −0 spec/docker/standard-ha/docker-compose.yaml
  36. +14 −0 spec/docker/standard-ha/inventory.yaml
  37. +8 −0 spec/docker/standard-ha/params.json
  38. +7 −0 spec/docker/standard-ha/upgrade_params.json
  39. +43 −0 spec/docker/standard/docker-compose.yaml
  40. +13 −0 spec/docker/standard/inventory.yaml
  41. +7 −0 spec/docker/standard/params.json
  42. +6 −0 spec/docker/standard/upgrade_params.json
  43. +27 −0 spec/docker/upgrade.sh
  44. +8 −0 tasks/precheck.json
  45. +18 −0 tasks/precheck.sh
  46. +13 −0 tasks/sign_csr.json
  47. +31 −0 tasks/sign_csr.rb
  48. +8 −0 tasks/submit_csr.json
  49. +41 −0 tasks/submit_csr.rb
  50. +8 −0 tasks/trusted_facts.json
  51. +32 −0 tasks/trusted_facts.rb
13 changes: 13 additions & 0 deletions .fixtures.yml
@@ -0,0 +1,13 @@
---
fixtures:
  repositories:
    facts: 'https://github.com/puppetlabs/puppetlabs-facts.git'
    puppet_agent: 'https://github.com/puppetlabs/puppetlabs-puppet_agent.git'
    provision: 'https://github.com/puppetlabs/provision.git'
    stdlib: 'https://github.com/puppetlabs/puppetlabs-stdlib.git'
    node_manager: 'https://github.com/WhatsARanjit/puppet-node_manager'
    apply_helpers: 'https://github.com/puppetlabs/puppetlabs-apply_helpers'
    bolt_shim: 'https://github.com/puppetlabs/puppetlabs-bolt_shim'
    debug: 'https://github.com/nwops/puppet-debug'
  symlinks:
    "pe_xl": "#{source_dir}"
2 changes: 2 additions & 0 deletions .gitignore
@@ -25,3 +25,5 @@
 .project
 .envrc
 /inventory.yaml
+.rerun.json
+*.tar.gz
1 change: 1 addition & 0 deletions .ruby-version
@@ -0,0 +1 @@
2.6.5
2 changes: 2 additions & 0 deletions README.md
@@ -14,3 +14,5 @@ See this README file and any documents in the [documentation](documentation) dir
 ## Architecture

 ![architecture](documentation/images/architecture.png)
+* [Classification](documentation/classification.md)
+* [Docker Based Examples](documentation/docker_examples.md)
80 changes: 80 additions & 0 deletions documentation/docker_examples.md
@@ -0,0 +1,80 @@
## Docker Based Examples
This module provides Docker Compose files for the various architectures for experimentation purposes. This gives you the ability to stand up an entire PE stack in order to learn how this module and PE high availability work. If you have Docker and Docker Compose, you can start up a full Puppet architecture with a single command. Please note that Puppet does not support PE on containers in production.

In order to decouple Bolt from a dev system, a special bolt container is created that runs all the bolt commands. This is required to achieve maximum portability. Should you want to run bolt commands against the PE stack, you must first log in to this bolt container via ssh, docker, or docker-compose.

Example: `docker-compose run --entrypoint=/bin/bash bolt`

### Requirements
To run the container based examples you will need:

1. Docker
2. Docker Compose
3. 16GB of memory (24GB+ for the XL and XL-HA architectures)
4. A CPU with many cores (tested with a Core i7 6700)

### Starting the example
We have provided a provision.sh script to make these examples simple.
To use it, perform the following:

1. `cd spec/docker`
2. `bash provision.sh`
3. Select the desired architecture when prompted (e.g. `extra-large-ha`)
4. Wait 10-20 minutes for provisioning to complete

```
Please choose a PE architecture to build:
1) extra-large/ 3) large/ 5) standard/
2) extra-large-ha/ 4) large-ha/ 6) standard-ha/
#?
```

### Stopping the example
To stop and remove the containers, perform the following:

1. `cd spec/docker`
2. `cd <chosen architecture>`
3. `docker-compose down`

### Logging into the console
You can log in to the PE console after a successful provision. First, however, you will need to
grab the mapped port number of the PE console. The port numbers are mapped dynamically so as not to
cause port conflicts on your system. To see how the ports are mapped:

1. Run `docker ps`:
```
80c6f0b5525c pe-base "/sbin/init" 2 hours ago Up 2 hours 0.0.0.0:32774->22/tcp, 0.0.0.0:32773->443/tcp, 0.0.0.0:32772->4433/tcp, 0.0.0.0:32771->8080/tcp, 0.0.0.0:32770->8081/tcp, 0.0.0.0:32769->8140/tcp, 0.0.0.0:32768->8443/tcp pe-lg.puppet.vm
```
2. Note the mapped port for 443, which in this case is 32773
3. Visit https://localhost:32773 in your browser
4. Accept the security risk (self-signed certificate)
5. Login: admin/puppetlabs
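
If you'd rather not read through the `docker ps` output, `docker port` will print the mapping for a single container (using the container name from the example above):

```
docker port pe-lg.puppet.vm 443
# 0.0.0.0:32773
```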

### Logging into any of the containers
SSH is running in all the containers, so you can use ssh if you grab the mapped SSH port number. `ssh root@localhost -p 32774`

Login: root/test

You can also bypass SSH and run `docker exec` or `docker-compose exec`:

1. `cd spec/docker/extra-large`
2. `docker-compose exec pe_xl_core /bin/bash`

**Note:** pe_xl_core is the name of the service defined in the respective docker-compose file.

This will run an interactive bash shell in the running container.
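
Once inside the container, you can sanity-check the stack; for example (assuming PE's standard service naming and install layout):

```
systemctl list-units 'pe-*' --no-pager    # list PE services and their states
/opt/puppetlabs/bin/puppet agent -t       # trigger an on-demand puppet run
```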

### Upgrades
There is also an upgrade.sh script, similar to the provision.sh script, that upgrades an already provisioned PE stack to the version specified in the upgrade_params.json file.
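
For example, assuming upgrade.sh prompts for an architecture the same way provision.sh does:

```
cd spec/docker
bash upgrade.sh
# select the architecture you previously provisioned when prompted
```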

### Other notes
1. The provision plan is not fully idempotent.
2. Some tasks may fail due to resource constraints.
3. You can rerun the provision.sh script on the same architecture without destroying the containers; this can sometimes complete a provision that failed partway through.
4. That said, rerunning the provision script may also produce errors due to idempotency issues with tasks and plans.
5. Please remember you are starting the equivalent of 3-6 VMs on a single system.
6. You can use `top` to view all the processes being run in the containers.
7. Docker runs these examples in privileged mode (required for systemd support).
8. Systemd is running inside these containers! The real systemd, not the fake one.
108 changes: 108 additions & 0 deletions documentation/upgrade.md
@@ -0,0 +1,108 @@
# Upgrade Puppet Enterprise using the pe\_xl module

Puppet Enterprise deployments provisioned using the pe\_xl module can be upgraded using the pe\_xl module as well.

## Usage

The `pe_xl::upgrade` plan requires as input the version of PE to upgrade to, and the names of each PE infrastructure host: master, replica, compilers, etc.

The following is an example parameters file for upgrading an Extra Large architecture deployment from PE 2018.1.9 to PE 2018.1.11.

```json
{
  "version": "2018.1.11",
  "master_host": "pe-master-09a40c-0.us-west1-a.c.reidmv-pe_xl.internal",
  "puppetdb_database_host": "pe-psql-09a40c-0.us-west1-a.c.reidmv-pe_xl.internal",
  "master_replica_host": "pe-master-09a40c-1.us-west1-b.c.reidmv-pe_xl.internal",
  "puppetdb_database_replica_host": "pe-psql-09a40c-1.us-west1-b.c.reidmv-pe_xl.internal",
  "compiler_hosts": [
    "pe-compiler-09a40c-0.us-west1-a.c.reidmv-pe_xl.internal",
    "pe-compiler-09a40c-1.us-west1-b.c.reidmv-pe_xl.internal",
    "pe-compiler-09a40c-2.us-west1-c.c.reidmv-pe_xl.internal",
    "pe-compiler-09a40c-3.us-west1-a.c.reidmv-pe_xl.internal"
  ]
}

The upgrade plan may be run as:

```
bolt plan run pe_xl::upgrade --params @params.json
```

## Offline Usage

The pe\_xl::upgrade plan downloads installation content from an online repository by default. To perform an offline installation, you can prefetch the needed content and place it in the staging directory. If content is available in the staging directory, pe\_xl::upgrade will not try to download it.

The default staging directory is `/tmp`. If a different staging directory is being used, it can be specified using the `stagingdir` parameter to the pe\_xl::upgrade plan.

The content needed is the PE installation tarball for the target version. The tarball should be placed in the staging directory under its original name, e.g. `/tmp/puppet-enterprise-2019.2.2-el-7-x86_64.tar.gz`.

Installation content can be downloaded from [https://puppet.com/try-puppet/puppet-enterprise/download/](https://puppet.com/try-puppet/puppet-enterprise/download/).
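
For example, pre-fetching the tarball named above might look like the following; the URL is illustrative, so use the download page above to find the correct link for your version and platform:

```
curl -L -o /tmp/puppet-enterprise-2019.2.2-el-7-x86_64.tar.gz \
  'https://pm.puppetlabs.com/puppet-enterprise/2019.2.2/puppet-enterprise-2019.2.2-el-7-x86_64.tar.gz'
```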

## Usage over the Orchestrator transport

The pe\_xl::upgrade plan can be used with the Orchestrator (pcp) transport, provided that the Bolt executor is running as root on the master. To use the Orchestrator transport, prepare an inventory file such as the following, setting the default transport to `pcp` but the master specifically to `local`.

```
---
version: 2
config:
  transport: pcp
  pcp:
    cacert: /etc/puppetlabs/puppet/ssl/certs/ca.pem
    service-url: https://pe-master-ad1d88-0.us-west1-a.c.reidmv-pe_xl.internal:8143
    task-environment: production
    token-file: /root/.puppetlabs/token
groups:
  - name: pe-targets
    targets:
      - name: "pe-master-ad1d88-0.us-west1-a.c.reidmv-pe_xl.internal"
        config:
          transport: local
      - name: "pe-master-ad1d88-1.us-west1-b.c.reidmv-pe_xl.internal"
      - name: "pe-compiler-ad1d88-0.us-west1-a.c.reidmv-pe_xl.internal"
      - name: "pe-compiler-ad1d88-1.us-west1-b.c.reidmv-pe_xl.internal"
      - name: "pe-compiler-ad1d88-2.us-west1-c.c.reidmv-pe_xl.internal"
      - name: "pe-compiler-ad1d88-3.us-west1-a.c.reidmv-pe_xl.internal"
      - name: "pe-psql-ad1d88-0.us-west1-a.c.reidmv-pe_xl.internal"
      - name: "pe-psql-ad1d88-1.us-west1-b.c.reidmv-pe_xl.internal"
```
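
Before running the upgrade, a trivial command run against the group is a quick way to confirm that both transports work (a smoke test of our own, not part of the plan):

```
bolt command run 'hostname -f' --targets pe-targets --inventoryfile inventory.yaml
```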

Additionally, you MUST pre-stage a copy of the PE installation media in /tmp on the PuppetDB PostgreSQL node(s), if present. The Orchestrator transport cannot be used to send large files to remote systems, and the plan will fail if it tries.
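
A minimal sketch of that pre-staging, assuming root SSH access and the PE 2019.2.2 el-7 tarball (substitute your own hosts and version):

```
scp puppet-enterprise-2019.2.2-el-7-x86_64.tar.gz \
  root@pe-psql-ad1d88-0.us-west1-a.c.reidmv-pe_xl.internal:/tmp/
scp puppet-enterprise-2019.2.2-el-7-x86_64.tar.gz \
  root@pe-psql-ad1d88-1.us-west1-b.c.reidmv-pe_xl.internal:/tmp/
```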

With the installation media pre-staged and an inventory definition such as the example above, the pe\_xl::upgrade plan can be run as normal. It will not rely on the Orchestrator service to operate on the master, and it will use the Orchestrator transport to operate on the other PE nodes.

```
bolt plan run pe_xl::upgrade --params @params.json
```

## Manual Upgrades

In the event a manual upgrade is required, the steps can be followed by reading directly from [the upgrade plan](../plans/upgrade.pp), which is itself the most accurate technical description of the steps required. In general form, the upgrade process is as given below.

Note: it is assumed that the Puppet master is in cluster A when the upgrade starts, and that the replica is in cluster B. If the master is in cluster B, the A/B designations in the instructions should be inverted.

**Phase 1: stop puppet service**

* Stop the `puppet` service on all PE infrastructure nodes to prevent normal automatic runs from interfering with the upgrade process

**Phase 2: upgrade HA cluster A**

1. Shut down the `pe-puppetdb` service on the compilers in cluster A
2. If different from the master, run the `install-puppet-enterprise` script for the new PE version on the PuppetDB PostgreSQL node for cluster A
3. Run the `install-puppet-enterprise` script for the new PE version on the master
4. If different from the master, run `puppet agent -t` on the PuppetDB PostgreSQL node for cluster A
5. Perform the standard `curl upgrade.sh | bash` procedure on the compilers for cluster A (sketched as shell commands after this list)
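
As a rough illustration only, the phase 2 steps above might translate to shell commands like the following; the hostnames, the unpacked installer path, and the exact compiler upgrade URL are placeholders, and the plan itself remains the authoritative sequence:

```
# 1. Stop pe-puppetdb on each cluster A compiler
ssh root@pe-compiler-a1 'systemctl stop pe-puppetdb'

# 2. Run the installer for the new version on the cluster A PuppetDB
#    PostgreSQL node (skip if it is the same host as the master)
ssh root@pe-psql-a '/tmp/puppet-enterprise-2018.1.11-el-7-x86_64/puppet-enterprise-installer'

# 3. Run the installer on the master
ssh root@pe-master '/tmp/puppet-enterprise-2018.1.11-el-7-x86_64/puppet-enterprise-installer'

# 4. Run the agent on the cluster A PuppetDB PostgreSQL node (if separate)
ssh root@pe-psql-a '/opt/puppetlabs/bin/puppet agent -t'

# 5. Upgrade each cluster A compiler with the standard curl | bash procedure
ssh root@pe-compiler-a1 'curl -k https://pe-master:8140/packages/current/upgrade.bash | bash'
```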

**Phase 3: upgrade HA cluster B**

1. Shut down the `pe-puppetdb` service on the compilers in cluster B
2. If different from the master replica, run the `install-puppet-enterprise` script for the new PE version on the PuppetDB PostgreSQL node for cluster B
3. If different from the master replica, run `puppet agent -t` on the PuppetDB PostgreSQL node for cluster B
4. Perform the standard `curl upgrade.sh | bash` procedure on the master replica
5. Perform the standard `curl upgrade.sh | bash` procedure on the compilers for cluster B

**Phase 4: resume puppet service**

* Ensure the `puppet` service on all PE infrastructure nodes is running again
6 changes: 3 additions & 3 deletions functions/target_host.pp → functions/target_name.pp
@@ -1,12 +1,12 @@
-function pe_xl::target_host(
+function pe_xl::target_name(
   Variant[Target, Array[Target,0,1]] $target,
 ) >> Variant[String, Undef] {
   case $target {
     Target: {
-      $target.host
+      $target.name
     }
     Array[Target,1,1]: {
-      $target[0].host
+      $target[0].name
     }
     Array[Target,0,0]: {
       undef
25 changes: 25 additions & 0 deletions manifests/setup/master.pp
@@ -0,0 +1,25 @@
# @summary Defines configuration needed at install time
#
class pe_xl::setup::master {

  # This is needed so that compiler certs can be signed. It's included by
  # default in 2019.0 and newer, but isn't present in 2018.1. It would be
  # preferable to use the hocon_setting resource, but we can't because it
  # requires a gem not present by default. It would be preferable to use the
  # pe_hocon_setting resource, but we can't because there's no Forge module
  # that provides it for Bolt to use. So this is what we are reduced to.
  $caconf = @(EOF)
    # CA-related settings
    certificate-authority: {
      allow-subject-alt-names: true
    }
    | EOF

  file { '/etc/puppetlabs/puppetserver/conf.d/ca.conf':
    ensure  => file,
    content => $caconf,
    notify  => Service['pe-puppetserver'],
  }

  service { 'pe-puppetserver': }
}
2 changes: 1 addition & 1 deletion metadata.json
@@ -1,6 +1,6 @@
 {
   "name": "puppetlabs-pe_xl",
-  "version": "0.4.0",
+  "version": "0.4.3",
   "author": "Reid Vandewiele",
   "summary": "Bolt plans used to deploy an at-scale Puppet Enterprise architecture",
   "license": "Apache-2.0",
12 changes: 6 additions & 6 deletions plans/unit/configure.pp
@@ -53,10 +53,10 @@
   # Set up the console node groups to configure the various hosts in their
   # roles
   run_task('pe_xl::configure_node_groups', $master_target,
-    master_host => $master_target.pe_xl::target_host(),
-    master_replica_host => $master_replica_target.pe_xl::target_host(),
-    puppetdb_database_host => $puppetdb_database_target.pe_xl::target_host(),
-    puppetdb_database_replica_host => $puppetdb_database_replica_target.pe_xl::target_host(),
+    master_host => $master_target.pe_xl::target_name(),
+    master_replica_host => $master_replica_target.pe_xl::target_name(),
+    puppetdb_database_host => $puppetdb_database_target.pe_xl::target_name(),
+    puppetdb_database_replica_host => $puppetdb_database_replica_target.pe_xl::target_name(),
     compiler_pool_address => $compiler_pool_address,
   )

@@ -86,13 +86,13 @@
   if $arch['high-availability'] {
     # Run the PE Replica Provision
     run_task('pe_xl::provision_replica', $master_target,
-      master_replica => $master_replica_target.pe_xl::target_host(),
+      master_replica => $master_replica_target.pe_xl::target_name(),
       token_file => $token_file,
     )

     # Run the PE Replica Enable
     run_task('pe_xl::enable_replica', $master_target,
-      master_replica => $master_replica_target.pe_xl::target_host(),
+      master_replica => $master_replica_target.pe_xl::target_name(),
       token_file => $token_file,
     )
   }
38 changes: 21 additions & 17 deletions plans/unit/install.pp
@@ -117,24 +117,24 @@
   # Generate all the needed pe.conf files
   $master_pe_conf = pe_xl::generate_pe_conf({
     'console_admin_password' => $console_password,
-    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_host(),
+    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_name(),
     'pe_install::puppet_master_dnsaltnames' => $dns_alt_names,
-    'puppet_enterprise::profile::puppetdb::database_host' => $puppetdb_database_target.pe_xl::target_host(),
+    'puppet_enterprise::profile::puppetdb::database_host' => $puppetdb_database_target.pe_xl::target_name(),
     'puppet_enterprise::profile::master::code_manager_auto_configure' => true,
     'puppet_enterprise::profile::master::r10k_private_key' => '/etc/puppetlabs/puppetserver/ssh/id-control_repo.rsa',
     'puppet_enterprise::profile::master::r10k_remote' => $r10k_remote,
   } + $pe_conf_data)

   $puppetdb_database_pe_conf = pe_xl::generate_pe_conf({
     'console_admin_password' => 'not used',
-    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_host(),
-    'puppet_enterprise::database_host' => $puppetdb_database_target.pe_xl::target_host(),
+    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_name(),
+    'puppet_enterprise::database_host' => $puppetdb_database_target.pe_xl::target_name(),
   } + $pe_conf_data)

   $puppetdb_database_replica_pe_conf = pe_xl::generate_pe_conf({
     'console_admin_password' => 'not used',
-    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_host(),
-    'puppet_enterprise::database_host' => $puppetdb_database_replica_target.pe_xl::target_host(),
+    'puppet_enterprise::puppet_master_host' => $master_target.pe_xl::target_name(),
+    'puppet_enterprise::database_host' => $puppetdb_database_replica_target.pe_xl::target_name(),
   } + $pe_conf_data)

   # Upload the pe.conf files to the hosts that need them
@@ -213,7 +213,7 @@
   }
   # Configure autosigning for the puppetdb database hosts 'cause they need it
-  $autosign_conf = $database_targets.reduce('') |$memo,$target| { "${target.host}\n${memo}" }
+  $autosign_conf = $database_targets.reduce('') |$memo,$target| { "${target.name}\n${memo}" }
   run_task('pe_xl::mkdir_p_file', $master_target,
     path => '/etc/puppetlabs/puppet/autosign.conf',
     owner => 'pe-puppet',
@@ -256,7 +256,7 @@

   # Deploy the PE agent to all remaining hosts
   run_task('pe_xl::agent_install', $master_replica_target,
-    server => $master_target.pe_xl::target_host(),
+    server => $master_target.pe_xl::target_name(),
     install_flags => [
       '--puppet-service-ensure', 'stopped',
       "main:dns_alt_names=${dns_alt_names_csv}",
@@ -267,7 +267,7 @@
   )

   run_task('pe_xl::agent_install', $compiler_a_targets,
-    server => $master_target.pe_xl::target_host(),
+    server => $master_target.pe_xl::target_name(),
     install_flags => [
       '--puppet-service-ensure', 'stopped',
       "main:dns_alt_names=${dns_alt_names_csv}",
@@ -278,7 +278,7 @@
   )

   run_task('pe_xl::agent_install', $compiler_b_targets,
-    server => $master_target.pe_xl::target_host(),
+    server => $master_target.pe_xl::target_name(),
     install_flags => [
       '--puppet-service-ensure', 'stopped',
       "main:dns_alt_names=${dns_alt_names_csv}",
@@ -289,19 +289,23 @@
   )

   # Ensure certificate requests have been submitted
-  run_command(@(HEREDOC), $agent_installer_targets)
-    /opt/puppetlabs/bin/puppet ssl submit_request
-    | HEREDOC
+  run_task('pe_xl::submit_csr', $agent_installer_targets)

   # TODO: come up with an intelligent way to validate that the expected CSRs
   # have been submitted and are available for signing, prior to signing them.
   # For now, waiting a short period of time is necessary to avoid a small race.
   ctrl::sleep(15)

-  run_command(inline_epp(@(HEREDOC/L)), $master_target)
-    /opt/puppetlabs/bin/puppetserver ca sign --certname \
-    <%= $agent_installer_targets.map |$target| { $target.host }.join(',') -%>
-    | HEREDOC
+  # Ensure some basic configuration on the master needed at install time.
+  if ($version.versioncmp('2019.0') < 0) {
+    apply($master_host) { include pe_xl::setup::master }.pe_xl::print_apply_result
+  }
+
+  if !empty($agent_installer_targets) {
+    run_task('pe_xl::sign_csr', $master_target,
+      certnames => $agent_installer_targets.map |$target| { $target.name },
+    )
+  }

   run_task('pe_xl::puppet_runonce', $master_target)
   run_task('pe_xl::puppet_runonce', $all_targets - $master_target)