diff --git a/test.mesos.yml b/test.mesos.yml
deleted file mode 100644
index 6a3ab7b34..000000000
--- a/test.mesos.yml
+++ /dev/null
@@ -1,138 +0,0 @@
----
-# CHECK SECURITY - when customizing you should leave this in. If you take it out
-# and forget to specify security.yml, security could be turned off on components
-# in your cluster!
-- hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: check for security
-      when: security_enabled is not defined or not security_enabled
-      fail:
-        msg: |
-          Security is not enabled. Please run `security-setup` in the root
-          directory and re-run this playbook with the `--extra-vars`/`-e` option
-          pointing to your `security.yml` (e.g., `-e @security.yml`)
-
-# BASICS - we need every node in the cluster to have common software running to
-# increase stability and enable service discovery. You can look at the
-# documentation for each of these components in their README file in the
-# `roles/` directory, or by checking the online documentation at
-# microservices-infrastructure.readthedocs.org.
-- hosts: all
-  vars:
-    # consul_acl_datacenter should be set to the datacenter you want to control
-    # Consul ACLs. If you only have one datacenter, set it to that or remove
-    # this variable.
-    # consul_acl_datacenter: your_primary_datacenter
-
-    # consul_dns_domain is repeated across these plays so that all the
-    # components know what information to use for this values to help with
-    # service discovery.
-    consul_dns_domain: consul
-    consul_servers_group: role=control
-  roles:
-    - common
-    - lvm
-    - collectd
-    - logrotate
-    - consul-template
-    - docker
-    - consul
-    - etcd
-    - calico
-    - logstash
-    - nginx
-    - dnsmasq
-
-# ROLES - after provisioning the software that's common to all hosts, we do
-# specialized hosts. This configuration has two major groups: control nodes and
-# worker nodes. We provision the worker nodes first so that we don't create any
-# race conditions. This could happen in the Mesos components - if there are no
-# worker nodes when trying to schedule control software, the deployment process
-# would hang.
-#
-# The worker role itself has a minimal configuration, as it's designed mainly to
-# run software that the Mesos leader shedules. It also forwards traffic to
-# globally known ports configured through Marathon.
-- hosts: role=worker
-  # role=worker hosts are a subset of "all". Since we already gathered facts on
-  # all servers, we can skip it here to speed up the deployment.
-  gather_facts: no
-  vars:
-    consul_dns_domain: consul
-    mesos_mode: follower
-
-    # Set this to whatever domain you want to reach haproxy from; it should be
-    # in this format: [short-name]-lb.[domain], where [short-name] and [domain]
-    # are the same as configured in terraform.
-    # In this example, short-name is "mi" and domain is "example.com":
-    haproxy_domain: mi-lb.example.com
-  roles:
-    - mesos
-    - haproxy
-
-- hosts: role=kubeworker
-  gather_facts: yes
-  vars:
-    consul_dns_domain: consul
-  roles:
-    - calico
-    - kubernetes
-    - kubernetes-node
-
-# the control nodes are necessarily more complex than the worker nodes, and have
-# ZooKeeper, Mesos, and Marathon leaders. In addition, they control Vault to
-# manage secrets in the cluster. These servers do not run applications by
-# themselves, they only schedule work. That said, there should be at least 3 of
-# them (and always an odd number) so that ZooKeeper can get and keep a quorum.
-- hosts: role=control
-  gather_facts: yes
-  vars:
-    consul_dns_domain: consul
-    consul_servers_group: role=control
-    mesos_leaders_group: role=control
-    mesos_mode: leader
-    zookeeper_server_group: role=control
-  roles:
-    - vault
-    - zookeeper
-    - mesos
-    - marathon
-    - chronos
-    - mantlui
-    - kubernetes
-    - kubernetes-master
-    - kubernetes-addons
-
-# The edge role exists solely for routing traffic into the cluster. Firewall
-# settings should be such that web traffic (ports 80 and 443) is exposed to the
-# world.
-- hosts: role=edge
-  gather_facts: no
-  vars:
-    # this is the domain that traefik will match on to do host-based HTTP
-    # routing. Set it to a domain you control and add a star domain to route
-    # traffic. (EG *.marathon.localhost)
-    #
-    # For those migrating from haproxy, this variable serves the same purpose
-    # and format as `haproxy_domain`.
-    traefik_marathon_domain: marathon.localhost
-  roles:
-    - traefik
-
-# GlusterFS has to be provisioned in reverse order from Mesos: servers then
-# clients.
-- hosts: role=control
-  gather_facts: no
-  vars:
-    consul_dns_domain: consul
-    glusterfs_mode: server
-  roles:
-    - glusterfs
-
-- hosts: role=worker
-  vars:
-    consul_dns_domain: consul
-    glusterfs_mode: client
-  roles:
-    - glusterfs