Getting it to work #1

Merged
10 commits merged on Oct 5, 2017
40 changes: 6 additions & 34 deletions Vagrantfile
@@ -8,49 +8,21 @@ Vagrant.require_version ">= 1.7.0"
 
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.vm.box = "ubuntu/trusty64"
-  cchq_proxy_port = ENV.fetch("VAGRANT_CCHQ_PROXY_PORT", 8080)
   config.ssh.insert_key = false
 
-  config.vm.define "app1" do |app1|
-    app1.vm.hostname = "app1"
-    app1.vm.network "private_network", ip: "192.168.33.15"
-    app1.vm.provider "virtualbox" do |v|
+  config.vm.define "monolith" do |monolith|
+    monolith.vm.hostname = "monolith"
+    monolith.vm.network "private_network", ip: "192.168.33.21"
+    monolith.vm.provider "virtualbox" do |v|
       v.memory = 768
       v.cpus = 1
     end
-    app1.vm.provision "shell", path: "provisioning/nodes.sh"
-  end
-
-  # config.vm.define "app2" do |app1|
-  #   app1.vm.hostname = "app2"
-  #   app1.vm.network "private_network", ip: "192.168.33.18"
-  #   app1.vm.provision "shell", path: "provisioning/nodes.sh"
-  #   db1.vm.provider "virtualbox" do |v|
-  #     v.memory = 768
-  #     v.cpus = 1
-  #   end
-  # end
-
-  config.vm.define "db1" do |db1|
-    db1.vm.hostname = "db1"
-    db1.vm.network "private_network", ip: "192.168.33.16"
-    db1.vm.provider "virtualbox" do |v|
-      v.memory = 768
-      v.cpus = 1
-    end
-    db1.vm.provision "shell", path: "provisioning/nodes.sh"
-  end
-
-  config.vm.define "proxy1" do |proxy1|
-    proxy1.vm.hostname = "proxy1"
-    proxy1.vm.network "private_network", ip: "192.168.33.17"
-    proxy1.vm.provision "shell", path: "provisioning/nodes.sh"
-    proxy1.vm.network "forwarded_port", guest: 80, host: cchq_proxy_port
+    monolith.vm.provision "shell", path: "provisioning/nodes.sh"
   end
 
   config.vm.define "control" do |control|
     control.vm.hostname = "control"
-    control.vm.network "private_network", ip: "192.168.33.14"
+    control.vm.network "private_network", ip: "192.168.33.20"
     control.vm.provider "virtualbox" do |v|
       v.memory = 768
       v.cpus = 1
32 changes: 0 additions & 32 deletions Vagrantfile-monolith

This file was deleted.

60 changes: 60 additions & 0 deletions Vagrantfile.multi
@@ -0,0 +1,60 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.require_version ">= 1.7.0"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "ubuntu/trusty64"
  cchq_proxy_port = ENV.fetch("VAGRANT_CCHQ_PROXY_PORT", 8080)
  config.ssh.insert_key = false

  config.vm.define "app1" do |app1|
    app1.vm.hostname = "app1"
    app1.vm.network "private_network", ip: "192.168.33.15"
    app1.vm.provider "virtualbox" do |v|
      v.memory = 768
      v.cpus = 1
    end
    app1.vm.provision "shell", path: "provisioning/nodes.sh"
  end

  # config.vm.define "app2" do |app1|
  #   app1.vm.hostname = "app2"
  #   app1.vm.network "private_network", ip: "192.168.33.18"
  #   app1.vm.provision "shell", path: "provisioning/nodes.sh"
  #   db1.vm.provider "virtualbox" do |v|
  #     v.memory = 768
  #     v.cpus = 1
  #   end
  # end

  config.vm.define "db1" do |db1|
    db1.vm.hostname = "db1"
    db1.vm.network "private_network", ip: "192.168.33.16"
    db1.vm.provider "virtualbox" do |v|
      v.memory = 768
      v.cpus = 1
    end
    db1.vm.provision "shell", path: "provisioning/nodes.sh"
  end

  config.vm.define "proxy1" do |proxy1|
    proxy1.vm.hostname = "proxy1"
    proxy1.vm.network "private_network", ip: "192.168.33.17"
    proxy1.vm.provision "shell", path: "provisioning/nodes.sh"
    proxy1.vm.network "forwarded_port", guest: 80, host: cchq_proxy_port
  end

  config.vm.define "control" do |control|
    control.vm.hostname = "control"
    control.vm.network "private_network", ip: "192.168.33.14"
    control.vm.provider "virtualbox" do |v|
      v.memory = 768
      v.cpus = 1
    end
    control.vm.provision "shell", path: "provisioning/control.sh"
  end
end
10 changes: 5 additions & 5 deletions ansible/deploy_webworker.yml
@@ -1,10 +1,10 @@
 ---
 
-- name: Keystore
-  hosts: webworkers
-  become: true
-  roles:
-    - {role: keystore, tags: keystore}
+# - name: Keystore
+#   hosts: webworkers
+#   become: true
+#   roles:
+#     - {role: keystore, tags: keystore}
 
 - name: Webworkers
   hosts: webworkers
8 changes: 4 additions & 4 deletions ansible/group_vars/all.yml
@@ -12,7 +12,7 @@ shared_dir_gid: 1500 # This GID cannot already be allocated
 shared_dir_name: "shared{{ '_' ~ deploy_env if deploy_env != 'production' else '' }}"
 shared_data_dir: "/opt/{{ shared_dir_name }}"
 shared_mount_dir: "/mnt/{{ shared_dir_name }}"
-is_monolith: '{{ groups["all"]|length == 1 }}'
+is_monolith: true
 transfer_payload_dir_name: "transfer_payloads"
 restore_payload_dir_name: "restore_payloads"
 shared_temp_dir_name: "temp"
@@ -22,6 +22,9 @@ cchq_user: cchq
 dev_group: dimagidev
 dev_users:
   present:
+    - cchq
+    - jenkins_slave
+  absent:
     - astone
     - biyeun
     - cellowitz
@@ -43,13 +46,10 @@ dev_users:
     - skelly
     - sravfeyn
     - wpride
-    - cchq
-  absent:
-    - preseed
+    - preseed
     - twymer
     - tsheffels
     - dmyung
-    - jenkins_slave
     - brudolph
 
 django_port: 9010
34 changes: 17 additions & 17 deletions ansible/inventories/monolith
@@ -1,52 +1,52 @@
 [webworkers]
-192.168.33.21
+165.227.172.214
 
 [postgresql]
-192.168.33.21
+165.227.172.214
 
 [couchdb]
-192.168.33.21
+165.227.172.214
 
 [couchdb2]
-192.168.33.21
+165.227.172.214
 
 [redis]
-192.168.33.21
+165.227.172.214
 
 [touchforms]
-192.168.33.21
+165.227.172.214
 
 [formplayer]
-192.168.33.21
+165.227.172.214
 
 [celery]
-192.168.33.21
+165.227.172.214
 
 [elasticsearch]
-192.168.33.21 elasticsearch_node_name=es0
+165.227.172.214 elasticsearch_node_name=es0
 
 [proxy]
-192.168.33.21
+165.227.172.214
 
 [rabbitmq]
-192.168.33.21
+165.227.172.214
 
 [zookeeper]
-192.168.33.21
+165.227.172.214
 
 [kafka]
-192.168.33.21 kafka_broker_id=0
+165.227.172.214 kafka_broker_id=0
 
 [pillowtop]
-192.168.33.21
+165.227.172.214
 
 [shared_dir_host]
-192.168.33.21
+165.227.172.214
 
 [riakcs]
-192.168.33.21
+165.227.172.214
 
 [stanchion]
-192.168.33.21
+165.227.172.214
 
 [pg_standby]
12 changes: 6 additions & 6 deletions ansible/roles/commcarehq/templates/localsettings.py.j2
@@ -199,12 +199,12 @@ LOCAL_PILLOWTOPS = json.loads('{{ localsettings.LOCAL_PILLOWS|to_json }}')
 BITLY_LOGIN = '{{ localsettings.BITLY_LOGIN }}'
 BITLY_APIKEY = '{{ localsettings.BITLY_APIKEY }}'
 
-JAR_SIGN = dict(
-    key_store = os.path.join('{{ www_dir }}', "DimagiKeyStore"),
-    key_alias = "{{ localsettings.JAR_KEY_ALIAS }}",
-    store_pass = "{{ localsettings.JAR_STORE_PASS }}",
-    key_pass = "{{ localsettings.JAR_KEY_PASS }}",
-)
+#JAR_SIGN = dict(
+#    key_store = os.path.join('{{ www_dir }}', "DimagiKeyStore"),
+#    key_alias = "{{ localsettings.JAR_KEY_ALIAS }}",
+#    store_pass = "{{ localsettings.JAR_STORE_PASS }}",
+#    key_pass = "{{ localsettings.JAR_KEY_PASS }}",
+#)
 
 #xep
 #GET_URL_BASE = 'dimagi.utils.web.get_secure_url_base'
2 changes: 1 addition & 1 deletion ansible/roles/common_installs/tasks/main.yml
@@ -76,7 +76,7 @@
   npm: name="{{ item }}" state=present global=yes
   sudo: yes
   with_items:
-    - npm@3.6.0
+    - npm
     - less@1.3.1
     - n@1.3.0
     - bower@1.5.3
24 changes: 22 additions & 2 deletions ansible/roles/couchdb/tasks/main.yml
@@ -1,5 +1,5 @@
 # CouchDB
-- name: Install prequisites
+- name: Install CouchDB prequisites
   apt: name="{{ item }}" state=present
   with_items:
     - build-essential
@@ -13,6 +13,15 @@
     - libcurl4-gnutls-dev
     - libtool
 
+- name: Install couchdb pip packages
A review comment on the added task above:
You might want to consider using couchdb2

taylordowns2000 (Collaborator, Author) replied:
Hey @snopoke, even on your current ansible master branch, it looks like you use both couchdb and couchdb2. Am I reading this right? Sorry for being thick; I'm just not sure I understand what you mean here.

From line 42 of deploy_db.yml on master at commcarehq-ansible:

- name: Couchdb
  hosts: couchdb
  become: true
  roles:
    - {role: couchdb, tags: 'couchdb'}

- name: Couchdb log rolling configurations
  hosts: couchdb
  become: true
  roles:
    - role: ansible-logrotate
      tags: couchdb
      logrotate_scripts:
        - name: "{{ deploy_env }}_couchdb"
          path: "{{ couch_log_dir }}/*.log"
          options:
            - daily
            - size 100M
            - rotate 10
            - missingok
            - compress
            - delaycompress
            - copytruncate
            - nocreate
            - notifempty

- name: Couchdb 2.0
  hosts: couchdb2
  become: true
  roles:
    - {role: couchdb2, tags: 'couchdb2'}

- name: Couchdb2 log rolling configurations
  hosts: couchdb2
  become: true
  roles:
    - role: ansible-logrotate
      tags: couchdb2
      logrotate_scripts:
        - name: "{{ deploy_env }}_couchdb2"
          path: "{{ couchdb_dir }}/var/log/*.stderr"
          options:
            - daily
            - size 100M
            - rotate 10
            - missingok
            - compress
            - delaycompress
            - copytruncate
            - nocreate
            - notifempty

+  pip: name="{{ item }}"
+  sudo: yes
+  with_items:
+    - urllib3
+    - pyopenssl
+    - ndg-httpsclient
+    - pyasn1
+
 - name: Check CouchDB existence
   stat: path="{{ couchdb_install_path }}"
   register: couchdb_path
@@ -59,7 +68,7 @@
   register: update_config
 
 - name: CouchDB ownership permissions
-  file: path="{{ item }}" owner=couchdb group=couchdb recurse=yes state=directory
+  file: path="{{ item }}" owner=couchdb recurse=yes state=directory
   with_items:
     - "{{ couch_log_dir }}"
     - /usr/local/var/lib/couchdb
@@ -93,8 +102,19 @@
   when: not item.skipped|default(false) and item.status == 404
   with_items: "{{ couch_response.results }}"
 
+# TD thinks that only the first one should run here, since the second would be
+# creating a user with the same username/password.
+# TASK [couchdb : Set CouchDB username and password] *****************************
+# ok: [165.227.172.214] => (item={u'username': u'commcarehq', u'name': u'commcarehq', u'is_https': False, u'host': u'165.227.172.214', u'password': u'commcarehq', u'port': 5984})
+# failed: [165.227.172.214] (item={u'username': u'commcarehq', u'name': u'commcarehq__users', u'is_https': False, u'host': u'165.227.172.214', u'password': u'commcarehq', u'port': 5984}) => {"cache_control": "must-revalidate", "content": "{\"error\":\"unauthorized\",\"reason\":\"You are not a server admin.\"}\n", "content_length": "64", "content_type": "text/plain; charset=utf-8", "date": "Thu, 05 Oct 2017 11:10:34 GMT", "failed": true, "item": {"host": "165.227.172.214", "is_https": false, "name": "commcarehq__users", "password": "commcarehq", "port": 5984, "username": "commcarehq"}, "msg": "Status code was not [200]: HTTP Error 401: Unauthorized", "redirected": false, "server": "CouchDB/1.6.1 (Erlang OTP/R16B03)", "status": 401, "url": "http://165.227.172.214:5984/_config/admins/commcarehq"}
+# to retry, use: --limit @/vagrant/ansible/deploy_stack.retry
+#
+# PLAY RECAP *********************************************************************
+# 165.227.172.214 : ok=135 changed=90 unreachable=0 failed=1
+#
 - name: Set CouchDB username and password
   uri:
+    # url: "http{% if item.is_https %}s{% endif %}://{{ item.username }}:{{ item.password }}@{{ item.host }}:{{ item.port }}/_config/admins/{{ item.username }}"
     url: "http{% if item.is_https %}s{% endif %}://{{ item.host }}:{{ item.port }}/_config/admins/{{ item.username }}"
     method: PUT
     status_code: 200
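The commented-out log above records why the second item fails: once the first _config/admins PUT succeeds, CouchDB leaves "admin party" mode and unauthenticated admin requests return 401. A minimal sketch of an authenticated variant, in the spirit of the couchdb2 change below; this is not part of the diff, and couchdb_instances is a placeholder for whatever list the original task actually iterates over:

- name: Set CouchDB username and password
  uri:
    # authenticate with the admin credentials so items after the first do not 401
    url: "http{% if item.is_https %}s{% endif %}://{{ item.username }}:{{ item.password }}@{{ item.host }}:{{ item.port }}/_config/admins/{{ item.username }}"
    method: PUT
    body: '"{{ item.password }}"'
    status_code: 200, 401  # as in the couchdb2 role, 401 means it is already set up
  with_items: "{{ couchdb_instances }}"  # placeholder; the real variable is not shown in this hunk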
14 changes: 13 additions & 1 deletion ansible/roles/couchdb2/tasks/main.yml
@@ -1,7 +1,8 @@
 ---
 - name: set couchdb username and password
   uri:
-    url: "http{% if couchdb_secure %}s{% endif %}://{{ inventory_hostname|ipaddr }}:{{ couchdb_port }}/_node/couchdb@{{ inventory_hostname|ipaddr }}/_config/admins/{{ couchdb2.username }}"
+    # root@commcare-testing:~# curl -X PUT -H "Content-Type: text/plain" -d '"commcarehq"' http://commcarehq:commcarehq@188.226.180.143:5984/_config/admins/commcarehq
+    url: "http{% if couchdb_secure %}s{% endif %}://{{ couchdb2.username }}:{{ couchdb2.password }}@{{ inventory_hostname|ipaddr }}:{{ couchdb_port }}/_node/couchdb@{{ inventory_hostname|ipaddr }}/_config/admins/{{ couchdb2.username }}"
     method: PUT
     status_code: 200, 401 # 401 means this is already set up
     body: '"{{ couchdb2.password }}"'
@@ -96,6 +97,17 @@
   notify: reload monit
   tags: monit
 
+# TODO: consider starting the monit daemon before calling this. Currently, it fails with:
+# TASK [couchdb2 : monit] ********************************************************
+# fatal: [165.227.172.214]: FAILED! => {"changed": false, "failed": true, "msg": "couchdb2 process not presently configured with monit", "name": "couchdb2", "state": "monitored"}
+#
+# RUNNING HANDLER [monit : reload monit] *****************************************
+# to retry, use: --limit @/vagrant/ansible/deploy_stack.retry
+#
+# PLAY RECAP *********************************************************************
+# 165.227.172.214 : ok=36 changed=20 unreachable=0 failed=1
+# I get it to run after ssh-ing in and running `monit` as root, awakening the monit daemon.
+# I love awakening daemons...
taylordowns2000 (Collaborator, Author) commented on Oct 19, 2017:
Thanks for the quick reply. Just to be clear, do you mean --skip-tags couchdb2 or --skip-tags couchdb?

The reviewer replied:

Sorry, I mean --skip-tags couchdb.

 - monit:
     name: couchdb2
     state: monitored
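Regarding the monit TODO above: the monit task runs before the "reload monit" handler fires, so on a fresh host the monit daemon has not yet read the couchdb2 config, or may not be running at all. A hedged sketch of one possible ordering, not part of this PR; it assumes monit is installed as a regular system service and uses only the stock service, command and monit modules:

# Wake the daemon and make it re-read its config before asking it to monitor
# couchdb2, mirroring the manual `monit` run described in the TODO above.
- name: Ensure the monit daemon is running
  service:
    name: monit
    state: started
    enabled: yes

- name: Reload monit so it picks up the couchdb2 config
  command: monit reload

- monit:
    name: couchdb2
    state: monitored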