Want to give you credit for the initial repo, or at least get the benefit of my changes #1

Merged
merged 11 commits
1  .gitignore
@@ -0,0 +1 @@
+.vagrant
7 Readme
@@ -15,9 +15,12 @@ vagrant up percona3
And that's it: you now have 3 servers running Percona XtraDB Cluster!
-These recipes also install GLB (Galera Load Balancer) on percona1.
+These recipes also install HAProxy on percona1, with two listeners: one for writes (a single cluster node) and one for reads (round-robin across all cluster nodes).
-You can read more on shinguz's post : http://www.fromdual.com/mysql-and-galera-load-balancer
+See cluster status: http://192.168.70.2:9999/
+
+Writes: 192.168.70.2:4306
+Reads: 192.168.70.2:5306
Note: SELinux is disabled. A SELinux policy file is included that allows the server to start, but the other nodes aren't yet able to sync; I need to investigate...
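
A quick way to exercise the two endpoints from the host (a sketch; it assumes the mysql client is installed there and the grants allow remote root connections):

    # writes always land on a single cluster node
    mysql -h 192.168.70.2 -P 4306 -u root -e "SELECT @@hostname;"
    # reads rotate round-robin across all three nodes
    mysql -h 192.168.70.2 -P 5306 -u root -e "SELECT @@hostname;"

Repeating the read query should cycle through percona1, percona2 and percona3.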
17 manifests/site.pp
@@ -1,30 +1,37 @@
node percona1 {
include percona::repository
include percona::cluster
+ include percona::toolkit
+ include xinet
include myhosts
+ include haproxy
- Class['percona::repository'] -> Class['percona::cluster'] -> Class['galera::glb']
+ Class['percona::repository'] -> Class['percona::cluster']
+ Class['percona::repository'] -> Class['percona::toolkit']
- class {
- 'galera::glb':
- glb_list_backend => "192.168.70.2:3306:1 192.168.70.3:3306:1 192.168.70.4:3306"
- }
}
node percona2 {
include percona::repository
include percona::cluster
+ include percona::toolkit
+ include xinet
include myhosts
Class['percona::repository'] -> Class['percona::cluster']
+ Class['percona::repository'] -> Class['percona::toolkit']
}
node percona3 {
include percona::repository
include percona::cluster
+ include percona::toolkit
+ include xinet
include myhosts
Class['percona::repository'] -> Class['percona::cluster']
+ Class['percona::repository'] -> Class['percona::toolkit']
+
}
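
If the Vagrantfile is unchanged, picking these manifests up on already-running VMs should only need a re-provision (a sketch, assuming the standard Vagrant Puppet provisioner this repo uses):

    vagrant provision percona1
    vagrant provision percona2
    vagrant provision percona3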
2  modules/galera/manifests/glb/service.pp
@@ -2,7 +2,7 @@
exec {
"run_glb":
- command => "glbd --daemon --threads $glb_threads --control $glb_ip_control:4444 $glb_ip_loadbalancer:3306 $glb_list_backend",
+ command => "glbd --daemon --threads $glb_threads --control $glb_ip_control:4445 $glb_ip_loadbalancer:3306 $glb_list_backend",
path => ["/usr/sbin", "/bin" ],
require => Class['Galera::Glb::Packages'],
unless => "ps ax | grep [g]lbd 2> /dev/null",
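
Port 4444 is also Galera's default state snapshot transfer (SST) port, so moving the glbd control socket to 4445 presumably avoids the two colliding on percona1. A quick sanity check on the node (a sketch):

    # only glbd's control socket should answer on 4445 now
    netstat -lnt | grep -E ':(4444|4445) '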
47 modules/haproxy/files/xtradb_cluster.cfg
@@ -0,0 +1,47 @@
+# this config needs haproxy-1.4.19
+
+global
+ log 127.0.0.1 local0
+ log 127.0.0.1 local1 notice
+ maxconn 4096
+ uid 99
+ gid 99
+ daemon
+ # debug
+ #quiet
+
+defaults
+ log global
+ mode http
+ option tcplog
+ option dontlognull
+ retries 3
+ option redispatch
+ maxconn 2000
+ contimeout 5000
+ clitimeout 50000
+ srvtimeout 50000
+
+listen cluster-writes 0.0.0.0:4306
+ mode tcp
+ balance roundrobin
+ option httpchk
+
+ server percona1 192.168.70.2:3306 check port 9200 inter 12000 rise 3 fall 3 backup
+ server percona2 192.168.70.3:3306 check port 9200 inter 12000 rise 3 fall 3 backup
+ server percona3 192.168.70.4:3306 check port 9200 inter 12000 rise 3 fall 3 backup
+
+listen cluster-reads 0.0.0.0:5306
+ mode tcp
+ balance roundrobin
+ option httpchk
+
+ server percona1 192.168.70.2:3306 check port 9200 inter 12000 rise 3 fall 3
+ server percona2 192.168.70.3:3306 check port 9200 inter 12000 rise 3 fall 3
+ server percona3 192.168.70.4:3306 check port 9200 inter 12000 rise 3 fall 3
+
+
+listen admin_page 0.0.0.0:9999
+ mode http
+ balance roundrobin
+ stats uri /
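
Both listeners health-check each node over HTTP on port 9200, served by the xinetd mysqlchk service defined below; and because every server in cluster-writes is flagged backup, HAProxy sends all writes to the first healthy node rather than spreading them. The check itself can be poked by hand (a sketch):

    # clustercheck answers 200 when the node is synced, 503 when it is not
    curl -i http://192.168.70.2:9200/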
8 modules/haproxy/manifests/config.pp
@@ -0,0 +1,8 @@
+class haproxy::config {
+ file {
+ "/etc/haproxy/haproxy.cfg":
+ ensure => present,
+ require => Class['haproxy::packages'],
+ source => "/vagrant/modules/haproxy/files/xtradb_cluster.cfg";
+ }
+}
9 modules/haproxy/manifests/init.pp
@@ -0,0 +1,9 @@
+class haproxy {
+
+ include haproxy::packages
+ include haproxy::config
+ include haproxy::service
+
+ Class['haproxy::packages'] -> Class['haproxy::config'] -> Class['haproxy::service']
+
+}
6 modules/haproxy/manifests/packages.pp
@@ -0,0 +1,6 @@
+class haproxy::packages {
+ package {
+ "haproxy": ensure => installed;
+ }
+}
+
10 modules/haproxy/manifests/service.pp
@@ -0,0 +1,10 @@
+class haproxy::service {
+
+ service {
+ "haproxy":
+ ensure => 'running',
+ require => Class['haproxy::config'],
+ subscribe => File['/etc/haproxy/haproxy.cfg']
+ }
+
+}
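
Because the service subscribes to the config file, Puppet restarts HAProxy whenever the file is replaced. It can be worth validating the file before a restart picks it up (a sketch):

    # parse-check the config without touching the running daemon
    haproxy -c -f /etc/haproxy/haproxy.cfg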
3  modules/percona/manifests/cluster/config.pp
@@ -1,8 +1,11 @@
class percona::cluster::config {
if $hostname == "percona1" {
+ # Percona1 can't join itself, so if this node gets whacked, it tries to talk to percona2
+ # $joinip = "192.168.70.3"
$joinip = " "
} else {
+ # All other nodes join percona1
$joinip = "192.168.70.2"
}
file {
10 modules/percona/manifests/toolkit.pp
@@ -8,10 +8,8 @@
"perl-DBD-MySQL":
ensure => installed,
require => Package['Percona-Server-shared-compat'];
- "percona-toolkit":
- provider => rpm,
- ensure => installed,
- require => [ Package['perl-Time-HiRes'], Package['perl-TermReadKey'], Package['perl-DBD-MySQL'] ],
- source => "http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-1.0.1-1.noarch.rpm";
- }
+ "percona-toolkit":
+ ensure => installed,
+ require => [ Package['perl-Time-HiRes'], Package['perl-TermReadKey'], Package['perl-DBD-MySQL'] ];
+ }
}
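
This drops the pinned 1.0.1 RPM download in favor of installing percona-toolkit from the repository configured by percona::repository, so the package now upgrades with everything else. To confirm where it came from (a sketch):

    pt-query-digest --version
    yum info percona-toolkit | grep -i repo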
8 modules/percona/templates/cluster/my.cnf.erb
@@ -1,15 +1,18 @@
+[mysqld_safe]
+wsrep_urls=gcomm://192.168.70.2:4567,gcomm://192.168.70.3:4567,gcomm://192.168.70.4:4567,gcomm://
+
[mysqld]
datadir=/var/lib/mysql
user=mysql
log_error=error.log
binlog_format=ROW
wsrep_provider=/usr/lib64/libgalera_smm.so
-wsrep_cluster_address=gcomm://<%= joinip %>
wsrep_sst_receive_address=<%= ipaddress_eth1 %>
wsrep_node_incoming_address=<%= ipaddress_eth1 %>
wsrep_slave_threads=2
wsrep_cluster_name=trimethylxanthine
-wsrep_sst_method=rsync
+#wsrep_sst_method=rsync
+wsrep_sst_method=xtrabackup
wsrep_node_name=<%= hostname %>
innodb_locks_unsafe_for_binlog=1
innodb_autoinc_lock_mode=2
@@ -17,4 +20,5 @@ innodb_log_file_size=64M
bind-address=<%= ipaddress_eth1 %>
[mysql]
+user=root
prompt="<%=hostname %> mysql> "
19 modules/xinet/files/mysqlchk
@@ -0,0 +1,19 @@
+# default: on
+# description: mysqlchk
+service mysqlchk
+{
+# this is a config for xinetd, place it in /etc/xinetd.d/
+ disable = no
+ type = UNLISTED
+ flags = REUSE
+ socket_type = stream
+ port = 9200
+ wait = no
+ user = nobody
+ server = /usr/bin/clustercheck
+ log_on_failure += USERID
+ only_from = 192.168.70.0/24
+ # recommended to list only the IPs that
+ # need to connect (for security purposes)
+ per_source = UNLIMITED
+}
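
xinetd runs /usr/bin/clustercheck (shipped with the Percona XtraDB Cluster packages) for every connection on port 9200. It can also be run directly on a node (a sketch; the script takes the check user and password as its first two arguments):

    /usr/bin/clustercheck clustercheckuser 'clustercheckpassword!'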
16 modules/xinet/manifests/config.pp
@@ -0,0 +1,16 @@
+class xinet::config {
+ file {
+ "/etc/xinetd.d/mysqlchk":
+ ensure => present,
+ owner => root, group => root,
+ require => [Class['xinet::packages'], Class['percona::cluster::config']],
+ source => "/vagrant/modules/xinet/files/mysqlchk";
+ }
+
+ mysql::rights { "clustercheck":
+ user => "clustercheckuser",
+ password => "clustercheckpassword!",
+ database => "mysql",
+ priv => ["process_priv"]
+ }
+}
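
The mysql::rights resource grants the credentials that clustercheck expects by default. Done by hand, the grant would be roughly (a sketch of the equivalent statement):

    mysql -e "GRANT PROCESS ON *.* TO 'clustercheckuser'@'localhost' IDENTIFIED BY 'clustercheckpassword!';"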
9 modules/xinet/manifests/init.pp
@@ -0,0 +1,9 @@
+class xinet {
+
+ include xinet::packages
+ include xinet::config
+ include xinet::service
+
+ Class['xinet::packages'] -> Class['xinet::config'] -> Class['xinet::service']
+
+}
5 modules/xinet/manifests/packages.pp
@@ -0,0 +1,5 @@
+class xinet::packages {
+ package {
+ "xinetd": ensure => installed;
+ }
+}
8 modules/xinet/manifests/service.pp
@@ -0,0 +1,8 @@
+class xinet::service {
+ service {
+ "xinetd":
+ ensure => 'running',
+ require => Class['xinet::packages'],
+ subscribe => File['/etc/xinetd.d/mysqlchk'];
+ }
+}