diff --git a/addons/auto-clustering/scripts/auto-cluster-logic.jps b/addons/auto-clustering/scripts/auto-cluster-logic.jps
index 699a2c7f..6979d1d0 100644
--- a/addons/auto-clustering/scripts/auto-cluster-logic.jps
+++ b/addons/auto-clustering/scripts/auto-cluster-logic.jps
@@ -41,14 +41,14 @@ onInstall:
         SCHEME: ${settings.scheme}
   - install:
-      jps: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/master/addons/recovery/manifest.yml?_r=${fn.random}
+      jps: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon/addons/recovery/manifest.yml?_r=${fn.random}
       nodeGroup: sqldb
       targetNodes: sqldb
       settings:
         install: true
   - install:
-      jps: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/master/addons/check-corrupts/manifest.yml?_r=${fn.random}
+      jps: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon/addons/check-corrupts/manifest.yml?_r=${fn.random}
       nodeGroup: sqldb
       targetNodes: sqldb
       settings:
diff --git a/scripts/common.yml b/scripts/common.yml
index c3509004..d042fa30 100644
--- a/scripts/common.yml
+++ b/scripts/common.yml
@@ -75,7 +75,7 @@ actions:
 
   setupAdminUser:
     - cmd[${this.id}]: |-
-        wget ${baseUrl}/scripts/setupUser.sh -O ~/setupUser.sh &>> /var/log/run.log;
+        wget ${globals.db_cluster_path}/scripts/setupUser.sh -O ~/setupUser.sh;
         bash ~/setupUser.sh ${globals.DB_USER} ${globals.DB_PASS} &>> /var/log/run.log;
       user: root
 
@@ -88,7 +88,7 @@ actions:
       pswd: ${globals.REPLICA_PSWD}
 
   addCustomConfig:
-    - cmd[sqldb]: wget ${baseUrl}/configs/custom.cnf -O ${globals.customConfigFile} &>> /var/log/run.log;
+    - cmd[sqldb]: wget ${globals.db_cluster_path}/configs/custom.cnf -O ${globals.customConfigFile};
     - env.file.AddFavorite:
         nodeGroup: sqldb
         path: ${globals.customConfigFile}
@@ -123,3 +123,14 @@ actions:
       sed -i "s/expire_logs_days.*/binlog_expire_logs_seconds = 604800/" ${this.conf_file};
       sed -i "/master_info_repository/d" ${this.conf_file};
       sed -i "/relay_log_info_repository/d" ${this.conf_file};
+
+  - if (/mysql/.test("${nodes.sqldb.nodeType}") && '${fn.compare([nodes.sqldb.version], 8.0.33)}' == 1):
+      cmd[${this.id}]: |-
+        sed -i "/binlog_format/d" ${this.conf_file};
+
+  - if (/percona/.test("${nodes.sqldb.nodeType}") && '${fn.compare([nodes.sqldb.version], 8.0.33)}' == 1):
+      cmd[${this.id}]: |-
+        sed -i "s/log-slave-updates/log_replica_updates/" ${this.conf_file};
+        sed -i "s/slave-skip-errors/replica_skip_errors/" ${this.conf_file};
+        sed -i "s/expire_logs_days.*/binlog_expire_logs_seconds = 604800/" ${this.conf_file};
+        sed -i "/binlog_format/d" ${this.conf_file};
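Reviewer note on the last `common.yml` hunk: MySQL and Percona Server 8.0.34 deprecate explicit `binlog_format` and have already renamed the `slave`-style options (`log_replica_updates`, `replica_skip_errors`) and superseded `expire_logs_days` with `binlog_expire_logs_seconds`, which is what the version gate accounts for (`'${fn.compare(...8.0.33)}' == 1` means "newer than 8.0.33"). A minimal offline sketch of the same rewrite, assuming a throwaway sample config (the path and file contents are illustrative, not part of this commit):

```bash
# Hypothetical sample config; only the sed expressions come from the diff.
conf=/tmp/custom.cnf.sample
cat > "$conf" <<'EOF'
log-slave-updates = 1
slave-skip-errors = all
expire_logs_days = 7
binlog_format = ROW
EOF

# The same transforms the manifest applies on nodes newer than 8.0.33:
sed -i "s/log-slave-updates/log_replica_updates/" "$conf"
sed -i "s/slave-skip-errors/replica_skip_errors/" "$conf"
sed -i "s/expire_logs_days.*/binlog_expire_logs_seconds = 604800/" "$conf"
sed -i "/binlog_format/d" "$conf"
cat "$conf"
```

The 604800 seconds written here equal 7 days, so retention is unchanged wherever the old line was `expire_logs_days = 7`.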
diff --git a/scripts/firewallRules.js b/scripts/firewallRules.js
new file mode 100644
index 00000000..8b40ed76
--- /dev/null
+++ b/scripts/firewallRules.js
@@ -0,0 +1,40 @@
+var envName = "${env.name}",
+    bFireWallEnabled,
+    outputRule,
+    inputRule,
+    rules,
+    resp;
+
+inputRule = {
+    "direction": "INPUT",
+    "name": name,
+    "protocol": "ALL",
+    "ports": ports,
+    "src": "ALL",
+    "priority": 1080,
+    "action": "ALLOW"
+};
+outputRule = {
+    "direction":'OUTPUT',
+    "name":name,
+    "protocol":'ALL',
+    "ports":ports,
+    "dst":'ALL',
+    "priority":1000,
+    "action":'ALLOW'
+};
+
+if (jelastic.environment.security) {
+    resp = jelastic.billing.account.GetOwnerQuotas(appid, session, 'firewall.enabled');
+    if (!resp || resp.result !== 0) return resp;
+    bFireWallEnabled = resp.array[0] ? resp.array[0].value : 0;
+    if (bFireWallEnabled) {
+        resp = jelastic.environment.security.AddRule(envName, session, inputRule, nodeGroup);
+        if (!resp || resp.result !== 0) return resp;
+        return jelastic.environment.security.AddRule(envName, session, outputRule, nodeGroup);
+    }
+}
+
+return {
+    result: 0
+}
diff --git a/scripts/galera.jps b/scripts/galera.jps
index 5fab6d73..deb007a0 100644
--- a/scripts/galera.jps
+++ b/scripts/galera.jps
@@ -3,7 +3,7 @@
 id: mysql-galera-cluster
 name: MariaDB Galera Cluster
 description: MariaDB Galera Auto Clustering
-baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/v3.0.0
+baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-cluster
 
 success:
   text: /texts/phpmyadmin-credentials.md
diff --git a/scripts/master-master.jps b/scripts/master-master.jps
index ccc4f913..7134999d 100644
--- a/scripts/master-master.jps
+++ b/scripts/master-master.jps
@@ -108,7 +108,7 @@ actions:
 
   setupPrimaryPrimaryReplication:
     - cmd[${globals.primary2_id}]: |-
-        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/master/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
+        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
         bash /tmp/db-recovery.sh --scenario restore_primary_from_primary --donor-ip ${globals.primary1_ip};
       user: root
 
@@ -143,6 +143,6 @@ actions:
 
   setupSecondaryReplication:
    - cmd[${this}]: |-
-        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/master/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
+        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
         bash /tmp/db-recovery.sh --scenario restore_secondary_from_primary --donor-ip ${globals.primary1_ip} --additional-primary ${globals.primary2_ip};
       user: root
diff --git a/scripts/master-slave.jps b/scripts/master-slave.jps
index ca4bf53b..47272fde 100644
--- a/scripts/master-slave.jps
+++ b/scripts/master-slave.jps
@@ -107,6 +107,6 @@ actions:
     - setGlobals:
         PRIMARY_IP: ${nodes.sqldb.master.address}
     - cmd[${this}]: |-
-        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/master/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
+        curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh;
         bash /tmp/db-recovery.sh --scenario restore_secondary_from_primary --donor-ip ${globals.PRIMARY_IP};
       user: root
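All three replication flavors now pull `db-recovery.sh` from the `stage-addon` branch rather than `master`; if this is meant to ship beyond staging, the URLs presumably get repointed at a release tag later. For reference, the call pattern these manifests use, with placeholder donor addresses (the flags are exactly the ones in the hunks above):

```bash
# Placeholder TEST-NET addresses; script path and flags come from the manifests.
REPO="https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-addon"
curl --silent "$REPO/addons/recovery/scripts/db-recovery.sh" > /tmp/db-recovery.sh

# master-master: rebuild the second primary from the first
bash /tmp/db-recovery.sh --scenario restore_primary_from_primary --donor-ip 192.0.2.10

# secondaries: restore from the current primary; the extra primary is passed
# only in the master-master topology
bash /tmp/db-recovery.sh --scenario restore_secondary_from_primary --donor-ip 192.0.2.10 --additional-primary 192.0.2.11
```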
diff --git a/scripts/proxy-common.yml b/scripts/proxy-common.yml
index abe1d32b..c3f5ee2a 100644
--- a/scripts/proxy-common.yml
+++ b/scripts/proxy-common.yml
@@ -1,4 +1,44 @@
+globals:
+  db_cluster_path: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-cluster
+  proxy_web_port: 4848
+
 actions:
+
+  setReplicaCommands:
+    - if ((/mariadb/.test("${nodes.sqldb.nodeType}")) && ('${fn.compare([nodes.sqldb.version], 11.0)}' >= 0)):
+        setGlobals:
+          show_slave_hosts: "SHOW REPLICA HOSTS"
+    - elif ((/mysql/.test("${nodes.sqldb.nodeType}")) && ('${fn.compare([nodes.sqldb.version], 8.1)}' >= 0)):
+        setGlobals:
+          show_slave_hosts: "SHOW REPLICAS"
+    - elif ((/percona/.test("${nodes.sqldb.nodeType}")) && ('${fn.compare([nodes.sqldb.version], 8.1)}' >= 0)):
+        setGlobals:
+          show_slave_hosts: "SHOW REPLICAS"
+    - else:
+        setGlobals:
+          show_slave_hosts: "SHOW SLAVE HOSTS"
+
+  disableOrchestrator:
+    - log: Disabling orchestrator
+    - cmd [proxy]: |-
+        mysql -h 127.0.0.1 -P3360 -uroot -e "DROP DATABASE IF EXISTS orchestrator;"
+        if systemctl list-unit-files --type=service | grep -q 'orchestrator.service'; then
+          systemctl stop orchestrator;
+          systemctl disable orchestrator;
+        fi
+      user: root
+
+  setupProxySQLGUI:
+    - log: ProxySQL GUI configuration
+    - cmd[proxy]: |-
+        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value='true' WHERE variable_name='admin-web_enabled';"
+        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value='${globals.proxy_web_port}' WHERE variable_name='admin-web_port';"
+        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value='${globals.ADMIN_USER}:${globals.ADMIN_PASS}' WHERE variable_name='admin-stats_credentials';"
+        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "LOAD ADMIN VARIABLES TO RUNTIME; SAVE ADMIN VARIABLES TO DISK;"
+    - script[proxy]: ${globals.db_cluster_path}/scripts/firewallRules.js?_r=${fn.random}
+      ports: ${globals.proxy_web_port}
+      name: ProxySQL Admin Web
+
   setupOrchestrator:
     - log: Orchestrator configuration
     - cmd [proxy]: |-
@@ -11,7 +51,7 @@ actions:
           mysql -h 127.0.0.1 -P3360 -uroot -e "SET PASSWORD FOR 'admin'@'127.0.0.1' = PASSWORD('${globals.ORCH_PASS}');"
         fi
        mysql -h 127.0.0.1 -P3360 -uroot -e "FLUSH PRIVILEGES;"
-        wget ${baseUrl}/configs/orchestrator.conf.json -O /etc/orchestrator.conf.json &>> /var/log/run.log
+        wget ${globals.db_cluster_path}/configs/orchestrator.conf.json -O /etc/orchestrator.conf.json &>> /var/log/run.log
        sed -i -e 's|orc_client_user|${globals.DB_USER}|g' /etc/orchestrator.conf.json
        sed -i -e 's|orc_client_password|${globals.DB_PASS}|g' /etc/orchestrator.conf.json
        sed -i -e 's|orc_server_user|admin|g' /etc/orchestrator.conf.json
@@ -31,6 +71,7 @@ actions:
        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value=200 WHERE variable_name='admin-cluster_check_interval_ms';"
        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value=100 WHERE variable_name='admin-cluster_check_status_frequency';"
        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value='true' WHERE variable_name='admin-cluster_mysql_servers_save_to_disk';"
+        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value='false' WHERE variable_name='admin-hash_passwords';"
        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "UPDATE global_variables SET variable_value=1 WHERE variable_name='admin-cluster_mysql_servers_diffs_before_sync';"
        MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "load admin variables to RUNTIME; save admin variables to disk;"
     - forEach(n:nodes.proxy):
diff --git a/scripts/proxy-galera.jps b/scripts/proxy-galera.jps
index a16f6f3f..7d049b22 100644
--- a/scripts/proxy-galera.jps
+++ b/scripts/proxy-galera.jps
@@ -3,7 +3,7 @@
 id: mysql-proxy-galera-cluster
 name: Galera/XtraDB Cluster with ProxySQL
 description: Galera/XtraDB Cluster
-baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/v3.0.0
+baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-cluster
 
 success: /texts/proxy-entrypoint.md
 
@@ -53,8 +53,9 @@ onAfterClone:
 onInstall:
   - getReplicaUser
   - resetProxyNodes
-  - setupOrchestrator
+  - disableOrchestrator
   - setupProxySQLCluster
+  - setupProxySQLGUI
   - if(${fn.compare([nodes.proxy.master.version], 2.0)} > 0): initGaleraForProxy2
   - if(${fn.compare([nodes.proxy.master.version], 2.0)} < 0): initGaleraForProxy1
   - forEach(nodes.sqldb):
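The new `setReplicaCommands` action above exists because the legacy statement was renamed upstream: the manifest switches MySQL/Percona at 8.1 and MariaDB at 11.0 to `SHOW REPLICAS` and `SHOW REPLICA HOSTS` respectively, while older servers keep `SHOW SLAVE HOSTS` (both servers actually accept the new spellings somewhat earlier; the deprecated ones are eventually removed upstream). A shell sketch of the same dispatch for testing against a live node, with placeholder connection settings:

```bash
# Placeholder host/credentials; the three statements mirror setReplicaCommands.
ver=$(mysql -h 127.0.0.1 -uroot -N -e "SELECT VERSION();")
case "$ver" in
  *MariaDB*)
    major="${ver%%.*}"
    if [ "$major" -ge 11 ]; then q="SHOW REPLICA HOSTS"; else q="SHOW SLAVE HOSTS"; fi ;;
  8.[1-9]*|9.*)                       # MySQL/Percona 8.1 and newer
    q="SHOW REPLICAS" ;;
  *)
    q="SHOW SLAVE HOSTS" ;;
esac
mysql -h 127.0.0.1 -uroot -e "$q"
```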
diff --git a/scripts/proxy-master-slave.jps b/scripts/proxy-master-slave.jps
index 2093b200..226d0dd3 100644
--- a/scripts/proxy-master-slave.jps
+++ b/scripts/proxy-master-slave.jps
@@ -3,7 +3,7 @@
 id: mysql-proxy-master-slave-cluster
 name: MySQL/MariaDB/Percona Database Cluster with ProxySQL
 description: ProxySQL Load Balancer Entry Point
-baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/v3.0.0
+baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-cluster
 
 success: /texts/proxy-entrypoint.md
 
@@ -56,11 +56,13 @@ onAfterMigrate:
     db_pass: "${globals.DB_PASS}"
 
 onInstall:
+  - setReplicaCommands
   - getReplicaUser
   - resetProxyNodes
-  - setupOrchestrator
+  - disableOrchestrator
   - setupProxySQLCluster
   - setupProxySQL
+  - setupProxySQLGUI
   - if ('${globals.SCHEME}'.toLowerCase() == 'master'):
     - setGlobals:
         MAX_REPL_LAG: 0
@@ -103,9 +105,6 @@ actions:
       cmd [proxy]: |-
         MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "INSERT INTO mysql_servers (hostgroup_id, hostname, port) VALUES (10, 'node${this.id}', 3306);"
         MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "LOAD MYSQL SERVERS TO RUNTIME; SAVE MYSQL SERVERS TO DISK;"
-        MYSQL_PWD=${globals.DB_PASS} mysql -u${globals.DB_USER} -hnode${this.id} -e "show slave hosts;"
-        /usr/local/orchestrator/orchestrator -c discover -i node${this.id} cli;
-      user: root
 
   addSlave:
     cmd[proxy]: |-
@@ -117,7 +116,6 @@ actions:
       cmd[proxy]: |-
         MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "DELETE FROM mysql_servers WHERE hostname = 'node${this.id}';"
         MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "LOAD MYSQL SERVERS TO RUNTIME; SAVE MYSQL SERVERS TO DISK;"
-        MYSQL_PWD=${globals.ORCH_PASS} mysql -h 127.0.0.1 -P3360 -uadmin -e "DELETE FROM orchestrator.database_instance where hostname='node${this.id}-${env.domain}';"
 
   getMastersIDs:
     - script: |
diff --git a/scripts/xtradb.jps b/scripts/xtradb.jps
index ebe634eb..9a6c1117 100644
--- a/scripts/xtradb.jps
+++ b/scripts/xtradb.jps
@@ -3,7 +3,7 @@
 id: percona-xtradb-cluster
 name: Percona XtraDB Cluster
 description: Percona XtraDB Auto Clustering
-baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/v3.0.0
+baseUrl: https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/stage-cluster
 
 success:
   text: /texts/phpmyadmin-credentials.md
diff --git a/texts/proxy-entrypoint-galera.md b/texts/proxy-entrypoint-galera.md
index dfd21924..d13607e6 100644
--- a/texts/proxy-entrypoint-galera.md
+++ b/texts/proxy-entrypoint-galera.md
@@ -20,11 +20,11 @@ ___
 
 ___
 
-### Cluster Orchestrator Panel
+### ProxySQL Web Panel
 
-**Admin panel URL:** [http://proxy.${env.domain}](http://proxy.${env.domain}/)
-**Username:** admin
-**Password:** ${globals.ORCH_PASS}
+**Web panel URL:** [https://node${nodes.proxy.master.id}-${env.domain}:${globals.proxy_web_port}](https://node${nodes.proxy.master.id}-${env.domain}:${globals.proxy_web_port})
+**Username:** ${globals.ADMIN_USER}
+**Password:** ${globals.ADMIN_PASS}
 
 ___
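Both entry-point texts drop the Orchestrator panel in favor of ProxySQL's built-in web UI, which `setupProxySQLGUI` enables on `${globals.proxy_web_port}` (4848) and `firewallRules.js` opens up; the `name`, `ports`, and `nodeGroup` variables that script reads are supplied by the platform from the calling `script` action's parameters. ProxySQL serves this UI over HTTPS with a self-signed certificate, hence the `https://` URLs. A reachability spot check, with placeholder hostname and credentials:

```bash
# Placeholder host/credentials; -k accepts ProxySQL's self-signed certificate.
curl -k -s -o /dev/null -w '%{http_code}\n' \
  -u "cluster-admin:changeMe" "https://node1234-myenv.example.com:4848/"
```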
diff --git a/texts/proxy-entrypoint.md b/texts/proxy-entrypoint.md
index 1ab2a6b1..ac910f9b 100644
--- a/texts/proxy-entrypoint.md
+++ b/texts/proxy-entrypoint.md
@@ -15,11 +15,11 @@ ___
 
 ___
 
-### Cluster Orchestrator Panel
+### ProxySQL Web Panel
 
-**Admin panel URL:** [http://proxy.${env.domain}](http://proxy.${env.domain}/)
-**Username:** admin
-**Password:** ${globals.ORCH_PASS}
+**Web panel URL:** [https://node${nodes.proxy.master.id}-${env.domain}:${globals.proxy_web_port}](https://node${nodes.proxy.master.id}-${env.domain}:${globals.proxy_web_port})
+**Username:** ${globals.ADMIN_USER}
+**Password:** ${globals.ADMIN_PASS}
 
 ___
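One post-install spot check worth running on a proxy node: confirm that the variables written by `setupProxySQLGUI` and the new `admin-hash_passwords` setting actually reached ProxySQL's runtime. This uses the same admin interface and default credentials (port 6032, `admin`/`admin`) that the manifests themselves use:

```bash
# Run on a proxy node; lists the four variables this change set touches.
MYSQL_PWD=admin mysql -h 127.0.0.1 -P6032 -uadmin -e "SELECT variable_name, variable_value FROM global_variables WHERE variable_name IN ('admin-web_enabled','admin-web_port','admin-stats_credentials','admin-hash_passwords');"
```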