Skip to content

Commit

Permalink
Merge 644de25 into 2abf88d
Browse files Browse the repository at this point in the history
  • Loading branch information
Gerhut committed Feb 10, 2020
2 parents 2abf88d + 644de25 commit aa43482
Show file tree
Hide file tree
Showing 21 changed files with 569 additions and 530 deletions.
3 changes: 3 additions & 0 deletions src/ClusterBootstrap/az_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
"infra_node_num": 1,
"worker_node_num": 2,
"mysqlserver_node_num": 0,
"elasticsearch_node_num": 0,
"nfs_node_num": 1,
"azure_location": "westus2",
"infra_vm_size": "Standard_D1_v2",
"worker_vm_size": "Standard_NC6",
"mysqlserver_vm_size": "Standard_D1_v2",
"elasticsearch_vm_size": "Standard_D1_v2",
"nfs_vm_size": "Standard_D1_v2",
"vm_image": "Canonical:UbuntuServer:18.04-LTS:18.04.201912180",
"os_storage_sku": "Premium_LRS",
Expand All @@ -17,6 +19,7 @@
"infra_local_storage_sz": 1024,
"worker_local_storage_sz": 1024,
"mysqlserver_local_storage_sz": 2048,
"elasticsearch_local_storage_sz" : 2048,
"nfs_data_disk_sku": "Premium_LRS",
"nfs_data_disk_sz": 1024,
"nfs_data_disk_num": 1,
Expand Down
29 changes: 25 additions & 4 deletions src/ClusterBootstrap/az_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ def create_vm(vmname, vm_ip, role, vm_size, pwd, vmcnf):
else:
auth = """--generate-ssh-keys --authentication-type ssh --ssh-key-value '%s' """ % config["azure_cluster"]["sshkey"]

priv_IP = "--private-ip-address %s " % vm_ip if not role in ["worker", "mysqlserver", "nfs"] else ""
priv_IP = "--private-ip-address %s " % vm_ip if not role in ["worker", "mysqlserver", "elasticsearch", "nfs"] else ""
nsg = "nfs_nsg_name" if role == "nfs" else "nsg_name"

availability_set = ""
Expand All @@ -106,7 +106,7 @@ def create_vm(vmname, vm_ip, role, vm_size, pwd, vmcnf):
assert os.path.exists(config["cloud_init_%s" % role])
cloud_init = "--custom-data {}".format(config["cloud_init_%s" % role])

if role in ["infra", "worker", "mysqlserver"]:
if role in ["infra", "worker", "mysqlserver", "elasticsearch"]:
storage = "--storage-sku {} --data-disk-sizes-gb {} ".format(config["azure_cluster"]["vm_local_storage_sku"],
config["azure_cluster"]["%s_local_storage_sz" % role])
# corner case: NFS on infra
Expand Down Expand Up @@ -475,6 +475,11 @@ def create_cluster(arm_vm_password=None, parallelism=1):
create_vm_param(i, "mysqlserver", config["azure_cluster"]["mysqlserver_vm_size"],
arm_vm_password is not None, arm_vm_password)

# create elasticsearch server if specified.
for i in range(int(config["azure_cluster"]["elasticsearch_node_num"])):
create_vm_param(i, "elasticsearch", config["azure_cluster"]["elasticsearch_vm_size"],
arm_vm_password is not None, arm_vm_password)

# create nfs server if specified.
for i in range(int(config["azure_cluster"]["nfs_node_num"])):
create_vm_param(i, "nfs", config["azure_cluster"]["nfs_vm_size"], False,
Expand Down Expand Up @@ -513,6 +518,9 @@ def create_vm_param(i, role, vm_size, no_az=False, arm_vm_password=None, vmcnf =
["cluster_name"], i + 1)
elif role == "mysqlserver":
vmname = "%s-mysqlserver%02d" % (config["azure_cluster"]["cluster_name"], i + 1)
elif role == "elasticsearch":
vmname = "%s-elasticsearch%02d" % (config["azure_cluster"]
["cluster_name"], i + 1)
elif role == "dev":
vmname = "%s-dev" % (config["azure_cluster"]["cluster_name"])

Expand Down Expand Up @@ -822,6 +830,14 @@ def gen_cluster_config(output_file_name, output_file=True, no_az=False):
"role": "mysqlserver",
"node-group": vm["vmSize"]}

# Add elasticsearch nodes
for vm in vm_list:
vmname = vm["name"]
if "-elasticsearch" in vmname:
cc["machines"][vmname.lower()] = {
"role": "elasticsearch",
"node-group": vm["vmSize"]}

nfs_nodes = []
for vm in vm_list:
vmname = vm["name"]
Expand Down Expand Up @@ -967,13 +983,18 @@ def delete_cluster():

def check_subscription():
    """Ensure the az CLI default subscription matches the configured one.

    Checks `az account list` for the subscription named in
    config["azure_cluster"]["subscription"]; if it is not the default,
    switches to it, prompts for a fresh `az login`, and verifies the
    switch took effect.

    Raises:
        RuntimeError: if the subscription could not be activated.
    """
    def _run(cmd):
        # utils.exec_cmd_local may return bytes or str depending on the
        # utils version — normalize to str before searching.
        out = utils.exec_cmd_local(cmd)
        return out.decode() if isinstance(out, bytes) else out

    subscription = config["azure_cluster"]["subscription"]
    chkcmd = "az account list | grep -A5 -B5 '\"isDefault\": true'"
    if subscription not in _run(chkcmd):
        utils.exec_cmd_local("az account set --subscription \"{}\"".format(subscription))
        print("Set your subscription to {}, please login.\nIf you want to specify another subscription, please configure azure_cluster.subscription".format(subscription))
        utils.exec_cmd_local("az login")
        # Fail loudly if the switch did not stick. A plain `assert` would be
        # stripped under `python -O`, so raise explicitly.
        if subscription not in _run(chkcmd):
            raise RuntimeError(
                "Failed to set az subscription to {}".format(subscription))

def run_command(args, command, nargs, parser):
if command == "genconfig":
Expand Down
49 changes: 46 additions & 3 deletions src/ClusterBootstrap/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,8 @@
nocache = False
limitnodes = None
allroles = {"infra", "infrastructure", "worker",
"nfs", "sql", "samba", "mysqlserver"}
"nfs", "sql", "samba", "mysqlserver",
"elasticsearch"}


# Path to mount name
Expand Down Expand Up @@ -256,7 +257,8 @@ def update_config():
if ("influxdb_node" not in config):
config["influxdb_node"] = config["webportal_node"]
if ("elasticsearch_node" not in config):
config["elasticsearch_node"] = config["webportal_node"]
config["elasticsearch_node"] = None if len(get_node_lists_for_service(
"elasticsearch"))==0 else get_node_lists_for_service("elasticsearch")[0]
if ("mysql_node" not in config):
config["mysql_node"] = None if len(get_node_lists_for_service(
"mysql")) == 0 else get_node_lists_for_service("mysql")[0]
Expand Down Expand Up @@ -604,6 +606,7 @@ def check_master_ETCD_status():
get_ETCD_master_nodes(config["clusterId"])
get_worker_nodes(config["clusterId"], False)
get_nodes_by_roles(["mysqlserver"])
get_nodes_by_roles(["elasticsearch"])
get_nodes_by_roles(["nfs"])
get_nodes_by_roles(["samba"])
print("===============================================")
Expand All @@ -615,6 +618,8 @@ def check_master_ETCD_status():
(len(config["worker_node"]), ",".join(config["worker_node"])))
print("Activate MySQLServer Node(s):%s\n %s \n" %
(len(config["mysqlserver_node"]), ",".join(config["mysqlserver_node"])))
print("Activate Elasticsearch Node(s):%s\n %s \n" %
(len(config["elasticsearch_node"]), ",".join(config["elasticsearch_node"])))
print("Activate NFS Node(s):%s\n %s \n" %
(len(config["nfs_node"]), ",".join(config["nfs_node"])))
print("Activate Samba Node(s):%s\n %s \n" %
Expand Down Expand Up @@ -1540,6 +1545,35 @@ def update_mysqlserver_nodes(nargs):
os.system("rm ./deploy/kubelet/worker-kubeconfig.yaml")


def update_elasticsearch_nodes(nargs):
    """Deploy/update elasticsearch node(s), reusing update_worker_node.
    TODO: Should be covered by update_role_nodes in deploy.py V2
    """
    # Render the kubelet templates with gpu_type masked so that no nvidia
    # runtime entry ends up in /etc/docker/daemon.json on these nodes.
    saved_gpu_type = config["gpu_type"]
    config["gpu_type"] = "None"
    utils.render_template_directory("./template/kubelet", "./deploy/kubelet", config)
    config["gpu_type"] = saved_gpu_type

    write_nodelist_yaml()

    # Escape '/' so the values stay valid inside the sed s/// expressions.
    etcd_endpoints = config["etcd_endpoints"].replace("/", "\\/")
    api_servers = config["api_servers"].replace("/", "\\/")
    os.system('sed "s/##etcd_endpoints##/%s/" "./deploy/kubelet/options.env.template" > "./deploy/kubelet/options.env"' % etcd_endpoints)
    os.system('sed "s/##api_servers##/%s/" ./deploy/kubelet/kubelet.service.template > ./deploy/kubelet/kubelet.service' % api_servers)
    os.system('sed "s/##api_servers##/%s/" ./deploy/kubelet/worker-kubeconfig.yaml.template > ./deploy/kubelet/worker-kubeconfig.yaml' % api_servers)

    get_hyperkube_docker()

    # Only touch the nodes selected on the command line / node limit.
    for node in limit_nodes(get_nodes_by_roles(["elasticsearch"])):
        if in_list(node, nargs):
            update_worker_node(node)

    # Remove the rendered, cluster-specific files again.
    for rendered in ("options.env", "kubelet.service", "worker-kubeconfig.yaml"):
        os.system("rm ./deploy/kubelet/%s" % rendered)


def deploy_restful_API_on_node(ipAddress):
masterIP = ipAddress
dockername = "%s/dlws-restfulapi" % (config["dockerregistry"])
Expand Down Expand Up @@ -2516,6 +2550,8 @@ def get_node_lists_for_service(service):
nodes = config["worker_node"]
elif nodetype == "mysqlserver_node":
nodes = config["mysqlserver_node"]
elif nodetype == "elasticsearch_node":
nodes = config["elasticsearch_node"]
elif nodetype == "nfs_node":
nodes = config["nfs_node"]
elif nodetype == "etcd_node":
Expand Down Expand Up @@ -2967,7 +3003,7 @@ def run_command(args, command, nargs, parser):
role2connect = nargs[0]
if len(nargs) < 1 or role2connect == "master":
nodes = config["kubernetes_master_node"]
elif role2connect in ["etcd", "worker", "nfs", "samba", "mysqlserver"]:
elif role2connect in ["etcd", "worker", "nfs", "samba", "mysqlserver", "elasticsearch"]:
nodes = config["{}_node".format(role2connect)]
else:
parser.print_help()
Expand Down Expand Up @@ -3182,6 +3218,13 @@ def run_command(args, command, nargs, parser):
gen_configs()
update_mysqlserver_nodes(nargs)

elif command == "updateelasticsearch":
response = raw_input_with_default("Deploy Elasticsearch Node(s) (y/n)?")
if first_char(response) == "y":
check_master_ETCD_status()
gen_configs()
update_elasticsearch_nodes(nargs)

elif command == "updatenfs":
response = raw_input_with_default("Deploy NFS Node(s) (y/n)?")
if first_char(response) == "y":
Expand Down
26 changes: 20 additions & 6 deletions src/ClusterBootstrap/params.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# These are the default configuration parameter
default_config_parameters = {
"supported_platform": ["azure_cluster", "onpremise"],
"allroles": {"infra", "infrastructure", "worker", "nfs", "sql", "dev", "etcd", "kubernetes_master", "mysqlserver"},
"allroles": {"infra", "infrastructure", "worker", "nfs", "sql", "dev", "etcd", "kubernetes_master", "mysqlserver", "elasticsearch"},
# Kubernetes setting
"service_cluster_ip_range": "10.3.0.0/16",
"pod_ip_range": "10.2.0.0/16",
Expand All @@ -15,8 +15,18 @@
"cloud_elasticsearch_node": "dlws-influxdb.westus.cloudapp.azure.com",
"cloud_elasticsearch_port": "9200",

"elasticsearch_db_port": "9200",
"elasticsearch_tp_port": "9300",
"elasticsearch": {
"port": {
"http": 9200,
"transport": 9300,
"exporter": 9114,
"kibana": 5601,
},
},

"fluentd": {
"port": 24231,
},

"influxdb_port": "8086",
"influxdb_tp_port": "25826",
Expand Down Expand Up @@ -240,7 +250,7 @@
"prometheus": "etcd_node_1",
"alert-manager": "etcd_node_1",
"watchdog": "etcd_node_1",
"elasticsearch": "etcd_node_1",
"elasticsearch": "elasticsearch_node",
"kibana": "etcd_node_1",
"mysql": "etcd_node_1",
"mysql-server": "mysqlserver_node",
Expand Down Expand Up @@ -649,6 +659,10 @@
"etcd":{"fullname":"dlws/etcd:3.1.10"},
"mysql":{"fullname":"dlws/mysql:5.6"},
"phpmyadmin":{"fullname":"dlws/phpmyadmin:4.7.6"},
"elasticsearch":{"fullname":"dlws/elasticsearch:6.8.5"},
"elasticsearch-exporter":{"fullname":"dlws/elasticsearch-exporter:1.1.0"},
"fluentd-kubernetes-daemonset":{"fullname":"dlws/fluentd-kubernetes-daemonset:v1.7.4-debian-elasticsearch6-1.1"},
"kibana":{"fullname":"dlws/kibana:6.8.5"},
"fluentd-elasticsearch":{"fullname":"dlws/fluentd-elasticsearch:v2.0.2"},
"binstore":{"fullname":"dlws/binstore:v1.0"},

Expand All @@ -670,12 +684,12 @@
# There is no udp port requirement for now
#"udp_port_ranges": "25826",
"inter_connect": {
"tcp_port_ranges": "22 1443 2379 3306 5000 8086 10250",
"tcp_port_ranges": "22 1443 2379 3306 5000 8086 9114 9200 9300 10250",
# Need to white list dev machines to connect
# "source_addresses_prefixes": [ "52.151.0.0/16"]
},
"dev_network": {
"tcp_port_ranges": "22 1443 2379 3306 5000 8086 10250 10255 22222",
"tcp_port_ranges": "22 1443 2379 3306 5000 8086 5601 10250 10255 22222",
# Need to white list dev machines to connect
# "source_addresses_prefixes": [ "52.151.0.0/16"]
},
Expand Down
Loading

0 comments on commit aa43482

Please sign in to comment.