Permalink
Browse files

Supports multi-cloud deployment

  • Loading branch information...
1 parent 98e5983 commit 2747f2e0e6984a2e14f1702e9d6725e585fb6ce6 earthmant committed Aug 24, 2016
View
@@ -6,17 +6,15 @@ Limitations (as of 22/6/2016):
+ Kubernetes Master & Nodes will only run on Ubuntu 14.04
+ Tested on Openstack Kilo (More coming soon!)
+ Tested on Cloudify 3.3.1 and 3.4
++ Tested on AWS
### Installation Instructions
Ubuntu Image description: 4 GB RAM 2 CPU 40 GB Disk
-1. Create an inputs.yaml file with the inputs defined in the kubernetes-blueprint.yaml file and the other imports.
-
+1. Create an inputs.yaml file with the inputs defined in the kubernetes-blueprint.yaml file and the other imports. See the example-inputs.yaml.
2. Upload the blueprint to your Cloudify Manager: `cfy blueprints upload -p kubernetes-blueprint.yaml -b kubernetes`
-
3. Create a deployment: `cfy deployments create -b kubernetes -d kubernetes -i inputs.yaml`
-
4. Run the install workflow: `cfy executions start -w install -d kubernetes`
@@ -62,3 +60,9 @@ Scaling up is initiated when CPU Percent used by hyperkube on a single node is a
Scaling down is initiated when CPU Percent used by hyperkube on a single node is below 1% for more than 200 consecutive seconds. This scale policy will not go below 2 instances.
Healing is initiated on a VM when no CPU metrics are being received by the manager.
+
+#### Multicloud
+
+By default this blueprint is configured to deploy in AWS. You can switch that to Openstack by toggling the imports.
+
+If you want to have a manager in one IaaS and deploy nodes in OpenStack, AWS, or both, use the imports in the respective IaaS's 'blueprint.yaml'.
View
@@ -0,0 +1,48 @@
+# OPENSTACK_INPUTS
+#keystone_password: S3CUR3PASSW0RD
+#keystone_tenant_name: example_tenant
+#keystone_url: https://compute.datacentred.io:5000/
+#keystone_username: user@example.com
+#region: region1
+#
+#openstack_external_network_name: external
+#use_existing_openstack_router: true
+#existing_openstack_router_id: ''
+#use_existing_openstack_network: true
+#existing_openstack_network_id: ''
+#openstack_management_network_name: { get_input: existing_openstack_network_id }
+#use_existing_openstack_public_subnet: true
+#existing_openstack_public_subnet_id: ''
+#use_existing_openstack_cloudify_group: true
+#existing_cloudify_group_id: ''
+#use_existing_openstack_port: true
+#existing_openstack_port_id: ''
+#
+#openstack_node_vm_image_id: 6c3047c6-17b1-4aaf-a657-9229bb481e50
+#openstack_node_vm_flavor_id: 8f4b7ae1-b8c2-431f-bb0c-362a5ece0381
+
+#master_openstack_image_id: { get_input: openstack_node_vm_image_id }
+#master_openstack_flavor_id: { get_input: openstack_node_vm_flavor_id }
+
+# AWS INPUTS
+aws_secret_access_key: SOME/ACCESS/SECRET
+aws_access_key_id: 1234567890ABCDEFGHIJ
+
+use_existing_vpc: true
+existing_vpc_id: ''
+use_existing_aws_public_subnet: true
+existing_aws_public_subnet_id: ''
+use_existing_aws_igw: true
+existing_aws_igw_id: ''
+use_existing_aws_public_routetable: true
+existing_aws_public_routetable_id: ''
+use_existing_aws_nat_instance: true
+existing_aws_nat_instance_id: ''
+use_existing_aws_nat_instance_ip: true
+existing_aws_nat_instance_ip: ''
+use_existing_aws_private_subnet: true
+existing_aws_private_subnet_id: ''
+use_existing_aws_private_routetable: true
+existing_aws_private_routetable_id: ''
+use_existing_aws_cloudify_group: true
+existing_aws_cloudify_group_id: ''
@@ -0,0 +1,96 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - aws-node-vm.yaml
+
+dsl_definitions:
+
+ kubernetes_environment: &kubernetes_environment
+ the_master_ip_here: { get_attribute: [ kubernetes_master_vm, ip ] }
+
+node_templates:
+
+ aws_docker_kubernetes_node:
+ # installs Docker if Docker is not installed.
+ # Starts the Docker Bootstrap daemon.
+ type: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/docker/install.py
+ start:
+ implementation: scripts/docker/bootstrap.py
+ inputs:
+ <<: *kubernetes_environment
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: aws_kubernetes_node_vm
+
+ aws_flannel_kubernetes_node:
+ # Installs flannel on the node.
+ type: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/flannel/node/create.py
+ inputs:
+ <<: *kubernetes_environment
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: aws_kubernetes_node_vm
+ - type: cloudify.relationships.depends_on
+ target: aws_docker_kubernetes_node
+ - type: cloudify.relationships.connected_to
+ target: kubernetes_dns
+
+ aws_kubernetes_node:
+ # This installs the Hyperkube process on the node host.
+ # The verify.py script checks that the host has been added to the nodes in the master.
+ type: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ implementation: scripts/kubernetes/node/start.py
+ inputs:
+ <<: *kubernetes_environment
+ stop:
+ implementation: scripts/kubernetes/node/stop.py
+ executor: central_deployment_agent
+ inputs:
+ master_ip: { get_attribute: [ kubernetes_master_vm, ip ] }
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: kubernetes_master
+ - type: cloudify.relationships.contained_in
+ target: aws_kubernetes_node_vm
+ - type: cloudify.relationships.depends_on
+ target: aws_flannel_kubernetes_node
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ establish:
+ implementation: scripts/kubernetes/verify.py
+ executor: central_deployment_agent
+ inputs:
+ master_ip: { get_attribute: [ kubernetes_master_vm, ip ] }
+ hostname: {}
+ max_retries: 30
+
+ aws_node_sanity_application:
+ # This node is a test application.
+    # The nginx application defined in scripts/kubernetes/resources is the same as the one located here: https://github.com/kubernetes/kubernetes/tree/v1.2.0/docs/user-guide/walkthrough
+ # The service is started via the API. The API returns the IP and Port of the service.
+ # The test_running.py script checks to see if 200 is returned from the IP/Port.
+ type: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/kubernetes/test_api.py
+ executor: central_deployment_agent
+ start:
+ implementation: scripts/kubernetes/test_running.py
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: kubernetes_master_vm
+ - type: cloudify.relationships.depends_on
+ target: aws_kubernetes_node
@@ -0,0 +1,118 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - http://www.getcloudify.org/spec/aws-plugin/1.4.1/plugin.yaml
+ - http://www.getcloudify.org/spec/diamond-plugin/1.3.3/plugin.yaml
+
+inputs:
+
+ use_existing_aws_kubernetes_master_ip:
+ default: false
+
+ existing_aws_kubernetes_ip_id:
+ default: ''
+
+ use_existing_aws_kubernetes_master_vm_key:
+ default: false
+
+ existing_aws_kubernetes_master_vm_key_name:
+ default: kubernetes_master_vm_key
+
+ master_agent_local_path_to_key_file:
+ type: string
+ default: ~/.ssh/kubernetes-master-vm-key.pem
+
+ use_existing_aws_instance_for_master_vm:
+ default: false
+
+ existing_aws_instance_for_master_vm:
+ default: ''
+
+ aws_instance_master_vm_name:
+ default: Kubernetes Master VM
+
+ master_agent_username:
+ default: ubuntu
+
+ master_agent_port:
+ default: 22
+
+ master_aws_image_id:
+ default: ami-2d39803a
+
+ master_aws_instance_type:
+ default: m3.medium
+
+dsl_definitions:
+
+ aws_config: &AWS_CONFIG
+ aws_access_key_id: { get_input: aws_access_key_id }
+ aws_secret_access_key: { get_input: aws_secret_access_key }
+ ec2_region_name: { get_input: ec2_region_name }
+ ec2_region_endpoint: { get_input: ec2_region_endpoint }
+
+node_templates:
+
+ kubernetes_master_ip:
+ type: cloudify.aws.nodes.ElasticIP
+ properties:
+ use_external_resource: { get_input: use_existing_aws_kubernetes_master_ip }
+      resource_id: { get_input: existing_aws_kubernetes_ip_id }
+ domain: vpc
+ aws_config: *AWS_CONFIG
+ relationships:
+ - type: cloudify.relationships.depends_on
+ target: aws_kubernetes_internet_gateway
+
+ aws_kubernetes_master_vm_key:
+ type: cloudify.aws.nodes.KeyPair
+ properties:
+ use_external_resource: { get_input: use_existing_aws_kubernetes_master_vm_key }
+ resource_id: { get_input: existing_aws_kubernetes_master_vm_key_name }
+ private_key_path: { get_input: master_agent_local_path_to_key_file }
+ aws_config: *AWS_CONFIG
+
+ kubernetes_master_vm:
+ type: cloudify.aws.nodes.Instance
+ properties:
+ use_external_resource: { get_input: use_existing_aws_instance_for_master_vm }
+      resource_id: { get_input: existing_aws_instance_for_master_vm }
+ name: { get_input: aws_instance_master_vm_name }
+ agent_config:
+ user: { get_input: master_agent_username }
+ key: { get_input: master_agent_local_path_to_key_file }
+ port: { get_input: master_agent_port }
+ env:
+ K8S_VERSION: { get_input: your_kubernetes_version }
+ ETCD_VERSION: { get_input: your_etcd_version }
+ FLANNEL_VERSION: { get_input: your_flannel_version }
+ FLANNEL_IFACE: { get_input: flannel_interface }
+ FLANNEL_IPMASQ: { get_input: flannel_ipmasq_flag }
+ image_id: { get_input: master_aws_image_id }
+ instance_type: { get_input: master_aws_instance_type }
+ parameters:
+ placement: { get_property: [ aws_kubernetes_public_subnet, availability_zone ] }
+ user_data: |
+ #!/bin/bash
+ sudo groupadd docker
+ sudo gpasswd -a ubuntu docker
+ aws_config: *AWS_CONFIG
+ relationships:
+ - type: cloudify.aws.relationships.instance_contained_in_subnet
+ target: aws_kubernetes_public_subnet
+ - type: cloudify.aws.relationships.instance_connected_to_security_group
+ target: aws_cloudify_group
+ - type: cloudify.aws.relationships.instance_connected_to_security_group
+ target: aws_kubernetes_group
+ - type: cloudify.aws.relationships.instance_connected_to_keypair
+ target: aws_kubernetes_master_vm_key
+ - type: cloudify.aws.relationships.instance_connected_to_elastic_ip
+ target: kubernetes_master_ip
+
+outputs:
+
+ Kubernetes_Dashboard:
+ description: Kubernetes Dashboard URL
+ value:
+ url: {concat: ["http://",{ get_attribute: [ kubernetes_master_ip, aws_resource_id ]},":8080/ui" ] }
Oops, something went wrong.

0 comments on commit 2747f2e

Please sign in to comment.