
Commit 6b8366b
Move to creating SP in TF with least privilege
lawrencegripper committed Aug 6, 2018
1 parent 0b3dfc5 commit 6b8366b
Showing 6 changed files with 329 additions and 231 deletions.
11 changes: 3 additions & 8 deletions Readme.md
@@ -24,14 +24,9 @@ Then the connection details from the `redis` and the `log analytics workspace` a

1. Login to the Azure CLI `az login`
2. Clone this repository and `cd` into the directory
-2. Create a service principal with `az ad sp create-for-rbac --skip-assignment` [How-to here](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal#pre-create-a-new-sp)
-3. Create a `variables.tfvars` file and add your service principal `clientid` and `clientsecret` as variables. Also add an ssh key and username for logging into k8s agent nodes.
+3. Create a `variables.tfvars` file and add an ssh key and username for logging into k8s agent nodes.

```hcl
client_id = "2f61810e-7f8d-49fd-8c0e-c4ffake51f9f"
client_secret = "57f8b670-012d-42b2-a0f8-c3fakee239ad"
linux_admin_username = ""
linux_admin_ssh_publickey = "ssh-rsa AAAasdfasdc2EasdfasdfAAABAQC+b42lMQef/l5D8c7kcNZNf6m37bdfITpUVcfakerFT/UAWAjym5rxda0PwdkasdfasdfasdfasdfVspDGCYWvHpa3M9UMM6cgdlq+R4ISif4W04yeOmjkRR5j9pcasdfasdfasdfW6PJcgw7IyWIWSONYCSNK6Tk5Yki3N+nAvIxU34+YxPTOpRw42w1AcuorsomethinglikethisnO15SGqFhNagUP/wV/18fvwENt3hsukiBmZ21aP8YqoFWuBg3 james@something"
```
@@ -42,9 +37,9 @@

## Notes/FAQ

-1. Why haven't you used `modules` to organize the template? We'd suggest using them, but to keep things simple and easily readable for those new to Terraform we haven't included them.
+1. ~~Why haven't you used `modules` to organize the template? We'd suggest using them, but to keep things simple and easily readable for those new to Terraform we haven't included them.~~ I changed my mind on this.

-2. I receive the error `Error: kubernetes_daemonset.container_agent: Provider doesn't support resource: kubernetes_daemonset`: Delete the `.terraform` folder from the directory, then make sure you have downloaded the community edition of the kubernetes provider and that it is named correctly and stored in the current directory.
+2. I receive the error `Error: kubernetes_daemonset.container_agent: Provider doesn't support resource: kubernetes_daemonset`: Delete the `.terraform` folder from the directory, then make sure you have downloaded the community edition of the kubernetes provider and that it is named correctly and stored in the current directory. In the root dir run `rm -r .terraform`, then rerun the correct bootstrap script; see the sketch after this list.

3. I receive the error `* provider.azurerm: No valid (unexpired) Azure CLI Auth Tokens found. Please run az login.`: Run any `az` command which talks to Azure and it will update the token. For example run `az group list` then retry the Terraform command.
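For the provider error in (2), a minimal sketch of the recovery steps. It assumes the community build ships as a `terraform-provider-kubernetes` binary in the repo root and that the bootstrap script is named `bootstrap.sh` — both names are assumptions, not confirmed by this excerpt:

```bash
# Clear Terraform's cached provider bindings so plugins are re-discovered
rm -r .terraform

# Terraform locates local plugins by the terraform-provider-<name> naming
# convention; the community build must keep that name and be executable
chmod +x ./terraform-provider-kubernetes

# Re-run the bootstrap so init picks the provider up again
./bootstrap.sh   # hypothetical script name
```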

213 changes: 30 additions & 183 deletions main.tf
@@ -1,15 +1,24 @@
+locals {
+  cluster_name               = "aks-${random_integer.random_int.result}"
+  agents_resource_group_name = "MC_${var.resource_group_name}_${local.cluster_name}_${azurerm_resource_group.cluster.location}"
+}

resource "azurerm_resource_group" "cluster" {
name = "${var.resource_group_name}"
location = "${var.resource_group_location}"
}

resource "random_id" "workspace" {
keepers = {
# Generate a new id each time we switch to a new resource group
group_name = "${azurerm_resource_group.cluster.name}"
}
resource "azurerm_resource_group" "agents" {
name = "${local.agents_resource_group_name}"
location = "${var.resource_group_location}"
}

byte_length = 8
module "service_principal" {
source = "service_principal"

cluster_name = "${local.cluster_name}"
cluster_resource_group_name = "${azurerm_resource_group.cluster.name}"
agents_resource_group_name = "${azurerm_resource_group.agents.name}"
}
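The `service_principal` module's own files are among this commit's six changed files but fall outside this excerpt. As a rough sketch of what a least-privilege module of this shape might contain — using the 2018-era `azurerm` AAD resources; the resource names, password handling, and role scoping below are assumptions, not the commit's actual code:

```hcl
# service_principal/main.tf — illustrative sketch only
variable "cluster_name" {}
variable "cluster_resource_group_name" {}
variable "agents_resource_group_name" {}

data "azurerm_subscription" "current" {}

resource "azurerm_azuread_application" "sp" {
  name = "${var.cluster_name}"
}

resource "azurerm_azuread_service_principal" "sp" {
  application_id = "${azurerm_azuread_application.sp.application_id}"
}

resource "random_string" "password" {
  length  = 32
  special = true
}

resource "azurerm_azuread_service_principal_password" "sp" {
  service_principal_id = "${azurerm_azuread_service_principal.sp.id}"
  value                = "${random_string.password.result}"
  end_date             = "2020-01-01T00:00:00Z"
}

# Grant rights only on the two resource groups, not the whole subscription
resource "azurerm_role_assignment" "cluster" {
  scope                = "${data.azurerm_subscription.current.id}/resourceGroups/${var.cluster_resource_group_name}"
  role_definition_name = "Contributor"
  principal_id         = "${azurerm_azuread_service_principal.sp.id}"
}

resource "azurerm_role_assignment" "agents" {
  scope                = "${data.azurerm_subscription.current.id}/resourceGroups/${var.agents_resource_group_name}"
  role_definition_name = "Contributor"
  principal_id         = "${azurerm_azuread_service_principal.sp.id}"
}

output "client_id" {
  value = "${azurerm_azuread_application.sp.application_id}"
}

output "client_secret" {
  value = "${random_string.password.result}"
}
```

The least-privilege part is the scoping: `Contributor` is assigned only on the cluster and agents resource groups rather than subscription-wide.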

#an attempt to keep the AKS name (and dns label) somewhat unique
@@ -19,51 +28,33 @@ resource "random_integer" "random_int" {
}

resource "azurerm_kubernetes_cluster" "aks" {
name = "aks-${random_integer.random_int.result}"
name = "${local.cluster_name}"
location = "${azurerm_resource_group.cluster.location}"
dns_prefix = "aks-${random_integer.random_int.result}"
dns_prefix = "${local.cluster_name}"

resource_group_name = "${azurerm_resource_group.cluster.name}"
kubernetes_version = "1.8.7"
kubernetes_version = "${var.kubetnetes_version}"

linux_profile {
admin_username = "${var.linux_admin_username}"

ssh_key {
key_data = "${var.linux_admin_ssh_publickey}"
// If the user hasn't set a key the default will be "user_users_ssh_key", here we check for that and
// load the ssh from file if this is the case.
key_data = "${var.linux_admin_ssh_publickey == "use_users_ssh_key" ? file("~/.ssh/id_rsa.pub") : var.linux_admin_ssh_publickey}"
}
}

agent_pool_profile {
name = "agentpool"
-    count   = "2"
-    vm_size = "Standard_DS2_v2"
+    count   = "${var.node_count}"
+    vm_size = "${var.vm_size}"
os_type = "Linux"
}

service_principal {
client_id = "${var.client_id}"
client_secret = "${var.client_secret}"
}
}

resource "azurerm_log_analytics_workspace" "workspace" {
name = "k8s-workspace-${random_id.workspace.hex}"
location = "${azurerm_resource_group.cluster.location}"
resource_group_name = "${azurerm_resource_group.cluster.name}"
sku = "Free"
}

resource "azurerm_log_analytics_solution" "container_monitoring" {
location = "${azurerm_resource_group.cluster.location}"
resource_group_name = "${azurerm_resource_group.cluster.name}"
workspace_resource_id = "${azurerm_log_analytics_workspace.workspace.id}"
workspace_name = "${azurerm_log_analytics_workspace.workspace.name}"
solution_name = "Containers"

plan {
publisher = "Microsoft"
product = "OMSGallery/Containers"
client_id = "${module.service_principal.client_id}"
client_secret = "${module.service_principal.client_secret}"
}
}

@@ -84,9 +75,7 @@ resource "azurerm_redis_cache" "redis" {
sku_name = "Basic"
enable_non_ssl_port = false

-  redis_configuration {
-    maxclients = 256
-  }
+  redis_configuration {}
}

provider "kubernetes" {
@@ -109,151 +98,9 @@ resource "kubernetes_secret" "redis_secret" {
}
}

resource "kubernetes_namespace" "monitoring" {
metadata {
name = "monitoring"
}
}

resource "kubernetes_secret" "log_analytics_secret" {
metadata {
name = "omsagentkeys"
namespace = "${kubernetes_namespace.monitoring.metadata.0.name}"
}
module "oms" {
source = "oms"

data {
workspace_id = "${azurerm_log_analytics_workspace.workspace.workspace_id}"
workspace_key = "${azurerm_log_analytics_workspace.workspace.primary_shared_key}"
}
}

resource "kubernetes_daemonset" "container_agent" {
metadata {
name = "omsagent"
namespace = "${kubernetes_namespace.monitoring.metadata.0.name}"
}

spec {
selector {
agentVersion = "1.4.0-12"
dockerProviderVersion = "1.0.0-25"
app = "omsagent"
}

template {
metadata {
labels {
agentVersion = "1.4.0-12"
dockerProviderVersion = "1.0.0-25"
app = "omsagent"
}
}

spec {
volume {
name = "docker-sock"

host_path {
path = "/var/run/docker.sock"
}
}

volume {
name = "container-hostname"

host_path {
path = "/etc/hostname"
}
}

volume {
name = "host-log"

host_path {
path = "/var/log"
}
}

volume {
name = "container-log"

host_path {
path = "/var/lib/docker/containers/"
}
}

container {
name = "omsagent"
image = "microsoft/oms"
image_pull_policy = "Always"

security_context {
privileged = true
}

port {
container_port = 25225
protocol = "TCP"
}

port {
container_port = 25224
protocol = "UDP"
}

volume_mount {
name = "docker-sock"
mount_path = "/var/run/docker.sock"
}

volume_mount {
mount_path = "/var/log"
name = "host-log"
}

volume_mount {
mount_path = "/var/lib/docker/containers/"
name = "container-log"
}

volume_mount {
mount_path = "/var/opt/microsoft/omsagent/state/containerhostname"
name = "container-hostname"
}

liveness_probe {
exec {
command = ["/bin/bash", "-c", "ps -ef | grep omsagent | grep -v \"grep\""]
}

initial_delay_seconds = 60
period_seconds = 60
}

env = [
{
name = "WSID"

value_from {
secret_key_ref {
name = "${kubernetes_secret.log_analytics_secret.metadata.0.name}"
key = "workspace_id"
}
}
},
{
name = "KEY"

value_from {
secret_key_ref {
name = "${kubernetes_secret.log_analytics_secret.metadata.0.name}"
key = "workspace_key"
}
}
},
]
}
}
}
}
resource_group_name = "${var.resource_group_name}"
resource_group_location = "${var.resource_group_location}"
}
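The new main.tf references several variables (`kubetnetes_version`, `node_count`, `vm_size`, and the `use_users_ssh_key` sentinel) whose declarations live in a variables file that is not shown in this excerpt. A minimal sketch of what those declarations might look like — the names follow the diff, and the defaults are assumptions mirroring the hard-coded values the commit removes:

```hcl
# variables.tf — illustrative sketch; defaults are assumptions
variable "kubetnetes_version" {
  description = "Kubernetes version for the AKS cluster (spelling follows the diff)"
  default     = "1.8.7"
}

variable "node_count" {
  description = "Number of agent nodes in the pool"
  default     = 2
}

variable "vm_size" {
  description = "VM size for the agent nodes"
  default     = "Standard_DS2_v2"
}

variable "linux_admin_username" {
  description = "Admin username on the k8s agent nodes"
}

variable "linux_admin_ssh_publickey" {
  description = "Public key for the admin user; the sentinel default makes main.tf fall back to ~/.ssh/id_rsa.pub"
  default     = "use_users_ssh_key"
}
```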
