Add feature to mount tmpfs volumes #91

Merged: 1 commit, Feb 5, 2021
12 changes: 12 additions & 0 deletions charts/k8s-service/templates/_deployment_spec.tpl
@@ -69,6 +69,9 @@ We need this because certain sections are omitted if there are no volumes or env
{{- if gt (len .Values.persistentVolumes) 0 -}}
{{- $_ := set $hasInjectionTypes "hasVolume" true -}}
{{- end -}}
{{- if gt (len .Values.scratchPaths) 0 -}}
{{- $_ := set $hasInjectionTypes "hasVolume" true -}}
{{- end -}}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -288,6 +291,10 @@ spec:
- name: {{ $name }}
mountPath: {{ quote $value.mountPath }}
{{- end }}
{{- range $name, $value := .Values.scratchPaths }}
- name: {{ $name }}
mountPath: {{ quote $value }}
{{- end }}
{{- /* END VOLUME MOUNT LOGIC */ -}}

{{- range $key, $value := .Values.sideCarContainers }}
@@ -357,6 +364,11 @@ spec:
persistentVolumeClaim:
claimName: {{ $value.claimName }}
{{- end }}
{{- range $name, $value := .Values.scratchPaths }}
- name: {{ $name }}
emptyDir:
medium: "Memory"
{{- end }}
{{- /* END VOLUME LOGIC */ -}}

{{- with .Values.nodeSelector }}
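
For reference, a rough sketch of what the new template lines render into the Deployment spec, assuming a chart user sets a single entry such as scratchPaths: {scratch: /mnt/scratch} (the name "scratch" and the path are illustrative, not part of this change):

Under the main container's volumeMounts:
  - name: scratch
    mountPath: "/mnt/scratch"

Under the pod spec's volumes:
  - name: scratch
    emptyDir:
      medium: "Memory"
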
9 changes: 9 additions & 0 deletions charts/k8s-service/values.yaml
@@ -404,6 +404,15 @@ configMaps: {}
#
persistentVolumes: {}

# scratchPaths is a map of key-value pairs that specifies which paths in the container should be set up as scratch space.
# Under the hood, each entry in the map is converted to a tmpfs volume named after the key and mounted into the
# container at the path provided as the value.
#
# EXAMPLE:
# scratchPaths:
# example: /mnt/scratch
scratchPaths: {}

# secrets is a map that specifies the Secret resources that should be exposed to the main application container. Each entry in
# the map represents a Secret resource. The key refers to the name of the Secret that should be exposed, with the value
# specifying how to expose the Secret. The value is also a map and has the following attributes:
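
As a usage sketch (the keys and paths below are illustrative, not part of this change), multiple scratch paths can be declared side by side; each key becomes the name of the generated tmpfs volume, so it should be usable as a Kubernetes volume name:

scratchPaths:
  cache: /var/cache/app
  work: /tmp/work
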
6 changes: 3 additions & 3 deletions test/k8s_service_config_injection_example_test.go
@@ -59,7 +59,7 @@ func TestK8SServiceConfigInjectionBaseExample(t *testing.T) {
// Verify the app comes up cleanly and returns the expected text
expectedText := "Hello from backend"
validationFunction := sampleAppValidationFunctionGenerator(t, expectedText)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName, NumPodsExpected)
verifyAllPodsAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)
verifyServiceAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)

@@ -119,7 +119,7 @@ func TestK8SServiceConfigInjectionConfigMapExample(t *testing.T) {
// Verify the app comes up cleanly and returns the expected text
expectedText := "Hello! I was configured using a ConfigMap!"
validationFunction := sampleAppValidationFunctionGenerator(t, expectedText)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName, NumPodsExpected)
verifyAllPodsAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)
verifyServiceAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)
}
@@ -189,7 +189,7 @@ func TestK8SServiceConfigInjectionSecretExample(t *testing.T) {
// Verify the app comes up cleanly and returns the expected text
expectedText := "Hello! I was configured using a Secret!"
validationFunction := sampleAppValidationFunctionGenerator(t, expectedText)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "sample-sinatra-app", releaseName, NumPodsExpected)
verifyAllPodsAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)
verifyServiceAvailable(t, kubectlOptions, "sample-sinatra-app", releaseName, validationFunction)
}
3 changes: 2 additions & 1 deletion test/k8s_service_example_test_helpers.go
@@ -39,13 +39,14 @@ func verifyPodsCreatedSuccessfully(
kubectlOptions *k8s.KubectlOptions,
appName string,
releaseName string,
numPods int,
) {
// Get the pods and wait until they are all ready
filters := metav1.ListOptions{
LabelSelector: fmt.Sprintf("app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s", appName, releaseName),
}

- k8s.WaitUntilNumPodsCreated(t, kubectlOptions, filters, NumPodsExpected, WaitTimerRetries, WaitTimerSleep)
+ k8s.WaitUntilNumPodsCreated(t, kubectlOptions, filters, numPods, WaitTimerRetries, WaitTimerSleep)
pods := k8s.ListPods(t, kubectlOptions, filters)

for _, pod := range pods {
6 changes: 3 additions & 3 deletions test/k8s_service_nginx_example_test.go
@@ -12,7 +12,7 @@ import (
"testing"

"github.com/gruntwork-io/terratest/modules/helm"
- "github.com/gruntwork-io/terratest/modules/http-helper"
+ http_helper "github.com/gruntwork-io/terratest/modules/http-helper"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/require"
@@ -67,7 +67,7 @@ func TestK8SServiceNginxExample(t *testing.T) {
defer helm.Delete(t, options, releaseName, true)
helm.Install(t, options, helmChartPath, releaseName)

verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected)
verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction)
verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction)

Expand All @@ -85,7 +85,7 @@ func TestK8SServiceNginxExample(t *testing.T) {
helm.Upgrade(t, options, helmChartPath, releaseName)

// We expect the service to still come up cleanly
verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName)
verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected)
verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction)
verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction)

34 changes: 0 additions & 34 deletions test/k8s_service_template_test.go
@@ -753,37 +753,3 @@ func TestK8SServicePodAddingAdditionalLabels(t *testing.T) {
assert.Equal(t, deployment.Spec.Template.Labels["first-label"], first_custom_pod_label_value)
assert.Equal(t, deployment.Spec.Template.Labels["second-label"], second_custom_pod_label_value)
}

func TestK8SServiceDeploymentAddingPersistentVolumes(t *testing.T) {
t.Parallel()

volName := "pv-1"
volClaim := "claim-1"
volMountPath := "/mnt/path/1"

deployment := renderK8SServiceDeploymentWithSetValues(
t,
map[string]string{
"persistentVolumes.pv-1.claimName": volClaim,
"persistentVolumes.pv-1.mountPath": volMountPath,
},
)

// Verify that there is only one container and that the environments section is populated.
renderedPodContainers := deployment.Spec.Template.Spec.Containers
require.Equal(t, len(renderedPodContainers), 1)

// Verify that a mount has been created for the PV
mounts := renderedPodContainers[0].VolumeMounts
assert.Equal(t, len(mounts), 1)
mount := mounts[0]
assert.Equal(t, volName, mount.Name)
assert.Equal(t, volMountPath, mount.MountPath)

// Verify that a volume has been declared for the PV
volumes := deployment.Spec.Template.Spec.Volumes
assert.Equal(t, len(volumes), 1)
volume := volumes[0]
assert.Equal(t, volName, volume.Name)
assert.Equal(t, volClaim, volume.PersistentVolumeClaim.ClaimName)
}
83 changes: 83 additions & 0 deletions test/k8s_service_volume_template_test.go
@@ -0,0 +1,83 @@
// +build all tpl

// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently
// run just the template tests. See the test README for more information.

package test

import (
"fmt"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)

func TestK8SServiceDeploymentAddingScratchVolumes(t *testing.T) {
t.Parallel()

volName := "scratch"
volMountPath := "/mnt/scratch"

deployment := renderK8SServiceDeploymentWithSetValues(
t,
map[string]string{
fmt.Sprintf("scratchPaths.%s", volName): volMountPath,
},
)

// Verify that there is only one container
renderedPodContainers := deployment.Spec.Template.Spec.Containers
require.Equal(t, len(renderedPodContainers), 1)
podContainer := renderedPodContainers[0]

// Verify that a mount has been created for the scratch path
mounts := podContainer.VolumeMounts
assert.Equal(t, len(mounts), 1)
mount := mounts[0]
assert.Equal(t, volName, mount.Name)
assert.Equal(t, volMountPath, mount.MountPath)

// Verify that a volume has been declared for the scratch path and is using tmpfs
volumes := deployment.Spec.Template.Spec.Volumes
assert.Equal(t, len(volumes), 1)
volume := volumes[0]
assert.Equal(t, volName, volume.Name)
assert.Equal(t, corev1.StorageMediumMemory, volume.EmptyDir.Medium)
}

func TestK8SServiceDeploymentAddingPersistentVolumes(t *testing.T) {
t.Parallel()

volName := "pv-1"
volClaim := "claim-1"
volMountPath := "/mnt/path/1"

deployment := renderK8SServiceDeploymentWithSetValues(
t,
map[string]string{
"persistentVolumes.pv-1.claimName": volClaim,
"persistentVolumes.pv-1.mountPath": volMountPath,
},
)

// Verify that there is only one container
renderedPodContainers := deployment.Spec.Template.Spec.Containers
require.Equal(t, len(renderedPodContainers), 1)

// Verify that a mount has been created for the PV
mounts := renderedPodContainers[0].VolumeMounts
assert.Equal(t, len(mounts), 1)
mount := mounts[0]
assert.Equal(t, volName, mount.Name)
assert.Equal(t, volMountPath, mount.MountPath)

// Verify that a volume has been declared for the PV
volumes := deployment.Spec.Template.Spec.Volumes
assert.Equal(t, len(volumes), 1)
volume := volumes[0]
assert.Equal(t, volName, volume.Name)
assert.Equal(t, volClaim, volume.PersistentVolumeClaim.ClaimName)
}
64 changes: 64 additions & 0 deletions test/k8s_service_volume_test.go
@@ -0,0 +1,64 @@
// +build all integration

// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently
// run just the template tests. See the test README for more information.

package test

import (
"fmt"
"path/filepath"
"strings"
"testing"

"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestK8SServiceScratchSpaceIsTmpfs(t *testing.T) {
t.Parallel()

helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-service"))
require.NoError(t, err)

// Create a test namespace to deploy resources into, to avoid colliding with other tests
kubectlOptions := k8s.NewKubectlOptions("", "", "")
uniqueID := random.UniqueId()
testNamespace := fmt.Sprintf("k8s-service-scratch-%s", strings.ToLower(uniqueID))
k8s.CreateNamespace(t, kubectlOptions, testNamespace)
defer k8s.DeleteNamespace(t, kubectlOptions, testNamespace)
kubectlOptions.Namespace = testNamespace

// Construct the values to run a pod with scratch space
releaseName := fmt.Sprintf("k8s-service-scratch-%s", strings.ToLower(uniqueID))
appName := "scratch-tester"
options := &helm.Options{
KubectlOptions: kubectlOptions,
SetValues: map[string]string{
"applicationName": appName,
"containerImage.repository": "alpine",
"containerImage.tag": "3.13",
"containerImage.pullPolicy": "IfNotPresent",
"containerCommand[0]": "sh",
"containerCommand[1]": "-c",
"containerCommand[2]": "mount && sleep 9999999",
"scratchPaths.scratch-mnt": "/mnt/scratch",
},
}
defer helm.Delete(t, options, releaseName, true)
helm.Install(t, options, helmChartPath, releaseName)

// Make sure all the pods are deployed and available
verifyPodsCreatedSuccessfully(t, kubectlOptions, appName, releaseName, 1)

// Get the logs from the pod to verify /mnt/scratch is mounted as tmpfs.
pods := k8s.ListPods(t, kubectlOptions, metav1.ListOptions{})
require.Equal(t, 1, len(pods))
pod := pods[0]
logs, err := k8s.RunKubectlAndGetOutputE(t, kubectlOptions, "logs", pod.Name)
require.NoError(t, err)
require.Contains(t, logs, "tmpfs on /mnt/scratch type tmpfs (rw,relatime)")
}