feat: persistent volumes work on multi node k8s clusters #1943

Merged
merged 29 commits on Dec 15, 2023
Changes from 25 commits

Commits (29)
614013d
cascading storage class changes
h4ck3rk3y Dec 12, 2023
fdfe9f7
added some docs
h4ck3rk3y Dec 12, 2023
3c840fa
apic has storage class stuff now
h4ck3rk3y Dec 12, 2023
2ee34a4
remove some dupes
h4ck3rk3y Dec 12, 2023
bd2117c
remove apic launcher change
h4ck3rk3y Dec 12, 2023
592755c
get rid of some more functions
h4ck3rk3y Dec 12, 2023
2ea0a01
fix missing struct value
h4ck3rk3y Dec 12, 2023
e9e31d1
fix changelog image building pr
h4ck3rk3y Dec 12, 2023
8980073
Merge branch 'main' into gyani/storage-class
h4ck3rk3y Dec 13, 2023
5e8e7a3
feat: allow specifying size of persistent directories (#1939)
h4ck3rk3y Dec 13, 2023
9d91c26
claim -> volume
h4ck3rk3y Dec 13, 2023
03e436a
fix ci
h4ck3rk3y Dec 13, 2023
a59a366
dev stuff, do revert
h4ck3rk3y Dec 13, 2023
3e912df
fix constant volume szie
h4ck3rk3y Dec 13, 2023
c260fa7
added back funciton
h4ck3rk3y Dec 14, 2023
f45cde3
--amend
h4ck3rk3y Dec 14, 2023
438c5a1
Revert "dev stuff, do revert"
h4ck3rk3y Dec 14, 2023
acb5a1f
Merge branch 'main' into gyani/persistent-volumes
h4ck3rk3y Dec 14, 2023
55db506
Revert "Revert "dev stuff, do revert""
h4ck3rk3y Dec 14, 2023
ae64ac5
Revert "Revert "Revert "dev stuff, do revert"""
h4ck3rk3y Dec 14, 2023
5709553
better k3s instructions
h4ck3rk3y Dec 14, 2023
62e4d63
added a note on deletion
h4ck3rk3y Dec 14, 2023
722f7ca
remove unused code
h4ck3rk3y Dec 14, 2023
3bcb536
remove unused variable
h4ck3rk3y Dec 14, 2023
6398322
Merge branch 'main' into gyani/persistent-volumes
h4ck3rk3y Dec 15, 2023
e06481b
cleanup a lot of unused code
h4ck3rk3y Dec 15, 2023
b814b07
more unused code
h4ck3rk3y Dec 15, 2023
cc572be
minikube uses standard
h4ck3rk3y Dec 15, 2023
7dbeb77
fix more docs
h4ck3rk3y Dec 15, 2023
.circleci/config.yml (2 changes: 1 addition & 1 deletion)
@@ -59,7 +59,7 @@ steps_prepare_testing_k8s_k3s: &steps_prepare_testing_k8s_k3s
type: kubernetes
config:
kubernetes-cluster-name: k3d-k3s-default
- storage-class: standard
+ storage-class: local-path
enclave-size-in-megabytes: 2048
EOF
# Set the K3S cluster with some previous steps to force this cluster type in the cluster-setting file. This save us to start the engine with the cluster set command
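For context on the cluster-setting block above: the new storage-class key names the Kubernetes StorageClass that Kurtosis uses for persistent volume claims (k3s/k3d ships local-path, minikube ships standard). Below is a minimal, hypothetical Go model of that block, purely illustrative since the real config types are not part of this diff; only the StorageClass pointer field is implied by the *kubernetesConfig.StorageClass dereference in the next hunk.

// Hypothetical sketch of the cluster-setting block as a Go struct.
// Struct and field names are assumptions; only StorageClass being a
// pointer is inferred from the *kubernetesConfig.StorageClass dereference
// seen later in this PR.
package main

import "fmt"

type KubernetesClusterConfig struct {
    KubernetesClusterName  string  `yaml:"kubernetes-cluster-name"`
    StorageClass           *string `yaml:"storage-class"`
    EnclaveSizeInMegabytes uint    `yaml:"enclave-size-in-megabytes"`
}

func main() {
    storageClass := "local-path" // k3s/k3d default dynamic provisioner
    cfg := KubernetesClusterConfig{
        KubernetesClusterName:  "k3d-k3s-default",
        StorageClass:           &storageClass,
        EnclaveSizeInMegabytes: 2048,
    }
    fmt.Printf("using storage class %q\n", *cfg.StorageClass)
}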
@@ -145,7 +145,7 @@ func getSuppliers(clusterId string, clusterType KurtosisClusterType, kubernetesC
}

backendSupplier = func(ctx context.Context) (backend_interface.KurtosisBackend, error) {
- backend, err := kubernetes_kurtosis_backend.GetCLIBackend(ctx)
+ backend, err := kubernetes_kurtosis_backend.GetCLIBackend(ctx, *kubernetesConfig.StorageClass)
if err != nil {
return nil, stacktrace.Propagate(
err,
cli/cli/kurtosis_gateway/connection/provider.go (4 changes: 3 additions & 1 deletion)
@@ -22,6 +22,8 @@ import (
const (
grpcPortIdStr = "grpc"
httpApplicationProtocol = "http"
+ // this doesn't have any effect as this is just the gateway
+ emptyStorageClassName = ""
)

var noWait *port_spec.Wait = nil

@@ -42,7 +44,7 @@ func NewGatewayConnectionProvider(ctx context.Context, kubernetesConfig *restcli
if err != nil {
return nil, stacktrace.Propagate(err, "Expected to be able to get config for Kubernetes client set, instead a non nil error was returned")
}
- kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig)
+ kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig, emptyStorageClassName)

return &GatewayConnectionProvider{
config: kubernetesConfig,
@@ -67,8 +67,9 @@ func NewAPIContainerKubernetesKurtosisBackend(
kubernetesManager *kubernetes_manager.KubernetesManager,
ownEnclaveUuid enclave.EnclaveUUID,
ownNamespaceName string,
+ storageClassName string,
) *KubernetesKurtosisBackend {
- modeArgs := shared_helpers.NewApiContainerModeArgs(ownEnclaveUuid, ownNamespaceName)
+ modeArgs := shared_helpers.NewApiContainerModeArgs(ownEnclaveUuid, ownNamespaceName, storageClassName)
return newKubernetesKurtosisBackend(
kubernetesManager,
nil,
@@ -21,7 +21,7 @@ var kubeConfigFileFilepath = filepath.Join(
os.Getenv("HOME"), ".kube", "config",
)

- func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, error) {
+ func GetCLIBackend(ctx context.Context, storageClass string) (backend_interface.KurtosisBackend, error) {
kubernetesConfig, err := clientcmd.BuildConfigFromFlags(emptyMasterURL, kubeConfigFileFilepath)
if err != nil {
return nil, stacktrace.Propagate(err, "An error occurred creating kubernetes configuration from flags in file '%v'", kubeConfigFileFilepath)

@@ -35,6 +35,7 @@ func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, erro
ctx,
kubernetesConfig,
backendSupplier,
+ storageClass,
)
if err != nil {
return nil, stacktrace.Propagate(err, "An error occurred wrapping the CLI Kubernetes backend")

@@ -44,7 +45,7 @@ func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, erro
}

func GetEngineServerBackend(
- ctx context.Context,
+ ctx context.Context, storageClass string,
) (backend_interface.KurtosisBackend, error) {
kubernetesConfig, err := rest.InClusterConfig()
if err != nil {

@@ -61,6 +62,7 @@ func GetEngineServerBackend(
ctx,
kubernetesConfig,
backendSupplier,
+ storageClass,
)
if err != nil {
return nil, stacktrace.Propagate(err, "An error occurred wrapping the Kurtosis Engine Kubernetes backend")

@@ -71,6 +73,7 @@

func GetApiContainerBackend(
ctx context.Context,
+ storageClass string,
) (backend_interface.KurtosisBackend, error) {
kubernetesConfig, err := rest.InClusterConfig()
if err != nil {

@@ -103,13 +106,15 @@
kubernetesManager,
enclaveId,
namespaceName,
+ storageClass,
), nil
}

wrappedBackend, err := getWrappedKubernetesKurtosisBackend(
ctx,
kubernetesConfig,
backendSupplier,
+ storageClass,
)
if err != nil {
return nil, stacktrace.Propagate(err, "An error occurred wrapping the APIC Kubernetes backend")

@@ -127,13 +132,14 @@ func getWrappedKubernetesKurtosisBackend(
ctx context.Context,
kubernetesConfig *rest.Config,
kurtosisBackendSupplier func(context.Context, *kubernetes_manager.KubernetesManager) (*KubernetesKurtosisBackend, error),
+ storageClass string,
) (*metrics_reporting.MetricsReportingKurtosisBackend, error) {
clientSet, err := kubernetes.NewForConfig(kubernetesConfig)
if err != nil {
return nil, stacktrace.Propagate(err, "Expected to be able to create kubernetes client set using Kubernetes config '%+v', instead a non nil error was returned", kubernetesConfig)
}

- kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig)
+ kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig, storageClass)

kubernetesBackend, err := kurtosisBackendSupplier(ctx, kubernetesManager)
if err != nil {
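All of these getters now thread a storage class down into the Kubernetes manager. The kubernetes_manager package itself is not part of this diff, so the following is only a hedged sketch of what the three-argument NewKubernetesManager constructor called above plausibly looks like; the struct fields and exact parameter types are assumptions, and only the (client set, rest config, storage class) argument order comes from the call sites.

// Hedged sketch only: the real kubernetes_manager package is outside this PR's
// visible diff. Field names and types are assumptions inferred from the call
// sites above.
package kubernetes_manager

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

type KubernetesManager struct {
    kubernetesClientSet *kubernetes.Clientset
    kubernetesConfig    *rest.Config
    // Used when building PersistentVolumeClaims; the gateway passes "" because
    // it never provisions volumes.
    storageClassName string
}

func NewKubernetesManager(clientSet *kubernetes.Clientset, config *rest.Config, storageClassName string) *KubernetesManager {
    return &KubernetesManager{
        kubernetesClientSet: clientSet,
        kubernetesConfig:    config,
        storageClassName:    storageClassName,
    }
}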
@@ -102,11 +102,11 @@ type dumpPodResult struct {

func NewApiContainerModeArgs(
ownEnclaveId enclave.EnclaveUUID,
- ownNamespaceName string) *ApiContainerModeArgs {
+ ownNamespaceName string, storageClassName string) *ApiContainerModeArgs {
return &ApiContainerModeArgs{
ownEnclaveId: ownEnclaveId,
ownNamespaceName: ownNamespaceName,
- storageClassName: "",
+ storageClassName: storageClassName,
filesArtifactExpansionVolumeSizeInMegabytes: 0,
}
}
@@ -12,14 +12,12 @@ import (
)

type kubernetesVolumeWithClaim struct {
- VolumeName string
-
VolumeClaimName string
}

func (volumeAndClaim *kubernetesVolumeWithClaim) GetVolume() *apiv1.Volume {
return &apiv1.Volume{
- Name: volumeAndClaim.VolumeName,
+ Name: volumeAndClaim.VolumeClaimName,
VolumeSource: apiv1.VolumeSource{
HostPath: nil,
EmptyDir: nil,
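The remainder of GetVolume is collapsed in this view. For readers who don't have the client-go types in their head, this is roughly what a volume backed by a persistent volume claim looks like; a generic sketch using the standard k8s.io/api/core/v1 types, not the collapsed code itself.

// Generic client-go sketch of a PVC-backed Volume, consistent with the rename
// above (the Volume is now keyed by the claim name). Illustrative only; the
// collapsed remainder of GetVolume is not shown in this diff.
package sketch

import apiv1 "k8s.io/api/core/v1"

func pvcBackedVolume(claimName string) *apiv1.Volume {
    return &apiv1.Volume{
        Name: claimName,
        VolumeSource: apiv1.VolumeSource{
            PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
                ClaimName: claimName,
                ReadOnly:  false,
            },
        },
    }
}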
@@ -59,7 +57,7 @@

func (volumeAndClaim *kubernetesVolumeWithClaim) GetVolumeMount(mountPath string) *apiv1.VolumeMount {
return &apiv1.VolumeMount{
- Name: volumeAndClaim.VolumeName,
+ Name: volumeAndClaim.VolumeClaimName,
ReadOnly: false,
MountPath: mountPath,
SubPath: "",
@@ -77,7 +75,6 @@ func preparePersistentDirectoriesResources(
kubernetesManager *kubernetes_manager.KubernetesManager,
) (map[string]*kubernetesVolumeWithClaim, error) {
shouldDeleteVolumesAndClaimsCreated := true
- volumesCreated := map[string]*apiv1.PersistentVolume{}
volumeClaimsCreated := map[string]*apiv1.PersistentVolumeClaim{}

persistentVolumesAndClaims := map[string]*kubernetesVolumeWithClaim{}
@@ -96,16 +93,7 @@

persistentVolumeSize := int64(persistentDirectory.Size)

- var persistentVolume *apiv1.PersistentVolume
- if persistentVolume, err = kubernetesManager.GetPersistentVolume(ctx, volumeName); err != nil {
- persistentVolume, err = kubernetesManager.CreatePersistentVolume(ctx, namespace, volumeName, volumeLabelsStrs, persistentVolumeSize)
- if err != nil {
- return nil, stacktrace.Propagate(err, "An error occurred creating the persistent volume for '%s'", persistentDirectory.PersistentKey)
- }
- volumesCreated[persistentVolume.Name] = persistentVolume
- }
-
- // For now, we have a 1:1 mapping between volume and volume claims, so it's fine giving it the same name
+ // This claim works with a dynamic driver - it will spin up its own volume - the volume will get deleted when said claims is deleted
var persistentVolumeClaim *apiv1.PersistentVolumeClaim
if persistentVolumeClaim, err = kubernetesManager.GetPersistentVolumeClaim(ctx, namespace, volumeName); err != nil {
persistentVolumeClaim, err = kubernetesManager.CreatePersistentVolumeClaim(ctx, namespace, volumeName, volumeLabelsStrs, persistentVolumeSize)
@@ -116,7 +104,6 @@
}

persistentVolumesAndClaims[dirPath] = &kubernetesVolumeWithClaim{
- VolumeName: persistentVolume.Name,
VolumeClaimName: persistentVolumeClaim.Name,
}
}
@@ -136,17 +123,6 @@
logrus.Warnf("You'll need to clean up volume claim '%v' manually!", volumeClaimNameStr)
}
}
- for volumeNameStr := range volumesCreated {
- // Background context so we still run this even if the input context was cancelled
- if err := kubernetesManager.RemovePersistentVolumeClaim(context.Background(), namespace, volumeNameStr); err != nil {
- logrus.Warnf(
- "Creating persistent directory volumes didn't complete successfully so we tried to delete volume '%v' that we created, but doing so threw an error:\n%v",
- volumeNameStr,
- err,
- )
- logrus.Warnf("You'll need to clean up volume '%v' manually!", volumeNameStr)
- }
- }
}()

shouldDeleteVolumesAndClaimsCreated = false
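The deleted code in this file pre-created PersistentVolumes by hand and then cleaned them up on failure; after this change only PersistentVolumeClaims are created, and the configured StorageClass's dynamic provisioner supplies (and later reclaims) the backing volumes, which is what makes persistent directories work on multi-node clusters per the PR title. The repo's CreatePersistentVolumeClaim is not shown in this diff; the following is a hedged client-go sketch of a claim along those lines. The helper name, size handling, and the pre-1.29 apiv1.ResourceRequirements field are assumptions, not the actual implementation.

// Hedged sketch, not the repo's implementation: create a PVC that names a
// StorageClass and lets the cluster's dynamic provisioner supply the backing
// PersistentVolume. Assumes a client-go/api version where PVC resources still
// use apiv1.ResourceRequirements (pre-1.29).
package sketch

import (
    "context"
    "fmt"

    apiv1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func createPersistentVolumeClaim(
    ctx context.Context,
    clientSet *kubernetes.Clientset,
    namespace string,
    claimName string,
    storageClassName string,
    sizeInMegabytes int64,
) (*apiv1.PersistentVolumeClaim, error) {
    claim := &apiv1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      claimName,
            Namespace: namespace,
        },
        Spec: apiv1.PersistentVolumeClaimSpec{
            AccessModes:      []apiv1.PersistentVolumeAccessMode{apiv1.ReadWriteOnce},
            StorageClassName: &storageClassName,
            Resources: apiv1.ResourceRequirements{
                Requests: apiv1.ResourceList{
                    // Request the persistent directory's size; the provisioner
                    // creates a volume at least this big.
                    apiv1.ResourceStorage: *resource.NewQuantity(sizeInMegabytes*1024*1024, resource.BinarySI),
                },
            },
        },
    }
    created, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
    if err != nil {
        return nil, fmt.Errorf("creating persistent volume claim '%s' in namespace '%s': %w", claimName, namespace, err)
    }
    return created, nil
}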
@@ -372,15 +372,10 @@ func createStartServiceOperation(
}
for _, volumeAndClaim := range createVolumesWithClaims {
volumeClaimName := volumeAndClaim.VolumeClaimName
- volumeName := volumeAndClaim.VolumeName
if err := kubernetesManager.RemovePersistentVolumeClaim(ctx, namespaceName, volumeClaimName); err != nil {
logrus.Errorf("Starting service didn't complete successfully so we tried to remove the persistent volume claim we created but doing so threw an error:\n%v", err)
logrus.Errorf("ACTION REQUIRED: You'll need to remove persistent volume claim '%v' in '%v' manually!!!", volumeClaimName, namespaceName)
}
- if err := kubernetesManager.RemovePersistentVolume(ctx, volumeAndClaim.VolumeName); err != nil {
- logrus.Errorf("Starting service didn't complete successfully so we tried to remove the persistent volume we created but doing so threw an error:\n%v", err)
- logrus.Errorf("ACTION REQUIRED: You'll need to remove persistent volume '%v' manually!!!", volumeName)
- }
}
}()
