diff --git a/cmd/client.go b/cmd/client.go index 9c4fc81987..c23a305c31 100644 --- a/cmd/client.go +++ b/cmd/client.go @@ -9,13 +9,23 @@ import ( "knative.dev/func/pkg/builders/buildpacks" "knative.dev/func/pkg/config" "knative.dev/func/pkg/creds" + k8sdeployer "knative.dev/func/pkg/deployer/k8s" + knativedeployer "knative.dev/func/pkg/deployer/knative" + "knative.dev/func/pkg/describer" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + knativedescriber "knative.dev/func/pkg/describer/knative" "knative.dev/func/pkg/docker" fn "knative.dev/func/pkg/functions" fnhttp "knative.dev/func/pkg/http" "knative.dev/func/pkg/k8s" - "knative.dev/func/pkg/knative" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + knativelister "knative.dev/func/pkg/lister/knative" "knative.dev/func/pkg/oci" "knative.dev/func/pkg/pipelines/tekton" + "knative.dev/func/pkg/remover" + k8sremover "knative.dev/func/pkg/remover/k8s" + knativeremover "knative.dev/func/pkg/remover/knative" ) // ClientConfig settings for use with NewClient @@ -58,16 +68,16 @@ func NewClient(cfg ClientConfig, options ...fn.Option) (*fn.Client, func()) { var ( t = newTransport(cfg.InsecureSkipVerify) // may provide a custom impl which proxies c = newCredentialsProvider(config.Dir(), t) // for accessing registries - d = newKnativeDeployer(cfg.Verbose) + d = newKnativeDeployer(cfg.Verbose) // default deployer (can be overridden via options) pp = newTektonPipelinesProvider(c, cfg.Verbose) o = []fn.Option{ // standard (shared) options for all commands fn.WithVerbose(cfg.Verbose), fn.WithTransport(t), fn.WithRepositoriesPath(config.RepositoriesPath()), fn.WithBuilder(buildpacks.NewBuilder(buildpacks.WithVerbose(cfg.Verbose))), - fn.WithRemover(knative.NewRemover(cfg.Verbose)), - fn.WithDescriber(knative.NewDescriber(cfg.Verbose)), - fn.WithLister(knative.NewLister(cfg.Verbose)), + fn.WithRemover(remover.NewMultiRemover(cfg.Verbose, knativeremover.NewRemover(cfg.Verbose), k8sremover.NewRemover(cfg.Verbose))), + fn.WithDescriber(describer.NewMultiDescriber(cfg.Verbose, knativedescriber.NewDescriber(cfg.Verbose), k8sdescriber.NewDescriber(cfg.Verbose))), + fn.WithLister(lister.NewLister(cfg.Verbose, knativelister.NewGetter(cfg.Verbose), k8slister.NewGetter(cfg.Verbose))), fn.WithDeployer(d), fn.WithPipelinesProvider(pp), fn.WithPusher(docker.NewPusher( @@ -127,12 +137,21 @@ func newTektonPipelinesProvider(creds oci.CredentialsProvider, verbose bool) *te } func newKnativeDeployer(verbose bool) fn.Deployer { - options := []knative.DeployerOpt{ - knative.WithDeployerVerbose(verbose), - knative.WithDeployerDecorator(deployDecorator{}), + options := []knativedeployer.DeployerOpt{ + knativedeployer.WithDeployerVerbose(verbose), + knativedeployer.WithDeployerDecorator(deployDecorator{}), } - return knative.NewDeployer(options...) + return knativedeployer.NewDeployer(options...) +} + +func newK8sDeployer(verbose bool) fn.Deployer { + options := []k8sdeployer.DeployerOpt{ + k8sdeployer.WithDeployerVerbose(verbose), + k8sdeployer.WithDeployerDecorator(deployDecorator{}), + } + + return k8sdeployer.NewDeployer(options...) 
} type deployDecorator struct { diff --git a/cmd/completion_util.go b/cmd/completion_util.go index 7a4df6b848..b6b42d9107 100644 --- a/cmd/completion_util.go +++ b/cmd/completion_util.go @@ -9,13 +9,15 @@ import ( "strings" "github.com/spf13/cobra" fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + knativelister "knative.dev/func/pkg/lister/knative" - "knative.dev/func/pkg/knative" ) func CompleteFunctionList(cmd *cobra.Command, args []string, toComplete string) (strings []string, directive cobra.ShellCompDirective) { - lister := knative.NewLister(false) + lister := lister.NewLister(false, knativelister.NewGetter(false), k8slister.NewGetter(false)) list, err := lister.List(cmd.Context(), "") if err != nil { diff --git a/cmd/deploy.go b/cmd/deploy.go index 42cb032140..8f71f00501 100644 --- a/cmd/deploy.go +++ b/cmd/deploy.go @@ -15,9 +15,9 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/resource" "knative.dev/client/pkg/util" - "knative.dev/func/pkg/builders" "knative.dev/func/pkg/config" + "knative.dev/func/pkg/deployer" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/k8s" ) @@ -131,7 +131,7 @@ EXAMPLES PreRunE: bindEnv("build", "build-timestamp", "builder", "builder-image", "base-image", "confirm", "domain", "env", "git-branch", "git-dir", "git-url", "image", "namespace", "path", "platform", "push", "pvc-size", - "service-account", "registry", "registry-insecure", "remote", + "service-account", "deploy-type", "registry", "registry-insecure", "remote", "username", "password", "token", "verbose", "remote-storage-class"), RunE: func(cmd *cobra.Command, args []string) error { return runDeploy(cmd, newClient) }, @@ -192,6 +192,8 @@ EXAMPLES "When triggering a remote deployment, set a custom volume size to allocate for the build operation ($FUNC_PVC_SIZE)") cmd.Flags().String("service-account", f.Deploy.ServiceAccountName, "Service account to be used in the deployed function ($FUNC_SERVICE_ACCOUNT)") + cmd.Flags().String("deploy-type", f.Deploy.DeployType, + fmt.Sprintf("Type of deployment to use: '%s' for Knative Service (default) or '%s' for Kubernetes Deployment ($FUNC_DEPLOY_TYPE)", deployer.KnativeDeployerName, deployer.KubernetesDeployerName)) // Static Flags: // Options which have static defaults only (not globally configurable nor // persisted with the function) @@ -565,6 +567,9 @@ type deployConfig struct { //Service account to be used in deployed function ServiceAccountName string + // DeployType specifies the type of deployment: "knative" or "raw" + DeployType string + // Remote indicates the deployment (and possibly build) process are to // be triggered in a remote environment rather than run locally.
Remote bool @@ -598,6 +603,7 @@ func newDeployConfig(cmd *cobra.Command) deployConfig { PVCSize: viper.GetString("pvc-size"), Timestamp: viper.GetBool("build-timestamp"), ServiceAccountName: viper.GetString("service-account"), + DeployType: viper.GetString("deploy-type"), } // NOTE: .Env should be viper.GetStringSlice, but this returns unparsed // results and appears to be an open issue since 2017: @@ -632,6 +638,7 @@ func (c deployConfig) Configure(f fn.Function) (fn.Function, error) { f.Build.Git.Revision = c.GitBranch // TODO: should match; perhaps "refSpec" f.Build.RemoteStorageClass = c.RemoteStorageClass f.Deploy.ServiceAccountName = c.ServiceAccountName + f.Deploy.DeployType = c.DeployType f.Local.Remote = c.Remote // PVCSize @@ -789,6 +796,32 @@ func (c deployConfig) Validate(cmd *cobra.Command) (err error) { return } +// clientOptions returns client options specific to deploy, including the appropriate deployer +func (c deployConfig) clientOptions() ([]fn.Option, error) { + // Start with build config options + o, err := c.buildConfig.clientOptions() + if err != nil { + return o, err + } + + // Add the appropriate deployer based on deploy type + deployType := c.DeployType + if deployType == "" { + deployType = deployer.KnativeDeployerName // default to knative for backwards compatibility + } + + switch deployType { + case deployer.KnativeDeployerName: + o = append(o, fn.WithDeployer(newKnativeDeployer(c.Verbose))) + case deployer.KubernetesDeployerName: + o = append(o, fn.WithDeployer(newK8sDeployer(c.Verbose))) + default: + return o, fmt.Errorf("unsupported deploy type: %s (supported: %s, %s)", deployType, deployer.KnativeDeployerName, deployer.KubernetesDeployerName) + } + + return o, nil +} + // printDeployMessages to the output. Non-error deployment messages. 
func printDeployMessages(out io.Writer, f fn.Function) { digest, err := isDigested(f.Image) diff --git a/cmd/describe.go b/cmd/describe.go index c2d1d836d8..2d0811f331 100644 --- a/cmd/describe.go +++ b/cmd/describe.go @@ -153,6 +153,9 @@ func (i info) Human(w io.Writer) error { fmt.Fprintf(w, " %v\n", route) } + fmt.Fprintln(w, "Deploy-Type:") + fmt.Fprintf(w, " %v\n", i.DeployType) + if len(i.Subscriptions) > 0 { fmt.Fprintln(w, "Subscriptions (Source, Type, Broker):") for _, s := range i.Subscriptions { @@ -178,6 +181,8 @@ func (i info) Plain(w io.Writer) error { fmt.Fprintf(w, "Route %v\n", route) } + fmt.Fprintf(w, "Deploy-Type %v\n", i.DeployType) + if len(i.Subscriptions) > 0 { for _, s := range i.Subscriptions { fmt.Fprintf(w, "Subscription %v %v %v\n", s.Source, s.Type, s.Broker) diff --git a/cmd/func-util/main.go b/cmd/func-util/main.go index c7b2bb4e8f..60992bb8e9 100644 --- a/cmd/func-util/main.go +++ b/cmd/func-util/main.go @@ -19,9 +19,9 @@ import ( "k8s.io/klog/v2" "knative.dev/func/pkg/builders/s2i" + "knative.dev/func/pkg/deployer/knative" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/k8s" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/scaffolding" "knative.dev/func/pkg/tar" ) diff --git a/cmd/list.go b/cmd/list.go index 879cc713e4..f57b50430f 100644 --- a/cmd/list.go +++ b/cmd/list.go @@ -187,9 +187,9 @@ func (items listItems) Plain(w io.Writer) error { tabWriter := tabwriter.NewWriter(w, 0, 8, 2, ' ', 0) defer tabWriter.Flush() - fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\n", "NAME", "NAMESPACE", "RUNTIME", "URL", "READY") + fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\t%s\n", "NAME", "NAMESPACE", "RUNTIME", "DEPLOY-TYPE", "URL", "READY") for _, item := range items { - fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\n", item.Name, item.Namespace, item.Runtime, item.URL, item.Ready) + fmt.Fprintf(tabWriter, "%s\t%s\t%s\t%s\t%s\t%s\n", item.Name, item.Namespace, item.Runtime, item.DeployType, item.URL, item.Ready) } return nil } diff --git a/docs/reference/func_deploy.md b/docs/reference/func_deploy.md index 1de14a1059..2664d710c7 100644 --- a/docs/reference/func_deploy.md +++ b/docs/reference/func_deploy.md @@ -119,6 +119,7 @@ func deploy -b, --builder string Builder to use when creating the function's container. Currently supported builders are "host", "pack" and "s2i". (default "pack") --builder-image string Specify a custom builder image for use by the builder other than its default. ($FUNC_BUILDER_IMAGE) -c, --confirm Prompt to confirm options interactively ($FUNC_CONFIRM) + --deploy-type string Type of deployment to use: 'knative' for Knative Service (default) or 'raw' for Kubernetes Deployment ($FUNC_DEPLOY_TYPE) --domain string Domain to use for the function's route. Cluster must be configured with domain matching for the given domain (ignored if unrecognized) ($FUNC_DOMAIN) -e, --env stringArray Environment variable to set in the form NAME=VALUE. You may provide this flag multiple times for setting multiple environment variables. To unset, specify the environment variable name followed by a "-" (e.g., NAME-). 
-t, --git-branch string Git revision (branch) to be used when deploying via the Git repository ($FUNC_GIT_BRANCH) diff --git a/pkg/deployer/common.go b/pkg/deployer/common.go new file mode 100644 index 0000000000..ce4e69a300 --- /dev/null +++ b/pkg/deployer/common.go @@ -0,0 +1,590 @@ +package deployer + +import ( + "context" + "fmt" + "os" + "regexp" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + clienteventingv1 "knative.dev/client/pkg/eventing/v1" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" +) + +const ( + DeployTypeAnnotation = "function.knative.dev/deploy-type" + + KnativeDeployerName = "knative" + KubernetesDeployerName = "raw" + + DefaultLivenessEndpoint = "/health/liveness" + DefaultReadinessEndpoint = "/health/readiness" + DefaultHTTPPort = 8080 + + // Dapr constants + DaprEnabled = "true" + DaprMetricsPort = "9092" + DaprEnableAPILogging = "true" +) + +// DeployDecorator is an interface for customizing deployment metadata +type DeployDecorator interface { + UpdateAnnotations(fn.Function, map[string]string) map[string]string + UpdateLabels(fn.Function, map[string]string) map[string]string +} + +// GenerateCommonLabels creates labels common to both Knative and K8s deployments +func GenerateCommonLabels(f fn.Function, decorator DeployDecorator) (map[string]string, error) { + ll, err := f.LabelsMap() + if err != nil { + return nil, err + } + + // Standard function labels + ll["boson.dev/function"] = "true" + ll["function.knative.dev/name"] = f.Name + ll["function.knative.dev/runtime"] = f.Runtime + + if f.Domain != "" { + ll["func.domain"] = f.Domain + } + + if decorator != nil { + ll = decorator.UpdateLabels(f, ll) + } + + return ll, nil +} + +// GenerateCommonAnnotations creates annotations common to both Knative and K8s deployments +func GenerateCommonAnnotations(f fn.Function, decorator DeployDecorator, daprInstalled bool, deployType string) map[string]string { + aa := make(map[string]string) + + // Add Dapr annotations if Dapr is installed + if daprInstalled { + for k, v := range GenerateDaprAnnotations(f.Name) { + aa[k] = v + } + } + + if len(deployType) > 0 { + aa[DeployTypeAnnotation] = deployType + } + + // Add user-defined annotations + for k, v := range f.Deploy.Annotations { + aa[k] = v + } + + // Apply decorator + if decorator != nil { + aa = decorator.UpdateAnnotations(f, aa) + } + + return aa +} + +// SetHealthEndpoints configures health probes for a container +func SetHealthEndpoints(f fn.Function, container *corev1.Container) { + livenessPath := DefaultLivenessEndpoint + if f.Deploy.HealthEndpoints.Liveness != "" { + livenessPath = f.Deploy.HealthEndpoints.Liveness + } + + readinessPath := DefaultReadinessEndpoint + if f.Deploy.HealthEndpoints.Readiness != "" { + readinessPath = f.Deploy.HealthEndpoints.Readiness + } + + container.LivenessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: livenessPath, + Port: intstr.FromInt32(DefaultHTTPPort), + }, + }, + } + + container.ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: readinessPath, + Port: 
intstr.FromInt32(DefaultHTTPPort), + }, + }, + } +} + +// SetSecurityContext configures security settings for a container +func SetSecurityContext(container *corev1.Container) { + runAsNonRoot := true + allowPrivilegeEscalation := false + capabilities := corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + } + seccompProfile := corev1.SeccompProfile{ + Type: "RuntimeDefault", + } + container.SecurityContext = &corev1.SecurityContext{ + RunAsNonRoot: &runAsNonRoot, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + Capabilities: &capabilities, + SeccompProfile: &seccompProfile, + } +} + +// GenerateDaprAnnotations generates annotations for Dapr support +// These annotations, if included and Dapr control plane is installed in +// the target cluster, will result in a sidecar exposing the Dapr HTTP API +// on localhost:3500 and metrics on 9092 +func GenerateDaprAnnotations(appID string) map[string]string { + aa := make(map[string]string) + aa["dapr.io/app-id"] = appID + aa["dapr.io/enabled"] = DaprEnabled + aa["dapr.io/metrics-port"] = DaprMetricsPort + aa["dapr.io/app-port"] = "8080" + aa["dapr.io/enable-api-logging"] = DaprEnableAPILogging + return aa +} + +// ProcessEnvs generates array of EnvVars and EnvFromSources from a function config +// envs: +// - name: EXAMPLE1 # ENV directly from a value +// value: value1 +// - name: EXAMPLE2 # ENV from the local ENV var +// value: {{ env:MY_ENV }} +// - name: EXAMPLE3 +// value: {{ secret:example-secret:key }} # ENV from a key in Secret +// - value: {{ secret:example-secret }} # all ENVs from Secret +// - name: EXAMPLE4 +// value: {{ configMap:configMapName:key }} # ENV from a key in ConfigMap +// - value: {{ configMap:configMapName }} # all key-pair values from ConfigMap are set as ENV +func ProcessEnvs(envs []fn.Env, referencedSecrets, referencedConfigMaps *sets.Set[string]) ([]corev1.EnvVar, []corev1.EnvFromSource, error) { + + envs = withOpenAddress(envs) // prepends ADDRESS=0.0.0.0 if not extant + + envVars := []corev1.EnvVar{{Name: "BUILT", Value: time.Now().Format("20060102T150405")}} + envFrom := []corev1.EnvFromSource{} + + for _, env := range envs { + if env.Name == nil && env.Value != nil { + // all key-pair values from secret/configMap are set as ENV, eg. {{ secret:secretName }} or {{ configMap:configMapName }} + if strings.HasPrefix(*env.Value, "{{") { + envFromSource, err := createEnvFromSource(*env.Value, referencedSecrets, referencedConfigMaps) + if err != nil { + return nil, nil, err + } + envFrom = append(envFrom, *envFromSource) + continue + } + } else if env.Name != nil && env.Value != nil { + if strings.HasPrefix(*env.Value, "{{") { + slices := strings.Split(strings.Trim(*env.Value, "{} "), ":") + if len(slices) == 3 { + // ENV from a key in secret/configMap, eg. FOO={{ secret:secretName:key }} FOO={{ configMap:configMapName:key }} + valueFrom, err := createEnvVarSource(slices, referencedSecrets, referencedConfigMaps) + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, ValueFrom: valueFrom}) + if err != nil { + return nil, nil, err + } + continue + } else if len(slices) == 2 { + // ENV from the local ENV var, eg. FOO={{ env:LOCAL_ENV }} + localValue, err := processLocalEnvValue(*env.Value) + if err != nil { + return nil, nil, err + } + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: localValue}) + continue + } + } else { + // a standard ENV with key and value, eg.
FOO=bar + envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: *env.Value}) + continue + } + } + return nil, nil, fmt.Errorf("unsupported env source entry \"%v\"", env) + } + + return envVars, envFrom, nil +} + +// withOpenAddress prepends ADDRESS=0.0.0.0 to the envs if not present. +// +// This is combined with the value of PORT at runtime to determine the full +// Listener address on which a Function will listen for TCP requests. +// +// Runtimes should, by default, only listen on the loopback interface, as +// they may be `func run` locally, for security purposes. +// This environment variable instructs the runtimes to listen on all interfaces +// by default when actually being deployed, since they will need to actually +// listen for client requests and for health readiness/liveness probes. +// +// Should a user wish to securely open their function to only receive requests +// on a specific interface, such as a WireGuard-encrypted mesh network which +// presents as a specific interface, that can be achieved by setting the +// ADDRESS value as an environment variable on their function to the interface +// on which to listen. +// +// NOTE this env is currently only respected by scaffolded Go functions, because +// they are the only ones which support being `func run` locally. Other +// runtimes will respect the value as they are updated to support scaffolding. +func withOpenAddress(ee []fn.Env) []fn.Env { + // TODO: this is unnecessarily complex due to both key and value of the + // envs slice being pointers. There is an outstanding tech-debt item + // to remove pointers from Function Envs, Volumes, Labels, and Options. + var found bool + for _, e := range ee { + if e.Name != nil && *e.Name == "ADDRESS" { + found = true + break + } + } + if !found { + k := "ADDRESS" + v := "0.0.0.0" + ee = append(ee, fn.Env{Name: &k, Value: &v}) + } + return ee +} + +func createEnvFromSource(value string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvFromSource, error) { + slices := strings.Split(strings.Trim(value, "{} "), ":") + if len(slices) != 2 { + return nil, fmt.Errorf("env requires a value in form \"resourceType:name\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) + } + + envVarSource := corev1.EnvFromSource{} + + typeString := strings.TrimSpace(slices[0]) + sourceName := strings.TrimSpace(slices[1]) + + var sourceType string + + switch typeString { + case "configMap": + sourceType = "ConfigMap" + envVarSource.ConfigMapRef = &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }} + + if !referencedConfigMaps.Has(sourceName) { + referencedConfigMaps.Insert(sourceName) + } + case "secret": + sourceType = "Secret" + envVarSource.SecretRef = &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }} + if !referencedSecrets.Has(sourceName) { + referencedSecrets.Insert(sourceName) + } + default: + return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) + } + + if len(sourceName) == 0 { + return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) + } + + return &envVarSource, nil +} + +func createEnvVarSource(slices []string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvVarSource, error) { + if len(slices) != 3 { + return nil, fmt.Errorf("env requires a value in form \"resourceType:name:key\" where
\"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) + } + + envVarSource := corev1.EnvVarSource{} + + typeString := strings.TrimSpace(slices[0]) + sourceName := strings.TrimSpace(slices[1]) + sourceKey := strings.TrimSpace(slices[2]) + + var sourceType string + + switch typeString { + case "configMap": + sourceType = "ConfigMap" + envVarSource.ConfigMapKeyRef = &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }, + Key: sourceKey} + + if !referencedConfigMaps.Has(sourceName) { + referencedConfigMaps.Insert(sourceName) + } + case "secret": + sourceType = "Secret" + envVarSource.SecretKeyRef = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sourceName, + }, + Key: sourceKey} + + if !referencedSecrets.Has(sourceName) { + referencedSecrets.Insert(sourceName) + } + default: + return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) + } + + if len(sourceName) == 0 { + return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) + } + + if len(sourceKey) == 0 { + return nil, fmt.Errorf("the key referenced by resource %s %q cannot be an empty string", sourceType, sourceName) + } + + return &envVarSource, nil +} + +var evRegex = regexp.MustCompile(`^{{\s*(\w+)\s*:(\w+)\s*}}$`) + +const ( + ctxIdx = 1 + valIdx = 2 +) + +func processLocalEnvValue(val string) (string, error) { + match := evRegex.FindStringSubmatch(val) + if len(match) > valIdx { + if match[ctxIdx] != "env" { + return "", fmt.Errorf("allowed env value entry is \"{{ env:LOCAL_VALUE }}\"; got: %q", match[ctxIdx]) + } + if v, ok := os.LookupEnv(match[valIdx]); ok { + return v, nil + } else { + return "", fmt.Errorf("required local environment variable %q is not set", match[valIdx]) + } + } else { + return val, nil + } +} + +// ProcessVolumes generates Volumes and VolumeMounts from a function config +// volumes: +// - secret: example-secret # mount Secret as Volume +// path: /etc/secret-volume +// - configMap: example-configMap # mount ConfigMap as Volume +// path: /etc/configMap-volume +// - persistentVolumeClaim: { claimName: example-pvc } # mount PersistentVolumeClaim as Volume +// path: /etc/secret-volume +// - emptyDir: {} # mount EmptyDir as Volume +// path: /etc/configMap-volume +func ProcessVolumes(volumes []fn.Volume, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string]) ([]corev1.Volume, []corev1.VolumeMount, error) { + createdVolumes := sets.NewString() + usedPaths := sets.NewString() + + newVolumes := []corev1.Volume{} + newVolumeMounts := []corev1.VolumeMount{} + + for _, vol := range volumes { + + volumeName := "" + + if vol.Secret != nil { + volumeName = "secret-" + *vol.Secret + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: *vol.Secret, + }, + }, + }) + createdVolumes.Insert(volumeName) + + if !referencedSecrets.Has(*vol.Secret) { + referencedSecrets.Insert(*vol.Secret) + } + } + } else if vol.ConfigMap != nil { + volumeName = "config-map-" + *vol.ConfigMap + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: *vol.ConfigMap, + }, + }, + }, + }) + 
createdVolumes.Insert(volumeName) + + if !referencedConfigMaps.Has(*vol.ConfigMap) { + referencedConfigMaps.Insert(*vol.ConfigMap) + } + } + } else if vol.PersistentVolumeClaim != nil { + volumeName = "pvc-" + *vol.PersistentVolumeClaim.ClaimName + + if !createdVolumes.Has(volumeName) { + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: *vol.PersistentVolumeClaim.ClaimName, + ReadOnly: vol.PersistentVolumeClaim.ReadOnly, + }, + }, + }) + createdVolumes.Insert(volumeName) + + if !referencedPVCs.Has(*vol.PersistentVolumeClaim.ClaimName) { + referencedPVCs.Insert(*vol.PersistentVolumeClaim.ClaimName) + } + } + } else if vol.EmptyDir != nil { + volumeName = "empty-dir-" + rand.String(7) + + if !createdVolumes.Has(volumeName) { + + var sizeLimit *resource.Quantity + if vol.EmptyDir.SizeLimit != nil { + sl, err := resource.ParseQuantity(*vol.EmptyDir.SizeLimit) + if err != nil { + return nil, nil, fmt.Errorf("invalid quantity for sizeLimit: %s. Error: %s", *vol.EmptyDir.SizeLimit, err) + } + sizeLimit = &sl + } + + newVolumes = append(newVolumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMedium(vol.EmptyDir.Medium), + SizeLimit: sizeLimit, + }, + }, + }) + createdVolumes.Insert(volumeName) + } + } + + if volumeName != "" { + if !usedPaths.Has(*vol.Path) { + newVolumeMounts = append(newVolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: *vol.Path, + }) + usedPaths.Insert(*vol.Path) + } else { + return nil, nil, fmt.Errorf("mount path %s is defined multiple times", *vol.Path) + } + } + } + + return newVolumes, newVolumeMounts, nil +} + +// CheckResourcesArePresent returns error if Secrets or ConfigMaps +// referenced in input sets are not deployed on the cluster in the specified namespace +func CheckResourcesArePresent(ctx context.Context, namespace string, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string], referencedServiceAccount string) error { + errMsg := "" + for s := range *referencedSecrets { + _, err := k8s.GetSecret(ctx, s, namespace) + if err != nil { + if errors.IsForbidden(err) { + errMsg += " Ensure that the service account has the necessary permissions to access the secret.\n" + } else { + errMsg += fmt.Sprintf(" referenced Secret \"%s\" is not present in namespace \"%s\"\n", s, namespace) + } + } + } + + for cm := range *referencedConfigMaps { + _, err := k8s.GetConfigMap(ctx, cm, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced ConfigMap \"%s\" is not present in namespace \"%s\"\n", cm, namespace) + } + } + + for pvc := range *referencedPVCs { + _, err := k8s.GetPersistentVolumeClaim(ctx, pvc, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced PersistentVolumeClaim \"%s\" is not present in namespace \"%s\"\n", pvc, namespace) + } + } + + // check if referenced ServiceAccount is present in the namespace if it is not default + if referencedServiceAccount != "" && referencedServiceAccount != "default" { + err := k8s.GetServiceAccount(ctx, referencedServiceAccount, namespace) + if err != nil { + errMsg += fmt.Sprintf(" referenced ServiceAccount \"%s\" is not present in namespace \"%s\"\n", referencedServiceAccount, namespace) + } + } + + if errMsg != "" { + return fmt.Errorf("error(s) while validating resources:\n%s", errMsg) + } + + return nil +} + +func CreateTriggers(ctx 
context.Context, f fn.Function, obj kmeta.Accessor, eventingClient clienteventingv1.KnEventingClient) error { + fmt.Fprintf(os.Stderr, "🎯 Creating Triggers on the cluster\n") + + for i, sub := range f.Deploy.Subscriptions { + // create the filter: + attributes := make(map[string]string) + for key, value := range sub.Filters { + attributes[key] = value + } + + err := eventingClient.CreateTrigger(ctx, &eventingv1.Trigger{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-function-trigger-%d", obj.GetName(), i), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: obj.GroupVersionKind().Version, + Kind: obj.GroupVersionKind().Kind, + Name: obj.GetName(), + UID: obj.GetUID(), + }, + }, + }, + Spec: eventingv1.TriggerSpec{ + Broker: sub.Source, + + Subscriber: duckv1.Destination{ + Ref: &duckv1.KReference{ + APIVersion: obj.GroupVersionKind().Version, + Kind: obj.GroupVersionKind().Kind, + Name: obj.GetName(), + }}, + + Filter: &eventingv1.TriggerFilter{ + Attributes: attributes, + }, + }, + }) + if err != nil && !errors.IsAlreadyExists(err) { + err = fmt.Errorf("knative deployer failed to create the Trigger: %v", err) + return err + } + } + return nil +} diff --git a/pkg/deployer/common_test.go b/pkg/deployer/common_test.go new file mode 100644 index 0000000000..bce7db56ce --- /dev/null +++ b/pkg/deployer/common_test.go @@ -0,0 +1,47 @@ +package deployer + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + + fn "knative.dev/func/pkg/functions" +) + +func Test_SetHealthEndpoints(t *testing.T) { + f := fn.Function{ + Name: "testing", + Deploy: fn.DeploySpec{ + HealthEndpoints: fn.HealthEndpoints{ + Liveness: "/lively", + Readiness: "/readyAsIllEverBe", + }, + }, + } + c := corev1.Container{} + SetHealthEndpoints(f, &c) + got := c.LivenessProbe.HTTPGet.Path + if got != "/lively" { + t.Errorf("expected \"/lively\" but got %v", got) + } + got = c.ReadinessProbe.HTTPGet.Path + if got != "/readyAsIllEverBe" { + t.Errorf("expected \"/readyAsIllEverBe\" but got %v", got) + } +} + +func Test_SetHealthEndpointDefaults(t *testing.T) { + f := fn.Function{ + Name: "testing", + } + c := corev1.Container{} + SetHealthEndpoints(f, &c) + got := c.LivenessProbe.HTTPGet.Path + if got != DefaultLivenessEndpoint { + t.Errorf("expected \"%v\" but got %v", DefaultLivenessEndpoint, got) + } + got = c.ReadinessProbe.HTTPGet.Path + if got != DefaultReadinessEndpoint { + t.Errorf("expected \"%v\" but got %v", DefaultReadinessEndpoint, got) + } +} diff --git a/pkg/knative/deployer_int_test.go b/pkg/deployer/integration_test_helper.go similarity index 59% rename from pkg/knative/deployer_int_test.go rename to pkg/deployer/integration_test_helper.go index 5b47cd0830..fb10519927 100644 --- a/pkg/knative/deployer_int_test.go +++ b/pkg/deployer/integration_test_helper.go @@ -1,11 +1,13 @@ //go:build integration +// +build integration -package knative_test +package deployer import ( "context" "encoding/json" "fmt" + "io" "net/http" "os" "path/filepath" @@ -16,34 +18,35 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" - + "k8s.io/apimachinery/pkg/util/wait" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/k8s" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" + fntest "knative.dev/func/pkg/testing" + k8stest "knative.dev/func/pkg/testing/k8s" v1 "knative.dev/pkg/apis/duck/v1" - fntest "knative.dev/func/pkg/testing" + fn "knative.dev/func/pkg/functions" + 
"knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" ) -// TestInt_Deploy ensures that the deployer creates a callable service. +// IntegrationTest_Deploy ensures that the deployer creates a callable service. // See TestInt_Metadata for Labels, Volumes, Envs. // See TestInt_Events for Subscriptions -func TestInt_Deploy(t *testing.T) { +func IntegrationTest_Deploy(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-deploy-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := k8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) f, err := client.Init(fn.Function{ @@ -51,7 +54,10 @@ func TestInt_Deploy(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) @@ -93,29 +99,28 @@ func TestInt_Deploy(t *testing.T) { } // Invoke - statusCode, _ := invoke(t, ctx, instance.Route) + statusCode, _ := invoke(t, ctx, instance.Route, deployType) if statusCode != http.StatusOK { t.Fatalf("expected 200 OK, got %d", statusCode) } - } -// TestInt_Metadata ensures that Secrets, Labels, and Volumes are applied +// IntegrationTest_Metadata ensures that Secrets, Labels, and Volumes are applied // when deploying. -func TestInt_Metadata(t *testing.T) { +func IntegrationTest_Metadata(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-metadata-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := k8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) // Cluster Resources @@ -146,7 +151,10 @@ func TestInt_Metadata(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) @@ -227,7 +235,7 @@ func TestInt_Metadata(t *testing.T) { // ---------- // Invoke - _, result := invoke(t, ctx, instance.Route) + _, result := invoke(t, ctx, instance.Route, deployType) // Verify Envs if result.EnvVars["STATIC"] != "static-value" { @@ -267,28 +275,23 @@ func TestInt_Metadata(t *testing.T) { } } -// TestInt_Events ensures that eventing triggers work. -func TestInt_Events(t *testing.T) { +// IntegrationTest_Events ensures that eventing triggers work. 
+func IntegrationTest_Events(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-events-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := k8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) - // Trigger - // ------- - triggerName := "func-int-knative-events-trigger" - validator := createTrigger(t, ctx, ns, triggerName, name) - // Function // -------- f, err := client.Init(fn.Function{ @@ -296,12 +299,20 @@ func TestInt_Events(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) } + // Trigger + // ------- + triggerName := "func-int-knative-events-trigger" + validator := createTrigger(t, ctx, ns, triggerName, f) + // Deploy // ------ @@ -342,22 +353,22 @@ func TestInt_Events(t *testing.T) { } } -// TestInt_Scale spot-checks that the scale settings are applied by +// IntegrationTest_Scale spot-checks that the scale settings are applied by // ensuring the service is started multiple times when minScale=2 -func TestInt_Scale(t *testing.T) { +func IntegrationTest_Scale(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-scale-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := k8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) f, err := client.Init(fn.Function{ @@ -365,7 +376,10 @@ func TestInt_Scale(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) @@ -419,14 +433,6 @@ func TestInt_Scale(t *testing.T) { if err != nil { t.Fatal(err) } - servingClient, err := knative.NewServingClient(ns) - if err != nil { - t.Fatal(err) - } - ksvc, err := servingClient.GetService(ctx, name) - if err != nil { - t.Fatal(err) - } podList, err := cliSet.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -440,7 +446,6 @@ func TestInt_Scale(t *testing.T) { } } } - t.Logf("Found %d ready pods for revision %s (minScale=%d)", readyPods, ksvc.Status.LatestCreatedRevisionName, minScale) // Verify minScale is respected if readyPods < int(minScale) { @@ -455,22 +460,22 @@ func TestInt_Scale(t *testing.T) { // } } -// TestInt_EnvsUpdate ensures that removing and updating envs are correctly +// IntegrationTest_EnvsUpdate ensures that removing and updating envs are correctly // reflected during a deployment update. 
-func TestInt_EnvsUpdate(t *testing.T) { +func IntegrationTest_EnvsUpdate(t *testing.T, deployer fn.Deployer, remover fn.Remover, describer fn.Describer, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-envsupdate-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := k8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) // Function @@ -480,7 +485,10 @@ func TestInt_EnvsUpdate(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) @@ -531,7 +539,7 @@ func TestInt_EnvsUpdate(t *testing.T) { // Assert Initial ENVS are set // ---------- - _, result := invoke(t, ctx, instance.Route) + _, result := invoke(t, ctx, instance.Route, deployType) // Verify Envs if result.EnvVars["STATIC_A"] != "static-value-a" { @@ -565,9 +573,19 @@ func TestInt_EnvsUpdate(t *testing.T) { t.Fatal(err) } + cliSet, err := k8s.NewKubernetesClientset() + if err != nil { + t.Fatal(err) + } + selector := fmt.Sprintf("function.knative.dev/name=%s", f.Name) + err = k8s.WaitForDeploymentAvailableBySelector(ctx, cliSet, ns, selector, time.Minute) + if err != nil { + t.Fatal(err) + } + // Assertions // ---------- - _, result = invoke(t, ctx, instance.Route) + _, result = invoke(t, ctx, instance.Route, deployType) // Verify Envs // Log all environment variables for debugging @@ -600,22 +618,20 @@ func TestInt_EnvsUpdate(t *testing.T) { } } -// Helper functions -// ================ +// Basic happy path test of deploy->describe->list->re-deploy->delete. +func IntegrationTest_FullPath(t *testing.T, deployer fn.Deployer, remover fn.Remover, lister fn.Lister, describer fn.Describer, deployType string) { + var err error + functionName := "fn-testing" -// namespace returns the integration test namespace or that specified by -// FUNC_INT_NAMESPACE (creating if necessary) -func namespace(t *testing.T, ctx context.Context) string { - t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) + t.Cleanup(cancel) cliSet, err := k8s.NewKubernetesClientset() if err != nil { t.Fatal(err) } - // TODO: choose FUNC_INT_NAMESPACE if it exists? 
- - namespace := fntest.DefaultIntTestNamespacePrefix + "-" + rand.String(5) + namespace := "knative-integration-test-ns-" + rand.String(5) ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -627,39 +643,294 @@ if err != nil { t.Fatal(err) } - t.Cleanup(func() { - err := cliSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) - if err != nil { - t.Logf("error deleting namespace: %v", err) + t.Cleanup(func() { _ = cliSet.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) }) + t.Log("created namespace: ", namespace) + + secret := "credentials-secret" + sc := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret, + }, + Data: map[string][]byte{ + "FUNC_TEST_SC_A": []byte("A"), + "FUNC_TEST_SC_B": []byte("B"), + }, + StringData: nil, + Type: corev1.SecretTypeOpaque, + } + + _, err = cliSet.CoreV1().Secrets(namespace).Create(ctx, sc, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + configMap := "testing-config-map" + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMap, + }, + Data: map[string]string{"FUNC_TEST_CM_A": "1"}, + } + _, err = cliSet.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + subscriberRef := v1.KReference{ + Kind: "Service", + Namespace: namespace, + Name: functionName, + } + + switch deployType { + case KnativeDeployerName: + subscriberRef.APIVersion = "serving.knative.dev/v1" + case KubernetesDeployerName: + subscriberRef.APIVersion = "v1" + } + + trigger := "testing-trigger" + tr := &eventingv1.Trigger{ + ObjectMeta: metav1.ObjectMeta{ + Name: trigger, + }, + Spec: eventingv1.TriggerSpec{ + Broker: "testing-broker", + Subscriber: v1.Destination{Ref: &subscriberRef}, + Filter: &eventingv1.TriggerFilter{ + Attributes: map[string]string{ + "source": "test-event-source", + "type": "test-event-type", + }, + }, + }, + } + + eventingClient, err := knative.NewEventingClient(namespace) + if err != nil { + t.Fatal(err) + } + err = eventingClient.CreateTrigger(ctx, tr) + if err != nil { + t.Fatal(err) + } + + minScale := int64(2) + maxScale := int64(100) + + now := time.Now() + function := fn.Function{ + SpecVersion: "SNAPSHOT", + Root: "/non/existent", + Name: functionName, + Runtime: "blub", + Template: "cloudevents", + // Basic HTTP service: + // * POST / will do echo -- return body back + // * GET /info will get info about environment: + // * environment variables whose name starts with FUNC_TEST, + // * files under /etc/cm and /etc/sc. + // * application also prints the same info to stderr on startup + Created: now, + Deploy: fn.DeploySpec{ + // TODO: gauron99 - is it okay to have this explicitly set to deploy.image already? 
+ // With this I skip the logic of setting the .Deploy.Image field but it should be fine for this test + Image: "quay.io/mvasek/func-test-service@sha256:2eca4de00d7569c8791634bdbb0c4d5ec8fb061b001549314591e839dabd5269", + Namespace: namespace, + Labels: []fn.Label{{Key: ptr("my-label"), Value: ptr("my-label-value")}}, + Options: fn.Options{ + Scale: &fn.ScaleOptions{ + Min: &minScale, + Max: &maxScale, + }, + }, + DeployType: deployType, + }, + Run: fn.RunSpec{ + Envs: []fn.Env{ + {Name: ptr("FUNC_TEST_VAR"), Value: ptr("nbusr123")}, + {Name: ptr("FUNC_TEST_SC_A"), Value: ptr("{{ secret: " + secret + ":FUNC_TEST_SC_A }}")}, + {Value: ptr("{{configMap:" + configMap + "}}")}, + }, + Volumes: []fn.Volume{ + {Secret: ptr(secret), Path: ptr("/etc/sc")}, + {ConfigMap: ptr(configMap), Path: ptr("/etc/cm")}, + }, + }, + } + + buff := new(knative.SynchronizedBuffer) + go func() { + selector := fmt.Sprintf("function.knative.dev/name=%s", functionName) + _ = k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", "", &now, buff) + }() + + depRes, err := deployer.Deploy(ctx, function) + if err != nil { + t.Fatal(err) + } + + outStr := buff.String() + t.Logf("deploy result: %+v", depRes) + t.Log("function output:\n" + outStr) + + if strings.Count(outStr, "starting app") < int(minScale) { + t.Errorf("application should be scaled to at least %d pods", minScale) + } + + // verify that environment variables and volumes work + if !strings.Contains(outStr, "FUNC_TEST_VAR=nbusr123") { + t.Error("plain environment variable was not propagated") + } + if !strings.Contains(outStr, "FUNC_TEST_SC_A=A") { + t.Error("environment variable from secret was not propagated") + } + if strings.Contains(outStr, "FUNC_TEST_SC_B=") { + t.Error("environment variable from secret was propagated but should not have been") + } + if !strings.Contains(outStr, "FUNC_TEST_CM_A=1") { + t.Error("environment variable from config-map was not propagated") + } + if !strings.Contains(outStr, "/etc/sc/FUNC_TEST_SC_A") { + t.Error("secret was not mounted") + } + if !strings.Contains(outStr, "/etc/cm/FUNC_TEST_CM_A") { + t.Error("config-map was not mounted") + } + + instance, err := describer.Describe(ctx, functionName, namespace) + if err != nil { + t.Fatal(err) + } + t.Logf("instance: %+v", instance) + + // try to invoke the function + reqBody := "Hello World!"
+ respBody, err := postText(ctx, instance.Route, reqBody, deployType) + if err != nil { + t.Fatalf("failed to invoke function: %v", err) + } else { + t.Log("resp body:\n" + respBody) + if !strings.Contains(respBody, reqBody) { + t.Error("response body doesn't contain request body") + } + } + + // verify that trigger info is included in describe output + if len(instance.Subscriptions) != 1 { + t.Error("exactly one subscription is expected") + } else { + if instance.Subscriptions[0].Broker != "testing-broker" { + t.Error("bad broker") + } + if instance.Subscriptions[0].Source != "test-event-source" { + t.Error("bad source") } + if instance.Subscriptions[0].Type != "test-event-type" { + t.Error("bad type") + } + } + + list, err := lister.List(ctx, namespace) + if err != nil { + t.Fatal(err) + } + t.Logf("functions list: %+v", list) + + if len(list) != 1 { + t.Errorf("expected exactly one function but got: %d", len(list)) + } else { + if list[0].URL != instance.Route { + t.Error("URL mismatch") + } + } + + t.Setenv("LOCAL_ENV_TO_DEPLOY", "iddqd") + function.Run.Envs = []fn.Env{ + {Name: ptr("FUNC_TEST_VAR"), Value: ptr("{{ env:LOCAL_ENV_TO_DEPLOY }}")}, + {Value: ptr("{{ secret: " + secret + " }}")}, + {Name: ptr("FUNC_TEST_CM_A_ALIASED"), Value: ptr("{{configMap:" + configMap + ":FUNC_TEST_CM_A}}")}, + } + now = time.Now() // reset timer for new log receiver + + redeployLogBuff := new(knative.SynchronizedBuffer) + go func() { + selector := fmt.Sprintf("function.knative.dev/name=%s", functionName) + _ = k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", "", &now, redeployLogBuff) + }() + + _, err = deployer.Deploy(ctx, function) + if err != nil { + t.Fatal(err) + } + + // Give logs time to be collected (not sure why we need this here and not on the first collector too) + outStr = "" + err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (done bool, err error) { + outStr = redeployLogBuff.String() + // any collected output means the redeployed pods are up and logging + if len(outStr) > 0 {
+ return true, nil + } + + return false, nil }) - t.Log("created namespace: ", namespace) + if err != nil { + t.Fatal(err) + } - return namespace -} + t.Log("function output:\n" + outStr) - -// registry returns the registry to use for tests -func registry() string { - // Use environment variable if set, otherwise use localhost registry - if reg := os.Getenv("FUNC_INT_TEST_REGISTRY"); reg != "" { - return reg + // verify that environment variables have been changed by re-deploy + if strings.Contains(outStr, "FUNC_TEST_CM_A=") { + t.Error("environment variables from previous deployment were not removed") + } + if !strings.Contains(outStr, "FUNC_TEST_SC_A=A") || !strings.Contains(outStr, "FUNC_TEST_SC_B=B") { + t.Error("environment variables were not imported from secret") + } + if !strings.Contains(outStr, "FUNC_TEST_VAR=iddqd") { + t.Error("environment variable was not set from local environment variable") + } + if !strings.Contains(outStr, "FUNC_TEST_CM_A_ALIASED=1") { + t.Error("environment variable was not set from config-map") + } + + err = remover.Remove(ctx, functionName, namespace) + if err != nil { + t.Fatal(err) + } + + list, err = lister.List(ctx, namespace) + if err != nil { + t.Fatal(err) + } + + if len(list) != 0 { + t.Errorf("expected exactly zero functions but got: %d", len(list)) } - // Default to localhost registry (same as E2E tests) - return fntest.DefaultIntTestRegistry } +// Helper functions +// ================ + // Decode response type result struct { EnvVars map[string]string Mounts map[string]bool } -func invoke(t *testing.T, ctx context.Context, route string) (statusCode int, r result) { +func invoke(t *testing.T, ctx context.Context, route string, deployType string) (statusCode int, r result) { req, err := http.NewRequestWithContext(ctx, "GET", route, nil) if err != nil { t.Fatal(err) } - httpClient := &http.Client{Timeout: 30 * time.Second} + + httpClient, closeFunc, err := getHttpClient(ctx, deployType) + if err != nil { + t.Fatal(err) + } + defer closeFunc() + resp, err := httpClient.Do(req) if err != nil { t.Fatal(err) @@ -674,8 +945,19 @@ func invoke(t *testing.T, ctx context.Context, route string) (statusCode int, r return resp.StatusCode, r } -func createTrigger(t *testing.T, ctx context.Context, namespace, triggerName, functionName string) func(fn.Instance) error { +func createTrigger(t *testing.T, ctx context.Context, namespace, triggerName string, function fn.Function) func(fn.Instance) error { t.Helper() + + var subscriberAPIVersion string + switch function.Deploy.DeployType { + case KnativeDeployerName: + subscriberAPIVersion = "serving.knative.dev/v1" + case KubernetesDeployerName: + subscriberAPIVersion = "v1" + default: + t.Fatalf("unknown deploy type: %s", function.Deploy.DeployType) + } + tr := &eventingv1.Trigger{ ObjectMeta: metav1.ObjectMeta{ Name: triggerName, @@ -685,8 +967,8 @@ func createTrigger(t *testing.T, ctx context.Context, namespace, triggerName, fu Subscriber: v1.Destination{Ref: &v1.KReference{ Kind: "Service", Namespace: namespace, - Name: functionName, - APIVersion: "serving.knative.dev/v1", + Name: function.Name, + APIVersion: subscriberAPIVersion, }}, Filter: &eventingv1.TriggerFilter{ Attributes: map[string]string{ @@ -856,3 +1138,66 @@ func (f *Function) Handle(w http.ResponseWriter, req *http.Request) { json.NewEncoder(w).Encode(resp) } ` + +func postText(ctx context.Context, url, reqBody, deployType string) (respBody string, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", url, strings.NewReader(reqBody)) + if
err != nil { + return "", err + } + req.Header.Add("Content-Type", "text/plain") + + client, closeFunc, err := getHttpClient(ctx, deployType) + if err != nil { + return "", fmt.Errorf("error creating http client: %w", err) + } + defer closeFunc() + + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer func() { + _ = resp.Body.Close() + }() + + bs, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(bs), nil +} + +func ptr[T interface{}](s T) *T { + return &s +} + +func getHttpClient(ctx context.Context, deployType string) (*http.Client, func(), error) { + noopDeferFunc := func() {} + // For Kubernetes deployments, use in-cluster dialer to access ClusterIP services + switch deployType { + case KubernetesDeployerName: + clientConfig := k8s.GetClientConfig() + dialer, err := k8s.NewInClusterDialer(ctx, clientConfig) + if err != nil { + return nil, noopDeferFunc, fmt.Errorf("failed to create in-cluster dialer: %w", err) + } + + transport := &http.Transport{ + DialContext: dialer.DialContext, + } + + deferFunc := func() { + _ = dialer.Close() + } + + return &http.Client{ + Transport: transport, + Timeout: time.Minute, + }, deferFunc, nil + case KnativeDeployerName: + // For Knative deployments, use default client (service is externally accessible) + return http.DefaultClient, noopDeferFunc, nil + default: + return nil, noopDeferFunc, fmt.Errorf("unknown deploy type: %s", deployType) + } +} diff --git a/pkg/deployer/k8s/deployer.go b/pkg/deployer/k8s/deployer.go new file mode 100644 index 0000000000..d68cc9ee0e --- /dev/null +++ b/pkg/deployer/k8s/deployer.go @@ -0,0 +1,279 @@ +package k8s + +import ( + "context" + "fmt" + "os" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + clienteventingv1 "knative.dev/client/pkg/eventing/v1" + "knative.dev/func/pkg/deployer" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" +) + +type DeployerOpt func(*Deployer) + +type Deployer struct { + verbose bool + decorator deployer.DeployDecorator +} + +func NewDeployer(opts ...DeployerOpt) *Deployer { + d := &Deployer{} + for _, opt := range opts { + opt(d) + } + return d +} + +func WithDeployerVerbose(verbose bool) DeployerOpt { + return func(d *Deployer) { + d.verbose = verbose + } +} + +func WithDeployerDecorator(decorator deployer.DeployDecorator) DeployerOpt { + return func(d *Deployer) { + d.decorator = decorator + } +} + +func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResult, error) { + namespace := f.Namespace + if namespace == "" { + namespace = f.Deploy.Namespace + } + if namespace == "" { + return fn.DeploymentResult{}, fmt.Errorf("deployer requires either a target namespace or that the function be already deployed") + } + + // Choosing an image to deploy: + // If the service has not been deployed before, but there exists a + // build image, this build image should be used for the deploy. + // TODO: test/consider the case where it HAS been deployed, and the + // build image has been updated /since/ deployment: do we need a + // timestamp? Incrementation? 
+ if f.Deploy.Image == "" { + f.Deploy.Image = f.Build.Image + } + + clientset, err := k8s.NewKubernetesClientset() + if err != nil { + return fn.DeploymentResult{}, err + } + + // Check if Dapr is installed + daprInstalled := false + _, err = clientset.CoreV1().Namespaces().Get(ctx, "dapr-system", metav1.GetOptions{}) + if err == nil { + daprInstalled = true + } + + deploymentClient := clientset.AppsV1().Deployments(namespace) + serviceClient := clientset.CoreV1().Services(namespace) + eventingClient, err := knative.NewEventingClient(namespace) + if err != nil { + return fn.DeploymentResult{}, err + } + + existingDeployment, err := deploymentClient.Get(ctx, f.Name, metav1.GetOptions{}) + + var status fn.Status + if err == nil { + deployment, svc, err := d.generateResources(f, namespace, daprInstalled) + if err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to generate resources: %w", err) + } + + // Preserve resource version for update + deployment.ResourceVersion = existingDeployment.ResourceVersion + + if _, err = deploymentClient.Update(ctx, deployment, metav1.UpdateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to update deployment: %w", err) + } + + existingService, err := serviceClient.Get(ctx, f.Name, metav1.GetOptions{}) + if err == nil { + svc.ResourceVersion = existingService.ResourceVersion + if _, err = serviceClient.Update(ctx, svc, metav1.UpdateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to update service: %w", err) + } + } else if errors.IsNotFound(err) { + // Service doesn't exist, create it + if _, err = serviceClient.Create(ctx, svc, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create service: %w", err) + } + } else { + return fn.DeploymentResult{}, fmt.Errorf("failed to get existing service: %w", err) + } + + err = createTriggers(ctx, f, serviceClient, eventingClient) + if err != nil { + return fn.DeploymentResult{}, err + } + + status = fn.Updated + if d.verbose { + fmt.Fprintf(os.Stderr, "Updated deployment and service %s in namespace %s\n", f.Name, namespace) + } + } else { + if !errors.IsNotFound(err) { + return fn.DeploymentResult{}, fmt.Errorf("failed to check for existing deployment: %w", err) + } + + deployment, svc, err := d.generateResources(f, namespace, daprInstalled) + if err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to generate resources: %w", err) + } + + if _, err = deploymentClient.Create(ctx, deployment, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create deployment: %w", err) + } + + if _, err = serviceClient.Create(ctx, svc, metav1.CreateOptions{}); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("failed to create service: %w", err) + } + + err = createTriggers(ctx, f, serviceClient, eventingClient) + if err != nil { + return fn.DeploymentResult{}, err + } + + status = fn.Deployed + if d.verbose { + fmt.Fprintf(os.Stderr, "Created deployment and service %s in namespace %s\n", f.Name, namespace) + } + } + + if err := k8s.WaitForDeploymentAvailable(ctx, clientset, namespace, f.Name, 2*time.Minute); err != nil { + return fn.DeploymentResult{}, fmt.Errorf("deployment did not become ready: %w", err) + } + + url := fmt.Sprintf("http://%s.%s.svc.cluster.local", f.Name, namespace) + + return fn.DeploymentResult{ + Status: status, + URL: url, + Namespace: namespace, + }, nil +} + +func (d *Deployer) generateResources(f fn.Function, namespace string, daprInstalled bool) 
(*appsv1.Deployment, *corev1.Service, error) { + labels, err := deployer.GenerateCommonLabels(f, d.decorator) + if err != nil { + return nil, nil, err + } + + annotations := deployer.GenerateCommonAnnotations(f, d.decorator, daprInstalled, f.Deploy.DeployType) + + // Use annotations for pod template + podAnnotations := make(map[string]string) + for k, v := range annotations { + podAnnotations[k] = v + } + + // Process environment variables and volumes + referencedSecrets := sets.New[string]() + referencedConfigMaps := sets.New[string]() + referencedPVCs := sets.New[string]() + + envVars, envFrom, err := deployer.ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + if err != nil { + return nil, nil, fmt.Errorf("failed to process environment variables: %w", err) + } + + volumes, volumeMounts, err := deployer.ProcessVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) + if err != nil { + return nil, nil, fmt.Errorf("failed to process volumes: %w", err) + } + + container := corev1.Container{ + Name: "user-container", + Image: f.Deploy.Image, + Ports: []corev1.ContainerPort{ + { + ContainerPort: deployer.DefaultHTTPPort, + Protocol: corev1.ProtocolTCP, + }, + }, + Env: envVars, + EnvFrom: envFrom, + VolumeMounts: volumeMounts, + } + + deployer.SetHealthEndpoints(f, &container) + deployer.SetSecurityContext(&container) + + replicas := int32(1) + if f.Deploy.Options.Scale != nil && f.Deploy.Options.Scale.Min != nil && *f.Deploy.Options.Scale.Min > 0 { + replicas = int32(*f.Deploy.Options.Scale.Min) + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.Name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: podAnnotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + ServiceAccountName: f.Deploy.ServiceAccountName, + Volumes: volumes, + }, + }, + }, + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.Name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: labels, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.FromInt32(deployer.DefaultHTTPPort), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + return deployment, service, nil +} + +func createTriggers(ctx context.Context, f fn.Function, serviceClient v1.ServiceInterface, eventingClient clienteventingv1.KnEventingClient) error { + svc, err := serviceClient.Get(ctx, f.Name, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("failed to get the Service for Trigger: %v", err) + return err + } + + return deployer.CreateTriggers(ctx, f, svc, eventingClient) +} diff --git a/pkg/deployer/k8s/integration_test.go b/pkg/deployer/k8s/integration_test.go new file mode 100644 index 0000000000..4906a35bf7 --- /dev/null +++ b/pkg/deployer/k8s/integration_test.go @@ -0,0 +1,64 @@ +//go:build integration +// +build integration + +package k8s_test + +import ( + "testing" + + "knative.dev/func/pkg/deployer" + k8sdeployer "knative.dev/func/pkg/deployer/k8s" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + k8sremover "knative.dev/func/pkg/remover/k8s" +) + +func 
TestIntegration(t *testing.T) { + deployer.IntegrationTest_FullPath(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + lister.NewLister(false, nil, k8slister.NewGetter(false)), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} + +func TestIntegration_Deploy(t *testing.T) { + deployer.IntegrationTest_Deploy(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} + +func TestIntegration_Metadata(t *testing.T) { + deployer.IntegrationTest_Metadata(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} + +func TestIntegration_Events(t *testing.T) { + deployer.IntegrationTest_Events(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} + +func TestIntegration_Scale(t *testing.T) { + deployer.IntegrationTest_Scale(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} + +func TestIntegration_EnvsUpdate(t *testing.T) { + deployer.IntegrationTest_EnvsUpdate(t, + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(false)), + k8sremover.NewRemover(false), + k8sdescriber.NewDescriber(false), + deployer.KubernetesDeployerName) +} diff --git a/pkg/deployer/knative/deployer.go b/pkg/deployer/knative/deployer.go new file mode 100644 index 0000000000..2eb0d28ca5 --- /dev/null +++ b/pkg/deployer/knative/deployer.go @@ -0,0 +1,576 @@ +package knative + +import ( + "context" + "fmt" + "io" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + clienteventingv1 "knative.dev/client/pkg/eventing/v1" + "knative.dev/client/pkg/flags" + servingclientlib "knative.dev/client/pkg/serving" + clientservingv1 "knative.dev/client/pkg/serving/v1" + "knative.dev/client/pkg/wait" + "knative.dev/serving/pkg/apis/autoscaling" + v1 "knative.dev/serving/pkg/apis/serving/v1" + + "knative.dev/func/pkg/deployer" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" +) + +type DeployerOpt func(*Deployer) + +type Deployer struct { + // verbose logging enablement flag. + verbose bool + + decorator deployer.DeployDecorator +} + +func NewDeployer(opts ...DeployerOpt) *Deployer { + d := &Deployer{} + + for _, opt := range opts { + opt(d) + } + + return d +} + +func WithDeployerVerbose(verbose bool) DeployerOpt { + return func(d *Deployer) { + d.verbose = verbose + } +} + +func WithDeployerDecorator(decorator deployer.DeployDecorator) DeployerOpt { + return func(d *Deployer) { + d.decorator = decorator + } +}
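A minimal usage sketch for the functional options above; myDecorator is a hypothetical stand-in for any deployer.DeployDecorator implementation:

    d := NewDeployer(
        WithDeployerVerbose(true),            // stream progress to stderr
        WithDeployerDecorator(myDecorator{}), // hypothetical decorator
    )
    result, err := d.Deploy(ctx, f) // f is a previously built fn.Function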
+ +// Checks the status of the "user-container" for the ImagePullBackOff reason, meaning that +// the container image is not reachable, probably because a private registry is being used. +func (d *Deployer) isImageInPrivateRegistry(ctx context.Context, client clientservingv1.KnServingClient, f fn.Function) bool { + ksvc, err := client.GetService(ctx, f.Name) + if err != nil { + return false + } + k8sClient, err := k8s.NewKubernetesClientset() + if err != nil { + return false + } + list, err := k8sClient.CoreV1().Pods(f.Deploy.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "serving.knative.dev/revision=" + ksvc.Status.LatestCreatedRevisionName + ",serving.knative.dev/service=" + f.Name, + FieldSelector: "status.phase=Pending", + }) + if err != nil { + return false + } + if len(list.Items) != 1 { + return false + } + + for _, cont := range list.Items[0].Status.ContainerStatuses { + if cont.Name == "user-container" { + return cont.State.Waiting != nil && cont.State.Waiting.Reason == "ImagePullBackOff" + } + } + return false +} + +func onClusterFix(f fn.Function) fn.Function { + // This only exists because of a bootstrapping problem with On-Cluster + // builds: It appears that, when sending a function to be built on-cluster, + // the target namespace is not being transmitted in the pipeline + // configuration. We should figure out how to transmit this information + // to the pipeline run for initial builds. This is a new problem because + // earlier versions of this logic relied entirely on the current + // kubernetes context. + if f.Namespace == "" && f.Deploy.Namespace == "" { + f.Namespace, _ = k8s.GetDefaultNamespace() + } + return f +} + +func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResult, error) { + f = onClusterFix(f) + // Choosing f.Namespace vs f.Deploy.Namespace: + // This is minimal logic currently required of all deployer impls. + // If f.Namespace is defined, this is the (possibly new) target + // namespace. Otherwise use the last deployed namespace. Error if + // neither are set. The logic which arbitrates between the current k8s context, + // flags, environment variables and global defaults to determine the + // effective namespace is not logic for the deployer implementation, which + // should have a minimum of logic. In this case it is limited to "new ns or + // existing namespace?" + namespace := f.Namespace + if namespace == "" { + namespace = f.Deploy.Namespace + } + if namespace == "" { + return fn.DeploymentResult{}, fmt.Errorf("deployer requires either a target namespace or that the function be already deployed") + } + + // Choosing an image to deploy: + // If the service has not been deployed before, but there exists a + // build image, this build image should be used for the deploy. + // TODO: test/consider the case where it HAS been deployed, and the + // build image has been updated /since/ deployment: do we need a + // timestamp? Incrementation?
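// To illustrate the fallback below with hypothetical values: given
//
//	f.Build.Image  == "registry.example.com/fns/hello:latest"
//	f.Deploy.Image == ""   // never deployed
//
// the deploy proceeds with the build image.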
+ if f.Deploy.Image == "" { + f.Deploy.Image = f.Build.Image + } + + // Clients + client, err := knative.NewServingClient(namespace) + if err != nil { + return fn.DeploymentResult{}, err + } + eventingClient, err := knative.NewEventingClient(namespace) + if err != nil { + return fn.DeploymentResult{}, err + } + // check if 'dapr-system' namespace exists + daprInstalled := false + k8sClient, err := k8s.NewKubernetesClientset() + if err != nil { + return fn.DeploymentResult{}, err + } + _, err = k8sClient.CoreV1().Namespaces().Get(ctx, "dapr-system", metav1.GetOptions{}) + if err == nil { + daprInstalled = true + } + + var outBuff knative.SynchronizedBuffer + var out io.Writer = &outBuff + + if d.verbose { + out = os.Stderr + } + since := time.Now() + go func() { + _ = knative.GetKServiceLogs(ctx, namespace, f.Name, f.Deploy.Image, &since, out) + }() + + previousService, err := client.GetService(ctx, f.Name) + if err != nil { + if errors.IsNotFound(err) { + + referencedSecrets := sets.New[string]() + referencedConfigMaps := sets.New[string]() + referencedPVCs := sets.New[string]() + + service, err := generateNewService(f, d.decorator, daprInstalled) + if err != nil { + err = fmt.Errorf("knative deployer failed to generate the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + + err = deployer.CheckResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) + if err != nil { + err = fmt.Errorf("knative deployer failed to generate the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + + err = client.CreateService(ctx, service) + if err != nil { + err = fmt.Errorf("knative deployer failed to deploy the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + + if d.verbose { + fmt.Println("Waiting for Knative Service to become ready") + } + chprivate := make(chan bool) + cherr := make(chan error) + go func() { + private := false + for !private { + time.Sleep(5 * time.Second) + private = d.isImageInPrivateRegistry(ctx, client, f) + chprivate <- private + } + close(chprivate) + }() + go func() { + err, _ := client.WaitForService(ctx, f.Name, + clientservingv1.WaitConfig{Timeout: knative.DefaultWaitingTimeout, ErrorWindow: knative.DefaultErrorWindowTimeout}, + wait.NoopMessageCallback()) + cherr <- err + close(cherr) + }() + + presumePrivate := false + main: + // Wait for either a timeout or a container condition signaling the image is unreachable + for { + select { + case private := <-chprivate: + if private { + presumePrivate = true + break main + } + case err = <-cherr: + break main + } + } + if presumePrivate { + err := fmt.Errorf("your function image is unreachable. It is possible that your docker registry is private. 
If so, make sure you have set up pull secrets https://knative.dev/docs/developer/serving/deploying-from-private-registry") + return fn.DeploymentResult{}, err + } + if err != nil { + err = fmt.Errorf("knative deployer failed to wait for the Knative Service to become ready: %v", err) + if !d.verbose { + fmt.Fprintln(os.Stderr, "\nService output:") + _, _ = io.Copy(os.Stderr, &outBuff) + fmt.Fprintln(os.Stderr) + } + return fn.DeploymentResult{}, err + } + + route, err := client.GetRoute(ctx, f.Name) + if err != nil { + err = fmt.Errorf("knative deployer failed to get the Route: %v", err) + return fn.DeploymentResult{}, err + } + + err = createTriggers(ctx, f, client, eventingClient) + if err != nil { + return fn.DeploymentResult{}, err + } + + if d.verbose { + fmt.Printf("Function deployed in namespace %q and exposed at URL:\n%s\n", namespace, route.Status.URL.String()) + } + return fn.DeploymentResult{ + Status: fn.Deployed, + URL: route.Status.URL.String(), + Namespace: namespace, + }, nil + + } else { + err = fmt.Errorf("knative deployer failed to get the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + } else { + // Update the existing Service + referencedSecrets := sets.New[string]() + referencedConfigMaps := sets.New[string]() + referencedPVCs := sets.New[string]() + + newEnv, newEnvFrom, err := deployer.ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + if err != nil { + return fn.DeploymentResult{}, err + } + + newVolumes, newVolumeMounts, err := deployer.ProcessVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) + if err != nil { + return fn.DeploymentResult{}, err + } + + err = deployer.CheckResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) + if err != nil { + err = fmt.Errorf("knative deployer failed to update the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + + _, err = client.UpdateServiceWithRetry(ctx, f.Name, updateService(f, previousService, newEnv, newEnvFrom, newVolumes, newVolumeMounts, d.decorator, daprInstalled), 3) + if err != nil { + err = fmt.Errorf("knative deployer failed to update the Knative Service: %v", err) + return fn.DeploymentResult{}, err + } + + err, _ = client.WaitForService(ctx, f.Name, + clientservingv1.WaitConfig{Timeout: knative.DefaultWaitingTimeout, ErrorWindow: knative.DefaultErrorWindowTimeout}, + wait.NoopMessageCallback()) + if err != nil { + if !d.verbose { + fmt.Fprintln(os.Stderr, "\nService output:") + _, _ = io.Copy(os.Stderr, &outBuff) + fmt.Fprintln(os.Stderr) + } + return fn.DeploymentResult{}, err + } + + route, err := client.GetRoute(ctx, f.Name) + if err != nil { + err = fmt.Errorf("knative deployer failed to get the Route: %v", err) + return fn.DeploymentResult{}, err + } + + err = createTriggers(ctx, f, client, eventingClient) + if err != nil { + return fn.DeploymentResult{}, err + } + + return fn.DeploymentResult{ + Status: fn.Updated, + URL: route.Status.URL.String(), + Namespace: namespace, + }, nil + } +} + +func createTriggers(ctx context.Context, f fn.Function, client clientservingv1.KnServingClient, eventingClient clienteventingv1.KnEventingClient) error { + ksvc, err := client.GetService(ctx, f.Name) + if err != nil { + err = fmt.Errorf("knative deployer failed to get the Service for Trigger: %v", err) + return err + } + + return deployer.CreateTriggers(ctx, f, ksvc, eventingClient) +} + +func generateNewService(f fn.Function, decorator 
deployer.DeployDecorator, daprInstalled bool) (*v1.Service, error) { + container := corev1.Container{ + Image: f.Deploy.Image, + } + + deployer.SetSecurityContext(&container) + deployer.SetHealthEndpoints(f, &container) + + referencedSecrets := sets.New[string]() + referencedConfigMaps := sets.New[string]() + referencedPVC := sets.New[string]() + + newEnv, newEnvFrom, err := deployer.ProcessEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) + if err != nil { + return nil, err + } + container.Env = newEnv + container.EnvFrom = newEnvFrom + + newVolumes, newVolumeMounts, err := deployer.ProcessVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVC) + if err != nil { + return nil, err + } + container.VolumeMounts = newVolumeMounts + + labels, err := deployer.GenerateCommonLabels(f, decorator) + if err != nil { + return nil, err + } + + annotations := generateServiceAnnotations(f, decorator, nil, daprInstalled) + + // we need to create a separate map for Annotations specified in a Revision, + // in case we will need to specify autoscaling annotations -> these could be only in a Revision not in a Service + revisionAnnotations := make(map[string]string) + for k, v := range annotations { + revisionAnnotations[k] = v + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: f.Name, + Labels: labels, + Annotations: annotations, + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: revisionAnnotations, + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{ + container, + }, + ServiceAccountName: f.Deploy.ServiceAccountName, + Volumes: newVolumes, + }, + }, + }, + }, + }, + } + + err = setServiceOptions(&service.Spec.Template, f.Deploy.Options) + if err != nil { + return service, err + } + + return service, nil +} + +// generateServiceAnnotations creates a final map of service annotations. +// It uses the common annotation generator and adds Knative-specific annotations. +func generateServiceAnnotations(f fn.Function, d deployer.DeployDecorator, previousService *v1.Service, daprInstalled bool) (aa map[string]string) { + // Start with common annotations (includes Dapr, user annotations, and decorator) + aa = deployer.GenerateCommonAnnotations(f, d, daprInstalled, f.Deploy.DeployType) + + // Set correct creator if we are updating a function (Knative-specific) + // This annotation is immutable and must be preserved when updating + if previousService != nil { + knativeCreatorAnnotation := "serving.knative.dev/creator" + if val, ok := previousService.Annotations[knativeCreatorAnnotation]; ok { + aa[knativeCreatorAnnotation] = val + } + } + + return +} + +func updateService(f fn.Function, previousService *v1.Service, newEnv []corev1.EnvVar, newEnvFrom []corev1.EnvFromSource, newVolumes []corev1.Volume, newVolumeMounts []corev1.VolumeMount, decorator deployer.DeployDecorator, daprInstalled bool) func(service *v1.Service) (*v1.Service, error) { + return func(service *v1.Service) (*v1.Service, error) { + // Removing the name so the k8s server can fill it in with generated name, + // this prevents conflicts in Revision name when updating the KService from multiple places. 
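// (With the template name cleared, the Serving controller generates a unique
// Revision name server-side, so two concurrent updates cannot collide on an
// explicitly set name.)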
+ service.Spec.Template.Name = "" + + annotations := generateServiceAnnotations(f, decorator, previousService, daprInstalled) + + // we need to create a separate map for Annotations specified in a Revision, + // in case we will need to specify autoscaling annotations -> these could be only in a Revision not in a Service + revisionAnnotations := make(map[string]string) + for k, v := range annotations { + revisionAnnotations[k] = v + } + + service.Annotations = annotations + service.Spec.Template.Annotations = revisionAnnotations + + // I hate that we have to do this. Users should not see these values. + // It is an implementation detail. These health endpoints should not be + // a part of func.yaml since the user can only mess things up by changing + // them. Ultimately, this information is determined by the language pack. + // Which is another reason to consider having a global config to store + // some metadata which is fairly static. For example, a .config/func/global.yaml + // file could contain information about all known language packs. As new + // language packs are discovered through use of the --repository flag when + // creating a function, this information could be extracted from + // language-pack.yaml for each template and written to the local global + // config. At runtime this configuration file could be consulted. I don't + // know what this would mean for developers using the func library directly. + cp := &service.Spec.Template.Spec.Containers[0] + deployer.SetHealthEndpoints(f, cp) + + err := setServiceOptions(&service.Spec.Template, f.Deploy.Options) + if err != nil { + return service, err + } + + labels, err := deployer.GenerateCommonLabels(f, decorator) + if err != nil { + return nil, err + } + + service.Labels = labels + service.Spec.Template.Labels = labels + + err = flags.UpdateImage(&service.Spec.Template.Spec.PodSpec, f.Deploy.Image) + if err != nil { + return service, err + } + + cp.Env = newEnv + cp.EnvFrom = newEnvFrom + cp.VolumeMounts = newVolumeMounts + service.Spec.Template.Spec.Volumes = newVolumes + service.Spec.Template.Spec.ServiceAccountName = f.Deploy.ServiceAccountName + return service, nil + } +} + +// setServiceOptions sets annotations on Service Revision Template or in the Service Spec +// from values specified in function configuration options +func setServiceOptions(template *v1.RevisionTemplateSpec, options fn.Options) error { + toRemove := []string{} + toUpdate := map[string]string{} + + if options.Scale != nil { + if options.Scale.Min != nil { + toUpdate[autoscaling.MinScaleAnnotationKey] = fmt.Sprintf("%d", *options.Scale.Min) + } else { + toRemove = append(toRemove, autoscaling.MinScaleAnnotationKey) + } + + if options.Scale.Max != nil { + toUpdate[autoscaling.MaxScaleAnnotationKey] = fmt.Sprintf("%d", *options.Scale.Max) + } else { + toRemove = append(toRemove, autoscaling.MaxScaleAnnotationKey) + } + + if options.Scale.Metric != nil { + toUpdate[autoscaling.MetricAnnotationKey] = *options.Scale.Metric + } else { + toRemove = append(toRemove, autoscaling.MetricAnnotationKey) + } + + if options.Scale.Target != nil { + toUpdate[autoscaling.TargetAnnotationKey] = fmt.Sprintf("%f", *options.Scale.Target) + } else { + toRemove = append(toRemove, autoscaling.TargetAnnotationKey) + } + + if options.Scale.Utilization != nil { + toUpdate[autoscaling.TargetUtilizationPercentageKey] = fmt.Sprintf("%f", *options.Scale.Utilization) + } else { + toRemove = append(toRemove, autoscaling.TargetUtilizationPercentageKey) + } + + } + + // in the container 
always set Requests/Limits & Concurrency values based on the contents of config + template.Spec.Containers[0].Resources.Requests = nil + template.Spec.Containers[0].Resources.Limits = nil + template.Spec.ContainerConcurrency = nil + + if options.Resources != nil { + if options.Resources.Requests != nil { + template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{} + + if options.Resources.Requests.CPU != nil { + value, err := resource.ParseQuantity(*options.Resources.Requests.CPU) + if err != nil { + return err + } + template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = value + } + + if options.Resources.Requests.Memory != nil { + value, err := resource.ParseQuantity(*options.Resources.Requests.Memory) + if err != nil { + return err + } + template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = value + } + } + + if options.Resources.Limits != nil { + template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{} + + if options.Resources.Limits.CPU != nil { + value, err := resource.ParseQuantity(*options.Resources.Limits.CPU) + if err != nil { + return err + } + template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = value + } + + if options.Resources.Limits.Memory != nil { + value, err := resource.ParseQuantity(*options.Resources.Limits.Memory) + if err != nil { + return err + } + template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = value + } + + if options.Resources.Limits.Concurrency != nil { + template.Spec.ContainerConcurrency = options.Resources.Limits.Concurrency + } + } + } + + return servingclientlib.UpdateRevisionTemplateAnnotations(template, toUpdate, toRemove) +} diff --git a/pkg/deployer/knative/integration_test.go b/pkg/deployer/knative/integration_test.go new file mode 100644 index 0000000000..277a49ebba --- /dev/null +++ b/pkg/deployer/knative/integration_test.go @@ -0,0 +1,64 @@ +//go:build integration +// +build integration + +package knative_test + +import ( + "testing" + + "knative.dev/func/pkg/deployer" + knativedeployer "knative.dev/func/pkg/deployer/knative" + knativedescriber "knative.dev/func/pkg/describer/knative" + "knative.dev/func/pkg/lister" + knativelister "knative.dev/func/pkg/lister/knative" + knativeremover "knative.dev/func/pkg/remover/knative" +) + +func TestIntegration(t *testing.T) { + deployer.IntegrationTest_FullPath(t, + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(true), + lister.NewLister(true, knativelister.NewGetter(true), nil), + knativedescriber.NewDescriber(true), + deployer.KnativeDeployerName) +} + +func TestIntegration_Deploy(t *testing.T) { + deployer.IntegrationTest_Deploy(t, + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(false), + knativedescriber.NewDescriber(false), + deployer.KnativeDeployerName) +} + +func TestIntegration_Metadata(t *testing.T) { + deployer.IntegrationTest_Metadata(t, + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(false), + knativedescriber.NewDescriber(false), + deployer.KnativeDeployerName) +} + +func TestIntegration_Events(t *testing.T) { + deployer.IntegrationTest_Events(t, + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(false), + knativedescriber.NewDescriber(false), + deployer.KnativeDeployerName) +} + +func TestIntegration_Scale(t *testing.T) { + deployer.IntegrationTest_Scale(t, + 
knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(false), + knativedescriber.NewDescriber(false), + deployer.KnativeDeployerName) +} + +func TestIntegration_EnvsUpdate(t *testing.T) { + deployer.IntegrationTest_EnvsUpdate(t, + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(false), + knativedescriber.NewDescriber(false), + deployer.KnativeDeployerName) +} diff --git a/pkg/knative/describer_int_test.go b/pkg/describer/integration_test_helper.go similarity index 75% rename from pkg/knative/describer_int_test.go rename to pkg/describer/integration_test_helper.go index 8db6200a28..bf8aaa30f7 100644 --- a/pkg/knative/describer_int_test.go +++ b/pkg/describer/integration_test_helper.go @@ -1,6 +1,6 @@ //go:build integration -package knative_test +package describer import ( "context" @@ -8,26 +8,26 @@ import ( "time" "k8s.io/apimachinery/pkg/util/rand" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" + fntest "knative.dev/func/pkg/testing" + fnk8stest "knative.dev/func/pkg/testing/k8s" ) -func TestInt_Describe(t *testing.T) { +func DescribeIntegrationTest(t *testing.T, describer fn.Describer, deployer fn.Deployer, remover fn.Remover, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-describe-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := fnk8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDescriber(describer), + fn.WithDeployer(deployer), + fn.WithRemover(remover), ) f, err := client.Init(fn.Function{ @@ -35,7 +35,10 @@ func TestInt_Describe(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) } diff --git a/pkg/describer/k8s/describer.go b/pkg/describer/k8s/describer.go new file mode 100644 index 0000000000..b5699e3277 --- /dev/null +++ b/pkg/describer/k8s/describer.go @@ -0,0 +1,99 @@ +package k8s + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + "knative.dev/func/pkg/deployer" + "knative.dev/func/pkg/k8s" + "knative.dev/func/pkg/knative" + + fn "knative.dev/func/pkg/functions" +) + +type Describer struct { + verbose bool +} + +func NewDescriber(verbose bool) *Describer { + return &Describer{ + verbose: verbose, + } +} + +// Describe a function by name. Note that the consuming API uses domain style +// notation, whereas Kubernetes restricts to label-syntax, which is thus +// escaped. Therefore, as a Kubernetes implementation detail, proper full +// names have to be escaped on the way in and unescaped on the way out.
ex: +// www.example-site.com -> www-example--site-com +func (d *Describer) Describe(ctx context.Context, name, namespace string) (fn.Instance, error) { + if namespace == "" { + return fn.Instance{}, fmt.Errorf("function namespace is required when describing %q", name) + } + + clientset, err := k8s.NewKubernetesClientset() + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to create k8s client: %v", err) + } + + deploymentClient := clientset.AppsV1().Deployments(namespace) + eventingClient, err := knative.NewEventingClient(namespace) + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to create eventing client: %v", err) + } + + deployment, err := deploymentClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to get deployment %q: %v", name, err) + } + + primaryRouteURL := fmt.Sprintf("http://%s.%s.svc", name, namespace) // TODO: get correct scheme? + + description := fn.Instance{ + Name: name, + Namespace: namespace, + Route: primaryRouteURL, + Routes: []string{primaryRouteURL}, + DeployType: deployer.KubernetesDeployerName, + } + + triggers, err := eventingClient.ListTriggers(ctx) + if errors.IsNotFound(err) { + // IsNotFound -- Eventing is probably not installed on the cluster; + // report the function without subscriptions rather than failing. + return description, nil + } else if err != nil { + return fn.Instance{}, fmt.Errorf("unable to list triggers: %v", err) + } + + triggerMatches := func(t *eventingv1.Trigger) bool { + return t.Spec.Subscriber.Ref != nil && + t.Spec.Subscriber.Ref.Name == name && + t.Spec.Subscriber.Ref.APIVersion == "v1" && + t.Spec.Subscriber.Ref.Kind == "Service" + } + + subscriptions := make([]fn.Subscription, 0, len(triggers.Items)) + for _, trigger := range triggers.Items { + if triggerMatches(&trigger) { + filterAttrs := trigger.Spec.Filter.Attributes + subscription := fn.Subscription{ + Source: filterAttrs["source"], + Type: filterAttrs["type"], + Broker: trigger.Spec.Broker, + } + subscriptions = append(subscriptions, subscription) + } + } + + description.Subscriptions = subscriptions + + // Populate labels from the deployment + if deployment.Labels != nil { + description.Labels = deployment.Labels + } + + return description, nil +} diff --git a/pkg/describer/k8s/integration_test.go b/pkg/describer/k8s/integration_test.go new file mode 100644 index 0000000000..98cd1f534a --- /dev/null +++ b/pkg/describer/k8s/integration_test.go @@ -0,0 +1,21 @@ +//go:build integration + +package k8s_test + +import ( + "testing" + + "knative.dev/func/pkg/deployer" + k8sdeployer "knative.dev/func/pkg/deployer/k8s" + "knative.dev/func/pkg/describer" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + k8sremover "knative.dev/func/pkg/remover/k8s" +) + +func TestInt_Describe(t *testing.T) { + describer.DescribeIntegrationTest(t, + k8sdescriber.NewDescriber(true), + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(true)), + k8sremover.NewRemover(true), + deployer.KubernetesDeployerName) +} diff --git a/pkg/knative/describer.go b/pkg/describer/knative/describer.go similarity index 91% rename from pkg/knative/describer.go rename to pkg/describer/knative/describer.go index 6bfd8919d0..74ae287cc2 100644 --- a/pkg/knative/describer.go +++ b/pkg/describer/knative/describer.go @@ -7,6 +7,8 @@ import ( "k8s.io/apimachinery/pkg/api/errors" clientservingv1 "knative.dev/client/pkg/serving/v1" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + "knative.dev/func/pkg/deployer" + "knative.dev/func/pkg/knative" fn "knative.dev/func/pkg/functions" )
@@ -32,12 +34,12 @@ func (d *Describer) Describe(ctx context.Context, name, namespace string) (descr return } - servingClient, err := NewServingClient(namespace) + servingClient, err := knative.NewServingClient(namespace) if err != nil { return } - eventingClient, err := NewEventingClient(namespace) + eventingClient, err := knative.NewEventingClient(namespace) if err != nil { return } @@ -66,6 +68,7 @@ func (d *Describer) Describe(ctx context.Context, name, namespace string) (descr description.Namespace = namespace description.Route = primaryRouteURL description.Routes = routeURLs + description.DeployType = deployer.KnativeDeployerName triggers, err := eventingClient.ListTriggers(ctx) // IsNotFound -- Eventing is probably not installed on the cluster diff --git a/pkg/describer/knative/integration_test.go b/pkg/describer/knative/integration_test.go new file mode 100644 index 0000000000..f570086b13 --- /dev/null +++ b/pkg/describer/knative/integration_test.go @@ -0,0 +1,21 @@ +//go:build integration + +package knative_test + +import ( + "testing" + + "knative.dev/func/pkg/deployer" + knativedeployer "knative.dev/func/pkg/deployer/knative" + "knative.dev/func/pkg/describer" + knativedescriber "knative.dev/func/pkg/describer/knative" + knativeremover "knative.dev/func/pkg/remover/knative" +) + +func TestInt_Describe(t *testing.T) { + describer.DescribeIntegrationTest(t, + knativedescriber.NewDescriber(true), + knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)), + knativeremover.NewRemover(true), + deployer.KnativeDeployerName) +} diff --git a/pkg/describer/multi_describer.go b/pkg/describer/multi_describer.go new file mode 100644 index 0000000000..9c011d0d19 --- /dev/null +++ b/pkg/describer/multi_describer.go @@ -0,0 +1,56 @@ +package describer + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/func/pkg/deployer" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" +) + +type MultiDescriber struct { + verbose bool + + knativeDescriber fn.Describer + kubernetesDescriber fn.Describer +} + +func NewMultiDescriber(verbose bool, knativeDescriber, kubernetesDescriber fn.Describer) *MultiDescriber { + return &MultiDescriber{ + verbose: verbose, + knativeDescriber: knativeDescriber, + kubernetesDescriber: kubernetesDescriber, + } +} + +// Describe a function by name +func (d *MultiDescriber) Describe(ctx context.Context, name, namespace string) (fn.Instance, error) { + clientset, err := k8s.NewKubernetesClientset() + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to create k8s client: %v", err) + } + + serviceClient := clientset.CoreV1().Services(namespace) + + service, err := serviceClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fn.Instance{}, fmt.Errorf("unable to get service for function: %v", err) + } + + deployType, ok := service.Annotations[deployer.DeployTypeAnnotation] + if !ok { + // fall back to the Knative Describer in case no annotation is given + return d.knativeDescriber.Describe(ctx, name, namespace) + } + + switch deployType { + case deployer.KnativeDeployerName: + return d.knativeDescriber.Describe(ctx, name, namespace) + case deployer.KubernetesDeployerName: + return d.kubernetesDescriber.Describe(ctx, name, namespace) + default: + return fn.Instance{}, fmt.Errorf("unknown deploy type: %s", deployType) + } +}
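A minimal usage sketch of the dispatch above, assuming the deployers annotate the Services they create with deployer.DeployTypeAnnotation; the function name and namespace here are illustrative:

    md := describer.NewMultiDescriber(false,
        knativedescriber.NewDescriber(false),
        k8sdescriber.NewDescriber(false))
    inst, err := md.Describe(ctx, "hello", "default")
    // inst.DeployType is deployer.KnativeDeployerName or
    // deployer.KubernetesDeployerName, per the Service's annotation.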
diff --git a/pkg/functions/client.go b/pkg/functions/client.go index 6bb46612e3..f627d023fe 100644 --- a/pkg/functions/client.go +++ b/pkg/functions/client.go @@ -149,11 +149,12 @@ type Lister interface { } type ListItem struct { - Name string `json:"name" yaml:"name"` - Namespace string `json:"namespace" yaml:"namespace"` - Runtime string `json:"runtime" yaml:"runtime"` - URL string `json:"url" yaml:"url"` - Ready string `json:"ready" yaml:"ready"` + Name string `json:"name" yaml:"name"` + Namespace string `json:"namespace" yaml:"namespace"` + Runtime string `json:"runtime" yaml:"runtime"` + URL string `json:"url" yaml:"url"` + Ready string `json:"ready" yaml:"ready"` + DeployType string `json:"deploy_type" yaml:"deploy_type"` } // Describer of function instances @@ -180,6 +181,7 @@ type Instance struct { Name string `json:"name" yaml:"name"` Image string `json:"image" yaml:"image"` Namespace string `json:"namespace" yaml:"namespace"` + DeployType string `json:"deploy_type" yaml:"deploy_type"` Subscriptions []Subscription `json:"subscriptions" yaml:"subscriptions"` Labels map[string]string `json:"labels" yaml:"labels" xml:"-"` } diff --git a/pkg/functions/client_int_test.go b/pkg/functions/client_int_test.go index 09c2abf84a..45e8307145 100644 --- a/pkg/functions/client_int_test.go +++ b/pkg/functions/client_int_test.go @@ -18,8 +18,18 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" + "knative.dev/func/pkg/describer" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + knativedescriber "knative.dev/func/pkg/describer/knative" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + knativelister "knative.dev/func/pkg/lister/knative" + "knative.dev/func/pkg/remover" + k8sremover "knative.dev/func/pkg/remover/k8s" + knativeremover "knative.dev/func/pkg/remover/knative" "knative.dev/func/pkg/builders/s2i" + knativedeployer "knative.dev/func/pkg/deployer/knative" "knative.dev/func/pkg/docker" fn "knative.dev/func/pkg/functions" "knative.dev/func/pkg/knative" @@ -64,11 +74,11 @@ const ( var ( Go = getEnvAsBin("FUNC_INT_GO", "go") - Git = getEnvAsBin("FUNC_INT_GIT", "git") + GitBin = getEnvAsBin("FUNC_INT_GIT", "git") Kubeconfig = getEnvAsPath("FUNC_INT_KUBECONFIG", DefaultIntTestKubeconfig) Verbose = getEnvAsBool("FUNC_INT_VERBOSE", DefaultIntTestVerbose) - Registry = getEnv("FUNC_INT_REGISTRY", DefaultIntTestRegistry) Home, _ = filepath.Abs(DefaultIntTestHome) + //Registry = // see testing package (it's shared) ) // containsInstance checks if the list includes the given instance. @@ -637,12 +647,12 @@ func resetEnv() { os.Setenv("HOME", Home) os.Setenv("KUBECONFIG", Kubeconfig) os.Setenv("FUNC_GO", Go) - os.Setenv("FUNC_GIT", Git) + os.Setenv("FUNC_GIT", GitBin) os.Setenv("FUNC_VERBOSE", fmt.Sprintf("%t", Verbose)) // The Registry will be set either during first-time setup using the // global config, or already defaulted by the user via environment variable. - os.Setenv("FUNC_REGISTRY", Registry) + os.Setenv("FUNC_REGISTRY", Registry()) // The following host-builder related settings will become the defaults // once the host builder supports the core runtimes.
Setting them here in @@ -660,10 +670,10 @@ func newClient(verbose bool) *fn.Client { fn.WithRegistry(DefaultIntTestRegistry), fn.WithBuilder(oci.NewBuilder("", verbose)), fn.WithPusher(oci.NewPusher(true, true, verbose)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(verbose))), - fn.WithDescriber(knative.NewDescriber(verbose)), - fn.WithRemover(knative.NewRemover(verbose)), - fn.WithLister(knative.NewLister(verbose)), + fn.WithDeployer(knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(verbose))), + fn.WithDescriber(describer.NewMultiDescriber(verbose, knativedescriber.NewDescriber(verbose), k8sdescriber.NewDescriber(verbose))), + fn.WithRemover(remover.NewMultiRemover(verbose, knativeremover.NewRemover(verbose), k8sremover.NewRemover(verbose))), + fn.WithLister(lister.NewLister(verbose, knativelister.NewGetter(verbose), k8slister.NewGetter(verbose))), fn.WithVerbose(verbose), ) } @@ -672,10 +682,10 @@ func newClient(verbose bool) *fn.Client { func newClientWithS2i(verbose bool) *fn.Client { builder := s2i.NewBuilder(s2i.WithVerbose(verbose)) pusher := docker.NewPusher(docker.WithVerbose(verbose)) - deployer := knative.NewDeployer(knative.WithDeployerVerbose(verbose)) - describer := knative.NewDescriber(verbose) - remover := knative.NewRemover(verbose) - lister := knative.NewLister(verbose) + deployer := knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(verbose)) + describer := describer.NewMultiDescriber(verbose, knativedescriber.NewDescriber(verbose), k8sdescriber.NewDescriber(verbose)) + remover := remover.NewMultiRemover(verbose, knativeremover.NewRemover(verbose), k8sremover.NewRemover(verbose)) + lister := lister.NewLister(verbose, knativelister.NewGetter(verbose), k8slister.NewGetter(verbose)) return fn.New( fn.WithRegistry(DefaultIntTestRegistry), diff --git a/pkg/functions/function.go b/pkg/functions/function.go index e6b50128e6..2629e498a6 100644 --- a/pkg/functions/function.go +++ b/pkg/functions/function.go @@ -208,6 +208,10 @@ type DeploySpec struct { // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ServiceAccountName string `yaml:"serviceAccountName,omitempty"` + // DeployType specifies the type of deployment to use: "knative" or "deployment". + // Defaults to "knative" for backwards compatibility. + DeployType string `yaml:"deployType,omitempty" jsonschema:"enum=knative,enum=deployment"` + Subscriptions []KnativeSubscription `yaml:"subscriptions,omitempty"` }
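A minimal sketch of selecting the new deploy type through the library API; the equivalent func.yaml entry would be deployType: deployment, and the CLI counterpart is the --deploy-type flag added above:

    f := fn.Function{
        Deploy: fn.DeploySpec{
            DeployType: "deployment", // or "knative" (the default)
        },
    }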
"knative.dev/func/pkg/testing" ) diff --git a/pkg/functions/job_test.go b/pkg/functions/job_test.go index 191aaa8101..0cf2de6162 100644 --- a/pkg/functions/job_test.go +++ b/pkg/functions/job_test.go @@ -1,10 +1,11 @@ -package functions +package functions_test import ( "context" "errors" "testing" + . "knative.dev/func/pkg/functions" . "knative.dev/func/pkg/testing" ) diff --git a/pkg/k8s/logs.go b/pkg/k8s/logs.go index a8f5cc4cb6..1cb4a824c1 100644 --- a/pkg/k8s/logs.go +++ b/pkg/k8s/logs.go @@ -3,9 +3,15 @@ package k8s import ( "bytes" "context" + "fmt" "io" + "sync" + "time" + "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" ) // GetPodLogs returns logs from a specified Container in a Pod, if container is empty string, @@ -33,3 +39,107 @@ func GetPodLogs(ctx context.Context, namespace, podName, containerName string) ( return buffer.String(), nil } + +// GetPodLogsBySelector will get logs of a pod. +// +// It will do so by gathering logs of the given container of all affiliated pods. +// In addition, filtering on image can be done so only logs for given image are logged. +// +// This function runs as long as the passed context is active (i.e. it is required cancel the context to stop log gathering). +func GetPodLogsBySelector(ctx context.Context, namespace, labelSelector, containerName, image string, since *time.Time, out io.Writer) error { + client, namespace, err := NewClientAndResolvedNamespace(namespace) + if err != nil { + return fmt.Errorf("cannot create k8s client: %w", err) + } + + pods := client.CoreV1().Pods(namespace) + + podListOpts := metav1.ListOptions{ + Watch: true, + LabelSelector: labelSelector, + } + + w, err := pods.Watch(ctx, podListOpts) + if err != nil { + return fmt.Errorf("cannot create watch: %w", err) + } + defer w.Stop() + + beingProcessed := make(map[string]bool) + var beingProcessedMu sync.Mutex + + copyLogs := func(pod corev1.Pod) error { + defer func() { + beingProcessedMu.Lock() + delete(beingProcessed, pod.Name) + beingProcessedMu.Unlock() + }() + podLogOpts := corev1.PodLogOptions{ + Container: containerName, + Follow: true, + } + if since != nil { + sinceTime := metav1.NewTime(*since) + podLogOpts.SinceTime = &sinceTime + } + req := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts) + + r, e := req.Stream(ctx) + if e != nil { + return fmt.Errorf("cannot get stream: %w", e) + } + defer r.Close() + _, e = io.Copy(out, r) + if e != nil { + return fmt.Errorf("error copying logs: %w", e) + } + return nil + } + + mayReadLogs := func(pod corev1.Pod) bool { + for _, status := range pod.Status.ContainerStatuses { + if status.Name == containerName { + return status.State.Running != nil || status.State.Terminated != nil + } + } + return false + } + + getImage := func(pod corev1.Pod) string { + for _, ctr := range pod.Spec.Containers { + if ctr.Name == containerName { + return ctr.Image + } + } + return "" + } + + var eg errgroup.Group + + for event := range w.ResultChan() { + if event.Type == watch.Modified || event.Type == watch.Added { + pod := *event.Object.(*corev1.Pod) + + beingProcessedMu.Lock() + _, loggingAlready := beingProcessed[pod.Name] + beingProcessedMu.Unlock() + + if !loggingAlready && (image == "" || image == getImage(pod)) && mayReadLogs(pod) { + + beingProcessedMu.Lock() + beingProcessed[pod.Name] = true + beingProcessedMu.Unlock() + + // Capture pod value for the goroutine to avoid closure over loop variable + pod := pod + eg.Go(func() error { 
diff --git a/pkg/k8s/wait.go b/pkg/k8s/wait.go new file mode 100644 index 0000000000..84fbcdadda --- /dev/null +++ b/pkg/k8s/wait.go @@ -0,0 +1,134 @@ +package k8s + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +// WaitForDeploymentAvailable waits for a specific deployment to be fully available. +// A deployment is considered available when: +// - The number of available replicas matches the desired replicas +// - All replicas are updated to the latest version +// - There are no unavailable replicas +// - All pods associated with the deployment are running +func WaitForDeploymentAvailable(ctx context.Context, clientset *kubernetes.Clientset, namespace, deploymentName string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return checkIfDeploymentIsAvailable(ctx, clientset, deployment) + }) +} + +func WaitForDeploymentAvailableBySelector(ctx context.Context, clientset *kubernetes.Clientset, namespace, selector string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, func(ctx context.Context) (bool, error) { + deployments, err := clientset.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return false, err + } + + for _, deployment := range deployments.Items { + ready, err := checkIfDeploymentIsAvailable(ctx, clientset, &deployment) + if err != nil || !ready { + return ready, err + } + } + + return true, nil + }) +} + +func checkIfDeploymentIsAvailable(ctx context.Context, clientset *kubernetes.Clientset, deployment *appsv1.Deployment) (bool, error) { + // Check if the deployment has the desired number of replicas + if deployment.Spec.Replicas == nil { + return false, fmt.Errorf("deployment %s has nil replicas", deployment.Name) + } + + desiredReplicas := *deployment.Spec.Replicas + + // Check if deployment is available + for _, condition := range deployment.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { + // Also verify that all replicas are updated, ready, and available + if deployment.Status.UpdatedReplicas == desiredReplicas && + deployment.Status.ReadyReplicas == desiredReplicas && + deployment.Status.AvailableReplicas == desiredReplicas && + deployment.Status.UnavailableReplicas == 0 { + + // Get the current ReplicaSet for this deployment + replicaSets, err := clientset.AppsV1().ReplicaSets(deployment.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector), + }) + if err != nil { + return false, err + } + + // Find the current active ReplicaSet (the one with desired replicas > 0) + var currentPodTemplateHash string + for _, rs := range replicaSets.Items { + if rs.Spec.Replicas != nil && *rs.Spec.Replicas > 0 { + // The pod-template-hash label identifies pods from this ReplicaSet + if hash, ok := rs.Labels["pod-template-hash"]; ok { + currentPodTemplateHash = hash + break
} + } + } + + if currentPodTemplateHash == "" { + return false, fmt.Errorf("could not find current pod-template-hash for deployment %s", deployment.Name) + } + + // Verify all pods are from the current ReplicaSet and are running + labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector) + pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + return false, err + } + + // Count ready pods from current ReplicaSet only + readyPods := 0 + for _, pod := range pods.Items { + // Check if pod belongs to current ReplicaSet + podHash, hasPodHash := pod.Labels["pod-template-hash"] + if !hasPodHash || podHash != currentPodTemplateHash { + // Pod is from an old ReplicaSet - deployment not fully rolled out + if pod.DeletionTimestamp == nil { + // Old pod still exists and not being deleted + return false, nil + } + continue + } + + // Check if pod is ready + for _, podCondition := range pod.Status.Conditions { + if podCondition.Type == corev1.PodReady && podCondition.Status == corev1.ConditionTrue { + readyPods++ + break + } + } + } + + // Ensure we have the desired number of running pods from current ReplicaSet + if int32(readyPods) == desiredReplicas { + return true, nil + } + } + } + } + + return false, nil +} diff --git a/pkg/knative/deployer.go b/pkg/knative/deployer.go deleted file mode 100644 index f082c1f4b6..0000000000 --- a/pkg/knative/deployer.go +++ /dev/null @@ -1,1120 +0,0 @@ -package knative - -import ( - "context" - "fmt" - "io" - "os" - "regexp" - "strings" - "time" - - clienteventingv1 "knative.dev/client/pkg/eventing/v1" - eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" - duckv1 "knative.dev/pkg/apis/duck/v1" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/client/pkg/flags" - servingclientlib "knative.dev/client/pkg/serving" - clientservingv1 "knative.dev/client/pkg/serving/v1" - "knative.dev/client/pkg/wait" - "knative.dev/serving/pkg/apis/autoscaling" - v1 "knative.dev/serving/pkg/apis/serving/v1" - - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/k8s" -) - -const LIVENESS_ENDPOINT = "/health/liveness" -const READINESS_ENDPOINT = "/health/readiness" - -type DeployDecorator interface { - UpdateAnnotations(fn.Function, map[string]string) map[string]string - UpdateLabels(fn.Function, map[string]string) map[string]string -} - -type DeployerOpt func(*Deployer) - -type Deployer struct { - // verbose logging enablement flag. - verbose bool - - decorator DeployDecorator -} - -// ActiveNamespace attempts to read the Kubernetes active namespace. -// Missing configs or not having an active Kubernetes configuration are -// equivalent to having no default namespace (empty string). 
-func ActiveNamespace() string { - // Get client config, if it exists, and from that the namespace - ns, _, err := k8s.GetClientConfig().Namespace() - if err != nil { - fmt.Fprintf(os.Stderr, "Warning: unable to get active namespace: %v\n", err) - } - return ns -} - -func NewDeployer(opts ...DeployerOpt) *Deployer { - d := &Deployer{} - - for _, opt := range opts { - opt(d) - } - - return d -} - -func WithDeployerVerbose(verbose bool) DeployerOpt { - return func(d *Deployer) { - d.verbose = verbose - } -} - -func WithDeployerDecorator(decorator DeployDecorator) DeployerOpt { - return func(d *Deployer) { - d.decorator = decorator - } -} - -// Checks the status of the "user-container" for the ImagePullBackOff reason meaning that -// the container image is not reachable probably because a private registry is being used. -func (d *Deployer) isImageInPrivateRegistry(ctx context.Context, client clientservingv1.KnServingClient, f fn.Function) bool { - ksvc, err := client.GetService(ctx, f.Name) - if err != nil { - return false - } - k8sClient, err := k8s.NewKubernetesClientset() - if err != nil { - return false - } - list, err := k8sClient.CoreV1().Pods(f.Deploy.Namespace).List(ctx, metav1.ListOptions{ - LabelSelector: "serving.knative.dev/revision=" + ksvc.Status.LatestCreatedRevisionName + ",serving.knative.dev/service=" + f.Name, - FieldSelector: "status.phase=Pending", - }) - if err != nil { - return false - } - if len(list.Items) != 1 { - return false - } - - for _, cont := range list.Items[0].Status.ContainerStatuses { - if cont.Name == "user-container" { - return cont.State.Waiting != nil && cont.State.Waiting.Reason == "ImagePullBackOff" - } - } - return false -} - -func onClusterFix(f fn.Function) fn.Function { - // This only exists because of a bootstapping problem with On-Cluster - // builds: It appears that, when sending a function to be built on-cluster - // the target namespace is not being transmitted in the pipeline - // configuration. We should figure out how to transmit this information - // to the pipeline run for initial builds. This is a new problem because - // earlier versions of this logic relied entirely on the current - // kubernetes context. - if f.Namespace == "" && f.Deploy.Namespace == "" { - f.Namespace, _ = k8s.GetDefaultNamespace() - } - return f -} - -func (d *Deployer) Deploy(ctx context.Context, f fn.Function) (fn.DeploymentResult, error) { - f = onClusterFix(f) - // Choosing f.Namespace vs f.Deploy.Namespace: - // This is minimal logic currently required of all deployer impls. - // If f.Namespace is defined, this is the (possibly new) target - // namespace. Otherwise use the last deployed namespace. Error if - // neither are set. The logic which arbitrates between curret k8s context, - // flags, environment variables and global defaults to determine the - // effective namespace is not logic for the deployer implementation, which - // should have a minimum of logic. In this case limited to "new ns or - // existing namespace? - namespace := f.Namespace - if namespace == "" { - namespace = f.Deploy.Namespace - } - if namespace == "" { - return fn.DeploymentResult{}, fmt.Errorf("deployer requires either a target namespace or that the function be already deployed") - } - - // Choosing an image to deploy: - // If the service has not been deployed before, but there exists a - // build image, this build image should be used for the deploy. 
- // TODO: test/consdier the case where it HAS been deployed, and the - // build image has been updated /since/ deployment: do we need a - // timestamp? Incrementation? - if f.Deploy.Image == "" { - f.Deploy.Image = f.Build.Image - } - - // Clients - client, err := NewServingClient(namespace) - if err != nil { - return fn.DeploymentResult{}, err - } - eventingClient, err := NewEventingClient(namespace) - if err != nil { - return fn.DeploymentResult{}, err - } - // check if 'dapr-system' namespace exists - daprInstalled := false - k8sClient, err := k8s.NewKubernetesClientset() - if err != nil { - return fn.DeploymentResult{}, err - } - _, err = k8sClient.CoreV1().Namespaces().Get(ctx, "dapr-system", metav1.GetOptions{}) - if err == nil { - daprInstalled = true - } - - var outBuff SynchronizedBuffer - var out io.Writer = &outBuff - - if d.verbose { - out = os.Stderr - } - since := time.Now() - go func() { - _ = GetKServiceLogs(ctx, namespace, f.Name, f.Deploy.Image, &since, out) - }() - - previousService, err := client.GetService(ctx, f.Name) - if err != nil { - if errors.IsNotFound(err) { - - referencedSecrets := sets.New[string]() - referencedConfigMaps := sets.New[string]() - referencedPVCs := sets.New[string]() - - service, err := generateNewService(f, d.decorator, daprInstalled) - if err != nil { - err = fmt.Errorf("knative deployer failed to generate the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - - err = checkResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) - if err != nil { - err = fmt.Errorf("knative deployer failed to generate the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - - err = client.CreateService(ctx, service) - if err != nil { - err = fmt.Errorf("knative deployer failed to deploy the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - - if d.verbose { - fmt.Println("Waiting for Knative Service to become ready") - } - chprivate := make(chan bool) - cherr := make(chan error) - go func() { - private := false - for !private { - time.Sleep(5 * time.Second) - private = d.isImageInPrivateRegistry(ctx, client, f) - chprivate <- private - } - close(chprivate) - }() - go func() { - err, _ := client.WaitForService(ctx, f.Name, - clientservingv1.WaitConfig{Timeout: DefaultWaitingTimeout, ErrorWindow: DefaultErrorWindowTimeout}, - wait.NoopMessageCallback()) - cherr <- err - close(cherr) - }() - - presumePrivate := false - main: - // Wait for either a timeout or a container condition signaling the image is unreachable - for { - select { - case private := <-chprivate: - if private { - presumePrivate = true - break main - } - case err = <-cherr: - break main - } - } - if presumePrivate { - err := fmt.Errorf("your function image is unreachable. It is possible that your docker registry is private. 
If so, make sure you have set up pull secrets https://knative.dev/docs/developer/serving/deploying-from-private-registry") - return fn.DeploymentResult{}, err - } - if err != nil { - err = fmt.Errorf("knative deployer failed to wait for the Knative Service to become ready: %v", err) - if !d.verbose { - fmt.Fprintln(os.Stderr, "\nService output:") - _, _ = io.Copy(os.Stderr, &outBuff) - fmt.Fprintln(os.Stderr) - } - return fn.DeploymentResult{}, err - } - - route, err := client.GetRoute(ctx, f.Name) - if err != nil { - err = fmt.Errorf("knative deployer failed to get the Route: %v", err) - return fn.DeploymentResult{}, err - } - - err = createTriggers(ctx, f, client, eventingClient) - if err != nil { - return fn.DeploymentResult{}, err - } - - if d.verbose { - fmt.Printf("Function deployed in namespace %q and exposed at URL:\n%s\n", namespace, route.Status.URL.String()) - } - return fn.DeploymentResult{ - Status: fn.Deployed, - URL: route.Status.URL.String(), - Namespace: namespace, - }, nil - - } else { - err = fmt.Errorf("knative deployer failed to get the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - } else { - // Update the existing Service - referencedSecrets := sets.New[string]() - referencedConfigMaps := sets.New[string]() - referencedPVCs := sets.New[string]() - - newEnv, newEnvFrom, err := processEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) - if err != nil { - return fn.DeploymentResult{}, err - } - - newVolumes, newVolumeMounts, err := processVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVCs) - if err != nil { - return fn.DeploymentResult{}, err - } - - err = checkResourcesArePresent(ctx, namespace, &referencedSecrets, &referencedConfigMaps, &referencedPVCs, f.Deploy.ServiceAccountName) - if err != nil { - err = fmt.Errorf("knative deployer failed to update the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - - _, err = client.UpdateServiceWithRetry(ctx, f.Name, updateService(f, previousService, newEnv, newEnvFrom, newVolumes, newVolumeMounts, d.decorator, daprInstalled), 3) - if err != nil { - err = fmt.Errorf("knative deployer failed to update the Knative Service: %v", err) - return fn.DeploymentResult{}, err - } - - err, _ = client.WaitForService(ctx, f.Name, - clientservingv1.WaitConfig{Timeout: DefaultWaitingTimeout, ErrorWindow: DefaultErrorWindowTimeout}, - wait.NoopMessageCallback()) - if err != nil { - if !d.verbose { - fmt.Fprintln(os.Stderr, "\nService output:") - _, _ = io.Copy(os.Stderr, &outBuff) - fmt.Fprintln(os.Stderr) - } - return fn.DeploymentResult{}, err - } - - route, err := client.GetRoute(ctx, f.Name) - if err != nil { - err = fmt.Errorf("knative deployer failed to get the Route: %v", err) - return fn.DeploymentResult{}, err - } - - err = createTriggers(ctx, f, client, eventingClient) - if err != nil { - return fn.DeploymentResult{}, err - } - - return fn.DeploymentResult{ - Status: fn.Updated, - URL: route.Status.URL.String(), - Namespace: namespace, - }, nil - } -} - -func createTriggers(ctx context.Context, f fn.Function, client clientservingv1.KnServingClient, eventingClient clienteventingv1.KnEventingClient) error { - ksvc, err := client.GetService(ctx, f.Name) - if err != nil { - err = fmt.Errorf("knative deployer failed to get the Service for Trigger: %v", err) - return err - } - - fmt.Fprintf(os.Stderr, "🎯 Creating Triggers on the cluster\n") - - for i, sub := range f.Deploy.Subscriptions { - // create the filter: - attributes := make(map[string]string) - for 
key, value := range sub.Filters { - attributes[key] = value - } - - err = eventingClient.CreateTrigger(ctx, &eventingv1.Trigger{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-function-trigger-%d", ksvc.Name, i), - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: ksvc.APIVersion, - Kind: ksvc.Kind, - Name: ksvc.GetName(), - UID: ksvc.GetUID(), - }, - }, - }, - Spec: eventingv1.TriggerSpec{ - Broker: sub.Source, - - Subscriber: duckv1.Destination{ - Ref: &duckv1.KReference{ - APIVersion: ksvc.APIVersion, - Kind: ksvc.Kind, - Name: ksvc.Name, - }}, - - Filter: &eventingv1.TriggerFilter{ - Attributes: attributes, - }, - }, - }) - if err != nil && !errors.IsAlreadyExists(err) { - err = fmt.Errorf("knative deployer failed to create the Trigger: %v", err) - return err - } - } - return nil -} - -func probeFor(url string) *corev1.Probe { - return &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: url, - }, - }, - } -} - -func setHealthEndpoints(f fn.Function, c *corev1.Container) *corev1.Container { - // Set the defaults - c.LivenessProbe = probeFor(LIVENESS_ENDPOINT) - c.ReadinessProbe = probeFor(READINESS_ENDPOINT) - - // If specified in func.yaml, the provided values override the defaults - if f.Deploy.HealthEndpoints.Liveness != "" { - c.LivenessProbe = probeFor(f.Deploy.HealthEndpoints.Liveness) - } - if f.Deploy.HealthEndpoints.Readiness != "" { - c.ReadinessProbe = probeFor(f.Deploy.HealthEndpoints.Readiness) - } - return c -} - -func generateNewService(f fn.Function, decorator DeployDecorator, daprInstalled bool) (*v1.Service, error) { - // set defaults to the values that avoid the following warning "Kubernetes default value is insecure, Knative may default this to secure in a future release" - runAsNonRoot := true - allowPrivilegeEscalation := false - capabilities := corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - } - seccompProfile := corev1.SeccompProfile{ - Type: corev1.SeccompProfileType("RuntimeDefault"), - } - container := corev1.Container{ - Image: f.Deploy.Image, - SecurityContext: &corev1.SecurityContext{ - RunAsNonRoot: &runAsNonRoot, - AllowPrivilegeEscalation: &allowPrivilegeEscalation, - Capabilities: &capabilities, - SeccompProfile: &seccompProfile, - }, - } - setHealthEndpoints(f, &container) - - referencedSecrets := sets.New[string]() - referencedConfigMaps := sets.New[string]() - referencedPVC := sets.New[string]() - - newEnv, newEnvFrom, err := processEnvs(f.Run.Envs, &referencedSecrets, &referencedConfigMaps) - if err != nil { - return nil, err - } - container.Env = newEnv - container.EnvFrom = newEnvFrom - - newVolumes, newVolumeMounts, err := processVolumes(f.Run.Volumes, &referencedSecrets, &referencedConfigMaps, &referencedPVC) - if err != nil { - return nil, err - } - container.VolumeMounts = newVolumeMounts - - labels, err := generateServiceLabels(f, decorator) - if err != nil { - return nil, err - } - - annotations := generateServiceAnnotations(f, decorator, nil, daprInstalled) - - // we need to create a separate map for Annotations specified in a Revision, - // in case we will need to specify autoscaling annotations -> these could be only in a Revision not in a Service - revisionAnnotations := make(map[string]string) - for k, v := range annotations { - revisionAnnotations[k] = v - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.Name, - Labels: labels, - Annotations: annotations, - }, - Spec: v1.ServiceSpec{ - ConfigurationSpec: v1.ConfigurationSpec{ - Template: 
v1.RevisionTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - Annotations: revisionAnnotations, - }, - Spec: v1.RevisionSpec{ - PodSpec: corev1.PodSpec{ - Containers: []corev1.Container{ - container, - }, - ServiceAccountName: f.Deploy.ServiceAccountName, - Volumes: newVolumes, - }, - }, - }, - }, - }, - } - - err = setServiceOptions(&service.Spec.Template, f.Deploy.Options) - if err != nil { - return service, err - } - - return service, nil -} - -// generateServiceLabels creates a final map of service labels based -// on the function's defined labels plus the -// application of any provided label decorator. -func generateServiceLabels(f fn.Function, d DeployDecorator) (ll map[string]string, err error) { - ll, err = f.LabelsMap() - if err != nil { - return - } - - if f.Domain != "" { - ll["func.domain"] = f.Domain - } - - if d != nil { - ll = d.UpdateLabels(f, ll) - } - - return -} - -// generateServiceAnnotations creates a final map of service annotations based -// on static defaults plus the function's defined annotations plus the -// application of any provided annotation decorator. -// Also sets `serving.knative.dev/creator` to a value specified in annotations in the service reference in the previousService parameter, -// this is beneficial when we are updating a service to pass validation on Knative side - the annotation is immutable. -func generateServiceAnnotations(f fn.Function, d DeployDecorator, previousService *v1.Service, daprInstalled bool) (aa map[string]string) { - aa = make(map[string]string) - - if daprInstalled { - // Enables Dapr support. - // Has no effect unless the target cluster has Dapr control plane installed. - for k, v := range daprAnnotations(f.Name) { - aa[k] = v - } - } - - // Function-defined annotations - for k, v := range f.Deploy.Annotations { - aa[k] = v - } - - // Decorator - if d != nil { - aa = d.UpdateAnnotations(f, aa) - } - - // Set correct creator if we are updating a function - if previousService != nil { - knativeCreatorAnnotation := "serving.knative.dev/creator" - if val, ok := previousService.Annotations[knativeCreatorAnnotation]; ok { - aa[knativeCreatorAnnotation] = val - } - } - - return -} - -// annotations which, if included and Dapr control plane is installed in -// the target cluster will result in a sidecar exposing the dapr HTTP API -// on localhost:3500 and metrics on 9092 -func daprAnnotations(appid string) map[string]string { - // make optional - aa := make(map[string]string) - aa["dapr.io/app-id"] = appid - aa["dapr.io/enabled"] = DaprEnabled - aa["dapr.io/metrics-port"] = DaprMetricsPort - aa["dapr.io/app-port"] = "8080" - aa["dapr.io/enable-api-logging"] = DaprEnableAPILogging - return aa -} - -func updateService(f fn.Function, previousService *v1.Service, newEnv []corev1.EnvVar, newEnvFrom []corev1.EnvFromSource, newVolumes []corev1.Volume, newVolumeMounts []corev1.VolumeMount, decorator DeployDecorator, daprInstalled bool) func(service *v1.Service) (*v1.Service, error) { - return func(service *v1.Service) (*v1.Service, error) { - // Removing the name so the k8s server can fill it in with generated name, - // this prevents conflicts in Revision name when updating the KService from multiple places. 
- service.Spec.Template.Name = "" - - annotations := generateServiceAnnotations(f, decorator, previousService, daprInstalled) - - // we need to create a separate map for Annotations specified in a Revision, - // in case we will need to specify autoscaling annotations -> these could be only in a Revision not in a Service - revisionAnnotations := make(map[string]string) - for k, v := range annotations { - revisionAnnotations[k] = v - } - - service.Annotations = annotations - service.Spec.Template.Annotations = revisionAnnotations - - // I hate that we have to do this. Users should not see these values. - // It is an implementation detail. These health endpoints should not be - // a part of func.yaml since the user can only mess things up by changing - // them. Ultimately, this information is determined by the language pack. - // Which is another reason to consider having a global config to store - // some metadata which is fairly static. For example, a .config/func/global.yaml - // file could contain information about all known language packs. As new - // language packs are discovered through use of the --repository flag when - // creating a function, this information could be extracted from - // language-pack.yaml for each template and written to the local global - // config. At runtime this configuration file could be consulted. I don't - // know what this would mean for developers using the func library directly. - cp := &service.Spec.Template.Spec.Containers[0] - setHealthEndpoints(f, cp) - - err := setServiceOptions(&service.Spec.Template, f.Deploy.Options) - if err != nil { - return service, err - } - - labels, err := generateServiceLabels(f, decorator) - if err != nil { - return nil, err - } - - service.Labels = labels - service.Spec.Template.Labels = labels - - err = flags.UpdateImage(&service.Spec.Template.Spec.PodSpec, f.Deploy.Image) - if err != nil { - return service, err - } - - cp.Env = newEnv - cp.EnvFrom = newEnvFrom - cp.VolumeMounts = newVolumeMounts - service.Spec.Template.Spec.Volumes = newVolumes - service.Spec.Template.Spec.ServiceAccountName = f.Deploy.ServiceAccountName - return service, nil - } -} - -// processEnvs generates array of EnvVars and EnvFromSources from a function config -// envs: -// - name: EXAMPLE1 # ENV directly from a value -// value: value1 -// - name: EXAMPLE2 # ENV from the local ENV var -// value: {{ env:MY_ENV }} -// - name: EXAMPLE3 -// value: {{ secret:example-secret:key }} # ENV from a key in Secret -// - value: {{ secret:example-secret }} # all ENVs from Secret -// - name: EXAMPLE4 -// value: {{ configMap:configMapName:key }} # ENV from a key in ConfigMap -// - value: {{ configMap:configMapName }} # all key-pair values from ConfigMap are set as ENV -func processEnvs(envs []fn.Env, referencedSecrets, referencedConfigMaps *sets.Set[string]) ([]corev1.EnvVar, []corev1.EnvFromSource, error) { - - envs = withOpenAddress(envs) // prepends ADDRESS=0.0.0.0 if not extant - - envVars := []corev1.EnvVar{{Name: "BUILT", Value: time.Now().Format("20060102T150405")}} - envFrom := []corev1.EnvFromSource{} - - for _, env := range envs { - if env.Name == nil && env.Value != nil { - // all key-pair values from secret/configMap are set as ENV, eg. 
{{ secret:secretName }} or {{ configMap:configMapName }} - if strings.HasPrefix(*env.Value, "{{") { - envFromSource, err := createEnvFromSource(*env.Value, referencedSecrets, referencedConfigMaps) - if err != nil { - return nil, nil, err - } - envFrom = append(envFrom, *envFromSource) - continue - } - } else if env.Name != nil && env.Value != nil { - if strings.HasPrefix(*env.Value, "{{") { - slices := strings.Split(strings.Trim(*env.Value, "{} "), ":") - if len(slices) == 3 { - // ENV from a key in secret/configMap, eg. FOO={{ secret:secretName:key }} FOO={{ configMap:configMapName.key }} - valueFrom, err := createEnvVarSource(slices, referencedSecrets, referencedConfigMaps) - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, ValueFrom: valueFrom}) - if err != nil { - return nil, nil, err - } - continue - } else if len(slices) == 2 { - // ENV from the local ENV var, eg. FOO={{ env:LOCAL_ENV }} - localValue, err := processLocalEnvValue(*env.Value) - if err != nil { - return nil, nil, err - } - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: localValue}) - continue - } - } else { - // a standard ENV with key and value, eg. FOO=bar - envVars = append(envVars, corev1.EnvVar{Name: *env.Name, Value: *env.Value}) - continue - } - } - return nil, nil, fmt.Errorf("unsupported env source entry \"%v\"", env) - } - - return envVars, envFrom, nil -} - -// withOpenAddresss prepends ADDRESS=0.0.0.0 to the envs if not present. -// -// This is combined with the value of PORT at runtime to determine the full -// Listener address on which a Function will listen tcp requests. -// -// Runtimes should, by default, only listen on the loopback interface by -// default, as they may be `func run` locally, for security purposes. -// This environment vriable instructs the runtimes to listen on all interfaces -// by default when actually being deployed, since they will need to actually -// listen for client requests and for health readiness/liveness probes. -// -// Should a user wish to securely open their function to only receive requests -// on a specific interface, such as a WireGuar-encrypted mesh network which -// presents as a specific interface, that can be achieved by setting the -// ADDRESS value as an environment variable on their function to the interface -// on which to listen. -// -// NOTE this env is currently only respected by scaffolded Go functions, because -// they are the only ones which support being `func run` locally. Other -// runtimes will respect the value as they are updated to support scaffolding. -func withOpenAddress(ee []fn.Env) []fn.Env { - // TODO: this is unnecessarily complex due to both key and value of the - // envs slice being being pointers. There is an outstanding tech-debt item - // to remove pointers from Function Envs, Volumes, Labels, and Options. 
- var found bool - for _, e := range ee { - if e.Name != nil && *e.Name == "ADDRESS" { - found = true - break - } - } - if !found { - k := "ADDRESS" - v := "0.0.0.0" - ee = append(ee, fn.Env{Name: &k, Value: &v}) - } - return ee -} - -func createEnvFromSource(value string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvFromSource, error) { - slices := strings.Split(strings.Trim(value, "{} "), ":") - if len(slices) != 2 { - return nil, fmt.Errorf("env requires a value in form \"resourceType:name\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) - } - - envVarSource := corev1.EnvFromSource{} - - typeString := strings.TrimSpace(slices[0]) - sourceName := strings.TrimSpace(slices[1]) - - var sourceType string - - switch typeString { - case "configMap": - sourceType = "ConfigMap" - envVarSource.ConfigMapRef = &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }} - - if !referencedConfigMaps.Has(sourceName) { - referencedConfigMaps.Insert(sourceName) - } - case "secret": - sourceType = "Secret" - envVarSource.SecretRef = &corev1.SecretEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }} - if !referencedSecrets.Has(sourceName) { - referencedSecrets.Insert(sourceName) - } - default: - return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) - } - - if len(sourceName) == 0 { - return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) - } - - return &envVarSource, nil -} - -func createEnvVarSource(slices []string, referencedSecrets, referencedConfigMaps *sets.Set[string]) (*corev1.EnvVarSource, error) { - - if len(slices) != 3 { - return nil, fmt.Errorf("env requires a value in form \"resourceType:name:key\" where \"resourceType\" can be one of \"configMap\" or \"secret\"; got %q", slices) - } - - envVarSource := corev1.EnvVarSource{} - - typeString := strings.TrimSpace(slices[0]) - sourceName := strings.TrimSpace(slices[1]) - sourceKey := strings.TrimSpace(slices[2]) - - var sourceType string - - switch typeString { - case "configMap": - sourceType = "ConfigMap" - envVarSource.ConfigMapKeyRef = &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }, - Key: sourceKey} - - if !referencedConfigMaps.Has(sourceName) { - referencedConfigMaps.Insert(sourceName) - } - case "secret": - sourceType = "Secret" - envVarSource.SecretKeyRef = &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: sourceName, - }, - Key: sourceKey} - - if !referencedSecrets.Has(sourceName) { - referencedSecrets.Insert(sourceName) - } - default: - return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"configMap\" or \"secret\"", slices[0]) - } - - if len(sourceName) == 0 { - return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType) - } - - if len(sourceKey) == 0 { - return nil, fmt.Errorf("the key referenced by resource %s %q cannot be an empty string", sourceType, sourceName) - } - - return &envVarSource, nil -} - -var evRegex = regexp.MustCompile(`^{{\s*(\w+)\s*:(\w+)\s*}}$`) - -const ( - ctxIdx = 1 - valIdx = 2 -) - -func processLocalEnvValue(val string) (string, error) { - match := evRegex.FindStringSubmatch(val) - if len(match) > valIdx { - if match[ctxIdx] != "env" { - return "", fmt.Errorf("allowed env value entry is \"{{ env:LOCAL_VALUE }}\"; got: %q", 
match[ctxIdx]) - } - if v, ok := os.LookupEnv(match[valIdx]); ok { - return v, nil - } else { - return "", fmt.Errorf("required local environment variable %q is not set", match[valIdx]) - } - } else { - return val, nil - } -} - -// / processVolumes generates Volumes and VolumeMounts from a function config -// volumes: -// - secret: example-secret # mount Secret as Volume -// path: /etc/secret-volume -// - configMap: example-configMap # mount ConfigMap as Volume -// path: /etc/configMap-volume -// - persistentVolumeClaim: { claimName: example-pvc } # mount PersistentVolumeClaim as Volume -// path: /etc/secret-volume -// - emptyDir: {} # mount EmptyDir as Volume -// path: /etc/configMap-volume -func processVolumes(volumes []fn.Volume, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string]) ([]corev1.Volume, []corev1.VolumeMount, error) { - - createdVolumes := sets.NewString() - usedPaths := sets.NewString() - - newVolumes := []corev1.Volume{} - newVolumeMounts := []corev1.VolumeMount{} - - for _, vol := range volumes { - - volumeName := "" - - if vol.Secret != nil { - volumeName = "secret-" + *vol.Secret - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: *vol.Secret, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedSecrets.Has(*vol.Secret) { - referencedSecrets.Insert(*vol.Secret) - } - } - } else if vol.ConfigMap != nil { - volumeName = "config-map-" + *vol.ConfigMap - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: *vol.ConfigMap, - }, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedConfigMaps.Has(*vol.ConfigMap) { - referencedConfigMaps.Insert(*vol.ConfigMap) - } - } - } else if vol.PersistentVolumeClaim != nil { - volumeName = "pvc-" + *vol.PersistentVolumeClaim.ClaimName - - if !createdVolumes.Has(volumeName) { - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: *vol.PersistentVolumeClaim.ClaimName, - ReadOnly: vol.PersistentVolumeClaim.ReadOnly, - }, - }, - }) - createdVolumes.Insert(volumeName) - - if !referencedPVCs.Has(*vol.PersistentVolumeClaim.ClaimName) { - referencedPVCs.Insert(*vol.PersistentVolumeClaim.ClaimName) - } - } - } else if vol.EmptyDir != nil { - volumeName = "empty-dir-" + rand.String(7) - - if !createdVolumes.Has(volumeName) { - - var sizeLimit *resource.Quantity - if vol.EmptyDir.SizeLimit != nil { - sl, err := resource.ParseQuantity(*vol.EmptyDir.SizeLimit) - if err != nil { - return nil, nil, fmt.Errorf("invalid quantity for sizeLimit: %s. 
Error: %s", *vol.EmptyDir.SizeLimit, err) - } - sizeLimit = &sl - } - - newVolumes = append(newVolumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - Medium: corev1.StorageMedium(vol.EmptyDir.Medium), - SizeLimit: sizeLimit, - }, - }, - }) - createdVolumes.Insert(volumeName) - } - } - - if volumeName != "" { - if !usedPaths.Has(*vol.Path) { - newVolumeMounts = append(newVolumeMounts, corev1.VolumeMount{ - Name: volumeName, - MountPath: *vol.Path, - }) - usedPaths.Insert(*vol.Path) - } else { - return nil, nil, fmt.Errorf("mount path %s is defined multiple times", *vol.Path) - } - } - } - - return newVolumes, newVolumeMounts, nil -} - -// checkResourcesArePresent returns error if Secrets or ConfigMaps -// referenced in input sets are not deployed on the cluster in the specified namespace -func checkResourcesArePresent(ctx context.Context, namespace string, referencedSecrets, referencedConfigMaps, referencedPVCs *sets.Set[string], referencedServiceAccount string) error { - - errMsg := "" - for s := range *referencedSecrets { - _, err := k8s.GetSecret(ctx, s, namespace) - if err != nil { - if errors.IsForbidden(err) { - errMsg += " Ensure that the service account has the necessary permissions to access the secret.\n" - } else { - errMsg += fmt.Sprintf(" referenced Secret \"%s\" is not present in namespace \"%s\"\n", s, namespace) - } - } - } - - for cm := range *referencedConfigMaps { - _, err := k8s.GetConfigMap(ctx, cm, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced ConfigMap \"%s\" is not present in namespace \"%s\"\n", cm, namespace) - } - } - - for pvc := range *referencedPVCs { - _, err := k8s.GetPersistentVolumeClaim(ctx, pvc, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced PersistentVolumeClaim \"%s\" is not present in namespace \"%s\"\n", pvc, namespace) - } - } - - // check if referenced ServiceAccount is present in the namespace if it is not default - if referencedServiceAccount != "" && referencedServiceAccount != "default" { - err := k8s.GetServiceAccount(ctx, referencedServiceAccount, namespace) - if err != nil { - errMsg += fmt.Sprintf(" referenced ServiceAccount \"%s\" is not present in namespace \"%s\"\n", referencedServiceAccount, namespace) - } - } - - if errMsg != "" { - return fmt.Errorf("error(s) while validating resources:\n%s", errMsg) - } - - return nil -} - -// setServiceOptions sets annotations on Service Revision Template or in the Service Spec -// from values specified in function configuration options -func setServiceOptions(template *v1.RevisionTemplateSpec, options fn.Options) error { - - toRemove := []string{} - toUpdate := map[string]string{} - - if options.Scale != nil { - if options.Scale.Min != nil { - toUpdate[autoscaling.MinScaleAnnotationKey] = fmt.Sprintf("%d", *options.Scale.Min) - } else { - toRemove = append(toRemove, autoscaling.MinScaleAnnotationKey) - } - - if options.Scale.Max != nil { - toUpdate[autoscaling.MaxScaleAnnotationKey] = fmt.Sprintf("%d", *options.Scale.Max) - } else { - toRemove = append(toRemove, autoscaling.MaxScaleAnnotationKey) - } - - if options.Scale.Metric != nil { - toUpdate[autoscaling.MetricAnnotationKey] = *options.Scale.Metric - } else { - toRemove = append(toRemove, autoscaling.MetricAnnotationKey) - } - - if options.Scale.Target != nil { - toUpdate[autoscaling.TargetAnnotationKey] = fmt.Sprintf("%f", *options.Scale.Target) - } else { - toRemove = append(toRemove, autoscaling.TargetAnnotationKey) - } - - if 
options.Scale.Utilization != nil { - toUpdate[autoscaling.TargetUtilizationPercentageKey] = fmt.Sprintf("%f", *options.Scale.Utilization) - } else { - toRemove = append(toRemove, autoscaling.TargetUtilizationPercentageKey) - } - - } - - // in the container always set Requests/Limits & Concurrency values based on the contents of config - template.Spec.Containers[0].Resources.Requests = nil - template.Spec.Containers[0].Resources.Limits = nil - template.Spec.ContainerConcurrency = nil - - if options.Resources != nil { - if options.Resources.Requests != nil { - template.Spec.Containers[0].Resources.Requests = corev1.ResourceList{} - - if options.Resources.Requests.CPU != nil { - value, err := resource.ParseQuantity(*options.Resources.Requests.CPU) - if err != nil { - return err - } - template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = value - } - - if options.Resources.Requests.Memory != nil { - value, err := resource.ParseQuantity(*options.Resources.Requests.Memory) - if err != nil { - return err - } - template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = value - } - } - - if options.Resources.Limits != nil { - template.Spec.Containers[0].Resources.Limits = corev1.ResourceList{} - - if options.Resources.Limits.CPU != nil { - value, err := resource.ParseQuantity(*options.Resources.Limits.CPU) - if err != nil { - return err - } - template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = value - } - - if options.Resources.Limits.Memory != nil { - value, err := resource.ParseQuantity(*options.Resources.Limits.Memory) - if err != nil { - return err - } - template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = value - } - - if options.Resources.Limits.Concurrency != nil { - template.Spec.ContainerConcurrency = options.Resources.Limits.Concurrency - } - } - } - - return servingclientlib.UpdateRevisionTemplateAnnotations(template, toUpdate, toRemove) -} diff --git a/pkg/knative/deployer_test.go b/pkg/knative/deployer_test.go deleted file mode 100644 index e8832d3258..0000000000 --- a/pkg/knative/deployer_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package knative - -import ( - "os" - "testing" - - corev1 "k8s.io/api/core/v1" - - fn "knative.dev/func/pkg/functions" -) - -func Test_setHealthEndpoints(t *testing.T) { - f := fn.Function{ - Name: "testing", - Deploy: fn.DeploySpec{ - HealthEndpoints: fn.HealthEndpoints{ - Liveness: "/lively", - Readiness: "/readyAsIllEverBe", - }, - }, - } - c := corev1.Container{} - setHealthEndpoints(f, &c) - got := c.LivenessProbe.HTTPGet.Path - if got != "/lively" { - t.Errorf("expected \"/lively\" but got %v", got) - } - got = c.ReadinessProbe.HTTPGet.Path - if got != "/readyAsIllEverBe" { - t.Errorf("expected \"readyAsIllEverBe\" but got %v", got) - } -} - -func Test_setHealthEndpointDefaults(t *testing.T) { - f := fn.Function{ - Name: "testing", - } - c := corev1.Container{} - setHealthEndpoints(f, &c) - got := c.LivenessProbe.HTTPGet.Path - if got != LIVENESS_ENDPOINT { - t.Errorf("expected \"%v\" but got %v", LIVENESS_ENDPOINT, got) - } - got = c.ReadinessProbe.HTTPGet.Path - if got != READINESS_ENDPOINT { - t.Errorf("expected \"%v\" but got %v", READINESS_ENDPOINT, got) - } -} - -func Test_processValue(t *testing.T) { - testEnvVarOld, testEnvVarOldExists := os.LookupEnv("TEST_KNATIVE_DEPLOYER") - os.Setenv("TEST_KNATIVE_DEPLOYER", "VALUE_FOR_TEST_KNATIVE_DEPLOYER") - defer func() { - if testEnvVarOldExists { - os.Setenv("TEST_KNATIVE_DEPLOYER", testEnvVarOld) - } else { - os.Unsetenv("TEST_KNATIVE_DEPLOYER") 
- } - }() - - unsetVarOld, unsetVarOldExists := os.LookupEnv("UNSET_VAR") - os.Unsetenv("UNSET_VAR") - defer func() { - if unsetVarOldExists { - os.Setenv("UNSET_VAR", unsetVarOld) - } - }() - - tests := []struct { - name string - arg string - want string - wantErr bool - }{ - {name: "simple value", arg: "A_VALUE", want: "A_VALUE", wantErr: false}, - {name: "using envvar value", arg: "{{ env:TEST_KNATIVE_DEPLOYER }}", want: "VALUE_FOR_TEST_KNATIVE_DEPLOYER", wantErr: false}, - {name: "bad context", arg: "{{secret:S}}", want: "", wantErr: true}, - {name: "unset envvar", arg: "{{env:SOME_UNSET_VAR}}", want: "", wantErr: true}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - got, err := processLocalEnvValue(test.arg) - if (err != nil) != test.wantErr { - t.Errorf("processValue() error = %v, wantErr %v", err, test.wantErr) - return - } - if got != test.want { - t.Errorf("processValue() got = %v, want %v", got, test.want) - } - }) - } -} diff --git a/pkg/knative/labels.go b/pkg/knative/labels.go deleted file mode 100644 index 2dc00214df..0000000000 --- a/pkg/knative/labels.go +++ /dev/null @@ -1,7 +0,0 @@ -package knative - -const ( - DaprEnabled = "true" - DaprMetricsPort = "9092" - DaprEnableAPILogging = "true" -) diff --git a/pkg/knative/labels_int_test.go b/pkg/knative/labels_int_test.go deleted file mode 100644 index f6b24eb9e0..0000000000 --- a/pkg/knative/labels_int_test.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build integration - -package knative_test - -import ( - "context" - "testing" - "time" - - "k8s.io/apimachinery/pkg/util/rand" - - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/knative" - "knative.dev/func/pkg/oci" -) - -func TestInt_Labels(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) - name := "func-int-knative-describe-" + rand.String(5) - root := t.TempDir() - ns := namespace(t, ctx) - - t.Cleanup(cancel) - - client := fn.New( - fn.WithBuilder(oci.NewBuilder("", false)), - fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithRemover(knative.NewRemover(false)), - ) - - f, err := client.Init(fn.Function{ - Root: root, - Name: name, - Runtime: "go", - Namespace: ns, - Registry: registry(), - }) - if err != nil { - t.Fatal(err) - } - - // Build - f, err = client.Build(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Push - f, _, err = client.Push(ctx, f) - if err != nil { - t.Fatal(err) - } - - // Deploy - f, err = client.Deploy(ctx, f) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - err := client.Remove(ctx, "", "", f, true) - if err != nil { - t.Logf("error removing Function: %v", err) - } - }) - - // Describe - desc, err := client.Describe(ctx, "", "", f) - if err != nil { - t.Fatal(err) - } - - if desc.Name != f.Name { - t.Fatalf("expected name %q, got %q", f.Name, desc.Name) - - } - if desc.Namespace != ns { - t.Fatalf("expected namespace %q, got %q", ns, desc.Namespace) - } -} diff --git a/pkg/knative/lister.go b/pkg/knative/lister.go deleted file mode 100644 index cc1eb756b0..0000000000 --- a/pkg/knative/lister.go +++ /dev/null @@ -1,58 +0,0 @@ -package knative - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - "knative.dev/pkg/apis" - - fn "knative.dev/func/pkg/functions" -) - -type Lister struct { - verbose bool -} - -func NewLister(verbose bool) *Lister { - return &Lister{verbose: verbose} -} - -// List functions, optionally specifying a 
namespace. -func (l *Lister) List(ctx context.Context, namespace string) (items []fn.ListItem, err error) { - client, err := NewServingClient(namespace) - if err != nil { - return - } - - lst, err := client.ListServices(ctx) - if err != nil { - return - } - - services := lst.Items[:] - - for _, service := range services { - - // get status - ready := corev1.ConditionUnknown - for _, con := range service.Status.Conditions { - if con.Type == apis.ConditionReady { - ready = con.Status - break - } - } - - runtimeLabel := "" - - listItem := fn.ListItem{ - Name: service.Name, - Namespace: service.Namespace, - Runtime: runtimeLabel, - URL: service.Status.URL.String(), - Ready: string(ready), - } - - items = append(items, listItem) - } - return -} diff --git a/pkg/knative/logs.go b/pkg/knative/logs.go index a3e9f7ab61..c2bbe85ba0 100644 --- a/pkg/knative/logs.go +++ b/pkg/knative/logs.go @@ -8,11 +8,6 @@ import ( "sync" "time" - "golang.org/x/sync/errgroup" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" "knative.dev/func/pkg/k8s" ) @@ -24,99 +19,8 @@ import ( // // This function runs as long as the passed context is active (i.e. it is required cancel the context to stop log gathering). func GetKServiceLogs(ctx context.Context, namespace, kServiceName, image string, since *time.Time, out io.Writer) error { - client, namespace, err := k8s.NewClientAndResolvedNamespace(namespace) - if err != nil { - return fmt.Errorf("cannot create k8s client: %w", err) - } - - pods := client.CoreV1().Pods(namespace) - - podListOpts := metav1.ListOptions{ - Watch: true, - LabelSelector: fmt.Sprintf("serving.knative.dev/service=%s", kServiceName), - } - - w, err := pods.Watch(ctx, podListOpts) - if err != nil { - return fmt.Errorf("cannot create watch: %w", err) - } - defer w.Stop() - - beingProcessed := make(map[string]bool) - var beingProcessedMu sync.Mutex - - copyLogs := func(pod corev1.Pod) error { - defer func() { - beingProcessedMu.Lock() - delete(beingProcessed, pod.Name) - beingProcessedMu.Unlock() - }() - podLogOpts := corev1.PodLogOptions{ - Container: "user-container", - Follow: true, - } - if since != nil { - sinceTime := metav1.NewTime(*since) - podLogOpts.SinceTime = &sinceTime - } - req := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts) - - r, e := req.Stream(ctx) - if e != nil { - return fmt.Errorf("cannot get stream: %w", e) - } - defer r.Close() - _, e = io.Copy(out, r) - if e != nil { - return fmt.Errorf("error copying logs: %w", e) - } - return nil - } - - mayReadLogs := func(pod corev1.Pod) bool { - for _, status := range pod.Status.ContainerStatuses { - if status.Name == "user-container" { - return status.State.Running != nil || status.State.Terminated != nil - } - } - return false - } - - getImage := func(pod corev1.Pod) string { - for _, ctr := range pod.Spec.Containers { - if ctr.Name == "user-container" { - return ctr.Image - } - } - return "" - } - - var eg errgroup.Group - - for event := range w.ResultChan() { - if event.Type == watch.Modified || event.Type == watch.Added { - pod := *event.Object.(*corev1.Pod) - - beingProcessedMu.Lock() - _, loggingAlready := beingProcessed[pod.Name] - beingProcessedMu.Unlock() - - if !loggingAlready && (image == "" || image == getImage(pod)) && mayReadLogs(pod) { - - beingProcessedMu.Lock() - beingProcessed[pod.Name] = true - beingProcessedMu.Unlock() - - eg.Go(func() error { return copyLogs(pod) }) - } - } - } - - err = eg.Wait() - if err != nil { - return 
fmt.Errorf("error while gathering logs: %w", err) - } - return nil + selector := fmt.Sprintf("serving.knative.dev/service=%s", kServiceName) + return k8s.GetPodLogsBySelector(ctx, namespace, selector, "user-container", image, since, out) } type SynchronizedBuffer struct { diff --git a/pkg/knative/lister_int_test.go b/pkg/lister/integration_test_helper.go similarity index 73% rename from pkg/knative/lister_int_test.go rename to pkg/lister/integration_test_helper.go index a49b543130..9e7abadb9d 100644 --- a/pkg/knative/lister_int_test.go +++ b/pkg/lister/integration_test_helper.go @@ -1,6 +1,6 @@ //go:build integration -package knative_test +package lister import ( "context" @@ -8,27 +8,27 @@ import ( "time" "k8s.io/apimachinery/pkg/util/rand" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" + fntest "knative.dev/func/pkg/testing" + fnk8stest "knative.dev/func/pkg/testing/k8s" ) -func TestInt_List(t *testing.T) { +func IntegrationTest(t *testing.T, lister fn.Lister, deployer fn.Deployer, describer fn.Describer, remover fn.Remover, deployType string) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) name := "func-int-knative-list-" + rand.String(5) root := t.TempDir() - ns := namespace(t, ctx) + ns := fnk8stest.Namespace(t, ctx) t.Cleanup(cancel) client := fn.New( fn.WithBuilder(oci.NewBuilder("", false)), fn.WithPusher(oci.NewPusher(true, true, true)), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))), - fn.WithDescriber(knative.NewDescriber(false)), - fn.WithLister(knative.NewLister(false)), - fn.WithRemover(knative.NewRemover(false)), + fn.WithDeployer(deployer), + fn.WithLister(lister), + fn.WithDescriber(describer), + fn.WithRemover(remover), ) f, err := client.Init(fn.Function{ @@ -36,7 +36,10 @@ func TestInt_List(t *testing.T) { Name: name, Runtime: "go", Namespace: ns, - Registry: registry(), + Registry: fntest.Registry(), + Deploy: fn.DeploySpec{ + DeployType: deployType, + }, }) if err != nil { t.Fatal(err) @@ -73,7 +76,7 @@ func TestInt_List(t *testing.T) { } // Verify with list - list, err := client.List(ctx, "") + list, err := client.List(ctx, ns) if err != nil { t.Fatal(err) } diff --git a/pkg/lister/k8s/getter.go b/pkg/lister/k8s/getter.go new file mode 100644 index 0000000000..b3d5df31d1 --- /dev/null +++ b/pkg/lister/k8s/getter.go @@ -0,0 +1,63 @@ +package k8s + +import ( + "context" + "fmt" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/func/pkg/deployer" + fn "knative.dev/func/pkg/functions" + "knative.dev/func/pkg/k8s" +) + +type Getter struct { + verbose bool +} + +func NewGetter(verbose bool) *Getter { + return &Getter{verbose: verbose} +} + +// Get a function, optionally specifying a namespace. 
+func (l *Getter) Get(ctx context.Context, name, namespace string) (fn.ListItem, error) { + clientset, err := k8s.NewKubernetesClientset() + if err != nil { + return fn.ListItem{}, fmt.Errorf("could not setup kubernetes clientset: %w", err) + } + + deploymentClient := clientset.AppsV1().Deployments(namespace) + serviceClient := clientset.CoreV1().Services(namespace) + + deployment, err := deploymentClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fn.ListItem{}, fmt.Errorf("could not get deployment: %w", err) + } + + // get status + ready := corev1.ConditionUnknown + for _, con := range deployment.Status.Conditions { + if con.Type == v1.DeploymentAvailable { + ready = con.Status + break + } + } + + service, err := serviceClient.Get(ctx, deployment.Name, metav1.GetOptions{}) + if err != nil { + return fn.ListItem{}, fmt.Errorf("could not get service: %w", err) + } + + runtimeLabel := "" + listItem := fn.ListItem{ + Name: service.Name, + Namespace: service.Namespace, + Runtime: runtimeLabel, + URL: fmt.Sprintf("http://%s.%s.svc", service.Name, service.Namespace), // TODO: use correct scheme + Ready: string(ready), + DeployType: deployer.KubernetesDeployerName, + } + + return listItem, nil +} diff --git a/pkg/lister/k8s/integration_test.go b/pkg/lister/k8s/integration_test.go new file mode 100644 index 0000000000..596171376b --- /dev/null +++ b/pkg/lister/k8s/integration_test.go @@ -0,0 +1,26 @@ +//go:build integration + +package k8s_test + +import ( + "testing" + + "knative.dev/func/pkg/deployer" + k8sdeployer "knative.dev/func/pkg/deployer/k8s" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + knativelister "knative.dev/func/pkg/lister/knative" + k8sremover "knative.dev/func/pkg/remover/k8s" +) + +func TestInt_List(t *testing.T) { + lister.IntegrationTest(t, + lister.NewLister(true, + knativelister.NewGetter(true), + k8slister.NewGetter(true)), + k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(true)), + k8sdescriber.NewDescriber(true), + k8sremover.NewRemover(true), + deployer.KubernetesDeployerName) +} diff --git a/pkg/lister/knative/getter.go b/pkg/lister/knative/getter.go new file mode 100644 index 0000000000..0422f5df17 --- /dev/null +++ b/pkg/lister/knative/getter.go @@ -0,0 +1,56 @@ +package knative + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "knative.dev/func/pkg/deployer" + "knative.dev/func/pkg/knative" + "knative.dev/pkg/apis" + + fn "knative.dev/func/pkg/functions" +) + +type Getter struct { + verbose bool +} + +func NewGetter(verbose bool) *Getter { + return &Getter{verbose: verbose} +} + +// Get a function, optionally specifying a namespace. 
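+// The ready status is derived from the Knative Service's "Ready" condition,
+// and the URL is read from the Service's status.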
+func (l *Getter) Get(ctx context.Context, name, namespace string) (fn.ListItem, error) {
+	client, err := knative.NewServingClient(namespace)
+	if err != nil {
+		return fn.ListItem{}, fmt.Errorf("unable to create knative client: %v", err)
+	}
+
+	service, err := client.GetService(ctx, name)
+	if err != nil {
+		return fn.ListItem{}, fmt.Errorf("unable to get knative service: %v", err)
+	}
+
+	// get status
+	ready := corev1.ConditionUnknown
+	for _, con := range service.Status.Conditions {
+		if con.Type == apis.ConditionReady {
+			ready = con.Status
+			break
+		}
+	}
+
+	runtimeLabel := ""
+
+	listItem := fn.ListItem{
+		Name:       service.Name,
+		Namespace:  service.Namespace,
+		Runtime:    runtimeLabel,
+		URL:        service.Status.URL.String(),
+		Ready:      string(ready),
+		DeployType: deployer.KnativeDeployerName,
+	}
+
+	return listItem, nil
+}
diff --git a/pkg/lister/knative/integration_test.go b/pkg/lister/knative/integration_test.go
new file mode 100644
index 0000000000..964ac56944
--- /dev/null
+++ b/pkg/lister/knative/integration_test.go
@@ -0,0 +1,26 @@
+//go:build integration
+
+package knative_test
+
+import (
+	"testing"
+
+	"knative.dev/func/pkg/deployer"
+	knativedeployer "knative.dev/func/pkg/deployer/knative"
+	knativedescriber "knative.dev/func/pkg/describer/knative"
+	"knative.dev/func/pkg/lister"
+	k8slister "knative.dev/func/pkg/lister/k8s"
+	knativelister "knative.dev/func/pkg/lister/knative"
+	knativeremover "knative.dev/func/pkg/remover/knative"
+)
+
+func TestInt_List(t *testing.T) {
+	lister.IntegrationTest(t,
+		lister.NewLister(true,
+			knativelister.NewGetter(true),
+			k8slister.NewGetter(true)),
+		knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)),
+		knativedescriber.NewDescriber(true),
+		knativeremover.NewRemover(true),
+		deployer.KnativeDeployerName)
+}
diff --git a/pkg/lister/multi_lister.go b/pkg/lister/multi_lister.go
new file mode 100644
index 0000000000..6400c68c49
--- /dev/null
+++ b/pkg/lister/multi_lister.go
@@ -0,0 +1,84 @@
+package lister
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/func/pkg/deployer"
+	fn "knative.dev/func/pkg/functions"
+	"knative.dev/func/pkg/k8s"
+)
+
+type Getter interface {
+	Get(ctx context.Context, name, namespace string) (fn.ListItem, error)
+}
+
+type Lister struct {
+	verbose bool
+
+	knativeGetter    Getter
+	kubernetesGetter Getter
+}
+
+func NewLister(verbose bool, knativeGetter, kubernetesGetter Getter) fn.Lister {
+	return &Lister{
+		verbose:          verbose,
+		knativeGetter:    knativeGetter,
+		kubernetesGetter: kubernetesGetter,
+	}
+}
+
+func (d *Lister) List(ctx context.Context, namespace string) ([]fn.ListItem, error) {
+	clientset, err := k8s.NewKubernetesClientset()
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s client: %v", err)
+	}
+
+	serviceClient := clientset.CoreV1().Services(namespace)
+
+	services, err := serviceClient.List(ctx, metav1.ListOptions{
+		LabelSelector: "function.knative.dev/name",
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to list services: %v", err)
+	}
+
+	listItems := make([]fn.ListItem, 0, len(services.Items))
+	for _, service := range services.Items {
+		if _, ok := service.Labels["serving.knative.dev/revision"]; ok {
+			// skip the Services created for Knative Serving revisions; only the "parent" Services are of interest
+			continue
+		}
+
+		deployType, ok := service.Annotations[deployer.DeployTypeAnnotation]
+		if !ok {
+			// fall back to the Knative getter in case no annotation is given
+			item, err := d.knativeGetter.Get(ctx,
service.Name, namespace) + if err != nil { + return nil, fmt.Errorf("unable to get details about function: %v", err) + } + + listItems = append(listItems, item) + continue + } + + var item fn.ListItem + switch deployType { + case deployer.KnativeDeployerName: + item, err = d.knativeGetter.Get(ctx, service.Name, namespace) + case deployer.KubernetesDeployerName: + item, err = d.kubernetesGetter.Get(ctx, service.Name, namespace) + default: + return nil, fmt.Errorf("unknown deploy type %s for function %s/%s", deployType, service.Name, service.Namespace) + } + + if err != nil { + return nil, fmt.Errorf("unable to get details about function: %v", err) + } + + listItems = append(listItems, item) + } + + return listItems, nil +} diff --git a/pkg/pipelines/tekton/pipelines_int_test.go b/pkg/pipelines/tekton/pipelines_int_test.go index 103dc220be..9dbd748018 100644 --- a/pkg/pipelines/tekton/pipelines_int_test.go +++ b/pkg/pipelines/tekton/pipelines_int_test.go @@ -21,9 +21,18 @@ import ( rbacV1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativedeployer "knative.dev/func/pkg/deployer/knative" + "knative.dev/func/pkg/describer" + k8sdescriber "knative.dev/func/pkg/describer/k8s" + knativedescriber "knative.dev/func/pkg/describer/knative" "knative.dev/func/pkg/k8s" - "knative.dev/func/pkg/knative" + "knative.dev/func/pkg/lister" + k8slister "knative.dev/func/pkg/lister/k8s" + knativelister "knative.dev/func/pkg/lister/knative" "knative.dev/func/pkg/oci" + "knative.dev/func/pkg/remover" + k8sremover "knative.dev/func/pkg/remover/k8s" + knativeremover "knative.dev/func/pkg/remover/knative" "knative.dev/func/pkg/builders/buildpacks" pack "knative.dev/func/pkg/builders/buildpacks" @@ -52,10 +61,10 @@ func newRemoteTestClient(verbose bool) *fn.Client { return fn.New( fn.WithBuilder(pack.NewBuilder(pack.WithVerbose(verbose))), fn.WithPusher(docker.NewPusher(docker.WithCredentialsProvider(testCP))), - fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(verbose))), - fn.WithRemover(knative.NewRemover(verbose)), - fn.WithDescriber(knative.NewDescriber(verbose)), - fn.WithRemover(knative.NewRemover(verbose)), + fn.WithDeployer(knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(verbose))), + fn.WithDescriber(describer.NewMultiDescriber(verbose, knativedescriber.NewDescriber(verbose), k8sdescriber.NewDescriber(verbose))), + fn.WithLister(lister.NewLister(verbose, knativelister.NewGetter(verbose), k8slister.NewGetter(verbose))), + fn.WithRemover(remover.NewMultiRemover(verbose, knativeremover.NewRemover(verbose), k8sremover.NewRemover(verbose))), fn.WithPipelinesProvider(tekton.NewPipelinesProvider(tekton.WithCredentialsProvider(testCP), tekton.WithVerbose(verbose))), ) } diff --git a/pkg/knative/remover_int_test.go b/pkg/remover/integration_test_helper.go similarity index 74% rename from pkg/knative/remover_int_test.go rename to pkg/remover/integration_test_helper.go index 743189a7c3..f298ebeba2 100644 --- a/pkg/knative/remover_int_test.go +++ b/pkg/remover/integration_test_helper.go @@ -1,6 +1,6 @@ //go:build integration -package knative_test +package remover import ( "context" @@ -8,27 +8,27 @@ import ( "time" "k8s.io/apimachinery/pkg/util/rand" - fn "knative.dev/func/pkg/functions" - "knative.dev/func/pkg/knative" "knative.dev/func/pkg/oci" + fntest "knative.dev/func/pkg/testing" + fnk8stest "knative.dev/func/pkg/testing/k8s" ) -func TestInt_Remove(t *testing.T) { +func IntegrationTest(t *testing.T, remover fn.Remover, 
deployer fn.Deployer, describer fn.Describer, lister fn.Lister, deployType string) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10)
 	name := "func-int-knative-remove-" + rand.String(5)
 	root := t.TempDir()
-	ns := namespace(t, ctx)
+	ns := fnk8stest.Namespace(t, ctx)
 
 	t.Cleanup(cancel)
 
 	client := fn.New(
 		fn.WithBuilder(oci.NewBuilder("", false)),
 		fn.WithPusher(oci.NewPusher(true, true, true)),
-		fn.WithDeployer(knative.NewDeployer(knative.WithDeployerVerbose(true))),
-		fn.WithDescriber(knative.NewDescriber(false)),
-		fn.WithLister(knative.NewLister(false)),
-		fn.WithRemover(knative.NewRemover(false)),
+		fn.WithDeployer(deployer),
+		fn.WithRemover(remover),
+		fn.WithDescriber(describer),
+		fn.WithLister(lister),
 	)
 
 	f, err := client.Init(fn.Function{
@@ -36,7 +36,10 @@ func TestInt_Remove(t *testing.T) {
 		Name:      name,
 		Runtime:   "go",
 		Namespace: ns,
-		Registry:  registry(),
+		Registry:  fntest.Registry(),
+		Deploy: fn.DeploySpec{
+			DeployType: deployType,
+		},
 	})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -67,7 +70,7 @@
 	}
 
 	// Verify with list
-	list, err := client.List(ctx, "")
+	list, err := client.List(ctx, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -88,7 +91,7 @@
 	}
 
 	// Verify it is no longer listed
-	list, err = client.List(ctx, "")
+	list, err = client.List(ctx, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/pkg/remover/k8s/integration_test.go b/pkg/remover/k8s/integration_test.go
new file mode 100644
index 0000000000..f4dbdf5fcc
--- /dev/null
+++ b/pkg/remover/k8s/integration_test.go
@@ -0,0 +1,25 @@
+//go:build integration
+
+package k8s_test
+
+import (
+	"testing"
+
+	"knative.dev/func/pkg/deployer"
+	k8sdescriber "knative.dev/func/pkg/describer/k8s"
+	"knative.dev/func/pkg/lister"
+	k8slister "knative.dev/func/pkg/lister/k8s"
+	"knative.dev/func/pkg/remover"
+	k8sremover "knative.dev/func/pkg/remover/k8s"
+
+	k8sdeployer "knative.dev/func/pkg/deployer/k8s"
+)
+
+func TestInt_Remove(t *testing.T) {
+	remover.IntegrationTest(t,
+		k8sremover.NewRemover(true),
+		k8sdeployer.NewDeployer(k8sdeployer.WithDeployerVerbose(true)),
+		k8sdescriber.NewDescriber(true),
+		lister.NewLister(true, nil, k8slister.NewGetter(true)),
+		deployer.KubernetesDeployerName)
+}
diff --git a/pkg/remover/k8s/remover.go b/pkg/remover/k8s/remover.go
new file mode 100644
index 0000000000..6daba56f0e
--- /dev/null
+++ b/pkg/remover/k8s/remover.go
@@ -0,0 +1,55 @@
+package k8s
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	apiErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	fn "knative.dev/func/pkg/functions"
+	"knative.dev/func/pkg/k8s"
+)
+
+func NewRemover(verbose bool) *Remover {
+	return &Remover{
+		verbose: verbose,
+	}
+}
+
+type Remover struct {
+	verbose bool
+}
+
+func (remover *Remover) Remove(ctx context.Context, name, ns string) error {
+	if ns == "" {
+		fmt.Fprintf(os.Stderr, "no namespace defined when trying to delete a function in k8s remover\n")
+		return fn.ErrNamespaceRequired
+	}
+
+	clientset, err := k8s.NewKubernetesClientset()
+	if err != nil {
+		return fmt.Errorf("could not setup kubernetes clientset: %w", err)
+	}
+
+	deploymentClient := clientset.AppsV1().Deployments(ns)
+	serviceClient := clientset.CoreV1().Services(ns)
+
+	err = deploymentClient.Delete(ctx, name, metav1.DeleteOptions{})
+	if err != nil {
+		if apiErrors.IsNotFound(err) {
+			return fn.ErrFunctionNotFound
+		}
+		return fmt.Errorf("k8s remover failed to delete the deployment: %v", err)
+	}
+
+	err = serviceClient.Delete(ctx, name, metav1.DeleteOptions{})
+	if err != nil {
+		if apiErrors.IsNotFound(err) {
+			return fn.ErrFunctionNotFound
+		}
+		return fmt.Errorf("k8s remover failed to delete the service: %v", err)
+	}
+
+	return nil
+}
diff --git a/pkg/remover/knative/integration_test.go b/pkg/remover/knative/integration_test.go
new file mode 100644
index 0000000000..9be4cf14b1
--- /dev/null
+++ b/pkg/remover/knative/integration_test.go
@@ -0,0 +1,25 @@
+//go:build integration
+
+package knative_test
+
+import (
+	"testing"
+
+	"knative.dev/func/pkg/deployer"
+	knativedescriber "knative.dev/func/pkg/describer/knative"
+	"knative.dev/func/pkg/lister"
+	knativelister "knative.dev/func/pkg/lister/knative"
+	"knative.dev/func/pkg/remover"
+	knativeremover "knative.dev/func/pkg/remover/knative"
+
+	knativedeployer "knative.dev/func/pkg/deployer/knative"
+)
+
+func TestInt_Remove(t *testing.T) {
+	remover.IntegrationTest(t,
+		knativeremover.NewRemover(true),
+		knativedeployer.NewDeployer(knativedeployer.WithDeployerVerbose(true)),
+		knativedescriber.NewDescriber(true),
+		lister.NewLister(true, knativelister.NewGetter(true), nil),
+		deployer.KnativeDeployerName)
+}
diff --git a/pkg/knative/remover.go b/pkg/remover/knative/remover.go
similarity index 91%
rename from pkg/knative/remover.go
rename to pkg/remover/knative/remover.go
index e04c813769..f1f1dfcc13 100644
--- a/pkg/knative/remover.go
+++ b/pkg/remover/knative/remover.go
@@ -8,6 +8,7 @@ import (
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
 
 	fn "knative.dev/func/pkg/functions"
+	"knative.dev/func/pkg/knative"
 )
 
 const RemoveTimeout = 120 * time.Second
@@ -28,7 +29,7 @@ func (remover *Remover) Remove(ctx context.Context, name, ns string) (err error)
 		return fn.ErrNamespaceRequired
 	}
 
-	client, err := NewServingClient(ns)
+	client, err := knative.NewServingClient(ns)
 	if err != nil {
 		return
 	}
diff --git a/pkg/remover/multi_remover.go b/pkg/remover/multi_remover.go
new file mode 100644
index 0000000000..76485c7251
--- /dev/null
+++ b/pkg/remover/multi_remover.go
@@ -0,0 +1,55 @@
+package remover
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/func/pkg/deployer"
+	fn "knative.dev/func/pkg/functions"
+	"knative.dev/func/pkg/k8s"
+)
+
+type MultiRemover struct {
+	verbose bool
+
+	knativeRemover    fn.Remover
+	kubernetesRemover fn.Remover
+}
+
+func NewMultiRemover(verbose bool, knativeRemover, kubernetesRemover fn.Remover) *MultiRemover {
+	return &MultiRemover{
+		verbose:           verbose,
+		knativeRemover:    knativeRemover,
+		kubernetesRemover: kubernetesRemover,
+	}
+}
+
+func (d *MultiRemover) Remove(ctx context.Context, name, namespace string) (err error) {
+	clientset, err := k8s.NewKubernetesClientset()
+	if err != nil {
+		return fmt.Errorf("unable to create k8s client: %v", err)
+	}
+
+	serviceClient := clientset.CoreV1().Services(namespace)
+
+	service, err := serviceClient.Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("unable to get service for function: %v", err)
+	}
+
+	deployType, ok := service.Annotations[deployer.DeployTypeAnnotation]
+	if !ok {
+		// fall back to the Knative Remover in case no annotation is given
+		return d.knativeRemover.Remove(ctx, name, namespace)
+	}
+
+	switch deployType {
+	case deployer.KnativeDeployerName:
+		return d.knativeRemover.Remove(ctx, name, namespace)
+	case deployer.KubernetesDeployerName:
+		return d.kubernetesRemover.Remove(ctx, name, namespace)
+	default:
+		return fmt.Errorf("unknown deploy type: %s", deployType)
+	}
+}
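
A minimal sketch of how the removers above are intended to compose from
calling code (the function name "my-func" and namespace "default" are
placeholders; error handling is elided):

	package main

	import (
		"context"

		"knative.dev/func/pkg/remover"
		k8sremover "knative.dev/func/pkg/remover/k8s"
		knativeremover "knative.dev/func/pkg/remover/knative"
	)

	func main() {
		verbose := true

		// Illustrative sketch only. MultiRemover.Remove reads the
		// deployer.DeployTypeAnnotation from the function's Kubernetes
		// Service and dispatches to the matching remover, falling back to
		// the Knative remover when the annotation is absent.
		rm := remover.NewMultiRemover(verbose,
			knativeremover.NewRemover(verbose),
			k8sremover.NewRemover(verbose))

		if err := rm.Remove(context.Background(), "my-func", "default"); err != nil {
			panic(err)
		}
	}
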
diff --git a/pkg/testing/k8s/testing.go b/pkg/testing/k8s/testing.go
new file mode 100644
index 0000000000..40542d2cff
--- /dev/null
+++ b/pkg/testing/k8s/testing.go
@@ -0,0 +1,49 @@
+// Package k8s includes Kubernetes-specific testing helpers.
+package k8s
+
+import (
+	"context"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
+	"knative.dev/func/pkg/k8s"
+)
+
+const DefaultIntTestNamespacePrefix = "func-int-test"
+
+// Namespace creates a uniquely-named integration test namespace and
+// registers a cleanup which deletes it when the test completes.
+func Namespace(t *testing.T, ctx context.Context) string {
+	t.Helper()
+
+	cliSet, err := k8s.NewKubernetesClientset()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// TODO: choose FUNC_INT_NAMESPACE if it exists?
+
+	namespace := DefaultIntTestNamespacePrefix + "-" + rand.String(5)
+
+	ns := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: namespace,
+		},
+		Spec: corev1.NamespaceSpec{},
+	}
+	_, err = cliSet.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() {
+		err := cliSet.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{})
+		if err != nil {
+			t.Logf("error deleting namespace: %v", err)
+		}
+	})
+	t.Log("created namespace: ", namespace)
+
+	return namespace
+}
diff --git a/pkg/testing/testing.go b/pkg/testing/testing.go
index 38e34361f7..2b5ad77e2e 100644
--- a/pkg/testing/testing.go
+++ b/pkg/testing/testing.go
@@ -32,8 +32,6 @@ import (
 
 const DefaultIntTestRegistry = "localhost:50000/func"
 
-const DefaultIntTestNamespacePrefix = "func-int-test"
-
 // Using the given path, create it as a new directory and return a deferrable
 // which will remove it.
 // usage:
@@ -323,3 +321,13 @@ func ClearEnvs(t *testing.T) {
 	}
 }
+
+// Registry returns the registry to use for tests
+func Registry() string {
+	// Use environment variable if set, otherwise use localhost registry
+	if reg := os.Getenv("FUNC_INT_TEST_REGISTRY"); reg != "" {
+		return reg
+	}
+	// Default to localhost registry (same as E2E tests)
+	return DefaultIntTestRegistry
+}
diff --git a/schema/func_yaml-schema.json b/schema/func_yaml-schema.json
index 0d8cd32dea..1aed6f8f92 100644
--- a/schema/func_yaml-schema.json
+++ b/schema/func_yaml-schema.json
@@ -107,6 +107,14 @@
       "type": "string",
       "description": "ServiceAccountName is the name of the service account used for the\nfunction pod. The service account must exist in the namespace to\nsucceed.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/"
     },
+    "deployType": {
+      "enum": [
+        "knative",
+        "deployment"
+      ],
+      "type": "string",
+      "description": "DeployType specifies the type of deployment to use: \"knative\" or \"deployment\"\nDefaults to \"knative\" for backwards compatibility"
+    },
     "subscriptions": {
       "items": {
         "$schema": "http://json-schema.org/draft-04/schema#",