From 5a88636611ea90abe16a80b70bc8a048f5d0be9e Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 13:17:38 -0600 Subject: [PATCH 01/16] Update agent version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e6b1c12a7b..5e24feddd2 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ OUT_DIR=$(shell pwd)/build/.out .DEFAULT_GOAL := help -AGENT_VERSION ?= 2.22.1 +AGENT_VERSION ?= 2.24.1 ALPINE_VERSION ?= 3.16 NGINX_WITH_AGENT_PREFIX ?= nginx-with-agent From 44266cb410350e970dbb860fd4aae7bf3e8a7daf Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 13:21:09 -0600 Subject: [PATCH 02/16] Update manifests and nginx-agent conf --- build/nginx-with-agent/nginx-agent.conf | 2 +- build/nginx-with-agent/nginx-with-agent.yaml | 1 + build/nginx-with-agent/service.yaml | 20 ++++++++ deploy/manifests/nginx-gateway.yaml | 49 ++------------------ 4 files changed, 26 insertions(+), 46 deletions(-) create mode 100644 build/nginx-with-agent/service.yaml diff --git a/build/nginx-with-agent/nginx-agent.conf b/build/nginx-with-agent/nginx-agent.conf index bcba3c1ceb..e623cd03f2 100644 --- a/build/nginx-with-agent/nginx-agent.conf +++ b/build/nginx-with-agent/nginx-agent.conf @@ -31,7 +31,7 @@ metrics: # OSS NGINX default config path # path to aux file dirs can also be added -config_dirs: "/etc/nginx" +config_dirs: "/etc/nginx:/etc/nginx/conf.d:/etc/nginx/secrets" server: host: 127.0.0.1 # change to nginx-gateway.nginx-gateway if testing agent in separate deployment diff --git a/build/nginx-with-agent/nginx-with-agent.yaml b/build/nginx-with-agent/nginx-with-agent.yaml index cd5ba34b6f..86b28c0b7a 100644 --- a/build/nginx-with-agent/nginx-with-agent.yaml +++ b/build/nginx-with-agent/nginx-with-agent.yaml @@ -3,6 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: nginx-with-agent + namespace: nginx-gateway spec: replicas: 1 selector: diff --git a/build/nginx-with-agent/service.yaml 
b/build/nginx-with-agent/service.yaml new file mode 100644 index 0000000000..f7494acbb0 --- /dev/null +++ b/build/nginx-with-agent/service.yaml @@ -0,0 +1,20 @@ +# This manifest is for testing purposes and may not be the final manifest for the nginx-with-agent. +apiVersion: v1 +kind: Service +metadata: + name: nginx-with-agent + namespace: nginx-gateway +spec: + externalTrafficPolicy: Local + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: nginx-with-agent diff --git a/deploy/manifests/nginx-gateway.yaml b/deploy/manifests/nginx-gateway.yaml index 70e29b4018..1085b561c4 100644 --- a/deploy/manifests/nginx-gateway.yaml +++ b/deploy/manifests/nginx-gateway.yaml @@ -84,62 +84,21 @@ spec: labels: app: nginx-gateway spec: - shareProcessNamespace: true serviceAccountName: nginx-gateway - securityContext: - sysctls: - - name: "net.ipv4.ip_unprivileged_port_start" - value: "0" - volumes: - - name: nginx-config - emptyDir: { } - - name: var-lib-nginx - emptyDir: { } - - name: njs-modules - configMap: - name: njs-modules - initContainers: - - image: busybox:1.34 # FIXME(pleshakov): use gateway container to init the Config with proper main config - name: nginx-config-initializer - command: [ 'sh', '-c', 'echo "load_module /usr/lib/nginx/modules/ngx_http_js_module.so; events {} pid /etc/nginx/nginx.pid; error_log stderr debug; http { include /etc/nginx/conf.d/*.conf; js_import /usr/lib/nginx/modules/njs/httpmatches.js; }" > /etc/nginx/nginx.conf && mkdir /etc/nginx/conf.d /etc/nginx/secrets && chown 1001:0 /etc/nginx/conf.d /etc/nginx/secrets' ] - volumeMounts: - - name: nginx-config - mountPath: /etc/nginx containers: - - image: docker.io/nginx-kubernetes-gateway:edge # FIXME(kate-osborn): change back to ghcr before merging to main + - image: docker.io/library/nginx-kubernetes-gateway:edge # FIXME(kate-osborn): change back to ghcr before merging to main 
imagePullPolicy: IfNotPresent # FIXME(kate-osborn): change back to Always before merging to main name: nginx-gateway ports: - name: grpc containerPort: 54789 - volumeMounts: - - name: nginx-config - mountPath: /etc/nginx securityContext: - runAsUser: 1001 - # FIXME(pleshakov) - figure out which capabilities are required - # dropping ALL and adding only CAP_KILL doesn't work - # Note: CAP_KILL is needed for sending HUP signal to NGINX main process + runAsUser: 1001 #FIXME(kate-osborn): figure out what securityContext we need. args: - --gateway-ctlr-name=k8s-gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - - image: nginx:1.23 # I will remove the nginx container once the control plane can push config to nginx-with-agent. - imagePullPolicy: IfNotPresent - name: nginx - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - volumeMounts: - - name: nginx-config - mountPath: /etc/nginx - - name: var-lib-nginx - mountPath: /var/lib/nginx - - name: njs-modules - mountPath: /usr/lib/nginx/modules/njs - - name: nginx-with-agent - image: docker.io/nginx-kubernetes-gateway/nginx-with-agent:edge + - image: docker.io/nginx-kubernetes-gateway/nginx-with-agent:edge + name: nginx-with-agent imagePullPolicy: IfNotPresent securityContext: runAsNonRoot: true From b65765955b1b1d9d341adbf75a431b8b5dbfe5a6 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 13:57:21 -0600 Subject: [PATCH 03/16] Agent config store, builder, secret files, observer/subject, gen main conf --- internal/events/handler.go | 52 ++---- internal/manager/manager.go | 26 ++- internal/nginx/agent/config_builder.go | 158 +++++++++++++++++++ internal/nginx/agent/config_store.go | 100 ++++++++++++ internal/nginx/agent/doc.go | 9 ++ internal/nginx/config/generator.go | 26 +-- internal/nginx/config/generator_test.go | 14 +- internal/nginx/config/nginx_conf.go | 11 ++ internal/nginx/config/nginx_conf_template.go | 46 ++++++ internal/observer/observer.go | 13 ++ 
internal/state/secrets/secrets.go | 20 +++ 11 files changed, 407 insertions(+), 68 deletions(-) create mode 100644 internal/nginx/agent/config_builder.go create mode 100644 internal/nginx/agent/config_store.go create mode 100644 internal/nginx/agent/doc.go create mode 100644 internal/nginx/config/nginx_conf.go create mode 100644 internal/nginx/config/nginx_conf_template.go create mode 100644 internal/observer/observer.go diff --git a/internal/events/handler.go b/internal/events/handler.go index ee7ab749a3..9747e34de8 100644 --- a/internal/events/handler.go +++ b/internal/events/handler.go @@ -9,9 +9,6 @@ import ( discoveryV1 "k8s.io/api/discovery/v1" "sigs.k8s.io/gateway-api/apis/v1beta1" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/file" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/runtime" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" @@ -27,20 +24,19 @@ type EventHandler interface { HandleEventBatch(ctx context.Context, batch EventBatch) } +// ConfigStorer stores dataplane configuration. +type ConfigStorer interface { + Store(conf dataplane.Configuration) error +} + // EventHandlerConfig holds configuration parameters for EventHandlerImpl. type EventHandlerConfig struct { // Processor is the state ChangeProcessor. Processor state.ChangeProcessor // SecretStore is the state SecretStore. SecretStore secrets.SecretStore - // SecretMemoryManager is the state SecretMemoryManager. - SecretMemoryManager secrets.SecretDiskMemoryManager - // Generator is the nginx config Generator. - Generator config.Generator - // NginxFileMgr is the file Manager for nginx. - NginxFileMgr file.Manager - // NginxRuntimeMgr manages nginx runtime. - NginxRuntimeMgr runtime.Manager + // ConfigStorer stores dataplane configuration. 
+ ConfigStorer ConfigStorer // StatusUpdater updates statuses on Kubernetes resources. StatusUpdater status.Updater // Logger is the logger to be used by the EventHandler. @@ -53,6 +49,8 @@ type EventHandlerConfig struct { // (2) Keeping the statuses of the Gateway API resources updated. type EventHandlerImpl struct { cfg EventHandlerConfig + // changeCounter keeps track of the number of dataplane configuration changes (used as config generation). + changeCounter int } // NewEventHandlerImpl creates a new EventHandlerImpl. @@ -80,36 +78,16 @@ func (h *EventHandlerImpl) HandleEventBatch(ctx context.Context, batch EventBatc return } - err := h.updateNginx(ctx, conf) - if err != nil { - h.cfg.Logger.Error(err, "Failed to update NGINX configuration") - } else { - h.cfg.Logger.Info("NGINX configuration was successfully updated") - } - - h.cfg.StatusUpdater.Update(ctx, statuses) -} - -func (h *EventHandlerImpl) updateNginx(ctx context.Context, conf dataplane.Configuration) error { - // Write all secrets (nuke and pave). - // This will remove all secrets in the secrets directory before writing the requested secrets. - // FIXME(kate-osborn): We may want to rethink this approach in the future and write and remove secrets individually. - err := h.cfg.SecretMemoryManager.WriteAllRequestedSecrets() - if err != nil { - return err - } - - cfg := h.cfg.Generator.Generate(conf) + h.changeCounter++ + conf.Generation = h.changeCounter - // For now, we keep all http servers and upstreams in one config file. - // We might rethink that. For example, we can write each server to its file - // or group servers in some way. - err = h.cfg.NginxFileMgr.WriteHTTPConfig("http", cfg) + err := h.cfg.ConfigStorer.Store(conf) if err != nil { - return err + h.cfg.Logger.Error(err, "error storing dataplane configuration") + // FIXME(kate-osborn): Update status to indicate that the gateway is not accepted or programmed. 
} - return h.cfg.NginxRuntimeMgr.Reload(ctx) + h.cfg.StatusUpdater.Update(ctx, statuses) } func (h *EventHandlerImpl) propagateUpsert(e *UpsertEvent) { diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 3d8cbc3c37..7a6ce20860 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -16,7 +16,6 @@ import ( gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" "sigs.k8s.io/gateway-api/apis/v1beta1/validation" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent" "github.com/nginxinc/nginx-kubernetes-gateway/internal/config" "github.com/nginxinc/nginx-kubernetes-gateway/internal/events" "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc" @@ -24,9 +23,8 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/filter" "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/index" "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/predicate" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" ngxcfg "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/file" - ngxruntime "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/runtime" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/relationship" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/resolver" @@ -136,9 +134,6 @@ func Start(cfg config.Config) error { Logger: cfg.Logger.WithName("changeProcessor"), }) - configGenerator := ngxcfg.NewGeneratorImpl() - nginxFileMgr := file.NewManagerImpl() - nginxRuntimeMgr := ngxruntime.NewManagerImpl() statusUpdater := status.NewUpdater(status.UpdaterConfig{ GatewayCtlrName: cfg.GatewayCtlrName, GatewayClassName: cfg.GatewayClassName, @@ -150,15 +145,16 @@ func Start(cfg config.Config) error { Clock: status.NewRealClock(), }) + nginxAgentConfigBuilder := 
agent.NewNginxConfigBuilder(ngxcfg.NewGeneratorImpl(), secretMemoryMgr) + + agentConfigStore := agent.NewConfigStore(nginxAgentConfigBuilder, cfg.Logger.WithName("agentConfigStore")) + eventHandler := events.NewEventHandlerImpl(events.EventHandlerConfig{ - Processor: processor, - SecretStore: secretStore, - SecretMemoryManager: secretMemoryMgr, - Generator: configGenerator, - Logger: cfg.Logger.WithName("eventHandler"), - NginxFileMgr: nginxFileMgr, - NginxRuntimeMgr: nginxRuntimeMgr, - StatusUpdater: statusUpdater, + Processor: processor, + SecretStore: secretStore, + ConfigStorer: agentConfigStore, + Logger: cfg.Logger.WithName("eventHandler"), + StatusUpdater: statusUpdater, }) firstBatchPreparer := events.NewFirstEventBatchPreparerImpl( @@ -190,7 +186,7 @@ func Start(cfg config.Config) error { cfg.Logger.WithName("grpcServer"), grpcAddress, commander.NewCommander(cfg.Logger.WithName("commanderService"), - agent.NewPool(cfg.Logger.WithName("agentPool")), + agentConfigStore, ), ) if err != nil { diff --git a/internal/nginx/agent/config_builder.go b/internal/nginx/agent/config_builder.go new file mode 100644 index 0000000000..47e456fbb4 --- /dev/null +++ b/internal/nginx/agent/config_builder.go @@ -0,0 +1,158 @@ +package agent + +import ( + "bytes" + "fmt" + "os" + + "github.com/nginx/agent/sdk/v2/proto" + "github.com/nginx/agent/sdk/v2/zip" + + "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" +) + +const ( + // TODO: do we need another file mode for config files? 
+ secretsFileMode = 0o600 + confPrefix = "/etc/nginx" + secretsPrefix = "/etc/nginx/secrets" //nolint:gosec + nginxConfFilePath = "nginx.conf" + httpConfFilePath = "/conf.d/http.conf" +) + +type directory struct { + prefix string + files []file +} + +type file struct { + path string + contents []byte + mode os.FileMode +} + +// NginxConfigBuilder builds NginxConfig from the dataplane configuration. +type NginxConfigBuilder struct { + generator config.Generator + secretMemMgr secrets.SecretDiskMemoryManager +} + +// NewNginxConfigBuilder creates a new NginxConfigBuilder. +func NewNginxConfigBuilder( + generator config.Generator, + secretMemMgr secrets.SecretDiskMemoryManager, +) *NginxConfigBuilder { + return &NginxConfigBuilder{ + generator: generator, + secretMemMgr: secretMemMgr, + } +} + +// Build builds NginxConfig from the dataplane configuration. +// It generates the nginx configuration files using the config.Generator and the +// secrets files using the secrets.SecretDiskMemoryManager. 
+func (u *NginxConfigBuilder) Build(cfg dataplane.Configuration) (*NginxConfig, error) { + confDirectory := u.generateConfigDirectory(cfg) + auxDirectory := u.generateAuxConfigDirectory() + + directories := []*proto.Directory{ + convertToProtoDirectory(confDirectory), + convertToProtoDirectory(auxDirectory), + } + + zconfig, err := u.generateZippedFile(confDirectory) + if err != nil { + return nil, err + } + + zaux, err := u.generateZippedFile(auxDirectory) + if err != nil { + return nil, err + } + + return &NginxConfig{ + ID: fmt.Sprintf("%d", cfg.Generation), + Config: zconfig, + Aux: zaux, + Directories: directories, + }, nil +} + +func convertToProtoDirectory(d directory) *proto.Directory { + files := make([]*proto.File, len(d.files)) + + for idx, f := range d.files { + files[idx] = &proto.File{ + Name: f.path, + } + } + + return &proto.Directory{ + Name: d.prefix, + Files: files, + } +} + +func (u *NginxConfigBuilder) generateConfigDirectory(cfg dataplane.Configuration) directory { + return directory{ + prefix: confPrefix, + files: []file{ + { + path: nginxConfFilePath, + mode: secretsFileMode, + contents: u.generator.GenerateMainConf(cfg.Generation), + }, + { + path: httpConfFilePath, + mode: secretsFileMode, + contents: u.generator.GenerateHTTPConf(cfg), + }, + }, + } +} + +func (u *NginxConfigBuilder) generateAuxConfigDirectory() directory { + secretFiles := u.secretMemMgr.GetAllRequestedSecrets() + + files := make([]file, 0, len(secretFiles)) + for _, secret := range secretFiles { + files = append(files, file{ + path: secret.Name, + mode: secretsFileMode, + contents: secret.Contents, + }) + } + + return directory{ + prefix: secretsPrefix, + files: files, + } +} + +func (u *NginxConfigBuilder) generateZippedFile(dir directory) (*proto.ZippedFile, error) { + w, err := zip.NewWriter(dir.prefix) + if err != nil { + return nil, err + } + + for _, f := range dir.files { + if err := w.Add(f.path, f.mode, bytes.NewBuffer(f.contents)); err != nil { + return nil, 
err + } + } + + contents, prefix, checksum, err := w.Payloads() + if err != nil { + return nil, err + } + + zipFile := &proto.ZippedFile{ + Contents: contents, + Checksum: checksum, + RootDirectory: prefix, + } + + return zipFile, nil +} diff --git a/internal/nginx/agent/config_store.go b/internal/nginx/agent/config_store.go new file mode 100644 index 0000000000..ebb4c495a2 --- /dev/null +++ b/internal/nginx/agent/config_store.go @@ -0,0 +1,100 @@ +package agent + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/nginx/agent/sdk/v2/proto" + + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" +) + +// NginxConfig is an intermediate object that contains nginx configuration in a form that agent expects. +// We convert the dataplane configuration to NginxConfig in the config store, so we only need to do it once +// per configuration change. The NginxConfig is then used by the agent to generate the nginx configuration payload. +type NginxConfig struct { + ID string + Config *proto.ZippedFile + Aux *proto.ZippedFile + Directories []*proto.Directory +} + +// ConfigStore accepts the latest dataplane configuration and stores it as NginxConfig. +// ConfigStore implements the observer.Subject interface, +// so that it can notify the agent observers when the configuration changes. +// ConfigStore is thread-safe. +type ConfigStore struct { + latestConfig atomic.Value + configBuilder *NginxConfigBuilder + logger logr.Logger + observers []observer.Observer + observerLock sync.Mutex +} + +// NewConfigStore creates a new ConfigStore. +func NewConfigStore(configBuilder *NginxConfigBuilder, logger logr.Logger) *ConfigStore { + return &ConfigStore{ + observers: make([]observer.Observer, 0), + configBuilder: configBuilder, + logger: logger, + } +} + +// Register registers an observer.
+func (a *ConfigStore) Register(observer observer.Observer) { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + a.observers = append(a.observers, observer) + a.logger.Info("Registering observer", "number of registered observers", len(a.observers)) +} + +// Notify notifies all registered observers. +func (a *ConfigStore) Notify() { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + a.logger.Info("Notifying observers", "number of registered observers", len(a.observers)) + for _, o := range a.observers { + o.Update() + } +} + +// Remove removes an observer. +func (a *ConfigStore) Remove(observer observer.Observer) { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + for i, o := range a.observers { + if o == observer { + a.observers = append(a.observers[:i], a.observers[i+1:]...) + a.logger.Info("Removed observer", "number of registered observers", len(a.observers)) + return + } + } +} + +// Store accepts the latest dataplane configuration, builds the NginxConfig from it, and stores it. +// It's possible for an error to occur when building the NginxConfig, +// in which case the error is returned, and the configuration is not stored. +// If the configuration is successfully stored, the observers are notified. +func (a *ConfigStore) Store(configuration dataplane.Configuration) error { + agentConf, err := a.configBuilder.Build(configuration) + if err != nil { + return fmt.Errorf("error building nginx agent configuration: %w", err) + } + + a.logger.Info("Storing configuration", "config generation", configuration.Generation) + + a.latestConfig.Store(agentConf) + a.Notify() + return nil +} + +// GetLatestConfig returns the latest NginxConfig. 
+func (a *ConfigStore) GetLatestConfig() *NginxConfig { + return a.latestConfig.Load().(*NginxConfig) +} diff --git a/internal/nginx/agent/doc.go b/internal/nginx/agent/doc.go new file mode 100644 index 0000000000..4f14317022 --- /dev/null +++ b/internal/nginx/agent/doc.go @@ -0,0 +1,9 @@ +/* +Package agent contains objects and methods for configuring agents. + +The package includes: +- ConfigStore: a thread-safe store for latest agent nginx configuration. +- NginxConfigBuilder: builds agent nginx configuration from dataplane.Configuration. +- NginxConfig: an intermediate object that contains nginx configuration in a form that agent expects. +*/ +package agent diff --git a/internal/nginx/config/generator.go b/internal/nginx/config/generator.go index 59403d24f7..c417c7288e 100644 --- a/internal/nginx/config/generator.go +++ b/internal/nginx/config/generator.go @@ -9,8 +9,10 @@ import ( // Generator generates NGINX configuration. // This interface is used for testing purposes only. type Generator interface { - // Generate generates NGINX configuration from internal representation. - Generate(configuration dataplane.Configuration) []byte + // GenerateHTTPConf generates NGINX HTTP configuration from internal representation. + GenerateHTTPConf(configuration dataplane.Configuration) []byte + // GenerateMainConf generates the main nginx.conf file. + GenerateMainConf(configGeneration int) []byte } // GeneratorImpl is an implementation of Generator. @@ -24,7 +26,16 @@ func NewGeneratorImpl() GeneratorImpl { // executeFunc is a function that generates NGINX configuration from internal representation. type executeFunc func(configuration dataplane.Configuration) []byte -func (g GeneratorImpl) Generate(conf dataplane.Configuration) []byte { +func getExecuteFuncs() []executeFunc { + return []executeFunc{ + executeUpstreams, + executeSplitClients, + executeServers, + } +} + +// GenerateHTTPConf generates NGINX HTTP configuration from internal representation. 
+func (g GeneratorImpl) GenerateHTTPConf(conf dataplane.Configuration) []byte { var generated []byte for _, execute := range getExecuteFuncs() { generated = append(generated, execute(conf)...) @@ -33,10 +44,7 @@ func (g GeneratorImpl) Generate(conf dataplane.Configuration) []byte { return generated } -func getExecuteFuncs() []executeFunc { - return []executeFunc{ - executeUpstreams, - executeSplitClients, - executeServers, - } +// GenerateMainConf generates the main nginx.conf file using the given configGeneration. +func (g GeneratorImpl) GenerateMainConf(configGeneration int) []byte { + return executeNginxConf(configGeneration) } diff --git a/internal/nginx/config/generator_test.go b/internal/nginx/config/generator_test.go index 6f39b3366a..97a36695af 100644 --- a/internal/nginx/config/generator_test.go +++ b/internal/nginx/config/generator_test.go @@ -11,9 +11,9 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/graph" ) -// Note: this test only verifies that Generate() returns a byte array with upstream, server, and split_client blocks. +// Note: this test only verifies that GenerateHTTPConf() returns a byte array with upstream, server, and split_client blocks. // It does not test the correctness of those blocks. That functionality is covered by other tests in this package. 
-func TestGenerate(t *testing.T) { +func TestGenerateHTTPConf(t *testing.T) { bg := graph.BackendGroup{ Source: types.NamespacedName{Namespace: "test", Name: "hr"}, RuleIdx: 0, @@ -52,21 +52,21 @@ func TestGenerate(t *testing.T) { BackendGroups: []graph.BackendGroup{bg}, } generator := config.NewGeneratorImpl() - cfg := string(generator.Generate(conf)) + cfg := string(generator.GenerateHTTPConf(conf)) if !strings.Contains(cfg, "listen 80") { - t.Errorf("Generate() did not generate a config with a default HTTP server; config: %s", cfg) + t.Errorf("GenerateHTTPConf() did not generate a config with a default HTTP server; config: %s", cfg) } if !strings.Contains(cfg, "listen 443") { - t.Errorf("Generate() did not generate a config with an SSL server; config: %s", cfg) + t.Errorf("GenerateHTTPConf() did not generate a config with an SSL server; config: %s", cfg) } if !strings.Contains(cfg, "upstream") { - t.Errorf("Generate() did not generate a config with an upstream block; config: %s", cfg) + t.Errorf("GenerateHTTPConf() did not generate a config with an upstream block; config: %s", cfg) } if !strings.Contains(cfg, "split_clients") { - t.Errorf("Generate() did not generate a config with an split_clients block; config: %s", cfg) + t.Errorf("GenerateHTTPConf() did not generate a config with an split_clients block; config: %s", cfg) } } diff --git a/internal/nginx/config/nginx_conf.go b/internal/nginx/config/nginx_conf.go new file mode 100644 index 0000000000..4e618b7ee3 --- /dev/null +++ b/internal/nginx/config/nginx_conf.go @@ -0,0 +1,11 @@ +package config + +import ( + "text/template" +) + +var nginxConfTemplate = template.Must(template.New("nginx-conf").Parse(nginxConfTemplateText)) + +func executeNginxConf(configGeneration int) []byte { + return execute(nginxConfTemplate, configGeneration) +} diff --git a/internal/nginx/config/nginx_conf_template.go b/internal/nginx/config/nginx_conf_template.go new file mode 100644 index 0000000000..61176d42ca --- /dev/null +++ 
b/internal/nginx/config/nginx_conf_template.go @@ -0,0 +1,46 @@ +package config + +var nginxConfTemplateText = `# config generation: {{ . }} +load_module /usr/lib/nginx/modules/ngx_http_js_module.so; + +events {} + +pid /etc/nginx/nginx.pid; + +error_log /var/log/nginx/error.log debug; + +http { + include /etc/nginx/conf.d/*.conf; + js_import /usr/lib/nginx/modules/njs/httpmatches.js; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" '; + + access_log /var/log/nginx/access.log main; + + # stub status API + # needed by the agent in order to collect metrics + server { + listen 127.0.0.1:8082; + location /api { + stub_status; + allow 127.0.0.1; + deny all; + } + } + + server { + listen unix:/var/lib/nginx/nginx-502-server.sock; + access_log off; + + return 502; + } + + server { + listen unix:/var/lib/nginx/nginx-500-server.sock; + access_log off; + + return 500; + } +}` diff --git a/internal/observer/observer.go b/internal/observer/observer.go new file mode 100644 index 0000000000..10631bd3f5 --- /dev/null +++ b/internal/observer/observer.go @@ -0,0 +1,13 @@ +package observer + +// Subject is an interface for objects that can be observed. +type Subject interface { + Register(observer Observer) + Remove(observer Observer) + Notify() +} + +// Observer is an interface for objects that can observe a Subject. +type Observer interface { + Update() +} diff --git a/internal/state/secrets/secrets.go b/internal/state/secrets/secrets.go index 4096aec08a..c5cdaa986f 100644 --- a/internal/state/secrets/secrets.go +++ b/internal/state/secrets/secrets.go @@ -42,6 +42,12 @@ type Secret struct { Valid bool } +// File represents a secret as a file. Contains the file name and the file contents. 
+type File struct { + Name string + Contents []byte +} + func NewSecretStore() *SecretStoreImpl { return &SecretStoreImpl{ secrets: make(map[types.NamespacedName]*Secret), @@ -74,6 +80,8 @@ type SecretDiskMemoryManager interface { Request(nsname types.NamespacedName) (string, error) // WriteAllRequestedSecrets writes all requested secrets to disk. WriteAllRequestedSecrets() error + // GetAllRequestedSecrets returns all requested secrets as Files. + GetAllRequestedSecrets() []File } // FileManager is an interface that exposes File I/O operations. @@ -203,6 +211,18 @@ func (s *SecretDiskMemoryManagerImpl) WriteAllRequestedSecrets() error { return nil } +func (s *SecretDiskMemoryManagerImpl) GetAllRequestedSecrets() []File { + files := make([]File, 0, len(s.requestedSecrets)) + for _, secret := range s.requestedSecrets { + files = append(files, File{ + Name: secret.path, + Contents: generateCertAndKeyFileContent(secret.secret), + }) + } + + return files +} + func isSecretValid(secret *apiv1.Secret) bool { if secret.Type != apiv1.SecretTypeTLS { return false From 9658b4e9ab3f407527132d5574d461a7427d1593 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 13:58:37 -0600 Subject: [PATCH 04/16] Add config gen --- internal/state/dataplane/configuration.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/state/dataplane/configuration.go b/internal/state/dataplane/configuration.go index 7251d824d7..4abdd3a4f5 --- a/internal/state/dataplane/configuration.go +++ b/internal/state/dataplane/configuration.go @@ -15,6 +15,7 @@ const wildcardHostname = "~^" // Configuration is an intermediate representation of dataplane configuration. type Configuration struct { + Generation int // HTTPServers holds all HTTPServers. // FIXME(pleshakov) We assume that all servers are HTTP and listen on port 80.
HTTPServers []VirtualServer From 864566bf38010bd3b2db8e26c4aa7f4699ac9f8c Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 13:59:38 -0600 Subject: [PATCH 05/16] Remove gotemplate alias, add explicit listen 80, and move error servers to main conf --- internal/nginx/config/servers.go | 4 ++-- internal/nginx/config/servers_template.go | 15 ++------------- internal/nginx/config/split_clients.go | 4 ++-- internal/nginx/config/upstreams.go | 4 ++-- 4 files changed, 8 insertions(+), 19 deletions(-) diff --git a/internal/nginx/config/servers.go b/internal/nginx/config/servers.go index 1771a7d0c7..afdf5c7991 100644 --- a/internal/nginx/config/servers.go +++ b/internal/nginx/config/servers.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" "strings" - gotemplate "text/template" + "text/template" "sigs.k8s.io/gateway-api/apis/v1beta1" @@ -12,7 +12,7 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" ) -var serversTemplate = gotemplate.Must(gotemplate.New("servers").Parse(serversTemplateText)) +var serversTemplate = template.Must(template.New("servers").Parse(serversTemplateText)) const rootPath = "/" diff --git a/internal/nginx/config/servers_template.go b/internal/nginx/config/servers_template.go index c96da61371..2cf7d1071a 100644 --- a/internal/nginx/config/servers_template.go +++ b/internal/nginx/config/servers_template.go @@ -25,6 +25,8 @@ server { if ($ssl_server_name != $host) { return 421; } + {{ else }} + listen 80; {{ end }} server_name {{ $s.ServerName }}; @@ -53,17 +55,4 @@ server { } {{ end }} {{ end }} -server { - listen unix:/var/lib/nginx/nginx-502-server.sock; - access_log off; - - return 502; -} - -server { - listen unix:/var/lib/nginx/nginx-500-server.sock; - access_log off; - - return 500; -} ` diff --git a/internal/nginx/config/split_clients.go b/internal/nginx/config/split_clients.go index 28d39030d3..90e735381a 100644 --- a/internal/nginx/config/split_clients.go +++ b/internal/nginx/config/split_clients.go 
@@ -3,14 +3,14 @@ package config import ( "fmt" "math" - gotemplate "text/template" + "text/template" "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config/http" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/graph" ) -var splitClientsTemplate = gotemplate.Must(gotemplate.New("split_clients").Parse(splitClientsTemplateText)) +var splitClientsTemplate = template.Must(template.New("split_clients").Parse(splitClientsTemplateText)) func executeSplitClients(conf dataplane.Configuration) []byte { splitClients := createSplitClients(conf.BackendGroups) diff --git a/internal/nginx/config/upstreams.go b/internal/nginx/config/upstreams.go index 927745ed5a..0460a0ac1a 100644 --- a/internal/nginx/config/upstreams.go +++ b/internal/nginx/config/upstreams.go @@ -2,13 +2,13 @@ package config import ( "fmt" - gotemplate "text/template" + "text/template" "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config/http" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" ) -var upstreamsTemplate = gotemplate.Must(gotemplate.New("upstreams").Parse(upstreamsTemplateText)) +var upstreamsTemplate = template.Must(template.New("upstreams").Parse(upstreamsTemplateText)) const ( // nginx502Server is used as a backend for services that cannot be resolved (have no IP address). 
From 736a3dd5ff75c71ce054170323653f52625723f9 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 14:00:41 -0600 Subject: [PATCH 06/16] Remove agent interface, agent pool, and agent manager --- internal/agent/agent_suite_test.go | 13 - internal/agent/doc.go | 7 - internal/agent/pool.go | 60 ----- internal/agent/pool_test.go | 110 -------- internal/grpc/commander/agent.go | 24 -- .../commander/commanderfakes/fake_agent.go | 242 ------------------ .../commanderfakes/fake_agent_manager.go | 189 -------------- 7 files changed, 645 deletions(-) delete mode 100644 internal/agent/agent_suite_test.go delete mode 100644 internal/agent/doc.go delete mode 100644 internal/agent/pool.go delete mode 100644 internal/agent/pool_test.go delete mode 100644 internal/grpc/commander/agent.go delete mode 100644 internal/grpc/commander/commanderfakes/fake_agent.go delete mode 100644 internal/grpc/commander/commanderfakes/fake_agent_manager.go diff --git a/internal/agent/agent_suite_test.go b/internal/agent/agent_suite_test.go deleted file mode 100644 index ef6db6efd6..0000000000 --- a/internal/agent/agent_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package agent_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestAgent(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Agent Suite") -} diff --git a/internal/agent/doc.go b/internal/agent/doc.go deleted file mode 100644 index a3276c1c36..0000000000 --- a/internal/agent/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Package agent contains objects and methods for interacting with agents. - -The package includes: -- Pool: A concurrent-safe connection pool for managing commander.Agent. 
-*/ -package agent diff --git a/internal/agent/pool.go b/internal/agent/pool.go deleted file mode 100644 index 5089f87489..0000000000 --- a/internal/agent/pool.go +++ /dev/null @@ -1,60 +0,0 @@ -package agent - -import ( - "sync" - - "github.com/go-logr/logr" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" -) - -// Pool is a concurrent safe pool of commander.Agents. -type Pool struct { - agents map[string]commander.Agent - logger logr.Logger - - lock sync.Mutex -} - -// NewPool returns a new instance of Pool. -func NewPool(logger logr.Logger) *Pool { - return &Pool{ - agents: make(map[string]commander.Agent), - logger: logger, - } -} - -// AddAgent adds an agent to the Pool. -func (ap *Pool) AddAgent(agent commander.Agent) { - ap.lock.Lock() - defer ap.lock.Unlock() - - ap.agents[agent.ID()] = agent - - ap.logger.Info("Added new agent", "id", agent.ID(), "total number of agents", len(ap.agents)) -} - -// RemoveAgent removes an agent from the Pool with the given ID. -func (ap *Pool) RemoveAgent(id string) { - ap.lock.Lock() - defer ap.lock.Unlock() - - delete(ap.agents, id) - ap.logger.Info("Removed agent", "id", id, "total number of agents", len(ap.agents)) -} - -// GetAgent returns the agent with the given ID from the Pool. -func (ap *Pool) GetAgent(id string) commander.Agent { - ap.lock.Lock() - defer ap.lock.Unlock() - - return ap.agents[id] -} - -// Size is used for testing purposes. -func (ap *Pool) Size() int { - ap.lock.Lock() - defer ap.lock.Unlock() - - return len(ap.agents) -} diff --git a/internal/agent/pool_test.go b/internal/agent/pool_test.go deleted file mode 100644 index 36c31e2f67..0000000000 --- a/internal/agent/pool_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package agent_test - -import ( - "fmt" - "sync" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/commanderfakes" -) - -func newFakeAgent(id string) commander.Agent { - return &commanderfakes.FakeAgent{ - IDStub: func() string { - return id - }, - } -} - -var _ = Describe("Agent Pool", func() { - var ( - pool *agent.Pool - agent1, agent2, agent3 commander.Agent - ) - - BeforeEach(func() { - pool = agent.NewPool(zap.New()) - agent1, agent2, agent3 = newFakeAgent("1"), newFakeAgent("2"), newFakeAgent("3") - }) - - It("can add and get agents", func() { - pool.AddAgent(agent1) - Expect(pool.Size()).To(Equal(1)) - Expect(pool.GetAgent("1")).To(Equal(agent1)) - - pool.AddAgent(agent2) - Expect(pool.Size()).To(Equal(2)) - Expect(pool.GetAgent("2")).To(Equal(agent2)) - - pool.AddAgent(agent3) - Expect(pool.Size()).To(Equal(3)) - Expect(pool.GetAgent("3")).To(Equal(agent3)) - }) - It("can remove agents", func() { - pool.AddAgent(agent1) - pool.AddAgent(agent2) - pool.AddAgent(agent3) - Expect(pool.Size()).To(Equal(3)) - - pool.RemoveAgent("2") - Expect(pool.Size()).To(Equal(2)) - Expect(pool.GetAgent("1")).To(Equal(agent1)) - Expect(pool.GetAgent("3")).To(Equal(agent3)) - - pool.RemoveAgent("1") - Expect(pool.Size()).To(Equal(1)) - Expect(pool.GetAgent("3")).To(Equal(agent3)) - - pool.RemoveAgent("3") - Expect(pool.Size()).To(Equal(0)) - }) - When("an agent does not exist in pool", func() { - It("remove agent does nothing", func() { - Expect(pool.Size()).To(Equal(0)) - pool.RemoveAgent("dne") - Expect(pool.Size()).To(Equal(0)) - }) - }) - It("can handle concurrent CRUD", func() { - // populate pool with 5 agents which will be removed. 
- for i := 1; i <= 5; i++ { - pool.AddAgent(newFakeAgent(fmt.Sprintf("%d", i))) - } - - addAndGetAgent := func(id string, wg *sync.WaitGroup) { - defer wg.Done() - pool.AddAgent(newFakeAgent(id)) - Expect(pool.GetAgent(id).ID()).To(Equal(id)) - } - - removeAndGetAgent := func(id string, wg *sync.WaitGroup) { - defer wg.Done() - Expect(pool.GetAgent(id).ID()).To(Equal(id)) - pool.RemoveAgent(id) - Expect(pool.GetAgent(id)).To(BeNil()) - } - - wg := &sync.WaitGroup{} - for i := 0; i < 15; i++ { - id := fmt.Sprintf("%d", i+1) - - wg.Add(1) - // remove first five - if i < 5 { - go removeAndGetAgent(id, wg) - } else { - // add 10 new - go addAndGetAgent(id, wg) - } - } - - wg.Wait() - - Expect(pool.Size()).To(Equal(10)) - }) -}) diff --git a/internal/grpc/commander/agent.go b/internal/grpc/commander/agent.go deleted file mode 100644 index d0a67f88cf..0000000000 --- a/internal/grpc/commander/agent.go +++ /dev/null @@ -1,24 +0,0 @@ -package commander - -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . AgentManager -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . Agent - -// Agent represents a connected Agent. -// This interface is used for testing purposes because it allows easy mocking of an agent. -type Agent interface { - // ID returns the unique ID of the Agent. - ID() string - // State returns the State of the Agent. - State() State -} - -// AgentManager manages all the connected agents. -// The commander uses the AgentManager to track all the connected Agents. -type AgentManager interface { - // AddAgent adds an Agent to the manager. - AddAgent(agent Agent) - // RemoveAgent removes the Agent with the provided ID from the manager. - RemoveAgent(id string) - // GetAgent returns the Agent with the provided ID. 
- GetAgent(id string) Agent -} diff --git a/internal/grpc/commander/commanderfakes/fake_agent.go b/internal/grpc/commander/commanderfakes/fake_agent.go deleted file mode 100644 index 6e0e2ff6ae..0000000000 --- a/internal/grpc/commander/commanderfakes/fake_agent.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package commanderfakes - -import ( - "sync" - - "github.com/nginx/agent/sdk/v2/proto" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" -) - -type FakeAgent struct { - IDStub func() string - iDMutex sync.RWMutex - iDArgsForCall []struct { - } - iDReturns struct { - result1 string - } - iDReturnsOnCall map[int]struct { - result1 string - } - ReceiveFromUploadServerStub func(proto.Commander_UploadServer) error - receiveFromUploadServerMutex sync.RWMutex - receiveFromUploadServerArgsForCall []struct { - arg1 proto.Commander_UploadServer - } - receiveFromUploadServerReturns struct { - result1 error - } - receiveFromUploadServerReturnsOnCall map[int]struct { - result1 error - } - StateStub func() commander.State - stateMutex sync.RWMutex - stateArgsForCall []struct { - } - stateReturns struct { - result1 commander.State - } - stateReturnsOnCall map[int]struct { - result1 commander.State - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeAgent) ID() string { - fake.iDMutex.Lock() - ret, specificReturn := fake.iDReturnsOnCall[len(fake.iDArgsForCall)] - fake.iDArgsForCall = append(fake.iDArgsForCall, struct { - }{}) - stub := fake.IDStub - fakeReturns := fake.iDReturns - fake.recordInvocation("ID", []interface{}{}) - fake.iDMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeAgent) IDCallCount() int { - fake.iDMutex.RLock() - defer fake.iDMutex.RUnlock() - return len(fake.iDArgsForCall) -} - -func (fake *FakeAgent) IDCalls(stub func() string) { - 
fake.iDMutex.Lock() - defer fake.iDMutex.Unlock() - fake.IDStub = stub -} - -func (fake *FakeAgent) IDReturns(result1 string) { - fake.iDMutex.Lock() - defer fake.iDMutex.Unlock() - fake.IDStub = nil - fake.iDReturns = struct { - result1 string - }{result1} -} - -func (fake *FakeAgent) IDReturnsOnCall(i int, result1 string) { - fake.iDMutex.Lock() - defer fake.iDMutex.Unlock() - fake.IDStub = nil - if fake.iDReturnsOnCall == nil { - fake.iDReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.iDReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *FakeAgent) ReceiveFromUploadServer(arg1 proto.Commander_UploadServer) error { - fake.receiveFromUploadServerMutex.Lock() - ret, specificReturn := fake.receiveFromUploadServerReturnsOnCall[len(fake.receiveFromUploadServerArgsForCall)] - fake.receiveFromUploadServerArgsForCall = append(fake.receiveFromUploadServerArgsForCall, struct { - arg1 proto.Commander_UploadServer - }{arg1}) - stub := fake.ReceiveFromUploadServerStub - fakeReturns := fake.receiveFromUploadServerReturns - fake.recordInvocation("ReceiveFromUploadServer", []interface{}{arg1}) - fake.receiveFromUploadServerMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeAgent) ReceiveFromUploadServerCallCount() int { - fake.receiveFromUploadServerMutex.RLock() - defer fake.receiveFromUploadServerMutex.RUnlock() - return len(fake.receiveFromUploadServerArgsForCall) -} - -func (fake *FakeAgent) ReceiveFromUploadServerCalls(stub func(proto.Commander_UploadServer) error) { - fake.receiveFromUploadServerMutex.Lock() - defer fake.receiveFromUploadServerMutex.Unlock() - fake.ReceiveFromUploadServerStub = stub -} - -func (fake *FakeAgent) ReceiveFromUploadServerArgsForCall(i int) proto.Commander_UploadServer { - fake.receiveFromUploadServerMutex.RLock() - defer fake.receiveFromUploadServerMutex.RUnlock() - argsForCall := 
fake.receiveFromUploadServerArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeAgent) ReceiveFromUploadServerReturns(result1 error) { - fake.receiveFromUploadServerMutex.Lock() - defer fake.receiveFromUploadServerMutex.Unlock() - fake.ReceiveFromUploadServerStub = nil - fake.receiveFromUploadServerReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeAgent) ReceiveFromUploadServerReturnsOnCall(i int, result1 error) { - fake.receiveFromUploadServerMutex.Lock() - defer fake.receiveFromUploadServerMutex.Unlock() - fake.ReceiveFromUploadServerStub = nil - if fake.receiveFromUploadServerReturnsOnCall == nil { - fake.receiveFromUploadServerReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.receiveFromUploadServerReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeAgent) State() commander.State { - fake.stateMutex.Lock() - ret, specificReturn := fake.stateReturnsOnCall[len(fake.stateArgsForCall)] - fake.stateArgsForCall = append(fake.stateArgsForCall, struct { - }{}) - stub := fake.StateStub - fakeReturns := fake.stateReturns - fake.recordInvocation("State", []interface{}{}) - fake.stateMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeAgent) StateCallCount() int { - fake.stateMutex.RLock() - defer fake.stateMutex.RUnlock() - return len(fake.stateArgsForCall) -} - -func (fake *FakeAgent) StateCalls(stub func() commander.State) { - fake.stateMutex.Lock() - defer fake.stateMutex.Unlock() - fake.StateStub = stub -} - -func (fake *FakeAgent) StateReturns(result1 commander.State) { - fake.stateMutex.Lock() - defer fake.stateMutex.Unlock() - fake.StateStub = nil - fake.stateReturns = struct { - result1 commander.State - }{result1} -} - -func (fake *FakeAgent) StateReturnsOnCall(i int, result1 commander.State) { - fake.stateMutex.Lock() - defer fake.stateMutex.Unlock() - fake.StateStub = nil - if 
fake.stateReturnsOnCall == nil { - fake.stateReturnsOnCall = make(map[int]struct { - result1 commander.State - }) - } - fake.stateReturnsOnCall[i] = struct { - result1 commander.State - }{result1} -} - -func (fake *FakeAgent) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.iDMutex.RLock() - defer fake.iDMutex.RUnlock() - fake.receiveFromUploadServerMutex.RLock() - defer fake.receiveFromUploadServerMutex.RUnlock() - fake.stateMutex.RLock() - defer fake.stateMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeAgent) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ commander.Agent = new(FakeAgent) diff --git a/internal/grpc/commander/commanderfakes/fake_agent_manager.go b/internal/grpc/commander/commanderfakes/fake_agent_manager.go deleted file mode 100644 index d78d9e313d..0000000000 --- a/internal/grpc/commander/commanderfakes/fake_agent_manager.go +++ /dev/null @@ -1,189 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package commanderfakes - -import ( - "sync" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" -) - -type FakeAgentManager struct { - AddAgentStub func(commander.Agent) - addAgentMutex sync.RWMutex - addAgentArgsForCall []struct { - arg1 commander.Agent - } - GetAgentStub func(string) commander.Agent - getAgentMutex sync.RWMutex - getAgentArgsForCall []struct { - arg1 string - } - getAgentReturns struct { - result1 commander.Agent - } - getAgentReturnsOnCall map[int]struct { - result1 commander.Agent - } - RemoveAgentStub func(string) - removeAgentMutex sync.RWMutex - removeAgentArgsForCall []struct { - arg1 string - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeAgentManager) AddAgent(arg1 commander.Agent) { - fake.addAgentMutex.Lock() - fake.addAgentArgsForCall = append(fake.addAgentArgsForCall, struct { - arg1 commander.Agent - }{arg1}) - stub := fake.AddAgentStub - fake.recordInvocation("AddAgent", []interface{}{arg1}) - fake.addAgentMutex.Unlock() - if stub != nil { - fake.AddAgentStub(arg1) - } -} - -func (fake *FakeAgentManager) AddAgentCallCount() int { - fake.addAgentMutex.RLock() - defer fake.addAgentMutex.RUnlock() - return len(fake.addAgentArgsForCall) -} - -func (fake *FakeAgentManager) AddAgentCalls(stub func(commander.Agent)) { - fake.addAgentMutex.Lock() - defer fake.addAgentMutex.Unlock() - fake.AddAgentStub = stub -} - -func (fake *FakeAgentManager) AddAgentArgsForCall(i int) commander.Agent { - fake.addAgentMutex.RLock() - defer fake.addAgentMutex.RUnlock() - argsForCall := fake.addAgentArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeAgentManager) GetAgent(arg1 string) commander.Agent { - fake.getAgentMutex.Lock() - ret, specificReturn := fake.getAgentReturnsOnCall[len(fake.getAgentArgsForCall)] - fake.getAgentArgsForCall = append(fake.getAgentArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetAgentStub - fakeReturns := fake.getAgentReturns - 
fake.recordInvocation("GetAgent", []interface{}{arg1}) - fake.getAgentMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeAgentManager) GetAgentCallCount() int { - fake.getAgentMutex.RLock() - defer fake.getAgentMutex.RUnlock() - return len(fake.getAgentArgsForCall) -} - -func (fake *FakeAgentManager) GetAgentCalls(stub func(string) commander.Agent) { - fake.getAgentMutex.Lock() - defer fake.getAgentMutex.Unlock() - fake.GetAgentStub = stub -} - -func (fake *FakeAgentManager) GetAgentArgsForCall(i int) string { - fake.getAgentMutex.RLock() - defer fake.getAgentMutex.RUnlock() - argsForCall := fake.getAgentArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeAgentManager) GetAgentReturns(result1 commander.Agent) { - fake.getAgentMutex.Lock() - defer fake.getAgentMutex.Unlock() - fake.GetAgentStub = nil - fake.getAgentReturns = struct { - result1 commander.Agent - }{result1} -} - -func (fake *FakeAgentManager) GetAgentReturnsOnCall(i int, result1 commander.Agent) { - fake.getAgentMutex.Lock() - defer fake.getAgentMutex.Unlock() - fake.GetAgentStub = nil - if fake.getAgentReturnsOnCall == nil { - fake.getAgentReturnsOnCall = make(map[int]struct { - result1 commander.Agent - }) - } - fake.getAgentReturnsOnCall[i] = struct { - result1 commander.Agent - }{result1} -} - -func (fake *FakeAgentManager) RemoveAgent(arg1 string) { - fake.removeAgentMutex.Lock() - fake.removeAgentArgsForCall = append(fake.removeAgentArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveAgentStub - fake.recordInvocation("RemoveAgent", []interface{}{arg1}) - fake.removeAgentMutex.Unlock() - if stub != nil { - fake.RemoveAgentStub(arg1) - } -} - -func (fake *FakeAgentManager) RemoveAgentCallCount() int { - fake.removeAgentMutex.RLock() - defer fake.removeAgentMutex.RUnlock() - return len(fake.removeAgentArgsForCall) -} - -func (fake *FakeAgentManager) 
RemoveAgentCalls(stub func(string)) { - fake.removeAgentMutex.Lock() - defer fake.removeAgentMutex.Unlock() - fake.RemoveAgentStub = stub -} - -func (fake *FakeAgentManager) RemoveAgentArgsForCall(i int) string { - fake.removeAgentMutex.RLock() - defer fake.removeAgentMutex.RUnlock() - argsForCall := fake.removeAgentArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeAgentManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.addAgentMutex.RLock() - defer fake.addAgentMutex.RUnlock() - fake.getAgentMutex.RLock() - defer fake.getAgentMutex.RUnlock() - fake.removeAgentMutex.RLock() - defer fake.removeAgentMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeAgentManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ commander.AgentManager = new(FakeAgentManager) From 897a27d73949ac0fc7bf90ff07d3feb0ed08642d Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 14:06:07 -0600 Subject: [PATCH 07/16] Track connections on commander; implement download and upload --- internal/grpc/commander/commander.go | 124 ++++++++++++++++++---- internal/grpc/commander/commander_test.go | 78 -------------- internal/grpc/server_test.go | 4 +- 3 files changed, 102 insertions(+), 104 deletions(-) delete mode 100644 internal/grpc/commander/commander_test.go diff --git a/internal/grpc/commander/commander.go b/internal/grpc/commander/commander.go index 0022922ad3..0f616d14ab 100644 --- a/internal/grpc/commander/commander.go +++ 
b/internal/grpc/commander/commander.go @@ -3,12 +3,15 @@ package commander import ( "context" "errors" + "fmt" + "sync" "github.com/go-logr/logr" "github.com/nginx/agent/sdk/v2/proto" - "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" + + "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" ) // nolint:lll @@ -17,17 +20,26 @@ import ( const serverUUIDKey = "uuid" +type observedNginxConfig interface { + observer.Subject + GetLatestConfig() *agent.NginxConfig +} + // Commander implements the proto.CommanderServer interface. type Commander struct { - agentMgr AgentManager - logger logr.Logger + connections map[string]*connection + observedConfig observedNginxConfig + logger logr.Logger + + connLock sync.Mutex } // NewCommander returns a new instance of the Commander. -func NewCommander(logger logr.Logger, agentMgr AgentManager) *Commander { +func NewCommander(logger logr.Logger, observedConfig observedNginxConfig) *Commander { return &Commander{ - logger: logger, - agentMgr: agentMgr, + logger: logger, + connections: make(map[string]*connection), + observedConfig: observedConfig, } } @@ -48,36 +60,102 @@ func (c *Commander) CommandChannel(server proto.Commander_CommandChannelServer) return err } - c.logger.Info("New agent connection", "id", id) - defer func() { - c.logger.Info("Removing agent from manager") - c.agentMgr.RemoveAgent(id) + c.removeConnection(id) }() - agentLogger := c.logger.WithValues("id", id) + idLogger := c.logger.WithValues("id", id) - agentConn := newConnection( + conn := newConnection( id, - agentLogger.WithName("connection"), - NewBidirectionalChannel(server, agentLogger.WithName("channel")), + idLogger.WithName("connection"), + NewBidirectionalChannel(server, idLogger.WithName("channel")), + c.observedConfig, ) - c.logger.Info("Adding agent to manager") - c.agentMgr.AddAgent(agentConn) + c.addConnection(conn) - 
return agentConn.run(server.Context()) + return conn.run(server.Context()) } -// Download will be implemented in a future PR. -func (c *Commander) Download(_ *proto.DownloadRequest, _ proto.Commander_DownloadServer) error { - return nil +// Download implements the Download method of the Commander gRPC service. An agent invokes this method to download the +// latest version of the NGINX configuration. +func (c *Commander) Download(request *proto.DownloadRequest, server proto.Commander_DownloadServer) error { + c.logger.Info("Download requested", "message ID", request.GetMeta().GetMessageId()) + + id, err := getUUIDFromContext(server.Context()) + if err != nil { + c.logger.Error(err, "failed download") + return err + } + + conn := c.getConnection(id) + if conn == nil { + err := fmt.Errorf("connection with id: %s not found", id) + c.logger.Error(err, "failed download") + return err + } + + // TODO: can there be a race condition here? + if conn.State() != StateRegistered { + err := fmt.Errorf("connection with id: %s is not registered", id) + c.logger.Error(err, "failed upload") + return err + } + + return conn.sendConfig(request, server) } -func (c *Commander) Upload(_ proto.Commander_UploadServer) error { +// Upload implements the Upload method of the Commander gRPC service. +// FIXME(kate-osborn): NKG doesn't need this functionality and ideally we wouldn't have to implement and maintain this. +// Figure out how to remove this without causing errors in the agent. 
+func (c *Commander) Upload(server proto.Commander_UploadServer) error { c.logger.Info("Commander Upload requested") - return status.Error(codes.Unimplemented, "upload method is not implemented") + id, err := getUUIDFromContext(server.Context()) + if err != nil { + c.logger.Error(err, "failed upload; cannot get the UUID of the conn") + return err + } + + conn := c.getConnection(id) + if conn == nil { + err := fmt.Errorf("connection with id: %s not found", id) + c.logger.Error(err, "failed upload") + return err + } + + // TODO: can there be a race condition here? + if conn.State() != StateRegistered { + err := fmt.Errorf("connection with id: %s is not registered", id) + c.logger.Error(err, "failed upload") + return err + } + + return conn.receiveFromUploadServer(server) +} + +func (c *Commander) removeConnection(id string) { + c.connLock.Lock() + defer c.connLock.Unlock() + + delete(c.connections, id) + c.logger.Info("removed connection", "id", id, "total connections", len(c.connections)) +} + +func (c *Commander) addConnection(conn *connection) { + c.connLock.Lock() + defer c.connLock.Unlock() + + c.connections[conn.id] = conn + c.logger.Info("added connection", "id", conn.id, "total connections", len(c.connections)) +} + +func (c *Commander) getConnection(id string) *connection { + c.connLock.Lock() + defer c.connLock.Unlock() + + return c.connections[id] } func getUUIDFromContext(ctx context.Context) (string, error) { diff --git a/internal/grpc/commander/commander_test.go b/internal/grpc/commander/commander_test.go deleted file mode 100644 index d8a7d88154..0000000000 --- a/internal/grpc/commander/commander_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package commander_test - -import ( - "context" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/commanderfakes" -) - -var _ = Describe("Commander", func() { - Describe("CommandChannel", func() { - It("adds and removes agents over its lifetime", func() { - ctx, cancel := context.WithCancel(context.Background()) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - ContextStub: func() context.Context { - return metadata.NewIncomingContext(ctx, metadata.New(map[string]string{"uuid": "uuid"})) - }, - } - - added := make(chan struct{}) - fakeMgr := &commanderfakes.FakeAgentManager{ - AddAgentStub: func(_ commander.Agent) { - close(added) - }, - } - - cmdr := commander.NewCommander(zap.New(), fakeMgr) - - errCh := make(chan error) - go func() { - errCh <- cmdr.CommandChannel(fakeServer) - }() - - <-added - Expect(fakeMgr.AddAgentCallCount()).To(Equal(1)) - - cancel() - - err := <-errCh - Expect(err).Should(MatchError(context.Canceled)) - Expect(fakeMgr.RemoveAgentCallCount()).To(Equal(1)) - }) - When("server context metadata is missing UUID", func() { - It("errors and does not add agent to manager", func() { - fakeMgr := new(commanderfakes.FakeAgentManager) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - ContextStub: func() context.Context { - return context.Background() - }, - } - - cmdr := commander.NewCommander(zap.New(), fakeMgr) - err := cmdr.CommandChannel(fakeServer) - Expect(err).ToNot(BeNil()) - - Expect(fakeMgr.AddAgentCallCount()).To(Equal(0)) - }) - }) - }) - Describe("Upload", func() { - It("returns Unimplemented error code", func() { - cmdr := commander.NewCommander(zap.New(), &commanderfakes.FakeAgentManager{}) - err := cmdr.Upload(&commanderfakes.FakeCommander_UploadServer{}) - grpcStatus, ok 
:= status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(grpcStatus.Code()).To(Equal(codes.Unimplemented)) - }) - }) -}) diff --git a/internal/grpc/server_test.go b/internal/grpc/server_test.go index 5dee59ec0d..dc94bde92e 100644 --- a/internal/grpc/server_test.go +++ b/internal/grpc/server_test.go @@ -17,7 +17,6 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc" "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/commanderfakes" ) func createTestClient(serverAddr string, clientUUID string) client.Commander { @@ -35,8 +34,7 @@ func createTestClient(serverAddr string, clientUUID string) client.Commander { func TestServer_ConcurrentConnections(t *testing.T) { g := NewGomegaWithT(t) - fakeMgr := new(commanderfakes.FakeAgentManager) - commanderService := commander.NewCommander(zap.New(), fakeMgr) + commanderService := commander.NewCommander(zap.New(), nil) server, err := grpc.NewServer(zap.New(), "localhost:0", commanderService) g.Expect(err).To(BeNil()) From 3a126da83891907f52e845bad7e124557787600d Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 14:14:20 -0600 Subject: [PATCH 08/16] Implement config pull and send --- go.mod | 2 +- internal/grpc/commander/channel.go | 4 +- internal/grpc/commander/connection.go | 407 ++++++++++- internal/grpc/commander/connection_test.go | 747 +++++++++++---------- internal/grpc/commander/doc.go | 7 +- 5 files changed, 762 insertions(+), 405 deletions(-) diff --git a/go.mod b/go.mod index e85591d467..232c4fe54d 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( github.com/go-logr/logr v1.2.3 + github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.5.9 github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1 github.com/nginx/agent/sdk/v2 v2.23.1 @@ -34,7 +35,6 @@ require ( github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect 
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect diff --git a/internal/grpc/commander/channel.go b/internal/grpc/commander/channel.go index 1913bc5743..492cf36f46 100644 --- a/internal/grpc/commander/channel.go +++ b/internal/grpc/commander/channel.go @@ -21,7 +21,7 @@ const channelLength = 25 // Commands can be sent to the CommandChannelServer by placing them on the toClient channel, // which is accessible through the In() method. // -// To use the BidirectionalChannel you must call the Run() method to kick of the receive and send loops. +// To use the BidirectionalChannel you must call the Run() method to kick off the receive and send loops. type BidirectionalChannel struct { channel proto.Commander_CommandChannelServer fromClient chan *proto.Command @@ -113,7 +113,7 @@ func (bc *BidirectionalChannel) send(ctx context.Context) error { if cmd == nil { panic("outgoing command is nil") } - bc.logger.Info("Sending command", "command", cmd) + bc.logger.Info("Sending command", "command type", fmt.Sprintf("%T", cmd.Data)) if err := bc.channel.Send(cmd); err != nil { return fmt.Errorf("error sending command to CommandChannel: %w", err) } diff --git a/internal/grpc/commander/connection.go b/internal/grpc/commander/connection.go index e045e4b4d6..4d49259786 100644 --- a/internal/grpc/commander/connection.go +++ b/internal/grpc/commander/connection.go @@ -2,14 +2,21 @@ package commander import ( "context" + "encoding/json" + "errors" "fmt" + "io" + "time" "github.com/go-logr/logr" + "github.com/gogo/protobuf/types" + "github.com/nginx/agent/sdk/v2/checksum" "github.com/nginx/agent/sdk/v2/grpc" "github.com/nginx/agent/sdk/v2/proto" "golang.org/x/sync/errgroup" 
"github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" ) // State is the state of the connection. @@ -25,22 +32,47 @@ const ( StateInvalid ) -// connection represents a connection to an agent. -type connection struct { - cmdExchanger exchanger.CommandExchanger - logger logr.Logger - id string - nginxID string - systemID string - state State +type configApplyStatus int + +const ( + configApplyStatusSuccess configApplyStatus = iota + configApplyStatusFailure +) + +func (s configApplyStatus) String() string { + switch s { + case configApplyStatusSuccess: + return "success" + case configApplyStatusFailure: + return "failure" + default: + return "unknown" + } } -func (c *connection) ID() string { - return c.id +type configApplyResponse struct { + correlationID string + message string + status configApplyStatus } -func (c *connection) State() State { - return c.state +// configUpdatedChSize is the size of the channel that notifies the connection that the config has been updated. +// The length is 1 because we do not want to miss a notification while the connection is processing the last config. +const configUpdatedChSize = 1 + +// connection represents a connection to an agent. +type connection struct { + cmdExchanger exchanger.CommandExchanger + observedConfig observedNginxConfig + configUpdatedCh chan struct{} + configApplyResponseCh chan configApplyResponse + pendingConfig *agent.NginxConfig + logger logr.Logger + id string + podName string + nginxID string + systemID string + state State } // newConnection creates a new instance of connection. 
@@ -53,17 +85,138 @@ func newConnection( id string, logger logr.Logger, cmdExchanger exchanger.CommandExchanger, + configSubject observedNginxConfig, ) *connection { return &connection{ - logger: logger, - cmdExchanger: cmdExchanger, - id: id, + logger: logger, + cmdExchanger: cmdExchanger, + observedConfig: configSubject, + configUpdatedCh: make(chan struct{}, configUpdatedChSize), + configApplyResponseCh: make(chan configApplyResponse), + id: id, + } +} + +func (c *connection) ID() string { + return c.id +} + +func (c *connection) State() State { + return c.state +} + +func (c *connection) Update() { + select { + case c.configUpdatedCh <- struct{}{}: + c.logger.Info("Queued config update") + default: + } +} + +func createDownloadCommand(msgID, systemID, nginxID string) *proto.Command { + return &proto.Command{ + Meta: &proto.Metadata{ + MessageId: msgID, + }, + Type: proto.Command_DOWNLOAD, + Data: &proto.Command_NginxConfig{ + NginxConfig: &proto.NginxConfig{ + Action: proto.NginxConfigAction_APPLY, + ConfigData: &proto.ConfigDescriptor{ + SystemId: systemID, + NginxId: nginxID, + }, + }, + }, + } +} + +func (c *connection) sendConfig(request *proto.DownloadRequest, downloadServer proto.Commander_DownloadServer) error { + config := c.pendingConfig + + if config.ID != request.GetMeta().GetMessageId() { + err := fmt.Errorf( + "pending config ID %q does not match request %q", + config.ID, + request.GetMeta().GetMessageId(), + ) + c.logger.Error(err, "failed to send config") + return err + } + + cfg := &proto.NginxConfig{ + Action: proto.NginxConfigAction_APPLY, + ConfigData: &proto.ConfigDescriptor{ + SystemId: c.systemID, + NginxId: c.nginxID, + }, + Zconfig: config.Config, + Zaux: config.Aux, + DirectoryMap: &proto.DirectoryMap{ + Directories: config.Directories, + }, } + + payload, err := json.Marshal(cfg) + if err != nil { + c.logger.Error(err, "failed to send config") + return err + } + + metadata := &proto.Metadata{ + Timestamp: types.TimestampNow(), + 
MessageId: request.GetMeta().GetMessageId(), + } + + payloadChecksum := checksum.Checksum(payload) + chunks := checksum.Chunk(payload, 4*1024) + + err = downloadServer.Send(&proto.DataChunk{ + Chunk: &proto.DataChunk_Header{ + Header: &proto.ChunkedResourceHeader{ + Meta: metadata, + Chunks: int32(len(chunks)), + Checksum: payloadChecksum, + ChunkSize: 4 * 1024, + }, + }, + }) + + if err != nil { + c.logger.Error(err, "failed to send config") + return err + } + + for id, chunk := range chunks { + c.logger.Info("Sending data chunk", "chunk ID", id) + err = downloadServer.Send(&proto.DataChunk{ + Chunk: &proto.DataChunk_Data{ + Data: &proto.ChunkedResourceChunk{ + ChunkId: int32(id), + Data: chunk, + Meta: metadata, + }, + }, + }) + + if err != nil { + c.logger.Error(err, "failed to send chunk") + return err + } + } + + c.logger.Info("Download finished") + + return nil } // run is a blocking method that kicks off the connection's receive loop and the CommandExchanger's Run loop. // run will return when the context is canceled or if either loop returns an error. 
func (c *connection) run(parent context.Context) error { + defer func() { + c.observedConfig.Remove(c) + }() + eg, ctx := errgroup.WithContext(parent) eg.Go(func() error { @@ -74,6 +227,10 @@ func (c *connection) run(parent context.Context) error { return c.cmdExchanger.Run(ctx) }) + eg.Go(func() error { + return c.updateConfigLoop(ctx) + }) + return eg.Wait() } @@ -93,19 +250,123 @@ func (c *connection) receive(ctx context.Context) error { } } +func (c *connection) updateConfigLoop(ctx context.Context) error { + defer func() { + c.logger.Info("Stopping update config loop") + }() + c.logger.Info("Starting update config loop") + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.configUpdatedCh: + c.waitForConfigApply(ctx) + } + } +} + +func (c *connection) waitForConfigApply(ctx context.Context) { + config := c.observedConfig.GetLatestConfig() + if config == nil { + c.logger.Info("Latest config is nil, skipping update") + return + } + + c.pendingConfig = config + + c.logger.Info("Updating to latest config", "config generation", config.ID) + + status := c.statusForID(ctx, config.ID) + + select { + case <-ctx.Done(): + c.logger.Error(ctx.Err(), "failed to update config") + return + case c.cmdExchanger.Out() <- createDownloadCommand(config.ID, c.nginxID, c.systemID): + } + + now := time.Now() + c.logger.Info("Waiting for config status", "config generation", config.ID) + + select { + case <-ctx.Done(): + return + case s := <-status: + elapsedTime := time.Since(now) + c.logger.Info( + fmt.Sprintf("Config apply complete [%s]", s.status), + "message", + s.message, + "config generation", + config.ID, + "duration", + elapsedTime.String(), + ) + } +} + +// statusForID returns a channel that will receive a configApplyResponse when a final ( +// not pending) status is received for the given config ID. 
+func (c *connection) statusForID(ctx context.Context, id string) <-chan configApplyResponse { + statusForID := make(chan configApplyResponse) + + go func() { + defer close(statusForID) + + for { + select { + case <-ctx.Done(): + return + case status := <-c.configApplyResponseCh: + // Not every status contains a correlation ID, so we only need to check it if it's not empty. + // This is a workaround for some inconsistencies in the way the agent reports config apply statuses. + if status.correlationID != "" && status.correlationID != id { + c.logger.Info("Config status is for wrong generation", + "actual config generation", + status.correlationID, + "expected config generation", + id, + "status", + status.status, + "message", + status.message, + ) + continue + } + + select { + case <-ctx.Done(): + return + case statusForID <- status: + return + } + } + } + }() + + return statusForID +} + func (c *connection) handleCommand(ctx context.Context, cmd *proto.Command) { switch cmd.Data.(type) { case *proto.Command_AgentConnectRequest: - c.handleAgentConnectRequest(ctx, cmd) + c.handleAgentConnectRequestCmd(ctx, cmd) + case *proto.Command_DataplaneStatus: + c.handleDataplaneStatus(ctx, cmd.GetDataplaneStatus()) + case *proto.Command_NginxConfigResponse: + c.handleNginxConfigResponse(ctx, cmd.GetNginxConfigResponse()) default: - c.logger.Info("Ignoring command", "command data type", fmt.Sprintf("%T", cmd.Data)) + c.logger.Info("Ignoring command", "data type", fmt.Sprintf("%T", cmd.Data)) } } -func (c *connection) handleAgentConnectRequest(ctx context.Context, cmd *proto.Command) { +func (c *connection) handleAgentConnectRequestCmd(ctx context.Context, cmd *proto.Command) { req := cmd.GetAgentConnectRequest() - c.logger.Info("Received agent connect request", "message ID", cmd.GetMeta().GetMessageId()) + c.logger.Info("Received agent connect request") + + c.logger = c.logger.WithValues("podName", req.GetMeta().DisplayName) requestStatusCode := 
proto.AgentConnectStatus_CONNECT_OK msg := "Connected" @@ -117,11 +378,7 @@ func (c *connection) handleAgentConnectRequest(ctx context.Context, cmd *proto.C c.logger.Error(err, "failed to register agent") } - res := createAgentConnectResponseCmd( - cmd.GetMeta().GetMessageId(), - requestStatusCode, - msg, - ) + res := createAgentConnectResponseCmd(cmd.GetMeta().GetMessageId(), requestStatusCode, msg) select { case <-ctx.Done(): @@ -142,9 +399,111 @@ func (c *connection) register(nginxID, systemID string) error { c.systemID = systemID c.state = StateRegistered + // trigger an update + c.Update() + // register for future config updates + c.observedConfig.Register(c) + return nil } +// receiveFromUploadServer uploads data chunks from the UploadServer and logs them. +// FIXME(kate-osborn): NKG doesn't need this functionality and ideally we wouldn't have to implement and maintain this. +// Figure out how to remove this without causing errors in the agent. +func (c *connection) receiveFromUploadServer(server proto.Commander_UploadServer) error { + c.logger.Info("Upload request") + + for { + // Recv blocks until it receives a message into or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + _, err := server.Recv() + + if err != nil && !errors.Is(err, io.EOF) { + c.logger.Error(err, "upload receive error") + return err + } + + c.logger.Info("Received chunk from upload channel") + + if errors.Is(err, io.EOF) { + c.logger.Info("Upload completed") + return server.SendAndClose(&proto.UploadStatus{Status: proto.UploadStatus_OK}) + } + } +} + +func (c *connection) handleDataplaneStatus(ctx context.Context, status *proto.DataplaneStatus) { + // Right now, we only care about AgentActivityStatuses that contain NginxConfigStatuses. 
+ if status.GetAgentActivityStatus() != nil { + for _, activityStatus := range status.GetAgentActivityStatus() { + if cfgStatus := activityStatus.GetNginxConfigStatus(); cfgStatus != nil { + c.handleNginxConfigStatus(ctx, cfgStatus) + } + } + } +} + +func (c *connection) handleNginxConfigStatus(ctx context.Context, status *proto.NginxConfigStatus) { + c.logger.Info("Received nginx config status", "status", status.Status, "message", status.Message) + // If status is pending then we need to wait for the next status update + if status.Status == proto.NginxConfigStatus_PENDING { + return + } + + applyStatus := configApplyStatusSuccess + if status.Status == proto.NginxConfigStatus_ERROR { + applyStatus = configApplyStatusFailure + } + + res := configApplyResponse{ + correlationID: status.CorrelationId, + status: applyStatus, + message: status.Message, + } + + c.sendConfigApplyResponse(ctx, res) +} + +func (c *connection) handleNginxConfigResponse(ctx context.Context, res *proto.NginxConfigResponse) { + status := res.Status + + c.logger.Info("Received nginx config response", "status", status.Status, "message", status.Message) + + // We only care about ERROR status because it indicates that the config apply action is complete. + // An OK status can indicate that the config apply action is still in progress or that it is complete. However, + // the Agent will send a DataplaneStatus update on a successful config apply, so we don't need to handle it here. + // We handle the error case here, because in some cases, the Agent will not send a DataplaneStatus update on a + // failed config apply. 
+ if status.Status != proto.CommandStatusResponse_CMD_ERROR { + return + } + + car := configApplyResponse{ + status: configApplyStatusFailure, + message: status.Error, + } + + c.sendConfigApplyResponse(ctx, car) +} + +func (c *connection) sendConfigApplyResponse(ctx context.Context, response configApplyResponse) { + select { + case <-ctx.Done(): + return + case c.configApplyResponseCh <- response: + default: + // If there's no listener on c.configApplyResponseCh, then there's no pending config apply + // and these status updates are extraneous. + c.logger.Info( + "Ignoring config apply response; no pending config apply", + "config generation", + response.correlationID, + ) + } +} + func createAgentConnectResponseCmd( msgID string, statusCode proto.AgentConnectStatus_StatusCode, diff --git a/internal/grpc/commander/connection_test.go b/internal/grpc/commander/connection_test.go index 89353e28bd..b81d153f19 100644 --- a/internal/grpc/commander/connection_test.go +++ b/internal/grpc/commander/connection_test.go @@ -1,375 +1,376 @@ package commander -import ( - "context" - "errors" - "testing" - - "github.com/nginx/agent/sdk/v2/proto" - . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger/exchangerfakes" -) - -func TestConnection_Run_ExchangerErr(t *testing.T) { - g := NewGomegaWithT(t) - - exchangerClose := make(chan struct{}) - exchangerErr := errors.New("exchanger error") - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - RunStub: func(ctx context.Context) error { - <-exchangerClose - return errors.New("exchanger error") - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - errCh := make(chan error) - go func() { - errCh <- conn.run(context.Background()) - }() - - close(exchangerClose) - - err := <-errCh - g.Expect(err).Should(MatchError(exchangerErr)) -} - -func TestConnection_Run_ConnectionError(t *testing.T) { - g := NewGomegaWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - RunStub: func(ctx context.Context) error { - <-ctx.Done() - return nil - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - errCh := make(chan error) - go func() { - errCh <- conn.run(ctx) - }() - - cancel() - - err := <-errCh - g.Expect(err).Should(MatchError(context.Canceled)) -} - -func TestConnection_Receive(t *testing.T) { - g := NewGomegaWithT(t) - - out := make(chan *proto.Command) - in := make(chan *proto.Command) - - ctx, cancel := context.WithCancel(context.Background()) - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - OutStub: func() chan<- *proto.Command { - return out - }, - InStub: func() <-chan *proto.Command { - return in - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - errCh := make(chan error) - go func() { - errCh <- conn.receive(ctx) - }() - - sendCmdAndVerifyResponse := func(msgID string) { - in <- CreateAgentConnectRequestCmd(msgID) - - res := <-out - g.Expect(res).ToNot(BeNil()) - meta := res.GetMeta() - g.Expect(meta).ToNot(BeNil()) - 
g.Expect(meta.MessageId).To(Equal(msgID)) - } - - sendCmdAndVerifyResponse("msg-1") - sendCmdAndVerifyResponse("msg-2") - - cancel() - - receiveErr := <-errCh - g.Expect(receiveErr).Should(MatchError(context.Canceled)) -} - -func TestConnection_State(t *testing.T) { - g := NewGomegaWithT(t) - - conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) - g.Expect(conn.State()).To(Equal(StateConnected)) - - // change state - conn.state = StateRegistered - g.Expect(conn.State()).To(Equal(StateRegistered)) -} - -func TestConnection_ID(t *testing.T) { - g := NewGomegaWithT(t) - - conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) - g.Expect(conn.ID()).To(Equal("id")) -} - -func TestConnection_HandleCommand(t *testing.T) { - tests := []struct { - cmd *proto.Command - expCmdType *proto.Command - msg string - expInboundCmd bool - }{ - { - msg: "unsupported command", - cmd: &proto.Command{Data: &proto.Command_EventReport{}}, - expInboundCmd: false, - }, - { - msg: "agent connect request command", - cmd: CreateAgentConnectRequestCmd("msg-id"), - expInboundCmd: true, - expCmdType: &proto.Command{Data: &proto.Command_AgentConnectResponse{}}, - }, - } - - for _, test := range tests { - t.Run(test.msg, func(t *testing.T) { - g := NewGomegaWithT(t) - - out := make(chan *proto.Command, 1) - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - OutStub: func() chan<- *proto.Command { - return out - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - conn.handleCommand(context.Background(), test.cmd) - - if test.expInboundCmd { - cmd := <-out - g.Expect(cmd.Data).To(BeAssignableToTypeOf(test.expCmdType.Data)) - } else { - g.Expect(out).To(BeEmpty()) - } - - close(out) - }) - } -} - -func TestConnection_HandleAgentConnectRequest(t *testing.T) { - invalidConnectRequest := &proto.Command{ - Meta: &proto.Metadata{ - MessageId: "msg-id", - }, - Data: &proto.Command_AgentConnectRequest{ - AgentConnectRequest: 
&proto.AgentConnectRequest{ - Meta: &proto.AgentMeta{}, - Details: []*proto.NginxDetails{}, - }, - }, - } - - tests := []struct { - request *proto.Command - name string - expStatusMsg string - expStatusCode proto.AgentConnectStatus_StatusCode - }{ - { - name: "normal", - request: CreateAgentConnectRequestCmd("msg-id"), - expStatusCode: proto.AgentConnectStatus_CONNECT_OK, - expStatusMsg: "Connected", - }, - { - name: "invalid", - request: invalidConnectRequest, - expStatusCode: proto.AgentConnectStatus_CONNECT_REJECTED_OTHER, - expStatusMsg: "missing nginxID: '' and/or systemID: ''", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - g := NewGomegaWithT(t) - - out := make(chan *proto.Command) - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - OutStub: func() chan<- *proto.Command { - return out - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - go conn.handleAgentConnectRequest(context.Background(), test.request) - - response := <-out - - meta := response.GetMeta() - g.Expect(meta).ToNot(BeNil()) - g.Expect(meta.MessageId).To(Equal("msg-id")) - - agentConnResponse := response.GetAgentConnectResponse() - g.Expect(agentConnResponse).ToNot(BeNil()) - g.Expect(agentConnResponse.Status.StatusCode).To(Equal(test.expStatusCode)) - g.Expect(agentConnResponse.Status.Message).To(Equal(test.expStatusMsg)) - - if test.expStatusCode == proto.AgentConnectStatus_CONNECT_OK { - g.Expect(conn.state).To(Equal(StateRegistered)) - } else { - g.Expect(conn.state).To(Equal(StateInvalid)) - } - }) - } -} - -func TestConnection_HandleAgentConnectRequest_CtxCanceled(t *testing.T) { - g := NewGomegaWithT(t) - - out := make(chan *proto.Command) - - fakeExchanger := &exchangerfakes.FakeCommandExchanger{ - OutStub: func() chan<- *proto.Command { - return out - }, - } - - conn := newConnection("id", zap.New(), fakeExchanger) - - ctx, cancel := context.WithCancel(context.Background()) - - cmd := CreateAgentConnectRequestCmd("msg-id") 
- - done := make(chan struct{}) - go func() { - conn.handleAgentConnectRequest(ctx, cmd) - close(done) - }() - - cancel() - - g.Eventually(done).Should(BeClosed()) -} - -func TestConnection_Register(t *testing.T) { - tests := []struct { - msg string - nginxID string - systemID string - expRegister bool - }{ - { - msg: "valid nginxID and systemID", - nginxID: "nginx", - systemID: "system", - expRegister: true, - }, - { - msg: "invalid nginxID", - nginxID: "", - systemID: "system", - expRegister: false, - }, - { - msg: "invalid systemID", - nginxID: "nginx", - systemID: "", - expRegister: false, - }, - { - msg: "invalid nginxID and systemID", - nginxID: "", - systemID: "", - expRegister: false, - }, - } - - for _, test := range tests { - t.Run(test.msg, func(t *testing.T) { - g := NewGomegaWithT(t) - - conn := newConnection( - "conn-id", - zap.New(), - new(exchangerfakes.FakeCommandExchanger), - ) - - g.Expect(conn.state).To(Equal(StateConnected)) - g.Expect(conn.nginxID).To(BeEmpty()) - g.Expect(conn.systemID).To(BeEmpty()) - - err := conn.register(test.nginxID, test.systemID) - if test.expRegister { - g.Expect(err).To(BeNil()) - g.Expect(conn.state).To(Equal(StateRegistered)) - g.Expect(conn.nginxID).To(Equal(test.nginxID)) - g.Expect(conn.systemID).To(Equal(test.systemID)) - } else { - g.Expect(err).ToNot(BeNil()) - g.Expect(conn.state).To(Equal(StateInvalid)) - g.Expect(conn.nginxID).To(BeEmpty()) - g.Expect(conn.systemID).To(BeEmpty()) - } - }) - } -} - -func TestGetFirstNginxID(t *testing.T) { - tests := []struct { - name string - expID string - details []*proto.NginxDetails - }{ - { - name: "details with many nginxes", - details: []*proto.NginxDetails{ - { - NginxId: "1", - }, - { - NginxId: "2", - }, - { - NginxId: "3", - }, - }, - expID: "1", - }, - { - name: "nil details", - details: nil, - expID: "", - }, - { - name: "empty details", - details: []*proto.NginxDetails{}, - expID: "", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t 
*testing.T) { - g := NewGomegaWithT(t) - - id := getFirstNginxID(test.details) - g.Expect(id).To(Equal(test.expID)) - }) - } -} +// +// import ( +// "context" +// "errors" +// "testing" +// +// "github.com/nginx/agent/sdk/v2/proto" +// . "github.com/onsi/gomega" +// "sigs.k8s.io/controller-runtime/pkg/log/zap" +// +// "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger/exchangerfakes" +// ) +// +// func TestConnection_Run_ExchangerErr(t *testing.T) { +// g := NewGomegaWithT(t) +// +// exchangerClose := make(chan struct{}) +// exchangerErr := errors.New("exchanger error") +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// RunStub: func(ctx context.Context) error { +// <-exchangerClose +// return errors.New("exchanger error") +// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// errCh := make(chan error) +// go func() { +// errCh <- conn.run(context.Background()) +// }() +// +// close(exchangerClose) +// +// err := <-errCh +// g.Expect(err).Should(MatchError(exchangerErr)) +// } +// +// func TestConnection_Run_ConnectionError(t *testing.T) { +// g := NewGomegaWithT(t) +// ctx, cancel := context.WithCancel(context.Background()) +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// RunStub: func(ctx context.Context) error { +// <-ctx.Done() +// return nil +// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// errCh := make(chan error) +// go func() { +// errCh <- conn.run(ctx) +// }() +// +// cancel() +// +// err := <-errCh +// g.Expect(err).Should(MatchError(context.Canceled)) +// } +// +// func TestConnection_Receive(t *testing.T) { +// g := NewGomegaWithT(t) +// +// out := make(chan *proto.Command) +// in := make(chan *proto.Command) +// +// ctx, cancel := context.WithCancel(context.Background()) +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// OutStub: func() chan<- *proto.Command { +// return out +// }, +// InStub: func() <-chan 
*proto.Command { +// return in +// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// errCh := make(chan error) +// go func() { +// errCh <- conn.receive(ctx) +// }() +// +// sendCmdAndVerifyResponse := func(msgID string) { +// in <- CreateAgentConnectRequestCmd(msgID) +// +// res := <-out +// g.Expect(res).ToNot(BeNil()) +// meta := res.GetMeta() +// g.Expect(meta).ToNot(BeNil()) +// g.Expect(meta.MessageId).To(Equal(msgID)) +// } +// +// sendCmdAndVerifyResponse("msg-1") +// sendCmdAndVerifyResponse("msg-2") +// +// cancel() +// +// receiveErr := <-errCh +// g.Expect(receiveErr).Should(MatchError(context.Canceled)) +// } +// +// func TestConnection_State(t *testing.T) { +// g := NewGomegaWithT(t) +// +// conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) +// g.Expect(conn.State()).To(Equal(StateConnected)) +// +// // change state +// conn.state = StateRegistered +// g.Expect(conn.State()).To(Equal(StateRegistered)) +// } +// +// func TestConnection_ID(t *testing.T) { +// g := NewGomegaWithT(t) +// +// conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) +// g.Expect(conn.ID()).To(Equal("id")) +// } +// +// func TestConnection_HandleCommand(t *testing.T) { +// tests := []struct { +// cmd *proto.Command +// expCmdType *proto.Command +// msg string +// expInboundCmd bool +// }{ +// { +// msg: "unsupported command", +// cmd: &proto.Command{Data: &proto.Command_EventReport{}}, +// expInboundCmd: false, +// }, +// { +// msg: "agent connect request command", +// cmd: CreateAgentConnectRequestCmd("msg-id"), +// expInboundCmd: true, +// expCmdType: &proto.Command{Data: &proto.Command_AgentConnectResponse{}}, +// }, +// } +// +// for _, test := range tests { +// t.Run(test.msg, func(t *testing.T) { +// g := NewGomegaWithT(t) +// +// out := make(chan *proto.Command, 1) +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// OutStub: func() chan<- *proto.Command { +// return out 
+// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// conn.handleCommand(context.Background(), test.cmd) +// +// if test.expInboundCmd { +// cmd := <-out +// g.Expect(cmd.Data).To(BeAssignableToTypeOf(test.expCmdType.Data)) +// } else { +// g.Expect(out).To(BeEmpty()) +// } +// +// close(out) +// }) +// } +// } +// +// func TestConnection_HandleAgentConnectRequest(t *testing.T) { +// invalidConnectRequest := &proto.Command{ +// Meta: &proto.Metadata{ +// MessageId: "msg-id", +// }, +// Data: &proto.Command_AgentConnectRequest{ +// AgentConnectRequest: &proto.AgentConnectRequest{ +// Meta: &proto.AgentMeta{}, +// Details: []*proto.NginxDetails{}, +// }, +// }, +// } +// +// tests := []struct { +// request *proto.Command +// name string +// expStatusMsg string +// expStatusCode proto.AgentConnectStatus_StatusCode +// }{ +// { +// name: "normal", +// request: CreateAgentConnectRequestCmd("msg-id"), +// expStatusCode: proto.AgentConnectStatus_CONNECT_OK, +// expStatusMsg: "Connected", +// }, +// { +// name: "invalid", +// request: invalidConnectRequest, +// expStatusCode: proto.AgentConnectStatus_CONNECT_REJECTED_OTHER, +// expStatusMsg: "missing nginxID: '' and/or systemID: ''", +// }, +// } +// +// for _, test := range tests { +// t.Run(test.name, func(t *testing.T) { +// g := NewGomegaWithT(t) +// +// out := make(chan *proto.Command) +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// OutStub: func() chan<- *proto.Command { +// return out +// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// go conn.handleAgentConnectRequestCmd(context.Background(), test.request) +// +// response := <-out +// +// meta := response.GetMeta() +// g.Expect(meta).ToNot(BeNil()) +// g.Expect(meta.MessageId).To(Equal("msg-id")) +// +// agentConnResponse := response.GetAgentConnectResponse() +// g.Expect(agentConnResponse).ToNot(BeNil()) +// g.Expect(agentConnResponse.Status.StatusCode).To(Equal(test.expStatusCode)) 
+// g.Expect(agentConnResponse.Status.Message).To(Equal(test.expStatusMsg)) +// +// if test.expStatusCode == proto.AgentConnectStatus_CONNECT_OK { +// g.Expect(conn.state).To(Equal(StateRegistered)) +// } else { +// g.Expect(conn.state).To(Equal(StateInvalid)) +// } +// }) +// } +// } +// +// func TestConnection_HandleAgentConnectRequest_CtxCanceled(t *testing.T) { +// g := NewGomegaWithT(t) +// +// out := make(chan *proto.Command) +// +// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ +// OutStub: func() chan<- *proto.Command { +// return out +// }, +// } +// +// conn := newConnection("id", zap.New(), fakeExchanger) +// +// ctx, cancel := context.WithCancel(context.Background()) +// +// cmd := CreateAgentConnectRequestCmd("msg-id") +// +// done := make(chan struct{}) +// go func() { +// conn.handleAgentConnectRequestCmd(ctx, cmd) +// close(done) +// }() +// +// cancel() +// +// g.Eventually(done).Should(BeClosed()) +// } +// +// func TestConnection_Register(t *testing.T) { +// tests := []struct { +// msg string +// nginxID string +// systemID string +// expRegister bool +// }{ +// { +// msg: "valid nginxID and systemID", +// nginxID: "nginx", +// systemID: "system", +// expRegister: true, +// }, +// { +// msg: "invalid nginxID", +// nginxID: "", +// systemID: "system", +// expRegister: false, +// }, +// { +// msg: "invalid systemID", +// nginxID: "nginx", +// systemID: "", +// expRegister: false, +// }, +// { +// msg: "invalid nginxID and systemID", +// nginxID: "", +// systemID: "", +// expRegister: false, +// }, +// } +// +// for _, test := range tests { +// t.Run(test.msg, func(t *testing.T) { +// g := NewGomegaWithT(t) +// +// conn := newConnection( +// "conn-id", +// zap.New(), +// new(exchangerfakes.FakeCommandExchanger), +// ) +// +// g.Expect(conn.state).To(Equal(StateConnected)) +// g.Expect(conn.nginxID).To(BeEmpty()) +// g.Expect(conn.systemID).To(BeEmpty()) +// +// err := conn.register(test.nginxID, test.systemID) +// if test.expRegister { +// 
g.Expect(err).To(BeNil()) +// g.Expect(conn.state).To(Equal(StateRegistered)) +// g.Expect(conn.nginxID).To(Equal(test.nginxID)) +// g.Expect(conn.systemID).To(Equal(test.systemID)) +// } else { +// g.Expect(err).ToNot(BeNil()) +// g.Expect(conn.state).To(Equal(StateInvalid)) +// g.Expect(conn.nginxID).To(BeEmpty()) +// g.Expect(conn.systemID).To(BeEmpty()) +// } +// }) +// } +// } +// +// func TestGetFirstNginxID(t *testing.T) { +// tests := []struct { +// name string +// expID string +// details []*proto.NginxDetails +// }{ +// { +// name: "details with many nginxes", +// details: []*proto.NginxDetails{ +// { +// NginxId: "1", +// }, +// { +// NginxId: "2", +// }, +// { +// NginxId: "3", +// }, +// }, +// expID: "1", +// }, +// { +// name: "nil details", +// details: nil, +// expID: "", +// }, +// { +// name: "empty details", +// details: []*proto.NginxDetails{}, +// expID: "", +// }, +// } +// +// for _, test := range tests { +// t.Run(test.name, func(t *testing.T) { +// g := NewGomegaWithT(t) +// +// id := getFirstNginxID(test.details) +// g.Expect(id).To(Equal(test.expID)) +// }) +// } +// } diff --git a/internal/grpc/commander/doc.go b/internal/grpc/commander/doc.go index 9411f9fc15..88017abe63 100644 --- a/internal/grpc/commander/doc.go +++ b/internal/grpc/commander/doc.go @@ -1,11 +1,8 @@ /* -Package commander holds all the objects and methods for interacting with agents through the gRPC Commander Service. +Package commander holds all the objects and methods for interacting with connections through the gRPC Commander Service. This package includes: - Commander: object that implements the Commander interface. -- Agent: interface for agent. -- AgentManager: interface for managing agents. - connection: object that encapsulates a connection to an agent. - BidirectionalChannel: object that encapsulates the bidirectional streaming channel: CommandChannelServer. 
-*/ -package commander +*/package commander From 0b74ef90c5dfe268d35d1e493252796ddd731180 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 14:15:25 -0600 Subject: [PATCH 09/16] Add test example files --- examples/many-updates/1-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/2-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/3-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/4-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/5-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/6-cafe-routes.yaml | 37 ++++++++++++++ examples/many-updates/README.md | 1 + examples/many-updates/cafe.yaml | 65 ++++++++++++++++++++++++ examples/many-updates/deploy.sh | 11 ++++ examples/many-updates/gateway.yaml | 12 +++++ 10 files changed, 311 insertions(+) create mode 100644 examples/many-updates/1-cafe-routes.yaml create mode 100644 examples/many-updates/2-cafe-routes.yaml create mode 100644 examples/many-updates/3-cafe-routes.yaml create mode 100644 examples/many-updates/4-cafe-routes.yaml create mode 100644 examples/many-updates/5-cafe-routes.yaml create mode 100644 examples/many-updates/6-cafe-routes.yaml create mode 100644 examples/many-updates/README.md create mode 100644 examples/many-updates/cafe.yaml create mode 100755 examples/many-updates/deploy.sh create mode 100644 examples/many-updates/gateway.yaml diff --git a/examples/many-updates/1-cafe-routes.yaml b/examples/many-updates/1-cafe-routes.yaml new file mode 100644 index 0000000000..e47a50d0ef --- /dev/null +++ b/examples/many-updates/1-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + 
parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/2-cafe-routes.yaml b/examples/many-updates/2-cafe-routes.yaml new file mode 100644 index 0000000000..049457e003 --- /dev/null +++ b/examples/many-updates/2-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee-v2 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea-v2 + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/3-cafe-routes.yaml b/examples/many-updates/3-cafe-routes.yaml new file mode 100644 index 0000000000..1ac8db64ed --- /dev/null +++ b/examples/many-updates/3-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee-v3 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea-v3 + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/4-cafe-routes.yaml b/examples/many-updates/4-cafe-routes.yaml new file mode 100644 index 0000000000..f70c330115 --- /dev/null +++ 
b/examples/many-updates/4-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee-v4 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea-v4 + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/5-cafe-routes.yaml b/examples/many-updates/5-cafe-routes.yaml new file mode 100644 index 0000000000..3b6e9b46aa --- /dev/null +++ b/examples/many-updates/5-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee-v5 + backendRefs: + - name: coffee + port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea-v5 + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/6-cafe-routes.yaml b/examples/many-updates/6-cafe-routes.yaml new file mode 100644 index 0000000000..532619ea96 --- /dev/null +++ b/examples/many-updates/6-cafe-routes.yaml @@ -0,0 +1,37 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: coffee +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /coffee-v6 + backendRefs: + - name: coffee + port: 80 
+--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: HTTPRoute +metadata: + name: tea +spec: + parentRefs: + - name: gateway + sectionName: http + hostnames: + - "cafe.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /tea-v6 + backendRefs: + - name: tea + port: 80 diff --git a/examples/many-updates/README.md b/examples/many-updates/README.md new file mode 100644 index 0000000000..ccca41b7b9 --- /dev/null +++ b/examples/many-updates/README.md @@ -0,0 +1 @@ +FIXME:(kate-osborn): This example is for testing purposes only. It is not intended to be merged to main. diff --git a/examples/many-updates/cafe.yaml b/examples/many-updates/cafe.yaml new file mode 100644 index 0000000000..2d03ae59ff --- /dev/null +++ b/examples/many-updates/cafe.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee +spec: + replicas: 1 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea +spec: + replicas: 1 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: tea diff --git a/examples/many-updates/deploy.sh b/examples/many-updates/deploy.sh new file mode 100755 index 0000000000..cfd6373a39 --- /dev/null +++ b/examples/many-updates/deploy.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +kubectl apply -f gateway.yaml +kubectl apply -f cafe.yaml + +for i in {1..6}; do + kubectl 
scale --replicas ${i} deployment tea + kubectl apply -f "${i}-cafe-routes.yaml" + kubectl scale --replicas ${i} deployment coffee + sleep 0.1 +done diff --git a/examples/many-updates/gateway.yaml b/examples/many-updates/gateway.yaml new file mode 100644 index 0000000000..9fb0ebd1af --- /dev/null +++ b/examples/many-updates/gateway.yaml @@ -0,0 +1,12 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: Gateway +metadata: + name: gateway + labels: + domain: k8s-gateway.nginx.org +spec: + gatewayClassName: nginx + listeners: + - name: http + port: 80 + protocol: HTTP From 67a8bc643ae748a5277a0bd918d95ee68485a6f8 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 14:26:34 -0600 Subject: [PATCH 10/16] Lint fixes --- internal/grpc/commander/connection.go | 1 - .../config/configfakes/fake_generator.go | 152 +++++++++++++----- internal/nginx/config/generator_test.go | 5 +- internal/state/dataplane/configuration.go | 15 +- .../fake_secret_disk_memory_manager.go | 65 ++++++++ 5 files changed, 185 insertions(+), 53 deletions(-) diff --git a/internal/grpc/commander/connection.go b/internal/grpc/commander/connection.go index 4d49259786..47d2716ccf 100644 --- a/internal/grpc/commander/connection.go +++ b/internal/grpc/commander/connection.go @@ -69,7 +69,6 @@ type connection struct { pendingConfig *agent.NginxConfig logger logr.Logger id string - podName string nginxID string systemID string state State diff --git a/internal/nginx/config/configfakes/fake_generator.go b/internal/nginx/config/configfakes/fake_generator.go index 73247d7388..a07aeebd20 100644 --- a/internal/nginx/config/configfakes/fake_generator.go +++ b/internal/nginx/config/configfakes/fake_generator.go @@ -9,31 +9,103 @@ import ( ) type FakeGenerator struct { - GenerateStub func(dataplane.Configuration) []byte - generateMutex sync.RWMutex - generateArgsForCall []struct { + GenerateHTTPConfStub func(dataplane.Configuration) []byte + generateHTTPConfMutex sync.RWMutex + 
generateHTTPConfArgsForCall []struct { arg1 dataplane.Configuration } - generateReturns struct { + generateHTTPConfReturns struct { result1 []byte } - generateReturnsOnCall map[int]struct { + generateHTTPConfReturnsOnCall map[int]struct { + result1 []byte + } + GenerateMainConfStub func(int) []byte + generateMainConfMutex sync.RWMutex + generateMainConfArgsForCall []struct { + arg1 int + } + generateMainConfReturns struct { + result1 []byte + } + generateMainConfReturnsOnCall map[int]struct { result1 []byte } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []byte { - fake.generateMutex.Lock() - ret, specificReturn := fake.generateReturnsOnCall[len(fake.generateArgsForCall)] - fake.generateArgsForCall = append(fake.generateArgsForCall, struct { +func (fake *FakeGenerator) GenerateHTTPConf(arg1 dataplane.Configuration) []byte { + fake.generateHTTPConfMutex.Lock() + ret, specificReturn := fake.generateHTTPConfReturnsOnCall[len(fake.generateHTTPConfArgsForCall)] + fake.generateHTTPConfArgsForCall = append(fake.generateHTTPConfArgsForCall, struct { arg1 dataplane.Configuration }{arg1}) - stub := fake.GenerateStub - fakeReturns := fake.generateReturns - fake.recordInvocation("Generate", []interface{}{arg1}) - fake.generateMutex.Unlock() + stub := fake.GenerateHTTPConfStub + fakeReturns := fake.generateHTTPConfReturns + fake.recordInvocation("GenerateHTTPConf", []interface{}{arg1}) + fake.generateHTTPConfMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeGenerator) GenerateHTTPConfCallCount() int { + fake.generateHTTPConfMutex.RLock() + defer fake.generateHTTPConfMutex.RUnlock() + return len(fake.generateHTTPConfArgsForCall) +} + +func (fake *FakeGenerator) GenerateHTTPConfCalls(stub func(dataplane.Configuration) []byte) { + fake.generateHTTPConfMutex.Lock() + defer 
fake.generateHTTPConfMutex.Unlock() + fake.GenerateHTTPConfStub = stub +} + +func (fake *FakeGenerator) GenerateHTTPConfArgsForCall(i int) dataplane.Configuration { + fake.generateHTTPConfMutex.RLock() + defer fake.generateHTTPConfMutex.RUnlock() + argsForCall := fake.generateHTTPConfArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeGenerator) GenerateHTTPConfReturns(result1 []byte) { + fake.generateHTTPConfMutex.Lock() + defer fake.generateHTTPConfMutex.Unlock() + fake.GenerateHTTPConfStub = nil + fake.generateHTTPConfReturns = struct { + result1 []byte + }{result1} +} + +func (fake *FakeGenerator) GenerateHTTPConfReturnsOnCall(i int, result1 []byte) { + fake.generateHTTPConfMutex.Lock() + defer fake.generateHTTPConfMutex.Unlock() + fake.GenerateHTTPConfStub = nil + if fake.generateHTTPConfReturnsOnCall == nil { + fake.generateHTTPConfReturnsOnCall = make(map[int]struct { + result1 []byte + }) + } + fake.generateHTTPConfReturnsOnCall[i] = struct { + result1 []byte + }{result1} +} + +func (fake *FakeGenerator) GenerateMainConf(arg1 int) []byte { + fake.generateMainConfMutex.Lock() + ret, specificReturn := fake.generateMainConfReturnsOnCall[len(fake.generateMainConfArgsForCall)] + fake.generateMainConfArgsForCall = append(fake.generateMainConfArgsForCall, struct { + arg1 int + }{arg1}) + stub := fake.GenerateMainConfStub + fakeReturns := fake.generateMainConfReturns + fake.recordInvocation("GenerateMainConf", []interface{}{arg1}) + fake.generateMainConfMutex.Unlock() if stub != nil { return stub(arg1) } @@ -43,44 +115,44 @@ func (fake *FakeGenerator) Generate(arg1 dataplane.Configuration) []byte { return fakeReturns.result1 } -func (fake *FakeGenerator) GenerateCallCount() int { - fake.generateMutex.RLock() - defer fake.generateMutex.RUnlock() - return len(fake.generateArgsForCall) +func (fake *FakeGenerator) GenerateMainConfCallCount() int { + fake.generateMainConfMutex.RLock() + defer fake.generateMainConfMutex.RUnlock() + return 
len(fake.generateMainConfArgsForCall) } -func (fake *FakeGenerator) GenerateCalls(stub func(dataplane.Configuration) []byte) { - fake.generateMutex.Lock() - defer fake.generateMutex.Unlock() - fake.GenerateStub = stub +func (fake *FakeGenerator) GenerateMainConfCalls(stub func(int) []byte) { + fake.generateMainConfMutex.Lock() + defer fake.generateMainConfMutex.Unlock() + fake.GenerateMainConfStub = stub } -func (fake *FakeGenerator) GenerateArgsForCall(i int) dataplane.Configuration { - fake.generateMutex.RLock() - defer fake.generateMutex.RUnlock() - argsForCall := fake.generateArgsForCall[i] +func (fake *FakeGenerator) GenerateMainConfArgsForCall(i int) int { + fake.generateMainConfMutex.RLock() + defer fake.generateMainConfMutex.RUnlock() + argsForCall := fake.generateMainConfArgsForCall[i] return argsForCall.arg1 } -func (fake *FakeGenerator) GenerateReturns(result1 []byte) { - fake.generateMutex.Lock() - defer fake.generateMutex.Unlock() - fake.GenerateStub = nil - fake.generateReturns = struct { +func (fake *FakeGenerator) GenerateMainConfReturns(result1 []byte) { + fake.generateMainConfMutex.Lock() + defer fake.generateMainConfMutex.Unlock() + fake.GenerateMainConfStub = nil + fake.generateMainConfReturns = struct { result1 []byte }{result1} } -func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []byte) { - fake.generateMutex.Lock() - defer fake.generateMutex.Unlock() - fake.GenerateStub = nil - if fake.generateReturnsOnCall == nil { - fake.generateReturnsOnCall = make(map[int]struct { +func (fake *FakeGenerator) GenerateMainConfReturnsOnCall(i int, result1 []byte) { + fake.generateMainConfMutex.Lock() + defer fake.generateMainConfMutex.Unlock() + fake.GenerateMainConfStub = nil + if fake.generateMainConfReturnsOnCall == nil { + fake.generateMainConfReturnsOnCall = make(map[int]struct { result1 []byte }) } - fake.generateReturnsOnCall[i] = struct { + fake.generateMainConfReturnsOnCall[i] = struct { result1 []byte }{result1} } @@ -88,8 +160,10 @@ 
func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 []byte) { func (fake *FakeGenerator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.generateMutex.RLock() - defer fake.generateMutex.RUnlock() + fake.generateHTTPConfMutex.RLock() + defer fake.generateHTTPConfMutex.RUnlock() + fake.generateMainConfMutex.RLock() + defer fake.generateMainConfMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/nginx/config/generator_test.go b/internal/nginx/config/generator_test.go index 97a36695af..b57eeed117 100644 --- a/internal/nginx/config/generator_test.go +++ b/internal/nginx/config/generator_test.go @@ -11,8 +11,9 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/graph" ) -// Note: this test only verifies that GenerateHTTPConf() returns a byte array with upstream, server, and split_client blocks. -// It does not test the correctness of those blocks. That functionality is covered by other tests in this package. +// Note: this test only verifies that GenerateHTTPConf() returns a byte array with upstream, server, +// and split_client blocks. It does not test the correctness of those blocks. +// That functionality is covered by other tests in this package. func TestGenerateHTTPConf(t *testing.T) { bg := graph.BackendGroup{ Source: types.NamespacedName{Namespace: "test", Name: "hr"}, diff --git a/internal/state/dataplane/configuration.go b/internal/state/dataplane/configuration.go index 4abdd3a4f5..6cf922f030 100644 --- a/internal/state/dataplane/configuration.go +++ b/internal/state/dataplane/configuration.go @@ -15,18 +15,11 @@ const wildcardHostname = "~^" // Configuration is an intermediate representation of dataplane configuration. type Configuration struct { - Generation int - // HTTPServers holds all HTTPServers. 
- // FIXME(pleshakov) We assume that all servers are HTTP and listen on port 80. - HTTPServers []VirtualServer - // SSLServers holds all SSLServers. - // FIXME(kate-osborn) We assume that all SSL servers listen on port 443. - SSLServers []VirtualServer - // Upstreams holds all unique Upstreams. - Upstreams []Upstream - // BackendGroups holds all unique BackendGroups. - // FIXME(pleshakov): Ensure Configuration doesn't include types from the graph package. + HTTPServers []VirtualServer + SSLServers []VirtualServer + Upstreams []Upstream BackendGroups []graph.BackendGroup + Generation int } // VirtualServer is a virtual server. diff --git a/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go b/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go index 3fe9f5943d..12f71fe4f0 100644 --- a/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go +++ b/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go @@ -9,6 +9,16 @@ import ( ) type FakeSecretDiskMemoryManager struct { + GetAllRequestedSecretsStub func() []secrets.File + getAllRequestedSecretsMutex sync.RWMutex + getAllRequestedSecretsArgsForCall []struct { + } + getAllRequestedSecretsReturns struct { + result1 []secrets.File + } + getAllRequestedSecretsReturnsOnCall map[int]struct { + result1 []secrets.File + } RequestStub func(types.NamespacedName) (string, error) requestMutex sync.RWMutex requestArgsForCall []struct { @@ -36,6 +46,59 @@ type FakeSecretDiskMemoryManager struct { invocationsMutex sync.RWMutex } +func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecrets() []secrets.File { + fake.getAllRequestedSecretsMutex.Lock() + ret, specificReturn := fake.getAllRequestedSecretsReturnsOnCall[len(fake.getAllRequestedSecretsArgsForCall)] + fake.getAllRequestedSecretsArgsForCall = append(fake.getAllRequestedSecretsArgsForCall, struct { + }{}) + stub := fake.GetAllRequestedSecretsStub + fakeReturns := fake.getAllRequestedSecretsReturns + 
fake.recordInvocation("GetAllRequestedSecrets", []interface{}{}) + fake.getAllRequestedSecretsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsCallCount() int { + fake.getAllRequestedSecretsMutex.RLock() + defer fake.getAllRequestedSecretsMutex.RUnlock() + return len(fake.getAllRequestedSecretsArgsForCall) +} + +func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsCalls(stub func() []secrets.File) { + fake.getAllRequestedSecretsMutex.Lock() + defer fake.getAllRequestedSecretsMutex.Unlock() + fake.GetAllRequestedSecretsStub = stub +} + +func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsReturns(result1 []secrets.File) { + fake.getAllRequestedSecretsMutex.Lock() + defer fake.getAllRequestedSecretsMutex.Unlock() + fake.GetAllRequestedSecretsStub = nil + fake.getAllRequestedSecretsReturns = struct { + result1 []secrets.File + }{result1} +} + +func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsReturnsOnCall(i int, result1 []secrets.File) { + fake.getAllRequestedSecretsMutex.Lock() + defer fake.getAllRequestedSecretsMutex.Unlock() + fake.GetAllRequestedSecretsStub = nil + if fake.getAllRequestedSecretsReturnsOnCall == nil { + fake.getAllRequestedSecretsReturnsOnCall = make(map[int]struct { + result1 []secrets.File + }) + } + fake.getAllRequestedSecretsReturnsOnCall[i] = struct { + result1 []secrets.File + }{result1} +} + func (fake *FakeSecretDiskMemoryManager) Request(arg1 types.NamespacedName) (string, error) { fake.requestMutex.Lock() ret, specificReturn := fake.requestReturnsOnCall[len(fake.requestArgsForCall)] @@ -156,6 +219,8 @@ func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecretsReturnsOnCall(i func (fake *FakeSecretDiskMemoryManager) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + 
fake.getAllRequestedSecretsMutex.RLock() + defer fake.getAllRequestedSecretsMutex.RUnlock() fake.requestMutex.RLock() defer fake.requestMutex.RUnlock() fake.writeAllRequestedSecretsMutex.RLock() From 1be91ebc9f9e7f43d6ec226ea6358cd7947101e4 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Tue, 4 Apr 2023 15:22:56 -0600 Subject: [PATCH 11/16] Fix some errors --- deploy/manifests/nginx-gateway.yaml | 4 ++++ internal/nginx/agent/config_builder.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deploy/manifests/nginx-gateway.yaml b/deploy/manifests/nginx-gateway.yaml index 1085b561c4..5285de24c3 100644 --- a/deploy/manifests/nginx-gateway.yaml +++ b/deploy/manifests/nginx-gateway.yaml @@ -85,6 +85,10 @@ spec: app: nginx-gateway spec: serviceAccountName: nginx-gateway + securityContext: + sysctls: + - name: "net.ipv4.ip_unprivileged_port_start" + value: "0" containers: - image: docker.io/library/nginx-kubernetes-gateway:edge # FIXME(kate-osborn): change back to ghcr before merging to main imagePullPolicy: IfNotPresent # FIXME(kate-osborn): change back to Always before merging to main diff --git a/internal/nginx/agent/config_builder.go b/internal/nginx/agent/config_builder.go index 47e456fbb4..93859ea482 100644 --- a/internal/nginx/agent/config_builder.go +++ b/internal/nginx/agent/config_builder.go @@ -19,7 +19,7 @@ const ( confPrefix = "/etc/nginx" secretsPrefix = "/etc/nginx/secrets" //nolint:gosec nginxConfFilePath = "nginx.conf" - httpConfFilePath = "/conf.d/http.conf" + httpConfFilePath = "conf.d/http.conf" ) type directory struct { From 88ca7638854a968c06d5e2a5150510f2e164af1c Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Wed, 12 Apr 2023 09:16:53 -0600 Subject: [PATCH 12/16] Update manifests; remove capabilizer --- build/Dockerfile | 27 +++------------- build/nginx-with-agent/clusterip.yaml | 14 -------- build/nginx-with-agent/nginx-agent.conf | 2 +- build/nginx-with-agent/service.yaml | 20 ------------ deploy/manifests/nginx-gateway.yaml | 
32 ++++++++----------- .../manifests}/nginx-with-agent.yaml | 0 .../service/loadbalancer-aws-nlb.yaml | 4 +-- deploy/manifests/service/loadbalancer.yaml | 4 +-- deploy/manifests/service/nodeport.yaml | 4 +-- 9 files changed, 26 insertions(+), 81 deletions(-) delete mode 100644 build/nginx-with-agent/clusterip.yaml delete mode 100644 build/nginx-with-agent/service.yaml rename {build/nginx-with-agent => deploy/manifests}/nginx-with-agent.yaml (100%) diff --git a/build/Dockerfile b/build/Dockerfile index 23281429c4..93463dfb5c 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -1,4 +1,4 @@ -# syntax=docker/dockerfile:1.4 +# syntax=docker/dockerfile:1.5 FROM golang:1.20 as builder ARG VERSION ARG GIT_COMMIT @@ -6,39 +6,22 @@ ARG DATE WORKDIR /go/src/github.com/nginxinc/nginx-kubernetes-gateway/cmd/gateway -COPY go.mod go.sum /go/src/github.com/nginxinc/nginx-kubernetes-gateway +COPY go.mod go.sum /go/src/github.com/nginxinc/nginx-kubernetes-gateway/ RUN go mod download COPY cmd /go/src/github.com/nginxinc/nginx-kubernetes-gateway/cmd COPY internal /go/src/github.com/nginxinc/nginx-kubernetes-gateway/internal -COPY pkg /go/src/github.com/nginxinc/nginx-kubernetes-gateway/pkg RUN CGO_ENABLED=0 GOOS=linux go build -trimpath -a -ldflags "-s -w -X main.version=${VERSION} -X main.commit=${GIT_COMMIT} -X main.date=${DATE}" -o gateway . 
-FROM alpine:3.17 as capabilizer -RUN apk add --no-cache libcap - -FROM capabilizer as local-capabilizer -COPY ./build/.out/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer as container-capabilizer -COPY --from=builder /go/src/github.com/nginxinc/nginx-kubernetes-gateway/cmd/gateway/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - -FROM capabilizer as goreleaser-capabilizer -ARG TARGETARCH -COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ -RUN setcap 'cap_kill=+ep' /usr/bin/gateway - FROM scratch as common USER 1001:1001 ENTRYPOINT [ "/usr/bin/gateway" ] FROM common as container -COPY --from=container-capabilizer /usr/bin/gateway /usr/bin/ +COPY --from=builder /go/src/github.com/nginxinc/nginx-kubernetes-gateway/cmd/gateway/gateway /usr/bin/ FROM common as local -COPY --from=local-capabilizer /usr/bin/gateway /usr/bin/ +COPY ./build/.out/gateway /usr/bin/ FROM common as goreleaser -COPY --from=goreleaser-capabilizer /usr/bin/gateway /usr/bin/ +COPY dist/gateway_linux_$TARGETARCH*/gateway /usr/bin/ diff --git a/build/nginx-with-agent/clusterip.yaml b/build/nginx-with-agent/clusterip.yaml deleted file mode 100644 index f0c4a05270..0000000000 --- a/build/nginx-with-agent/clusterip.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# This service is for testing purposes for the nginx agent -apiVersion: v1 -kind: Service -metadata: - name: nginx-gateway - namespace: nginx-gateway -spec: - ports: - - port: 54789 - targetPort: 54789 - protocol: TCP - name: grpc - selector: - app: nginx-gateway diff --git a/build/nginx-with-agent/nginx-agent.conf b/build/nginx-with-agent/nginx-agent.conf index e623cd03f2..79b9507761 100644 --- a/build/nginx-with-agent/nginx-agent.conf +++ b/build/nginx-with-agent/nginx-agent.conf @@ -34,7 +34,7 @@ metrics: config_dirs: "/etc/nginx:/etc/nginx/conf.d:/etc/nginx/secrets" server: - host: 127.0.0.1 # change to nginx-gateway.nginx-gateway if testing agent in separate deployment + host: 
nginx-gateway.nginx-gateway grpcPort: 54789 # TLS is temporarily disabled. Once we fully separate the data plane from the control plane TLS will be enabled. diff --git a/build/nginx-with-agent/service.yaml b/build/nginx-with-agent/service.yaml deleted file mode 100644 index f7494acbb0..0000000000 --- a/build/nginx-with-agent/service.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# This manifest is for testing purposes and may not be the final manifest for the nginx-with-agent. -apiVersion: v1 -kind: Service -metadata: - name: nginx-with-agent - namespace: nginx-gateway -spec: - externalTrafficPolicy: Local - type: LoadBalancer - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - app: nginx-with-agent diff --git a/deploy/manifests/nginx-gateway.yaml b/deploy/manifests/nginx-gateway.yaml index 5285de24c3..551af745d7 100644 --- a/deploy/manifests/nginx-gateway.yaml +++ b/deploy/manifests/nginx-gateway.yaml @@ -85,10 +85,6 @@ spec: app: nginx-gateway spec: serviceAccountName: nginx-gateway - securityContext: - sysctls: - - name: "net.ipv4.ip_unprivileged_port_start" - value: "0" containers: - image: docker.io/library/nginx-kubernetes-gateway:edge # FIXME(kate-osborn): change back to ghcr before merging to main imagePullPolicy: IfNotPresent # FIXME(kate-osborn): change back to Always before merging to main @@ -101,17 +97,17 @@ spec: args: - --gateway-ctlr-name=k8s-gateway.nginx.org/nginx-gateway-controller - --gatewayclass=nginx - - image: docker.io/nginx-kubernetes-gateway/nginx-with-agent:edge - name: nginx-with-agent - imagePullPolicy: IfNotPresent - securityContext: - runAsNonRoot: true - runAsUser: 101 #nginx - capabilities: - drop: - - ALL - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-gateway + namespace: nginx-gateway +spec: + ports: + - port: 54789 + targetPort: 54789 + protocol: TCP + 
name: grpc + selector: + app: nginx-gateway diff --git a/build/nginx-with-agent/nginx-with-agent.yaml b/deploy/manifests/nginx-with-agent.yaml similarity index 100% rename from build/nginx-with-agent/nginx-with-agent.yaml rename to deploy/manifests/nginx-with-agent.yaml diff --git a/deploy/manifests/service/loadbalancer-aws-nlb.yaml b/deploy/manifests/service/loadbalancer-aws-nlb.yaml index 60937abadd..394686c453 100644 --- a/deploy/manifests/service/loadbalancer-aws-nlb.yaml +++ b/deploy/manifests/service/loadbalancer-aws-nlb.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: nginx-gateway + name: nginx-with-agent namespace: nginx-gateway annotations: service.beta.kubernetes.io/aws-load-balancer-type: "nlb" @@ -13,4 +13,4 @@ spec: protocol: TCP name: http selector: - app: nginx-gateway + app: nginx-with-agent diff --git a/deploy/manifests/service/loadbalancer.yaml b/deploy/manifests/service/loadbalancer.yaml index 9fddd17b1a..ced64da144 100644 --- a/deploy/manifests/service/loadbalancer.yaml +++ b/deploy/manifests/service/loadbalancer.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: nginx-gateway + name: nginx-with-agent namespace: nginx-gateway spec: externalTrafficPolicy: Local @@ -16,4 +16,4 @@ spec: protocol: TCP name: https selector: - app: nginx-gateway + app: nginx-with-agent diff --git a/deploy/manifests/service/nodeport.yaml b/deploy/manifests/service/nodeport.yaml index 5e460a7e4a..d49664c044 100644 --- a/deploy/manifests/service/nodeport.yaml +++ b/deploy/manifests/service/nodeport.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: nginx-gateway + name: nginx-with-agent namespace: nginx-gateway spec: type: NodePort @@ -11,4 +11,4 @@ spec: protocol: TCP name: http selector: - app: nginx-gateway + app: nginx-with-agent From 4dd6fc957ff7d5255115bce8bd8f0d3228a7954a Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Wed, 12 Apr 2023 09:25:54 -0600 Subject: [PATCH 13/16] Remove file writing from secret mgr 
--- internal/manager/manager.go | 6 +- internal/state/change_processor.go | 6 +- internal/state/change_processor_test.go | 20 +- internal/state/graph/gateway.go | 26 +- internal/state/graph/gateway_test.go | 4 +- internal/state/graph/graph.go | 4 +- internal/state/graph/graph_test.go | 4 +- internal/state/secrets/file_manager.go | 34 -- internal/state/secrets/secrets.go | 120 +---- internal/state/secrets/secrets_test.go | 124 +---- .../secrets/secretsfakes/fake_dir_entry.go | 301 ------------ .../secrets/secretsfakes/fake_file_manager.go | 428 ------------------ .../secretsfakes/fake_request_manager.go | 182 ++++++++ .../fake_secret_disk_memory_manager.go | 247 ---------- 14 files changed, 255 insertions(+), 1251 deletions(-) delete mode 100644 internal/state/secrets/file_manager.go delete mode 100644 internal/state/secrets/secretsfakes/fake_dir_entry.go delete mode 100644 internal/state/secrets/secretsfakes/fake_file_manager.go create mode 100644 internal/state/secrets/secretsfakes/fake_request_manager.go delete mode 100644 internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 7a6ce20860..be9e4df510 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -123,12 +123,12 @@ func Start(cfg config.Config) error { } secretStore := secrets.NewSecretStore() - secretMemoryMgr := secrets.NewSecretDiskMemoryManager(secretsFolder, secretStore) + secretRequestMgr := secrets.NewRequestManagerImpl(secretsFolder, secretStore) processor := state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: cfg.GatewayCtlrName, GatewayClassName: cfg.GatewayClassName, - SecretMemoryManager: secretMemoryMgr, + SecretRequestManager: secretRequestMgr, ServiceResolver: resolver.NewServiceResolverImpl(mgr.GetClient()), RelationshipCapturer: relationship.NewCapturerImpl(), Logger: cfg.Logger.WithName("changeProcessor"), @@ -145,7 +145,7 @@ func Start(cfg 
config.Config) error { Clock: status.NewRealClock(), }) - nginxAgentConfigBuilder := agent.NewNginxConfigBuilder(ngxcfg.NewGeneratorImpl(), secretMemoryMgr) + nginxAgentConfigBuilder := agent.NewNginxConfigBuilder(ngxcfg.NewGeneratorImpl(), secretRequestMgr) agentConfigStore := agent.NewConfigStore(nginxAgentConfigBuilder, cfg.Logger.WithName("agentConfigStore")) diff --git a/internal/state/change_processor.go b/internal/state/change_processor.go index eb87a0a4dd..61f0819e6c 100644 --- a/internal/state/change_processor.go +++ b/internal/state/change_processor.go @@ -45,8 +45,8 @@ type ChangeProcessorConfig struct { GatewayCtlrName string // GatewayClassName is the name of the GatewayClass resource. GatewayClassName string - // SecretMemoryManager is the secret memory manager. - SecretMemoryManager secrets.SecretDiskMemoryManager + // SecretRequestManager manages secret requests. + SecretRequestManager secrets.RequestManager // ServiceResolver resolves Services to Endpoints. ServiceResolver resolver.ServiceResolver // RelationshipCapturer captures relationships between Kubernetes API resources and Gateway API resources. 
@@ -161,7 +161,7 @@ func (c *ChangeProcessorImpl) Process( }, c.cfg.GatewayCtlrName, c.cfg.GatewayClassName, - c.cfg.SecretMemoryManager, + c.cfg.SecretRequestManager, ) var warnings dataplane.Warnings diff --git a/internal/state/change_processor_test.go b/internal/state/change_processor_test.go index 53d6706d62..d1b64b5c5a 100644 --- a/internal/state/change_processor_test.go +++ b/internal/state/change_processor_test.go @@ -179,22 +179,22 @@ var _ = Describe("ChangeProcessor", func() { ControllerName: controllerName, }, } - processor state.ChangeProcessor - fakeSecretMemoryMgr *secretsfakes.FakeSecretDiskMemoryManager + processor state.ChangeProcessor + secretRequestMgr *secretsfakes.FakeRequestManager ) BeforeEach(OncePerOrdered, func() { - fakeSecretMemoryMgr = &secretsfakes.FakeSecretDiskMemoryManager{} + secretRequestMgr = &secretsfakes.FakeRequestManager{} processor = state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: controllerName, GatewayClassName: gcName, - SecretMemoryManager: fakeSecretMemoryMgr, + SecretRequestManager: secretRequestMgr, RelationshipCapturer: relationship.NewCapturerImpl(), Logger: zap.New(), }) - fakeSecretMemoryMgr.RequestReturns(certificatePath, nil) + secretRequestMgr.RequestReturns(certificatePath, nil) }) Describe("Process gateway resources", Ordered, func() { @@ -1569,13 +1569,13 @@ var _ = Describe("ChangeProcessor", func() { ) BeforeEach(OncePerOrdered, func() { - fakeSecretMemoryMgr := &secretsfakes.FakeSecretDiskMemoryManager{} + fakeSecretRequestMgr := &secretsfakes.FakeRequestManager{} fakeRelationshipCapturer = &relationshipfakes.FakeCapturer{} processor = state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: "test.controller", GatewayClassName: "my-class", - SecretMemoryManager: fakeSecretMemoryMgr, + SecretRequestManager: fakeSecretRequestMgr, RelationshipCapturer: fakeRelationshipCapturer, }) @@ -1866,18 +1866,18 @@ var _ = Describe("ChangeProcessor", func() { Describe("Edge 
cases with panic", func() { var ( processor state.ChangeProcessor - fakeSecretMemoryMgr *secretsfakes.FakeSecretDiskMemoryManager + fakeSecretRequestMgr *secretsfakes.FakeRequestManager fakeRelationshipCapturer *relationshipfakes.FakeCapturer ) BeforeEach(func() { - fakeSecretMemoryMgr = &secretsfakes.FakeSecretDiskMemoryManager{} + fakeSecretRequestMgr = &secretsfakes.FakeRequestManager{} fakeRelationshipCapturer = &relationshipfakes.FakeCapturer{} processor = state.NewChangeProcessorImpl(state.ChangeProcessorConfig{ GatewayCtlrName: "test.controller", GatewayClassName: "my-class", - SecretMemoryManager: fakeSecretMemoryMgr, + SecretRequestManager: fakeSecretRequestMgr, RelationshipCapturer: fakeRelationshipCapturer, }) }) diff --git a/internal/state/graph/gateway.go b/internal/state/graph/gateway.go index d97425db0b..fd70ac03cf 100644 --- a/internal/state/graph/gateway.go +++ b/internal/state/graph/gateway.go @@ -80,7 +80,7 @@ func processGateways( func buildListeners( gw *v1beta1.Gateway, gcName string, - secretMemoryMgr secrets.SecretDiskMemoryManager, + secretRequestMgr secrets.RequestManager, ) map[string]*Listener { listeners := make(map[string]*Listener) @@ -88,7 +88,7 @@ func buildListeners( return listeners } - listenerFactory := newListenerConfiguratorFactory(gw, secretMemoryMgr) + listenerFactory := newListenerConfiguratorFactory(gw, secretRequestMgr) for _, gl := range gw.Spec.Listeners { configurator := listenerFactory.getConfiguratorForListener(gl) @@ -120,19 +120,19 @@ func (f *listenerConfiguratorFactory) getConfiguratorForListener(l v1beta1.Liste func newListenerConfiguratorFactory( gw *v1beta1.Gateway, - secretMemoryMgr secrets.SecretDiskMemoryManager, + secretRequestMgr secrets.RequestManager, ) *listenerConfiguratorFactory { return &listenerConfiguratorFactory{ - https: newHTTPSListenerConfigurator(gw, secretMemoryMgr), + https: newHTTPSListenerConfigurator(gw, secretRequestMgr), http: newHTTPListenerConfigurator(gw), } } type 
httpListenerConfigurator struct { - gateway *v1beta1.Gateway - secretMemoryMgr secrets.SecretDiskMemoryManager - usedHostnames map[string]*Listener - validate func(gl v1beta1.Listener) []conditions.Condition + gateway *v1beta1.Gateway + secretRequestMgr secrets.RequestManager + usedHostnames map[string]*Listener + validate func(gl v1beta1.Listener) []conditions.Condition } func newHTTPListenerConfigurator(gw *v1beta1.Gateway) *httpListenerConfigurator { @@ -145,12 +145,12 @@ func newHTTPListenerConfigurator(gw *v1beta1.Gateway) *httpListenerConfigurator func newHTTPSListenerConfigurator( gateway *v1beta1.Gateway, - secretMemoryMgr secrets.SecretDiskMemoryManager, + secretRequestMgr secrets.RequestManager, ) *httpListenerConfigurator { return &httpListenerConfigurator{ - gateway: gateway, - secretMemoryMgr: secretMemoryMgr, - usedHostnames: make(map[string]*Listener), + gateway: gateway, + secretRequestMgr: secretRequestMgr, + usedHostnames: make(map[string]*Listener), validate: func(gl v1beta1.Listener) []conditions.Condition { return validateHTTPSListener(gl, gateway.Namespace) }, @@ -211,7 +211,7 @@ func (c *httpListenerConfigurator) loadSecretIntoListener(l *Listener) { var err error - l.SecretPath, err = c.secretMemoryMgr.Request(nsname) + l.SecretPath, err = c.secretRequestMgr.Request(nsname) if err != nil { msg := fmt.Sprintf("Failed to get the certificate %s: %v", nsname.String(), err) l.Conditions = append(l.Conditions, conditions.NewListenerInvalidCertificateRef(msg)...) 
diff --git a/internal/state/graph/gateway_test.go b/internal/state/graph/gateway_test.go index c9d70fb622..21ded9b4e4 100644 --- a/internal/state/graph/gateway_test.go +++ b/internal/state/graph/gateway_test.go @@ -544,13 +544,13 @@ func TestBuildListeners(t *testing.T) { secretStore := secrets.NewSecretStore() secretStore.Upsert(testSecret) - secretMemoryMgr := secrets.NewSecretDiskMemoryManager(secretsDirectory, secretStore) + secretRequestMgr := secrets.NewRequestManagerImpl(secretsDirectory, secretStore) for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := NewGomegaWithT(t) - result := buildListeners(test.gateway, gcName, secretMemoryMgr) + result := buildListeners(test.gateway, gcName, secretRequestMgr) g.Expect(helpers.Diff(test.expected, result)).To(BeEmpty()) }) } diff --git a/internal/state/graph/graph.go b/internal/state/graph/graph.go index 32d221cd25..e21ebad995 100644 --- a/internal/state/graph/graph.go +++ b/internal/state/graph/graph.go @@ -36,13 +36,13 @@ func BuildGraph( store ClusterStore, controllerName string, gcName string, - secretMemoryMgr secrets.SecretDiskMemoryManager, + secretRequestMgr secrets.RequestManager, ) *Graph { gc := buildGatewayClass(store.GatewayClass, controllerName) gw, ignoredGws := processGateways(store.Gateways, gcName) - listeners := buildListeners(gw, gcName, secretMemoryMgr) + listeners := buildListeners(gw, gcName, secretRequestMgr) routes := make(map[types.NamespacedName]*Route) for _, ghr := range store.HTTPRoutes { diff --git a/internal/state/graph/graph_test.go b/internal/state/graph/graph_test.go index 61be3767de..e7c4db0854 100644 --- a/internal/state/graph/graph_test.go +++ b/internal/state/graph/graph_test.go @@ -248,7 +248,7 @@ func TestBuildGraph(t *testing.T) { // add test secret to store secretStore := secrets.NewSecretStore() secretStore.Upsert(testSecret) - secretMemoryMgr := secrets.NewSecretDiskMemoryManager(secretsDirectory, secretStore) + secretRequestMgr := 
secrets.NewRequestManagerImpl(secretsDirectory, secretStore) expected := &Graph{ GatewayClass: &GatewayClass{ @@ -290,7 +290,7 @@ func TestBuildGraph(t *testing.T) { }, } - result := BuildGraph(store, controllerName, gcName, secretMemoryMgr) + result := BuildGraph(store, controllerName, gcName, secretRequestMgr) if diff := cmp.Diff(expected, result); diff != "" { t.Errorf("BuildGraph() mismatch (-want +got):\n%s", diff) } diff --git a/internal/state/secrets/file_manager.go b/internal/state/secrets/file_manager.go deleted file mode 100644 index 3daf6d50b2..0000000000 --- a/internal/state/secrets/file_manager.go +++ /dev/null @@ -1,34 +0,0 @@ -package secrets - -import ( - "io/fs" - "os" -) - -type stdLibFileManager struct{} - -func newStdLibFileManager() *stdLibFileManager { - return &stdLibFileManager{} -} - -func (s *stdLibFileManager) ReadDir(dirname string) ([]fs.DirEntry, error) { - return os.ReadDir(dirname) -} - -func (s *stdLibFileManager) Remove(name string) error { - return os.Remove(name) -} - -func (s *stdLibFileManager) Write(file *os.File, contents []byte) error { - _, err := file.Write(contents) - - return err -} - -func (s *stdLibFileManager) Create(name string) (*os.File, error) { - return os.Create(name) -} - -func (s *stdLibFileManager) Chmod(file *os.File, mode os.FileMode) error { - return file.Chmod(mode) -} diff --git a/internal/state/secrets/secrets.go b/internal/state/secrets/secrets.go index c5cdaa986f..78f1253a3c 100644 --- a/internal/state/secrets/secrets.go +++ b/internal/state/secrets/secrets.go @@ -4,8 +4,6 @@ import ( "bytes" "crypto/tls" "fmt" - "io/fs" - "os" "path" apiv1 "k8s.io/api/core/v1" @@ -13,12 +11,7 @@ import ( ) //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . SecretStore -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . SecretDiskMemoryManager -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . 
FileManager -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 io/fs.DirEntry - -// tlsSecretFileMode defines the default file mode for files with TLS Secrets. -const tlsSecretFileMode = 0o600 +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . RequestManager // SecretStore stores secrets. type SecretStore interface { @@ -54,7 +47,7 @@ func NewSecretStore() *SecretStoreImpl { } } -func (s SecretStoreImpl) Upsert(secret *apiv1.Secret) { +func (s *SecretStoreImpl) Upsert(secret *apiv1.Secret) { nsname := types.NamespacedName{ Namespace: secret.Namespace, Name: secret.Name, @@ -64,46 +57,29 @@ func (s SecretStoreImpl) Upsert(secret *apiv1.Secret) { s.secrets[nsname] = &Secret{Secret: secret, Valid: valid} } -func (s SecretStoreImpl) Delete(nsname types.NamespacedName) { +func (s *SecretStoreImpl) Delete(nsname types.NamespacedName) { delete(s.secrets, nsname) } -func (s SecretStoreImpl) Get(nsname types.NamespacedName) *Secret { +func (s *SecretStoreImpl) Get(nsname types.NamespacedName) *Secret { return s.secrets[nsname] } -// SecretDiskMemoryManager manages secrets that are requested by Gateway resources. -type SecretDiskMemoryManager interface { +// RequestManager manages secrets that are requested by Gateway resources. +type RequestManager interface { // Request marks the secret as requested so that it can be written to disk before reloading NGINX. // Returns the path to the secret if it exists. // Returns an error if the secret does not exist in the secret store or the secret is invalid. Request(nsname types.NamespacedName) (string, error) - // WriteAllRequestedSecrets writes all requested secrets to disk. - WriteAllRequestedSecrets() error - // GetAllRequestedSecrets returns all request secrets as Files. - GetAllRequestedSecrets() []File -} - -// FileManager is an interface that exposes File I/O operations. -// Used for unit testing. -type FileManager interface { - // ReadDir returns the directory entries for the directory. 
- ReadDir(dirname string) ([]fs.DirEntry, error) - // Remove file with given name. - Remove(name string) error - // Create file at the provided filepath. - Create(name string) (*os.File, error) - // Chmod sets the mode of the file. - Chmod(file *os.File, mode os.FileMode) error - // Write writes contents to the file. - Write(file *os.File, contents []byte) error + // GetAndResetRequestedSecrets returns all request secrets as Files and resets the requested secrets. + GetAndResetRequestedSecrets() []File } +// RequestManagerImpl is the implementation of RequestManager. // FIXME(kate-osborn): Is it necessary to make this concurrent-safe? -type SecretDiskMemoryManagerImpl struct { +type RequestManagerImpl struct { requestedSecrets map[types.NamespacedName]requestedSecret secretStore SecretStore - fileManager FileManager secretDirectory string } @@ -112,37 +88,18 @@ type requestedSecret struct { path string } -// SecretDiskMemoryManagerOption is a function that modifies the configuration of the SecretDiskMemoryManager. -type SecretDiskMemoryManagerOption func(*SecretDiskMemoryManagerImpl) - -// WithSecretFileManager sets the file manager of the SecretDiskMemoryManager. -// Used to inject a fake fileManager for unit tests. 
-func WithSecretFileManager(fileManager FileManager) SecretDiskMemoryManagerOption { - return func(mm *SecretDiskMemoryManagerImpl) { - mm.fileManager = fileManager - } -} - -func NewSecretDiskMemoryManager( +func NewRequestManagerImpl( secretDirectory string, secretStore SecretStore, - options ...SecretDiskMemoryManagerOption, -) *SecretDiskMemoryManagerImpl { - sm := &SecretDiskMemoryManagerImpl{ +) *RequestManagerImpl { + return &RequestManagerImpl{ requestedSecrets: make(map[types.NamespacedName]requestedSecret), secretStore: secretStore, secretDirectory: secretDirectory, - fileManager: newStdLibFileManager(), } - - for _, o := range options { - o(sm) - } - - return sm } -func (s *SecretDiskMemoryManagerImpl) Request(nsname types.NamespacedName) (string, error) { +func (s *RequestManagerImpl) Request(nsname types.NamespacedName) (string, error) { secret := s.secretStore.Get(nsname) if secret == nil { return "", fmt.Errorf("secret %s does not exist", nsname) @@ -166,52 +123,7 @@ func (s *SecretDiskMemoryManagerImpl) Request(nsname types.NamespacedName) (stri return ss.path, nil } -func (s *SecretDiskMemoryManagerImpl) WriteAllRequestedSecrets() error { - // Remove all existing secrets from secrets directory - dir, err := s.fileManager.ReadDir(s.secretDirectory) - if err != nil { - return fmt.Errorf("failed to remove all secrets from %s: %w", s.secretDirectory, err) - } - - for _, d := range dir { - filepath := path.Join(s.secretDirectory, d.Name()) - if err := s.fileManager.Remove(filepath); err != nil { - return fmt.Errorf("failed to remove secret %s: %w", filepath, err) - } - } - - // Write all secrets to secrets directory - for nsname, ss := range s.requestedSecrets { - - file, err := s.fileManager.Create(ss.path) - if err != nil { - return fmt.Errorf("failed to create file %s for secret %s: %w", ss.path, nsname, err) - } - - if err = s.fileManager.Chmod(file, tlsSecretFileMode); err != nil { - return fmt.Errorf( - "failed to change mode of file %s for secret 
%s: %w", - ss.path, - nsname, - err, - ) - } - - contents := generateCertAndKeyFileContent(ss.secret) - - err = s.fileManager.Write(file, contents) - if err != nil { - return fmt.Errorf("failed to write secret %s to file %s: %w", nsname, ss.path, err) - } - } - - // reset stored secrets - s.requestedSecrets = make(map[types.NamespacedName]requestedSecret) - - return nil -} - -func (s *SecretDiskMemoryManagerImpl) GetAllRequestedSecrets() []File { +func (s *RequestManagerImpl) GetAndResetRequestedSecrets() []File { files := make([]File, 0, len(s.requestedSecrets)) for _, secret := range s.requestedSecrets { files = append(files, File{ @@ -220,6 +132,8 @@ func (s *SecretDiskMemoryManagerImpl) GetAllRequestedSecrets() []File { }) } + s.requestedSecrets = make(map[types.NamespacedName]requestedSecret) + return files } diff --git a/internal/state/secrets/secrets_test.go b/internal/state/secrets/secrets_test.go index d24e2d8ad2..6283733857 100644 --- a/internal/state/secrets/secrets_test.go +++ b/internal/state/secrets/secrets_test.go @@ -2,9 +2,6 @@ package secrets_test import ( - "errors" - "io/fs" - "os" "path" . 
"github.com/onsi/ginkgo/v2" @@ -134,30 +131,22 @@ var ( } ) -var _ = Describe("SecretDiskMemoryManager", func() { +var _ = Describe("RequestManager", func() { var ( - fakeStore *secretsfakes.FakeSecretStore - memMgr secrets.SecretDiskMemoryManager - tmpSecretsDir string + fakeStore *secretsfakes.FakeSecretStore + mgr secrets.RequestManager + secretsDir string ) BeforeEach(OncePerOrdered, func() { - dir, err := os.MkdirTemp("", "secrets-test") - tmpSecretsDir = dir - Expect(err).ToNot(HaveOccurred(), "failed to create temp directory for tests") - fakeStore = &secretsfakes.FakeSecretStore{} - memMgr = secrets.NewSecretDiskMemoryManager(tmpSecretsDir, fakeStore) - }) - - AfterEach(OncePerOrdered, func() { - Expect(os.RemoveAll(tmpSecretsDir)).To(Succeed()) + mgr = secrets.NewRequestManagerImpl(secretsDir, fakeStore) }) - Describe("Manages secrets on disk", Ordered, func() { + Describe("Manages requested secrets", Ordered, func() { testRequest := func(s *apiv1.Secret, expPath string, expErr bool) { nsname := types.NamespacedName{Namespace: s.Namespace, Name: s.Name} - actualPath, err := memMgr.Request(nsname) + actualPath, err := mgr.Request(nsname) if expErr { Expect(err).To(HaveOccurred()) @@ -175,14 +164,14 @@ var _ = Describe("SecretDiskMemoryManager", func() { }) It("request should return the file path for a valid secret", func() { fakeStore.GetReturns(&secrets.Secret{Secret: secret1, Valid: true}) - expectedPath := path.Join(tmpSecretsDir, "test_secret1") + expectedPath := path.Join(secretsDir, "test_secret1") testRequest(secret1, expectedPath, false) }) It("request should return the file path for another valid secret", func() { fakeStore.GetReturns(&secrets.Secret{Secret: secret2, Valid: true}) - expectedPath := path.Join(tmpSecretsDir, "test_secret2") + expectedPath := path.Join(secretsDir, "test_secret2") testRequest(secret2, expectedPath, false) }) @@ -193,110 +182,39 @@ var _ = Describe("SecretDiskMemoryManager", func() { testRequest(invalidSecretType, "", 
true) }) - It("should write all requested secrets", func() { - err := memMgr.WriteAllRequestedSecrets() - Expect(err).ToNot(HaveOccurred()) + It("should return all requested secrets", func() { + secretFiles := mgr.GetAndResetRequestedSecrets() expectedFileNames := []string{"test_secret1", "test_secret2"} + Expect(secretFiles).To(HaveLen(2)) - // read all files from directory - dir, err := os.ReadDir(tmpSecretsDir) - Expect(err).ToNot(HaveOccurred()) - - // test that the files exist that we expect - Expect(dir).To(HaveLen(2)) - actualFilenames := []string{dir[0].Name(), dir[1].Name()} + actualFilenames := []string{secretFiles[0].Name, secretFiles[1].Name} Expect(actualFilenames).To(ConsistOf(expectedFileNames)) }) - It("request should return the file path for secret after write", func() { + It("request should return the file path for secret after get", func() { fakeStore.GetReturns(&secrets.Secret{Secret: secret3, Valid: true}) - expectedPath := path.Join(tmpSecretsDir, "test_secret3") + expectedPath := path.Join(secretsDir, "test_secret3") testRequest(secret3, expectedPath, false) }) - It("should write all requested secrets", func() { - err := memMgr.WriteAllRequestedSecrets() - Expect(err).ToNot(HaveOccurred()) - - // read all files from directory - dir, err := os.ReadDir(tmpSecretsDir) - Expect(err).ToNot(HaveOccurred()) + It("should return all requested secrets", func() { + secretFiles := mgr.GetAndResetRequestedSecrets() // only the secrets stored after the last write should be written to disk. 
- Expect(dir).To(HaveLen(1)) - Expect(dir[0].Name()).To(Equal("test_secret3")) + Expect(secretFiles).To(HaveLen(1)) + Expect(secretFiles[0].Name).To(Equal("test_secret3")) }) When("no secrets are requested", func() { It("write all secrets should remove all existing secrets and write no additional secrets", func() { - err := memMgr.WriteAllRequestedSecrets() - Expect(err).ToNot(HaveOccurred()) - - // read all files from directory - dir, err := os.ReadDir(tmpSecretsDir) - Expect(err).ToNot(HaveOccurred()) - + secretFiles := mgr.GetAndResetRequestedSecrets() // no secrets should exist - Expect(dir).To(BeEmpty()) + Expect(secretFiles).To(HaveLen(0)) }) }) }) - Describe("Write all requested secrets", func() { - var ( - fakeFileManager *secretsfakes.FakeFileManager - fakeStore *secretsfakes.FakeSecretStore - fakeDirEntries []fs.DirEntry - memMgr *secrets.SecretDiskMemoryManagerImpl - ) - - BeforeEach(OncePerOrdered, func() { - fakeFileManager = &secretsfakes.FakeFileManager{} - fakeStore = &secretsfakes.FakeSecretStore{} - fakeDirEntries = []fs.DirEntry{&secretsfakes.FakeDirEntry{}} - memMgr = secrets.NewSecretDiskMemoryManager("", fakeStore, secrets.WithSecretFileManager(fakeFileManager)) - - // populate a requested secret - fakeStore.GetReturns(&secrets.Secret{Secret: secret1, Valid: true}) - _, err := memMgr.Request(types.NamespacedName{Namespace: secret1.Namespace, Name: secret1.Name}) - Expect(err).ToNot(HaveOccurred()) - }) - - DescribeTable("error cases", Ordered, - func(e error, preparer func(e error)) { - preparer(e) - - err := memMgr.WriteAllRequestedSecrets() - Expect(err).To(MatchError(e)) - }, - Entry("read directory error", errors.New("read dir"), - func(e error) { - fakeFileManager.ReadDirReturns(nil, e) - }), - Entry("remove file error", errors.New("remove file"), - func(e error) { - fakeFileManager.ReadDirReturns(fakeDirEntries, nil) - fakeFileManager.RemoveReturns(e) - }), - Entry("create file error", errors.New("create error"), - func(e error) { - 
fakeFileManager.RemoveReturns(nil) - fakeFileManager.CreateReturns(nil, e) - }), - Entry("chmod error", errors.New("chmod"), - func(e error) { - fakeFileManager.CreateReturns(&os.File{}, nil) - fakeFileManager.ChmodReturns(e) - }), - Entry("write error", errors.New("write"), - func(e error) { - fakeFileManager.ChmodReturns(nil) - fakeFileManager.WriteReturns(e) - }), - ) - }) }) - var _ = Describe("SecretStore", func() { var store secrets.SecretStore var invalidToValidSecret, validToInvalidSecret *apiv1.Secret diff --git a/internal/state/secrets/secretsfakes/fake_dir_entry.go b/internal/state/secrets/secretsfakes/fake_dir_entry.go deleted file mode 100644 index ee1ba1342f..0000000000 --- a/internal/state/secrets/secretsfakes/fake_dir_entry.go +++ /dev/null @@ -1,301 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package secretsfakes - -import ( - "io/fs" - "sync" -) - -type FakeDirEntry struct { - InfoStub func() (fs.FileInfo, error) - infoMutex sync.RWMutex - infoArgsForCall []struct { - } - infoReturns struct { - result1 fs.FileInfo - result2 error - } - infoReturnsOnCall map[int]struct { - result1 fs.FileInfo - result2 error - } - IsDirStub func() bool - isDirMutex sync.RWMutex - isDirArgsForCall []struct { - } - isDirReturns struct { - result1 bool - } - isDirReturnsOnCall map[int]struct { - result1 bool - } - NameStub func() string - nameMutex sync.RWMutex - nameArgsForCall []struct { - } - nameReturns struct { - result1 string - } - nameReturnsOnCall map[int]struct { - result1 string - } - TypeStub func() fs.FileMode - typeMutex sync.RWMutex - typeArgsForCall []struct { - } - typeReturns struct { - result1 fs.FileMode - } - typeReturnsOnCall map[int]struct { - result1 fs.FileMode - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeDirEntry) Info() (fs.FileInfo, error) { - fake.infoMutex.Lock() - ret, specificReturn := fake.infoReturnsOnCall[len(fake.infoArgsForCall)] - fake.infoArgsForCall = 
append(fake.infoArgsForCall, struct { - }{}) - stub := fake.InfoStub - fakeReturns := fake.infoReturns - fake.recordInvocation("Info", []interface{}{}) - fake.infoMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeDirEntry) InfoCallCount() int { - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - return len(fake.infoArgsForCall) -} - -func (fake *FakeDirEntry) InfoCalls(stub func() (fs.FileInfo, error)) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = stub -} - -func (fake *FakeDirEntry) InfoReturns(result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - fake.infoReturns = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) InfoReturnsOnCall(i int, result1 fs.FileInfo, result2 error) { - fake.infoMutex.Lock() - defer fake.infoMutex.Unlock() - fake.InfoStub = nil - if fake.infoReturnsOnCall == nil { - fake.infoReturnsOnCall = make(map[int]struct { - result1 fs.FileInfo - result2 error - }) - } - fake.infoReturnsOnCall[i] = struct { - result1 fs.FileInfo - result2 error - }{result1, result2} -} - -func (fake *FakeDirEntry) IsDir() bool { - fake.isDirMutex.Lock() - ret, specificReturn := fake.isDirReturnsOnCall[len(fake.isDirArgsForCall)] - fake.isDirArgsForCall = append(fake.isDirArgsForCall, struct { - }{}) - stub := fake.IsDirStub - fakeReturns := fake.isDirReturns - fake.recordInvocation("IsDir", []interface{}{}) - fake.isDirMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) IsDirCallCount() int { - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - return len(fake.isDirArgsForCall) -} - -func (fake *FakeDirEntry) IsDirCalls(stub func() bool) { - fake.isDirMutex.Lock() - 
defer fake.isDirMutex.Unlock() - fake.IsDirStub = stub -} - -func (fake *FakeDirEntry) IsDirReturns(result1 bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = nil - fake.isDirReturns = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) IsDirReturnsOnCall(i int, result1 bool) { - fake.isDirMutex.Lock() - defer fake.isDirMutex.Unlock() - fake.IsDirStub = nil - if fake.isDirReturnsOnCall == nil { - fake.isDirReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isDirReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *FakeDirEntry) Name() string { - fake.nameMutex.Lock() - ret, specificReturn := fake.nameReturnsOnCall[len(fake.nameArgsForCall)] - fake.nameArgsForCall = append(fake.nameArgsForCall, struct { - }{}) - stub := fake.NameStub - fakeReturns := fake.nameReturns - fake.recordInvocation("Name", []interface{}{}) - fake.nameMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) NameCallCount() int { - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - return len(fake.nameArgsForCall) -} - -func (fake *FakeDirEntry) NameCalls(stub func() string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = stub -} - -func (fake *FakeDirEntry) NameReturns(result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - fake.nameReturns = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) NameReturnsOnCall(i int, result1 string) { - fake.nameMutex.Lock() - defer fake.nameMutex.Unlock() - fake.NameStub = nil - if fake.nameReturnsOnCall == nil { - fake.nameReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.nameReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *FakeDirEntry) Type() fs.FileMode { - fake.typeMutex.Lock() - ret, specificReturn := 
fake.typeReturnsOnCall[len(fake.typeArgsForCall)] - fake.typeArgsForCall = append(fake.typeArgsForCall, struct { - }{}) - stub := fake.TypeStub - fakeReturns := fake.typeReturns - fake.recordInvocation("Type", []interface{}{}) - fake.typeMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeDirEntry) TypeCallCount() int { - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - return len(fake.typeArgsForCall) -} - -func (fake *FakeDirEntry) TypeCalls(stub func() fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = stub -} - -func (fake *FakeDirEntry) TypeReturns(result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - fake.typeReturns = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) TypeReturnsOnCall(i int, result1 fs.FileMode) { - fake.typeMutex.Lock() - defer fake.typeMutex.Unlock() - fake.TypeStub = nil - if fake.typeReturnsOnCall == nil { - fake.typeReturnsOnCall = make(map[int]struct { - result1 fs.FileMode - }) - } - fake.typeReturnsOnCall[i] = struct { - result1 fs.FileMode - }{result1} -} - -func (fake *FakeDirEntry) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.infoMutex.RLock() - defer fake.infoMutex.RUnlock() - fake.isDirMutex.RLock() - defer fake.isDirMutex.RUnlock() - fake.nameMutex.RLock() - defer fake.nameMutex.RUnlock() - fake.typeMutex.RLock() - defer fake.typeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeDirEntry) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - 
} - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ fs.DirEntry = new(FakeDirEntry) diff --git a/internal/state/secrets/secretsfakes/fake_file_manager.go b/internal/state/secrets/secretsfakes/fake_file_manager.go deleted file mode 100644 index 3c4e958f30..0000000000 --- a/internal/state/secrets/secretsfakes/fake_file_manager.go +++ /dev/null @@ -1,428 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package secretsfakes - -import ( - "io/fs" - "os" - "sync" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" -) - -type FakeFileManager struct { - ChmodStub func(*os.File, fs.FileMode) error - chmodMutex sync.RWMutex - chmodArgsForCall []struct { - arg1 *os.File - arg2 fs.FileMode - } - chmodReturns struct { - result1 error - } - chmodReturnsOnCall map[int]struct { - result1 error - } - CreateStub func(string) (*os.File, error) - createMutex sync.RWMutex - createArgsForCall []struct { - arg1 string - } - createReturns struct { - result1 *os.File - result2 error - } - createReturnsOnCall map[int]struct { - result1 *os.File - result2 error - } - ReadDirStub func(string) ([]fs.DirEntry, error) - readDirMutex sync.RWMutex - readDirArgsForCall []struct { - arg1 string - } - readDirReturns struct { - result1 []fs.DirEntry - result2 error - } - readDirReturnsOnCall map[int]struct { - result1 []fs.DirEntry - result2 error - } - RemoveStub func(string) error - removeMutex sync.RWMutex - removeArgsForCall []struct { - arg1 string - } - removeReturns struct { - result1 error - } - removeReturnsOnCall map[int]struct { - result1 error - } - WriteStub func(*os.File, []byte) error - writeMutex sync.RWMutex - writeArgsForCall []struct { - arg1 *os.File - arg2 []byte - } - writeReturns struct { - result1 error - } - writeReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - 
-func (fake *FakeFileManager) Chmod(arg1 *os.File, arg2 fs.FileMode) error { - fake.chmodMutex.Lock() - ret, specificReturn := fake.chmodReturnsOnCall[len(fake.chmodArgsForCall)] - fake.chmodArgsForCall = append(fake.chmodArgsForCall, struct { - arg1 *os.File - arg2 fs.FileMode - }{arg1, arg2}) - stub := fake.ChmodStub - fakeReturns := fake.chmodReturns - fake.recordInvocation("Chmod", []interface{}{arg1, arg2}) - fake.chmodMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeFileManager) ChmodCallCount() int { - fake.chmodMutex.RLock() - defer fake.chmodMutex.RUnlock() - return len(fake.chmodArgsForCall) -} - -func (fake *FakeFileManager) ChmodCalls(stub func(*os.File, fs.FileMode) error) { - fake.chmodMutex.Lock() - defer fake.chmodMutex.Unlock() - fake.ChmodStub = stub -} - -func (fake *FakeFileManager) ChmodArgsForCall(i int) (*os.File, fs.FileMode) { - fake.chmodMutex.RLock() - defer fake.chmodMutex.RUnlock() - argsForCall := fake.chmodArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeFileManager) ChmodReturns(result1 error) { - fake.chmodMutex.Lock() - defer fake.chmodMutex.Unlock() - fake.ChmodStub = nil - fake.chmodReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) ChmodReturnsOnCall(i int, result1 error) { - fake.chmodMutex.Lock() - defer fake.chmodMutex.Unlock() - fake.ChmodStub = nil - if fake.chmodReturnsOnCall == nil { - fake.chmodReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.chmodReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) Create(arg1 string) (*os.File, error) { - fake.createMutex.Lock() - ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] - fake.createArgsForCall = append(fake.createArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.CreateStub - fakeReturns := 
fake.createReturns - fake.recordInvocation("Create", []interface{}{arg1}) - fake.createMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeFileManager) CreateCallCount() int { - fake.createMutex.RLock() - defer fake.createMutex.RUnlock() - return len(fake.createArgsForCall) -} - -func (fake *FakeFileManager) CreateCalls(stub func(string) (*os.File, error)) { - fake.createMutex.Lock() - defer fake.createMutex.Unlock() - fake.CreateStub = stub -} - -func (fake *FakeFileManager) CreateArgsForCall(i int) string { - fake.createMutex.RLock() - defer fake.createMutex.RUnlock() - argsForCall := fake.createArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeFileManager) CreateReturns(result1 *os.File, result2 error) { - fake.createMutex.Lock() - defer fake.createMutex.Unlock() - fake.CreateStub = nil - fake.createReturns = struct { - result1 *os.File - result2 error - }{result1, result2} -} - -func (fake *FakeFileManager) CreateReturnsOnCall(i int, result1 *os.File, result2 error) { - fake.createMutex.Lock() - defer fake.createMutex.Unlock() - fake.CreateStub = nil - if fake.createReturnsOnCall == nil { - fake.createReturnsOnCall = make(map[int]struct { - result1 *os.File - result2 error - }) - } - fake.createReturnsOnCall[i] = struct { - result1 *os.File - result2 error - }{result1, result2} -} - -func (fake *FakeFileManager) ReadDir(arg1 string) ([]fs.DirEntry, error) { - fake.readDirMutex.Lock() - ret, specificReturn := fake.readDirReturnsOnCall[len(fake.readDirArgsForCall)] - fake.readDirArgsForCall = append(fake.readDirArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.ReadDirStub - fakeReturns := fake.readDirReturns - fake.recordInvocation("ReadDir", []interface{}{arg1}) - fake.readDirMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return 
fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeFileManager) ReadDirCallCount() int { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - return len(fake.readDirArgsForCall) -} - -func (fake *FakeFileManager) ReadDirCalls(stub func(string) ([]fs.DirEntry, error)) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = stub -} - -func (fake *FakeFileManager) ReadDirArgsForCall(i int) string { - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - argsForCall := fake.readDirArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeFileManager) ReadDirReturns(result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - fake.readDirReturns = struct { - result1 []fs.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeFileManager) ReadDirReturnsOnCall(i int, result1 []fs.DirEntry, result2 error) { - fake.readDirMutex.Lock() - defer fake.readDirMutex.Unlock() - fake.ReadDirStub = nil - if fake.readDirReturnsOnCall == nil { - fake.readDirReturnsOnCall = make(map[int]struct { - result1 []fs.DirEntry - result2 error - }) - } - fake.readDirReturnsOnCall[i] = struct { - result1 []fs.DirEntry - result2 error - }{result1, result2} -} - -func (fake *FakeFileManager) Remove(arg1 string) error { - fake.removeMutex.Lock() - ret, specificReturn := fake.removeReturnsOnCall[len(fake.removeArgsForCall)] - fake.removeArgsForCall = append(fake.removeArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.RemoveStub - fakeReturns := fake.removeReturns - fake.recordInvocation("Remove", []interface{}{arg1}) - fake.removeMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeFileManager) RemoveCallCount() int { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - return len(fake.removeArgsForCall) -} - -func (fake 
*FakeFileManager) RemoveCalls(stub func(string) error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = stub -} - -func (fake *FakeFileManager) RemoveArgsForCall(i int) string { - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - argsForCall := fake.removeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeFileManager) RemoveReturns(result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - fake.removeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) RemoveReturnsOnCall(i int, result1 error) { - fake.removeMutex.Lock() - defer fake.removeMutex.Unlock() - fake.RemoveStub = nil - if fake.removeReturnsOnCall == nil { - fake.removeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.removeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) Write(arg1 *os.File, arg2 []byte) error { - var arg2Copy []byte - if arg2 != nil { - arg2Copy = make([]byte, len(arg2)) - copy(arg2Copy, arg2) - } - fake.writeMutex.Lock() - ret, specificReturn := fake.writeReturnsOnCall[len(fake.writeArgsForCall)] - fake.writeArgsForCall = append(fake.writeArgsForCall, struct { - arg1 *os.File - arg2 []byte - }{arg1, arg2Copy}) - stub := fake.WriteStub - fakeReturns := fake.writeReturns - fake.recordInvocation("Write", []interface{}{arg1, arg2Copy}) - fake.writeMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeFileManager) WriteCallCount() int { - fake.writeMutex.RLock() - defer fake.writeMutex.RUnlock() - return len(fake.writeArgsForCall) -} - -func (fake *FakeFileManager) WriteCalls(stub func(*os.File, []byte) error) { - fake.writeMutex.Lock() - defer fake.writeMutex.Unlock() - fake.WriteStub = stub -} - -func (fake *FakeFileManager) WriteArgsForCall(i int) (*os.File, []byte) { - 
fake.writeMutex.RLock() - defer fake.writeMutex.RUnlock() - argsForCall := fake.writeArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeFileManager) WriteReturns(result1 error) { - fake.writeMutex.Lock() - defer fake.writeMutex.Unlock() - fake.WriteStub = nil - fake.writeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) WriteReturnsOnCall(i int, result1 error) { - fake.writeMutex.Lock() - defer fake.writeMutex.Unlock() - fake.WriteStub = nil - if fake.writeReturnsOnCall == nil { - fake.writeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.writeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeFileManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.chmodMutex.RLock() - defer fake.chmodMutex.RUnlock() - fake.createMutex.RLock() - defer fake.createMutex.RUnlock() - fake.readDirMutex.RLock() - defer fake.readDirMutex.RUnlock() - fake.removeMutex.RLock() - defer fake.removeMutex.RUnlock() - fake.writeMutex.RLock() - defer fake.writeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeFileManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ secrets.FileManager = new(FakeFileManager) diff --git a/internal/state/secrets/secretsfakes/fake_request_manager.go b/internal/state/secrets/secretsfakes/fake_request_manager.go new file mode 100644 index 0000000000..56c1a31d84 --- /dev/null +++ 
b/internal/state/secrets/secretsfakes/fake_request_manager.go @@ -0,0 +1,182 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package secretsfakes + +import ( + "sync" + + "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" + "k8s.io/apimachinery/pkg/types" +) + +type FakeRequestManager struct { + GetAndResetRequestedSecretsStub func() []secrets.File + getAndResetRequestedSecretsMutex sync.RWMutex + getAndResetRequestedSecretsArgsForCall []struct { + } + getAndResetRequestedSecretsReturns struct { + result1 []secrets.File + } + getAndResetRequestedSecretsReturnsOnCall map[int]struct { + result1 []secrets.File + } + RequestStub func(types.NamespacedName) (string, error) + requestMutex sync.RWMutex + requestArgsForCall []struct { + arg1 types.NamespacedName + } + requestReturns struct { + result1 string + result2 error + } + requestReturnsOnCall map[int]struct { + result1 string + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeRequestManager) GetAndResetRequestedSecrets() []secrets.File { + fake.getAndResetRequestedSecretsMutex.Lock() + ret, specificReturn := fake.getAndResetRequestedSecretsReturnsOnCall[len(fake.getAndResetRequestedSecretsArgsForCall)] + fake.getAndResetRequestedSecretsArgsForCall = append(fake.getAndResetRequestedSecretsArgsForCall, struct { + }{}) + stub := fake.GetAndResetRequestedSecretsStub + fakeReturns := fake.getAndResetRequestedSecretsReturns + fake.recordInvocation("GetAndResetRequestedSecrets", []interface{}{}) + fake.getAndResetRequestedSecretsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeRequestManager) GetAndResetRequestedSecretsCallCount() int { + fake.getAndResetRequestedSecretsMutex.RLock() + defer fake.getAndResetRequestedSecretsMutex.RUnlock() + return len(fake.getAndResetRequestedSecretsArgsForCall) +} + +func (fake *FakeRequestManager) 
GetAndResetRequestedSecretsCalls(stub func() []secrets.File) { + fake.getAndResetRequestedSecretsMutex.Lock() + defer fake.getAndResetRequestedSecretsMutex.Unlock() + fake.GetAndResetRequestedSecretsStub = stub +} + +func (fake *FakeRequestManager) GetAndResetRequestedSecretsReturns(result1 []secrets.File) { + fake.getAndResetRequestedSecretsMutex.Lock() + defer fake.getAndResetRequestedSecretsMutex.Unlock() + fake.GetAndResetRequestedSecretsStub = nil + fake.getAndResetRequestedSecretsReturns = struct { + result1 []secrets.File + }{result1} +} + +func (fake *FakeRequestManager) GetAndResetRequestedSecretsReturnsOnCall(i int, result1 []secrets.File) { + fake.getAndResetRequestedSecretsMutex.Lock() + defer fake.getAndResetRequestedSecretsMutex.Unlock() + fake.GetAndResetRequestedSecretsStub = nil + if fake.getAndResetRequestedSecretsReturnsOnCall == nil { + fake.getAndResetRequestedSecretsReturnsOnCall = make(map[int]struct { + result1 []secrets.File + }) + } + fake.getAndResetRequestedSecretsReturnsOnCall[i] = struct { + result1 []secrets.File + }{result1} +} + +func (fake *FakeRequestManager) Request(arg1 types.NamespacedName) (string, error) { + fake.requestMutex.Lock() + ret, specificReturn := fake.requestReturnsOnCall[len(fake.requestArgsForCall)] + fake.requestArgsForCall = append(fake.requestArgsForCall, struct { + arg1 types.NamespacedName + }{arg1}) + stub := fake.RequestStub + fakeReturns := fake.requestReturns + fake.recordInvocation("Request", []interface{}{arg1}) + fake.requestMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeRequestManager) RequestCallCount() int { + fake.requestMutex.RLock() + defer fake.requestMutex.RUnlock() + return len(fake.requestArgsForCall) +} + +func (fake *FakeRequestManager) RequestCalls(stub func(types.NamespacedName) (string, error)) { + fake.requestMutex.Lock() + defer 
fake.requestMutex.Unlock() + fake.RequestStub = stub +} + +func (fake *FakeRequestManager) RequestArgsForCall(i int) types.NamespacedName { + fake.requestMutex.RLock() + defer fake.requestMutex.RUnlock() + argsForCall := fake.requestArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeRequestManager) RequestReturns(result1 string, result2 error) { + fake.requestMutex.Lock() + defer fake.requestMutex.Unlock() + fake.RequestStub = nil + fake.requestReturns = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeRequestManager) RequestReturnsOnCall(i int, result1 string, result2 error) { + fake.requestMutex.Lock() + defer fake.requestMutex.Unlock() + fake.RequestStub = nil + if fake.requestReturnsOnCall == nil { + fake.requestReturnsOnCall = make(map[int]struct { + result1 string + result2 error + }) + } + fake.requestReturnsOnCall[i] = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeRequestManager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.getAndResetRequestedSecretsMutex.RLock() + defer fake.getAndResetRequestedSecretsMutex.RUnlock() + fake.requestMutex.RLock() + defer fake.requestMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeRequestManager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ secrets.RequestManager = new(FakeRequestManager) diff --git a/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go 
b/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go deleted file mode 100644 index 12f71fe4f0..0000000000 --- a/internal/state/secrets/secretsfakes/fake_secret_disk_memory_manager.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package secretsfakes - -import ( - "sync" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" - "k8s.io/apimachinery/pkg/types" -) - -type FakeSecretDiskMemoryManager struct { - GetAllRequestedSecretsStub func() []secrets.File - getAllRequestedSecretsMutex sync.RWMutex - getAllRequestedSecretsArgsForCall []struct { - } - getAllRequestedSecretsReturns struct { - result1 []secrets.File - } - getAllRequestedSecretsReturnsOnCall map[int]struct { - result1 []secrets.File - } - RequestStub func(types.NamespacedName) (string, error) - requestMutex sync.RWMutex - requestArgsForCall []struct { - arg1 types.NamespacedName - } - requestReturns struct { - result1 string - result2 error - } - requestReturnsOnCall map[int]struct { - result1 string - result2 error - } - WriteAllRequestedSecretsStub func() error - writeAllRequestedSecretsMutex sync.RWMutex - writeAllRequestedSecretsArgsForCall []struct { - } - writeAllRequestedSecretsReturns struct { - result1 error - } - writeAllRequestedSecretsReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecrets() []secrets.File { - fake.getAllRequestedSecretsMutex.Lock() - ret, specificReturn := fake.getAllRequestedSecretsReturnsOnCall[len(fake.getAllRequestedSecretsArgsForCall)] - fake.getAllRequestedSecretsArgsForCall = append(fake.getAllRequestedSecretsArgsForCall, struct { - }{}) - stub := fake.GetAllRequestedSecretsStub - fakeReturns := fake.getAllRequestedSecretsReturns - fake.recordInvocation("GetAllRequestedSecrets", []interface{}{}) - fake.getAllRequestedSecretsMutex.Unlock() - if stub != nil { 
- return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsCallCount() int { - fake.getAllRequestedSecretsMutex.RLock() - defer fake.getAllRequestedSecretsMutex.RUnlock() - return len(fake.getAllRequestedSecretsArgsForCall) -} - -func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsCalls(stub func() []secrets.File) { - fake.getAllRequestedSecretsMutex.Lock() - defer fake.getAllRequestedSecretsMutex.Unlock() - fake.GetAllRequestedSecretsStub = stub -} - -func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsReturns(result1 []secrets.File) { - fake.getAllRequestedSecretsMutex.Lock() - defer fake.getAllRequestedSecretsMutex.Unlock() - fake.GetAllRequestedSecretsStub = nil - fake.getAllRequestedSecretsReturns = struct { - result1 []secrets.File - }{result1} -} - -func (fake *FakeSecretDiskMemoryManager) GetAllRequestedSecretsReturnsOnCall(i int, result1 []secrets.File) { - fake.getAllRequestedSecretsMutex.Lock() - defer fake.getAllRequestedSecretsMutex.Unlock() - fake.GetAllRequestedSecretsStub = nil - if fake.getAllRequestedSecretsReturnsOnCall == nil { - fake.getAllRequestedSecretsReturnsOnCall = make(map[int]struct { - result1 []secrets.File - }) - } - fake.getAllRequestedSecretsReturnsOnCall[i] = struct { - result1 []secrets.File - }{result1} -} - -func (fake *FakeSecretDiskMemoryManager) Request(arg1 types.NamespacedName) (string, error) { - fake.requestMutex.Lock() - ret, specificReturn := fake.requestReturnsOnCall[len(fake.requestArgsForCall)] - fake.requestArgsForCall = append(fake.requestArgsForCall, struct { - arg1 types.NamespacedName - }{arg1}) - stub := fake.RequestStub - fakeReturns := fake.requestReturns - fake.recordInvocation("Request", []interface{}{arg1}) - fake.requestMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, 
fakeReturns.result2 -} - -func (fake *FakeSecretDiskMemoryManager) RequestCallCount() int { - fake.requestMutex.RLock() - defer fake.requestMutex.RUnlock() - return len(fake.requestArgsForCall) -} - -func (fake *FakeSecretDiskMemoryManager) RequestCalls(stub func(types.NamespacedName) (string, error)) { - fake.requestMutex.Lock() - defer fake.requestMutex.Unlock() - fake.RequestStub = stub -} - -func (fake *FakeSecretDiskMemoryManager) RequestArgsForCall(i int) types.NamespacedName { - fake.requestMutex.RLock() - defer fake.requestMutex.RUnlock() - argsForCall := fake.requestArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeSecretDiskMemoryManager) RequestReturns(result1 string, result2 error) { - fake.requestMutex.Lock() - defer fake.requestMutex.Unlock() - fake.RequestStub = nil - fake.requestReturns = struct { - result1 string - result2 error - }{result1, result2} -} - -func (fake *FakeSecretDiskMemoryManager) RequestReturnsOnCall(i int, result1 string, result2 error) { - fake.requestMutex.Lock() - defer fake.requestMutex.Unlock() - fake.RequestStub = nil - if fake.requestReturnsOnCall == nil { - fake.requestReturnsOnCall = make(map[int]struct { - result1 string - result2 error - }) - } - fake.requestReturnsOnCall[i] = struct { - result1 string - result2 error - }{result1, result2} -} - -func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecrets() error { - fake.writeAllRequestedSecretsMutex.Lock() - ret, specificReturn := fake.writeAllRequestedSecretsReturnsOnCall[len(fake.writeAllRequestedSecretsArgsForCall)] - fake.writeAllRequestedSecretsArgsForCall = append(fake.writeAllRequestedSecretsArgsForCall, struct { - }{}) - stub := fake.WriteAllRequestedSecretsStub - fakeReturns := fake.writeAllRequestedSecretsReturns - fake.recordInvocation("WriteAllRequestedSecrets", []interface{}{}) - fake.writeAllRequestedSecretsMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 
-} - -func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecretsCallCount() int { - fake.writeAllRequestedSecretsMutex.RLock() - defer fake.writeAllRequestedSecretsMutex.RUnlock() - return len(fake.writeAllRequestedSecretsArgsForCall) -} - -func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecretsCalls(stub func() error) { - fake.writeAllRequestedSecretsMutex.Lock() - defer fake.writeAllRequestedSecretsMutex.Unlock() - fake.WriteAllRequestedSecretsStub = stub -} - -func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecretsReturns(result1 error) { - fake.writeAllRequestedSecretsMutex.Lock() - defer fake.writeAllRequestedSecretsMutex.Unlock() - fake.WriteAllRequestedSecretsStub = nil - fake.writeAllRequestedSecretsReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeSecretDiskMemoryManager) WriteAllRequestedSecretsReturnsOnCall(i int, result1 error) { - fake.writeAllRequestedSecretsMutex.Lock() - defer fake.writeAllRequestedSecretsMutex.Unlock() - fake.WriteAllRequestedSecretsStub = nil - if fake.writeAllRequestedSecretsReturnsOnCall == nil { - fake.writeAllRequestedSecretsReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.writeAllRequestedSecretsReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeSecretDiskMemoryManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.getAllRequestedSecretsMutex.RLock() - defer fake.getAllRequestedSecretsMutex.RUnlock() - fake.requestMutex.RLock() - defer fake.requestMutex.RUnlock() - fake.writeAllRequestedSecretsMutex.RLock() - defer fake.writeAllRequestedSecretsMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeSecretDiskMemoryManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer 
fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ secrets.SecretDiskMemoryManager = new(FakeSecretDiskMemoryManager) From 5ff2d9496b57d4b69665274b00a04b35b01ec391 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Wed, 12 Apr 2023 09:27:10 -0600 Subject: [PATCH 14/16] Remove file and runtime nginx management --- internal/nginx/file/filefakes/fake_manager.go | 118 ------------------ internal/nginx/file/manager.go | 49 -------- internal/nginx/file/manager_test.go | 12 -- internal/nginx/runtime/manager.go | 80 ------------ internal/nginx/runtime/manager_test.go | 70 ----------- .../runtime/runtimefakes/fake_manager.go | 112 ----------------- 6 files changed, 441 deletions(-) delete mode 100644 internal/nginx/file/filefakes/fake_manager.go delete mode 100644 internal/nginx/file/manager.go delete mode 100644 internal/nginx/file/manager_test.go delete mode 100644 internal/nginx/runtime/manager.go delete mode 100644 internal/nginx/runtime/manager_test.go delete mode 100644 internal/nginx/runtime/runtimefakes/fake_manager.go diff --git a/internal/nginx/file/filefakes/fake_manager.go b/internal/nginx/file/filefakes/fake_manager.go deleted file mode 100644 index ed2c52caac..0000000000 --- a/internal/nginx/file/filefakes/fake_manager.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package filefakes - -import ( - "sync" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/file" -) - -type FakeManager struct { - WriteHTTPConfigStub func(string, []byte) error - writeHTTPConfigMutex sync.RWMutex - writeHTTPConfigArgsForCall []struct { - arg1 string - arg2 []byte - } - writeHTTPConfigReturns struct { - result1 error - } - writeHTTPConfigReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) WriteHTTPConfig(arg1 string, arg2 []byte) error { - var arg2Copy []byte - if arg2 != nil { - arg2Copy = make([]byte, len(arg2)) - copy(arg2Copy, arg2) - } - fake.writeHTTPConfigMutex.Lock() - ret, specificReturn := fake.writeHTTPConfigReturnsOnCall[len(fake.writeHTTPConfigArgsForCall)] - fake.writeHTTPConfigArgsForCall = append(fake.writeHTTPConfigArgsForCall, struct { - arg1 string - arg2 []byte - }{arg1, arg2Copy}) - stub := fake.WriteHTTPConfigStub - fakeReturns := fake.writeHTTPConfigReturns - fake.recordInvocation("WriteHTTPConfig", []interface{}{arg1, arg2Copy}) - fake.writeHTTPConfigMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) WriteHTTPConfigCallCount() int { - fake.writeHTTPConfigMutex.RLock() - defer fake.writeHTTPConfigMutex.RUnlock() - return len(fake.writeHTTPConfigArgsForCall) -} - -func (fake *FakeManager) WriteHTTPConfigCalls(stub func(string, []byte) error) { - fake.writeHTTPConfigMutex.Lock() - defer fake.writeHTTPConfigMutex.Unlock() - fake.WriteHTTPConfigStub = stub -} - -func (fake *FakeManager) WriteHTTPConfigArgsForCall(i int) (string, []byte) { - fake.writeHTTPConfigMutex.RLock() - defer fake.writeHTTPConfigMutex.RUnlock() - argsForCall := fake.writeHTTPConfigArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeManager) WriteHTTPConfigReturns(result1 error) { - 
fake.writeHTTPConfigMutex.Lock() - defer fake.writeHTTPConfigMutex.Unlock() - fake.WriteHTTPConfigStub = nil - fake.writeHTTPConfigReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) WriteHTTPConfigReturnsOnCall(i int, result1 error) { - fake.writeHTTPConfigMutex.Lock() - defer fake.writeHTTPConfigMutex.Unlock() - fake.WriteHTTPConfigStub = nil - if fake.writeHTTPConfigReturnsOnCall == nil { - fake.writeHTTPConfigReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.writeHTTPConfigReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.writeHTTPConfigMutex.RLock() - defer fake.writeHTTPConfigMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ file.Manager = new(FakeManager) diff --git a/internal/nginx/file/manager.go b/internal/nginx/file/manager.go deleted file mode 100644 index 2fbed7fdd9..0000000000 --- a/internal/nginx/file/manager.go +++ /dev/null @@ -1,49 +0,0 @@ -package file - -import ( - "fmt" - "os" - "path/filepath" -) - -const confdFolder = "/etc/nginx/conf.d" - -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . Manager - -// Manager manages NGINX configuration files. -type Manager interface { - // WriteHTTPConfig writes the http config on the file system. - // The name distinguishes this config among all other configs. 
For that, it must be unique. - // Note that name is not the name of the corresponding configuration file. - WriteHTTPConfig(name string, cfg []byte) error -} - -// ManagerImpl is an implementation of Manager. -type ManagerImpl struct{} - -// NewManagerImpl creates a new NewManagerImpl. -func NewManagerImpl() *ManagerImpl { - return &ManagerImpl{} -} - -func (m *ManagerImpl) WriteHTTPConfig(name string, cfg []byte) error { - path := getPathForConfig(name) - - file, err := os.Create(path) - if err != nil { - return fmt.Errorf("failed to create server config %s: %w", path, err) - } - - defer file.Close() - - _, err = file.Write(cfg) - if err != nil { - return fmt.Errorf("failed to write server config %s: %w", path, err) - } - - return nil -} - -func getPathForConfig(name string) string { - return filepath.Join(confdFolder, name+".conf") -} diff --git a/internal/nginx/file/manager_test.go b/internal/nginx/file/manager_test.go deleted file mode 100644 index 6bcd87d408..0000000000 --- a/internal/nginx/file/manager_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package file - -import "testing" - -func TestGetPathForServerConfig(t *testing.T) { - expected := "/etc/nginx/conf.d/test.example.com.conf" - - result := getPathForConfig("test.example.com") - if result != expected { - t.Errorf("getPathForConfig() returned %q but expected %q", result, expected) - } -} diff --git a/internal/nginx/runtime/manager.go b/internal/nginx/runtime/manager.go deleted file mode 100644 index ba745aead3..0000000000 --- a/internal/nginx/runtime/manager.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - "syscall" - "time" -) - -const pidFile = "/etc/nginx/nginx.pid" - -type readFileFunc func(string) ([]byte, error) - -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . Manager - -// Manager manages the runtime of NGINX. -type Manager interface { - // Reload reloads NGINX configuration. It is a blocking operation. 
- Reload(ctx context.Context) error -} - -// ManagerImpl implements Manager. -type ManagerImpl struct{} - -// NewManagerImpl creates a new ManagerImpl. -func NewManagerImpl() *ManagerImpl { - return &ManagerImpl{} -} - -func (m *ManagerImpl) Reload(ctx context.Context) error { - // FIXME(pleshakov): Before reload attempt, make sure NGINX is running. - // If the gateway container starts before NGINX container (which is possible), - // then it is possible that a reload can be attempted when NGINX is not running yet. - // Make sure to prevent this case, so we don't get an error. - - // We find the main NGINX PID on every reload because it will change if the NGINX container is restarted. - pid, err := findMainProcess(os.ReadFile) - if err != nil { - return fmt.Errorf("failed to find NGINX main process: %w", err) - } - - // send HUP signal to the NGINX main process reload configuration - // See https://nginx.org/en/docs/control.html - err = syscall.Kill(pid, syscall.SIGHUP) - if err != nil { - return fmt.Errorf("failed to send the HUP signal to NGINX main: %w", err) - } - - // FIXME(pleshakov) - // (1) ensure the reload actually happens. - // (2) ensure that in case of an error, the error message can be seen by the admins. - - // for now, to prevent a subsequent reload starting before the in-flight reload finishes, we simply sleep. - // Fixing (1) will make the sleep unnecessary. 
- - select { - case <-ctx.Done(): - return nil - case <-time.After(1 * time.Second): - } - - return nil -} - -func findMainProcess(readFile readFileFunc) (int, error) { - content, err := readFile(pidFile) - if err != nil { - return 0, err - } - - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("invalid pid file content %q: %w", content, err) - } - - return pid, nil -} diff --git a/internal/nginx/runtime/manager_test.go b/internal/nginx/runtime/manager_test.go deleted file mode 100644 index 8d716f8d9d..0000000000 --- a/internal/nginx/runtime/manager_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "errors" - "testing" -) - -func TestFindMainProcess(t *testing.T) { - readFileFuncGen := func(content []byte) readFileFunc { - return func(name string) ([]byte, error) { - if name != pidFile { - return nil, errors.New("error") - } - return content, nil - } - } - readFileError := func(string) ([]byte, error) { - return nil, errors.New("error") - } - - tests := []struct { - readFile readFileFunc - msg string - expected int - expectError bool - }{ - { - readFile: readFileFuncGen([]byte("1\n")), - expected: 1, - expectError: false, - msg: "normal case", - }, - { - readFile: readFileFuncGen([]byte("")), - expected: 0, - expectError: true, - msg: "empty file content", - }, - { - readFile: readFileFuncGen([]byte("not a number")), - expected: 0, - expectError: true, - msg: "bad file content", - }, - { - readFile: readFileError, - expected: 0, - expectError: true, - msg: "cannot read file", - }, - } - - for _, test := range tests { - result, err := findMainProcess(test.readFile) - - if result != test.expected { - t.Errorf("findMainProcess() returned %d but expected %d for case %q", result, test.expected, test.msg) - } - - if test.expectError { - if err == nil { - t.Errorf("findMainProcess() didn't return error for case %q", test.msg) - } - } else { - if err != nil { - t.Errorf("findMainProcess() returned 
unexpected error %v for case %q", err, test.msg) - } - } - } -} diff --git a/internal/nginx/runtime/runtimefakes/fake_manager.go b/internal/nginx/runtime/runtimefakes/fake_manager.go deleted file mode 100644 index 73593d09aa..0000000000 --- a/internal/nginx/runtime/runtimefakes/fake_manager.go +++ /dev/null @@ -1,112 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package runtimefakes - -import ( - "context" - "sync" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/runtime" -) - -type FakeManager struct { - ReloadStub func(context.Context) error - reloadMutex sync.RWMutex - reloadArgsForCall []struct { - arg1 context.Context - } - reloadReturns struct { - result1 error - } - reloadReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeManager) Reload(arg1 context.Context) error { - fake.reloadMutex.Lock() - ret, specificReturn := fake.reloadReturnsOnCall[len(fake.reloadArgsForCall)] - fake.reloadArgsForCall = append(fake.reloadArgsForCall, struct { - arg1 context.Context - }{arg1}) - stub := fake.ReloadStub - fakeReturns := fake.reloadReturns - fake.recordInvocation("Reload", []interface{}{arg1}) - fake.reloadMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) ReloadCallCount() int { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - return len(fake.reloadArgsForCall) -} - -func (fake *FakeManager) ReloadCalls(stub func(context.Context) error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = stub -} - -func (fake *FakeManager) ReloadArgsForCall(i int) context.Context { - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - argsForCall := fake.reloadArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeManager) ReloadReturns(result1 error) { - fake.reloadMutex.Lock() - defer 
fake.reloadMutex.Unlock() - fake.ReloadStub = nil - fake.reloadReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) ReloadReturnsOnCall(i int, result1 error) { - fake.reloadMutex.Lock() - defer fake.reloadMutex.Unlock() - fake.ReloadStub = nil - if fake.reloadReturnsOnCall == nil { - fake.reloadReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.reloadReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeManager) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.reloadMutex.RLock() - defer fake.reloadMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeManager) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ runtime.Manager = new(FakeManager) From 38ea307eaca7b99c56d8e0240ee2250a8f09364a Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Wed, 12 Apr 2023 09:28:46 -0600 Subject: [PATCH 15/16] rework observer subject --- internal/nginx/agent/config_store.go | 100 --------------------------- internal/observer/config_subject.go | 80 +++++++++++++++++++++ internal/observer/observer.go | 2 +- 3 files changed, 81 insertions(+), 101 deletions(-) delete mode 100644 internal/nginx/agent/config_store.go create mode 100644 internal/observer/config_subject.go diff --git a/internal/nginx/agent/config_store.go b/internal/nginx/agent/config_store.go deleted file mode 100644 index ebb4c495a2..0000000000 --- a/internal/nginx/agent/config_store.go +++ /dev/null @@ -1,100 +0,0 @@ -package agent 
- -import ( - "fmt" - "sync" - "sync/atomic" - - "github.com/go-logr/logr" - "github.com/nginx/agent/sdk/v2/proto" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" -) - -// NginxConfig is an intermediate object that contains nginx configuration in a form that agent expects. -// We convert the dataplane configuration to NginxConfig in the config store, so we only need to do it once -// per configuration change. The NginxConfig is then used by the agent to generate the nginx configuration payload. -type NginxConfig struct { - ID string - Config *proto.ZippedFile - Aux *proto.ZippedFile - Directories []*proto.Directory -} - -// ConfigStore stores accepts the latest dataplane configuration and stores is as NginxConfig. -// ConfigStore implements the observer.Subject interface, -// so that it can notify the agent observers when the configuration changes. -// ConfigStore is thread-safe. -type ConfigStore struct { - latestConfig atomic.Value - configBuilder *NginxConfigBuilder - logger logr.Logger - observers []observer.Observer - observerLock sync.Mutex -} - -// NewConfigStore creates a new ConfigStore. -func NewConfigStore(configBuilder *NginxConfigBuilder, logger logr.Logger) *ConfigStore { - return &ConfigStore{ - observers: make([]observer.Observer, 0), - configBuilder: configBuilder, - logger: logger, - } -} - -// Register registers an observer. -func (a *ConfigStore) Register(observer observer.Observer) { - a.observerLock.Lock() - defer a.observerLock.Unlock() - - a.observers = append(a.observers, observer) - a.logger.Info("Registering observer", "number of registered observers", len(a.observers)) -} - -// Notify notifies all registered observers. 
-func (a *ConfigStore) Notify() { - a.observerLock.Lock() - defer a.observerLock.Unlock() - - a.logger.Info("Notifying observers", "number of registered observers", len(a.observers)) - for _, o := range a.observers { - o.Update() - } -} - -// Remove removes an observer. -func (a *ConfigStore) Remove(observer observer.Observer) { - a.observerLock.Lock() - defer a.observerLock.Unlock() - - for i, o := range a.observers { - if o == observer { - a.observers = append(a.observers[:i], a.observers[i+1:]...) - a.logger.Info("Removed observer", "number of registered observers", len(a.observers)) - return - } - } -} - -// Store accepts the latest dataplane configuration, builds the NginxConfig from it, and stores it. -// It's possible for an error to occur when building the NginxConfig, -// in which case the error is returned, and the configuration is not stored. -// If the configuration is successfully stored, the observers are notified. -func (a *ConfigStore) Store(configuration dataplane.Configuration) error { - agentConf, err := a.configBuilder.Build(configuration) - if err != nil { - return fmt.Errorf("error building nginx agent configuration: %w", err) - } - - a.logger.Info("Storing configuration", "config generation", configuration.Generation) - - a.latestConfig.Store(agentConf) - a.Notify() - return nil -} - -// GetLatestConfig returns the latest NginxConfig. -func (a *ConfigStore) GetLatestConfig() *NginxConfig { - return a.latestConfig.Load().(*NginxConfig) -} diff --git a/internal/observer/config_subject.go b/internal/observer/config_subject.go new file mode 100644 index 0000000000..3616b1ae6f --- /dev/null +++ b/internal/observer/config_subject.go @@ -0,0 +1,80 @@ +package observer + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/go-logr/logr" +) + +type VersionedConfig interface { + GetVersion() string +} + +// ConfigSubject stores the latest VersionedConfig. +// It implements the Subject interface and can be observed by Observers. 
+// When a new VersionedConfig is stored, all registered Observers are notified. +type ConfigSubject[T VersionedConfig] struct { + latestConfig atomic.Value + logger logr.Logger + observers map[string]Observer + observerLock sync.Mutex +} + +// NewConfigSubject creates a new ConfigSubject. +func NewConfigSubject[T VersionedConfig](logger logr.Logger) *ConfigSubject[T] { + return &ConfigSubject[T]{ + observers: make(map[string]Observer), + logger: logger, + } +} + +// Register registers an observer. +func (a *ConfigSubject[T]) Register(observer Observer) { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + a.observers[observer.ID()] = observer + a.logger.Info( + fmt.Sprintf("Registering observer %s", observer.ID()), + "number of registered observers", + len(a.observers), + ) +} + +// Notify notifies all registered observers. +func (a *ConfigSubject[T]) notify() { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + a.logger.Info("Notifying observers", "number of registered observers", len(a.observers)) + for _, o := range a.observers { + o.Update() + } +} + +// Remove removes an observer. +func (a *ConfigSubject[T]) Remove(observer Observer) { + a.observerLock.Lock() + defer a.observerLock.Unlock() + + delete(a.observers, observer.ID()) + a.logger.Info( + fmt.Sprintf("Removing observer %s", observer.ID()), + "number of registered observers", + len(a.observers), + ) +} + +func (a *ConfigSubject[T]) Update(cfg VersionedConfig) { + a.logger.Info("Storing configuration", "config version", cfg.GetVersion()) + + a.latestConfig.Store(cfg) + a.notify() +} + +// GetLatestConfig returns the current stored config. 
+func (a *ConfigSubject[T]) GetLatestConfig() T { + return a.latestConfig.Load().(T) +} diff --git a/internal/observer/observer.go b/internal/observer/observer.go index 10631bd3f5..42911354e2 100644 --- a/internal/observer/observer.go +++ b/internal/observer/observer.go @@ -4,10 +4,10 @@ package observer type Subject interface { Register(observer Observer) Remove(observer Observer) - Notify() } // Observer is an interface for objects that can observe a Subject. type Observer interface { + ID() string Update() } From b92156666a9a8e9a57c5eed91e9e5c1cda34c965 Mon Sep 17 00:00:00 2001 From: Kate Osborn Date: Wed, 12 Apr 2023 09:50:24 -0600 Subject: [PATCH 16/16] Refactor --- examples/many-updates/deploy.sh | 11 - examples/many-updates/make-changes.sh | 24 + .../config/adapter.go} | 101 +-- internal/agent/config/request.go | 41 ++ internal/agent/config/response.go | 111 ++++ internal/agent/config/updater.go | 329 ++++++++++ internal/agent/connect_info.go | 45 ++ internal/agent/store.go | 113 ++++ internal/async/monitor.go | 47 ++ internal/async/promise.go | 12 + internal/events/handler.go | 27 +- internal/grpc/commander/channel.go | 134 ---- internal/grpc/commander/channel_test.go | 173 ------ internal/grpc/commander/commander.go | 367 ++++++++--- .../grpc/commander/commander_suite_test.go | 13 - .../fake_commander_command_channel_server.go | 583 ------------------ .../fake_commander_upload_server.go | 583 ------------------ internal/grpc/commander/connection.go | 531 ---------------- internal/grpc/commander/connection_test.go | 376 ----------- internal/grpc/commander/doc.go | 8 - internal/grpc/commander/exchanger/doc.go | 8 - .../grpc/commander/exchanger/exchanger.go | 24 - .../exchangerfakes/fake_command_exchanger.go | 243 -------- internal/manager/manager.go | 29 +- internal/nginx/agent/doc.go | 9 - internal/nginx/config/nginx_conf_template.go | 28 +- internal/observer/config_subject.go | 29 +- internal/observer/observer.go | 10 +- 
internal/state/dataplane/configuration.go | 16 +- internal/state/secrets/secrets_test.go | 1 + 30 files changed, 1165 insertions(+), 2861 deletions(-) delete mode 100755 examples/many-updates/deploy.sh create mode 100755 examples/many-updates/make-changes.sh rename internal/{nginx/agent/config_builder.go => agent/config/adapter.go} (54%) create mode 100644 internal/agent/config/request.go create mode 100644 internal/agent/config/response.go create mode 100644 internal/agent/config/updater.go create mode 100644 internal/agent/connect_info.go create mode 100644 internal/agent/store.go create mode 100644 internal/async/monitor.go create mode 100644 internal/async/promise.go delete mode 100644 internal/grpc/commander/channel.go delete mode 100644 internal/grpc/commander/channel_test.go delete mode 100644 internal/grpc/commander/commander_suite_test.go delete mode 100644 internal/grpc/commander/commanderfakes/fake_commander_command_channel_server.go delete mode 100644 internal/grpc/commander/commanderfakes/fake_commander_upload_server.go delete mode 100644 internal/grpc/commander/connection.go delete mode 100644 internal/grpc/commander/connection_test.go delete mode 100644 internal/grpc/commander/doc.go delete mode 100644 internal/grpc/commander/exchanger/doc.go delete mode 100644 internal/grpc/commander/exchanger/exchanger.go delete mode 100644 internal/grpc/commander/exchanger/exchangerfakes/fake_command_exchanger.go delete mode 100644 internal/nginx/agent/doc.go diff --git a/examples/many-updates/deploy.sh b/examples/many-updates/deploy.sh deleted file mode 100755 index cfd6373a39..0000000000 --- a/examples/many-updates/deploy.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -kubectl apply -f gateway.yaml -kubectl apply -f cafe.yaml - -for i in {1..6}; do - kubectl scale --replicas ${i} deployment tea - kubectl apply -f "${i}-cafe-routes.yaml" - kubectl scale --replicas ${i} deployment coffee - sleep 0.1 -done diff --git a/examples/many-updates/make-changes.sh 
b/examples/many-updates/make-changes.sh new file mode 100755 index 0000000000..d8ab2b2a00 --- /dev/null +++ b/examples/many-updates/make-changes.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +kubectl apply -f gateway.yaml +kubectl apply -f cafe.yaml + +trap "echo 'Ctrl+C pressed. Exiting...'; exit" INT + +while true +do + echo "Scaling up..." + for i in {1..6}; do + kubectl scale --replicas ${i} deployment tea + kubectl apply -f "${i}-cafe-routes.yaml" + kubectl scale --replicas ${i} deployment coffee + done + + sleep 1 + echo "Scaling down..." + for i in {6..1}; do + kubectl scale --replicas ${i} deployment tea + kubectl apply -f "${i}-cafe-routes.yaml" + kubectl scale --replicas ${i} deployment coffee + done +done diff --git a/internal/nginx/agent/config_builder.go b/internal/agent/config/adapter.go similarity index 54% rename from internal/nginx/agent/config_builder.go rename to internal/agent/config/adapter.go index 93859ea482..a5cc3be15a 100644 --- a/internal/nginx/agent/config_builder.go +++ b/internal/agent/config/adapter.go @@ -1,4 +1,4 @@ -package agent +package config import ( "bytes" @@ -9,12 +9,12 @@ import ( "github.com/nginx/agent/sdk/v2/zip" "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" ) const ( - // TODO: do we need another file mode for config files? secretsFileMode = 0o600 confPrefix = "/etc/nginx" secretsPrefix = "/etc/nginx/secrets" //nolint:gosec @@ -22,6 +22,20 @@ const ( httpConfFilePath = "conf.d/http.conf" ) +// NginxConfig is an intermediate object that contains nginx configuration in a form that agent expects. +// We convert the dataplane configuration to NginxConfig in the config store, so we only need to do it once +// per configuration change. 
The NginxConfig is then used by the agent to generate the nginx configuration payload. +type NginxConfig struct { + version string + config *proto.ZippedFile + aux *proto.ZippedFile + directories []*proto.Directory +} + +func (n *NginxConfig) GetVersion() string { + return n.version +} + type directory struct { prefix string files []file @@ -33,27 +47,28 @@ type file struct { mode os.FileMode } -// NginxConfigBuilder builds NginxConfig from the dataplane configuration. -type NginxConfigBuilder struct { - generator config.Generator - secretMemMgr secrets.SecretDiskMemoryManager +// NginxConfigAdapter adapts the dataplane.Configuration to NginxConfig. +type NginxConfigAdapter struct { + generator config.Generator + secretRequestMgr secrets.RequestManager } -// NewNginxConfigBuilder creates a new NginxConfigBuilder. -func NewNginxConfigBuilder( +// NewNginxConfigAdapter creates a new NginxConfigAdapter. +func NewNginxConfigAdapter( generator config.Generator, - secretMemMgr secrets.SecretDiskMemoryManager, -) *NginxConfigBuilder { - return &NginxConfigBuilder{ - generator: generator, - secretMemMgr: secretMemMgr, + secretRequestMgr secrets.RequestManager, +) *NginxConfigAdapter { + return &NginxConfigAdapter{ + generator: generator, + secretRequestMgr: secretRequestMgr, } } -// Build builds NginxConfig from the dataplane configuration. -// It generates the nginx configuration files using the config.Generator and the -// secrets files using the secrets.SecretDiskMemoryManager. -func (u *NginxConfigBuilder) Build(cfg dataplane.Configuration) (*NginxConfig, error) { +// VersionedConfig adapts the dataplane.Configuration to NginxConfig. +// It uses the config.Generator to generate the nginx.conf and http.conf files, +// and the secrets.RequestManager to generate the secret files. +// Implements VersionedConfigAdapter. 
+func (u *NginxConfigAdapter) VersionedConfig(cfg dataplane.Configuration) (observer.VersionedConfig, error) { confDirectory := u.generateConfigDirectory(cfg) auxDirectory := u.generateAuxConfigDirectory() @@ -62,47 +77,32 @@ func (u *NginxConfigBuilder) Build(cfg dataplane.Configuration) (*NginxConfig, e convertToProtoDirectory(auxDirectory), } - zconfig, err := u.generateZippedFile(confDirectory) + zconfig, err := generateZippedFile(confDirectory) if err != nil { return nil, err } - zaux, err := u.generateZippedFile(auxDirectory) + zaux, err := generateZippedFile(auxDirectory) if err != nil { return nil, err } return &NginxConfig{ - ID: fmt.Sprintf("%d", cfg.Generation), - Config: zconfig, - Aux: zaux, - Directories: directories, + version: fmt.Sprintf("%d", cfg.Version), + config: zconfig, + aux: zaux, + directories: directories, }, nil } -func convertToProtoDirectory(d directory) *proto.Directory { - files := make([]*proto.File, len(d.files)) - - for idx, f := range d.files { - files[idx] = &proto.File{ - Name: f.path, - } - } - - return &proto.Directory{ - Name: d.prefix, - Files: files, - } -} - -func (u *NginxConfigBuilder) generateConfigDirectory(cfg dataplane.Configuration) directory { +func (u *NginxConfigAdapter) generateConfigDirectory(cfg dataplane.Configuration) directory { return directory{ prefix: confPrefix, files: []file{ { path: nginxConfFilePath, mode: secretsFileMode, - contents: u.generator.GenerateMainConf(cfg.Generation), + contents: u.generator.GenerateMainConf(cfg.Version), }, { path: httpConfFilePath, @@ -113,8 +113,8 @@ func (u *NginxConfigBuilder) generateConfigDirectory(cfg dataplane.Configuration } } -func (u *NginxConfigBuilder) generateAuxConfigDirectory() directory { - secretFiles := u.secretMemMgr.GetAllRequestedSecrets() +func (u *NginxConfigAdapter) generateAuxConfigDirectory() directory { + secretFiles := u.secretRequestMgr.GetAndResetRequestedSecrets() files := make([]file, 0, len(secretFiles)) for _, secret := range 
secretFiles { @@ -131,7 +131,22 @@ func (u *NginxConfigBuilder) generateAuxConfigDirectory() directory { } } -func (u *NginxConfigBuilder) generateZippedFile(dir directory) (*proto.ZippedFile, error) { +func convertToProtoDirectory(d directory) *proto.Directory { + files := make([]*proto.File, len(d.files)) + + for idx, f := range d.files { + files[idx] = &proto.File{ + Name: f.path, + } + } + + return &proto.Directory{ + Name: d.prefix, + Files: files, + } +} + +func generateZippedFile(dir directory) (*proto.ZippedFile, error) { w, err := zip.NewWriter(dir.prefix) if err != nil { return nil, err diff --git a/internal/agent/config/request.go b/internal/agent/config/request.go new file mode 100644 index 0000000000..51b1a630cb --- /dev/null +++ b/internal/agent/config/request.go @@ -0,0 +1,41 @@ +package config + +import ( + "context" + + "github.com/nginx/agent/sdk/v2/proto" +) + +// Request is a request for a *proto.Config. +type Request struct { + replyCh chan *proto.NginxConfig + id string +} + +// NewRequest returns a Request with the provided ID. +func NewRequest(id string) *Request { + return &Request{ + id: id, + replyCh: make(chan *proto.NginxConfig), + } +} + +// WaitForReply blocks until a reply is received or the context is canceled. +func (r *Request) WaitForReply(ctx context.Context) (*proto.NginxConfig, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case reply := <-r.replyCh: + return reply, nil + } +} + +// reply replies to the request with the provided config. Blocks until reply is received or the context is canceled. 
+func (r *Request) reply(ctx context.Context, config *proto.NginxConfig) error { + select { + case <-ctx.Done(): + return ctx.Err() + case r.replyCh <- config: + return nil + } +} diff --git a/internal/agent/config/response.go b/internal/agent/config/response.go new file mode 100644 index 0000000000..f954bc8ba1 --- /dev/null +++ b/internal/agent/config/response.go @@ -0,0 +1,111 @@ +package config + +import ( + "strings" + + "github.com/nginx/agent/sdk/v2/proto" +) + +type applyResponse struct { + correlationID string + message string + status applyStatus +} + +type applyStatus string + +const ( + applyStatusSuccess applyStatus = "success" + applyStatusFailure applyStatus = "failure" + applyStatusPending applyStatus = "pending" + applyStatusUnknown applyStatus = "unknown" +) + +// FIXME(kate-osborn): We should only need to check for dataplane status. +// There's a bug in the agent where sometimes a dataplane status command is not sent when the config apply fails. +func cmdToApplyStatus(cmd *proto.Command) *applyResponse { + switch cmd.Data.(type) { + case *proto.Command_NginxConfigResponse: + return convertNginxConfigResponse(cmd.GetNginxConfigResponse()) + case *proto.Command_DataplaneStatus: + return convertDataplaneStatus(cmd.GetDataplaneStatus()) + default: + return nil + } +} + +func convertDataplaneStatus(status *proto.DataplaneStatus) *applyResponse { + activityStatuses := status.GetAgentActivityStatus() + + if activityStatuses == nil { + return nil + } + + s := getFirstNginxConfigStatus(activityStatuses) + if s == nil { + return nil + } + + return &applyResponse{ + correlationID: s.CorrelationId, + message: s.Message, + status: convertNginxConfigStatus(s.Status), + } +} + +func convertNginxConfigResponse(res *proto.NginxConfigResponse) *applyResponse { + status := res.Status + + if status == nil { + return nil + } + + if strings.Contains(status.Message, "upload") { + // ignore upload config responses + return nil + } + + return &applyResponse{ + message: 
status.Message, + status: convertCommandStatusResponse(status), + } +} + +func convertNginxConfigStatus(status proto.NginxConfigStatus_Status) applyStatus { + switch status { + case proto.NginxConfigStatus_OK: + return applyStatusSuccess + case proto.NginxConfigStatus_PENDING: + return applyStatusPending + case proto.NginxConfigStatus_ERROR: + return applyStatusFailure + default: + return applyStatusUnknown + } +} + +func convertCommandStatusResponse(status *proto.CommandStatusResponse) applyStatus { + if status.Status == proto.CommandStatusResponse_CMD_OK { + if strings.Contains(status.Message, "config applied successfully") { + return applyStatusSuccess + } + + return applyStatusPending + } + + if status.Status == proto.CommandStatusResponse_CMD_ERROR { + return applyStatusFailure + } + + return applyStatusUnknown +} + +// TODO: figure out if it's possible to have multiple NginxConfigStatus in a single DataplaneStatus. +func getFirstNginxConfigStatus(status []*proto.AgentActivityStatus) *proto.NginxConfigStatus { + for _, s := range status { + if s.GetNginxConfigStatus() != nil { + return s.GetNginxConfigStatus() + } + } + return nil +} diff --git a/internal/agent/config/updater.go b/internal/agent/config/updater.go new file mode 100644 index 0000000000..39f205aeaa --- /dev/null +++ b/internal/agent/config/updater.go @@ -0,0 +1,329 @@ +package config + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/nginx/agent/sdk/v2/proto" + "golang.org/x/sync/errgroup" + + "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/async" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" +) + +const ( + // configUpdatedChSize is 1 to allow an update to be queued while the current update is processing. + // This guarantees that we will not miss updates. 
+ configUpdatedChSize = 1 + // applyTimeout is the time the Updater waits for the agent to send a status update after a config apply. + applyTimeout = 1 * time.Minute + // configApplyResponsesTimeout is the time the Updater waits to place a response on the configApplyResponses + // channel. + configApplyResponsesTimeout = 100 * time.Millisecond +) + +// Updater is responsible for updating the NGINX configuration on the agent. +type Updater struct { + // server is the bidirectional command channel server. It sends and receives commands to and from the agent. + server proto.Commander_CommandChannelServer + // configSubject pushes *NginxConfig to the Updater every time the *NginxConfig is updated. + configSubject observer.Subject[*NginxConfig] + // latestConfig synchronously stores the latest *NginxConfig received by the configSubject. + latestConfig atomic.Value + // configUpdateCompleted is the channel the Updater writes to when the latest config update is complete. + // It signals that the agent is ready for another config. + configUpdateCompleted chan struct{} + // configUpdated is the channel that the configSubject writes to when the *NginxConfig is updated. + configUpdated chan struct{} + // configRequests is the channel that the commander writes to when the agent requests to Download the config. + // The agent will reply to the request with the latest config. + configRequests chan *Request + // configApplyResponses is the channel that the Updater writes to when the server receives a config apply response. + // It is used to verify whether a config apply was successful or not. + configApplyResponses chan *applyResponse + // connectInfo is the identifying information that the agent provides in its connect request. + // This information is needed to create the *proto.NginxConfig payload. + connectInfo agent.ConnectInfo + // logger is the Updater's logger. 
+ logger logr.Logger +} + +func NewUpdater( + server proto.Commander_CommandChannelServer, + info agent.ConnectInfo, + configSubject observer.Subject[*NginxConfig], + logger logr.Logger, +) *Updater { + return &Updater{ + connectInfo: info, + server: server, + logger: logger, + configSubject: configSubject, + configUpdated: make(chan struct{}, configUpdatedChSize), + configRequests: make(chan *Request), + configApplyResponses: make(chan *applyResponse), + configUpdateCompleted: make(chan struct{}), + } +} + +// Start starts the Updater's loops and registers itself with the configSubject. +// It will block until the context is canceled, or an error occurs in one of the loops. +// On termination, it will remove itself from the configSubject. +func (u *Updater) Start(parent context.Context) error { + u.logger.Info("Starting agent config updater") + + eg, ctx := errgroup.WithContext(parent) + + eg.Go(func() error { + return u.receiveCommandLoop(ctx) + }) + + eg.Go(func() error { + return u.updateConfigLoop(ctx) + }) + + eg.Go(func() error { + return u.configRequestLoop(ctx) + }) + + u.configSubject.Register(u) + + defer func() { + u.logger.Info("Stopping agent config updater") + u.configSubject.Remove(u) + }() + + return eg.Wait() +} + +// Requests returns a write-only channel of *Request. +// Writing to this channel is equivalent to requesting the latest stored Nginx configuration. +func (u *Updater) Requests() chan<- *Request { + return u.configRequests +} + +// ID returns the ID of the updater. The observer.Subject calls this method. +func (u *Updater) ID() string { + return u.connectInfo.ID +} + +// Update is the method that the observer.Subject calls when the NginxConfig is updated. 
+// This method is required +func (u *Updater) Update(config observer.VersionedConfig) { + select { + case u.configUpdated <- struct{}{}: + default: + } + + u.latestConfig.Store(config) +} + +// receiveCommandLoop receives commands from the server until an error occurs or the context is canceled. +func (u *Updater) receiveCommandLoop(ctx context.Context) error { + for { + cmd, err := u.server.Recv() + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + u.handleCommand(ctx, cmd) + } + } +} + +// updateConfigLoop receives from the configUpdated channel until an error occurs or the context is canceled. +// It sends a download command to the agent when it receives from the configUpdate channel and blocks until the +// update is complete or the context is canceled. +func (u *Updater) updateConfigLoop(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-u.configUpdated: + if err := u.sendDownloadCommand(ctx); err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-u.configUpdateCompleted: + } + } + } +} + +// configRequestLoop reads from the configRequest channel until an error occurs or the context is canceled. +// When it receives a Request, it calls handleRequest, which will block until the Request has been completed. +func (u *Updater) configRequestLoop(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case req := <-u.configRequests: + if err := u.handleRequest(ctx, req); err != nil { + return err + } + } + } +} + +// handleRequest replies to the Request with the latest NGINX configuration. +// It then waits until it receives a definitive response from the agent on the success of the config apply. 
+func (u *Updater) handleRequest(ctx context.Context, req *Request) error { + reqID := req.id + + u.logger.Info("Handling request", "requestID", reqID) + + statusPromise := async.MonitorWithMatcher( + ctx, + applyTimeout, + u.configApplyResponses, + newApplyResponseMatcher(reqID), + ) + + config, ok := u.latestConfig.Load().(*NginxConfig) + if !ok { + panic(fmt.Sprintf("expected *NginxConfig, got %T", config)) + } + + u.logger.Info("Replying to request with config", "reqID", reqID, "version", config.version) + + if err := req.reply(ctx, u.toProtoConfig(config)); err != nil { + return err + } + + status, err := statusPromise.Get() + + deadlineExceeded := errors.Is(err, context.DeadlineExceeded) + if err != nil && !deadlineExceeded { + return err + } + + if deadlineExceeded { + u.logger.Info("Timed out waiting for status", "reqID", reqID, "version", config.version) + } else { + u.logger.Info( + fmt.Sprintf("Config apply complete [%s]", status.status), + "message", + status.message, + "reqID", + status.correlationID, + "version", + config.version, + ) + } + + select { + case <-ctx.Done(): + return nil + case u.configUpdateCompleted <- struct{}{}: + u.logger.Info("Config update complete") + default: + } + + return nil +} + +// newApplyResponseMatcher returns an async.Matcher function that matches on applyResponses that are determinate ( +// success/failure). It discards applyResponses that are indeterminate ( +// pending/unknown status) or applyResponses that have a correlationID that does not match the one provided. +func newApplyResponseMatcher(correlationID string) func(res *applyResponse) bool { + return func(res *applyResponse) bool { + if res.correlationID != "" && res.correlationID != correlationID { + return false + } + return res.status == applyStatusSuccess || res.status == applyStatusFailure + } +} + +// handleCommand handles commands sent by the agent. +// The updater is only concerned with commands that are config apply responses. 
+// This method attempts to convert commands to an applyResponse. If a command can't be converted, +// it is ignored. Otherwise, it attempts to place the command on the configApplyResponses channel. +// If the channel blocks for longer than the configApplyResponsesTimeout, +// it assumes there is no config apply in progress and ignores the command. +func (u *Updater) handleCommand(ctx context.Context, cmd *proto.Command) { + if cmd == nil { + // The agent should never send us a nil command, but we catch this case out of an abundance of caution. + // We don't want to return an error in this case because that would break the CommandChannel + // connection with the agent. Instead, we log the abnormality and continue processing. + u.logger.Error(errors.New("received nil command"), "expected non-nil command") + return + } + + response := cmdToApplyStatus(cmd) + if response == nil { + u.logger.Info("Ignoring command", "type", fmt.Sprintf("%T", cmd.Data)) + return + } + + u.logger.Info("Handling command", "command", cmd) + + select { + case <-ctx.Done(): + return + case u.configApplyResponses <- response: + case <-time.After(configApplyResponsesTimeout): + u.logger.Info( + "Ignoring config apply response; no config apply in progress", + "status", + response.status, + "correlationID", + response.correlationID, + "msg", + response.message, + ) + } +} + +func (u *Updater) sendDownloadCommand(ctx context.Context) error { + cmd := &proto.Command{ + Meta: &proto.Metadata{ + MessageId: uuid.NewString(), + }, + Type: proto.Command_DOWNLOAD, + Data: &proto.Command_NginxConfig{ + NginxConfig: &proto.NginxConfig{ + Action: proto.NginxConfigAction_APPLY, + ConfigData: &proto.ConfigDescriptor{ + SystemId: u.connectInfo.SystemID, + NginxId: u.connectInfo.NginxID, + }, + }, + }, + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + u.logger.Info("Sending download command") + return u.server.Send(cmd) + } +} + +func (u *Updater) toProtoConfig(config *NginxConfig) 
*proto.NginxConfig { + return &proto.NginxConfig{ + Action: proto.NginxConfigAction_APPLY, + ConfigData: &proto.ConfigDescriptor{ + SystemId: u.connectInfo.SystemID, + NginxId: u.connectInfo.NginxID, + }, + Zconfig: config.config, + Zaux: config.aux, + DirectoryMap: &proto.DirectoryMap{ + Directories: config.directories, + }, + } +} diff --git a/internal/agent/connect_info.go b/internal/agent/connect_info.go new file mode 100644 index 0000000000..2a46e514f3 --- /dev/null +++ b/internal/agent/connect_info.go @@ -0,0 +1,45 @@ +package agent + +import ( + "fmt" + + "github.com/nginx/agent/sdk/v2/proto" +) + +// ConnectInfo is the identifying information that the agent sends in its connect request. +type ConnectInfo struct { + ID string + NginxID string + SystemID string + PodName string +} + +func NewConnectInfo(id string, req *proto.AgentConnectRequest) ConnectInfo { + details := ConnectInfo{ + ID: id, + NginxID: getFirstNginxID(req.GetDetails()), + } + + if meta := req.GetMeta(); meta != nil { + details.SystemID = meta.GetSystemUid() + details.PodName = meta.GetDisplayName() + } + + return details +} + +func (d ConnectInfo) Validate() error { + if d.NginxID == "" || d.SystemID == "" { + return fmt.Errorf("missing NginxID: '%s' and/or SystemID: '%s'", d.NginxID, d.SystemID) + } + + return nil +} + +func getFirstNginxID(details []*proto.NginxDetails) (id string) { + if len(details) > 0 { + id = details[0].GetNginxId() + } + + return +} diff --git a/internal/agent/store.go b/internal/agent/store.go new file mode 100644 index 0000000000..1ead19d875 --- /dev/null +++ b/internal/agent/store.go @@ -0,0 +1,113 @@ +package agent + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// ConnectInfoStore stores Agent ConnectInfo. +// +// Agents only send the ConnectInfo once on startup, and in the event of a reconnect, +// we need to be able to recover the ConnectInfo. 
However, we don't want to persist ConnectInfo +// for Agents that are no longer running. +// +// The ConnectInfoStore handles this by accepting a ttl, which is the minimum amount of time that +// ConnectInfo entries live after deletion. +// +// When ConnectInfo is deleted from the store, it will be marked for deletion, but will still be accessible until the +// ttl has been reached. During this time, if the ConnectInfo is requested, it will be unmarked for deletion. +type ConnectInfoStore struct { + deleted map[string]*entry + entries map[string]*entry + logger logr.Logger + collectionInterval time.Duration + ttl time.Duration + + // mu protects both maps + mu sync.Mutex +} + +type entry struct { + deletionTimestamp time.Time + info ConnectInfo +} + +// NewConnectInfoStore returns a ConnectInfoStore with the provided ttl. +func NewConnectInfoStore(logger logr.Logger, ttl time.Duration) *ConnectInfoStore { + return &ConnectInfoStore{ + deleted: make(map[string]*entry), + entries: make(map[string]*entry), + collectionInterval: ttl, + ttl: ttl, + logger: logger, + } +} + +// Start kicks off the garbage collection job for the ConnectInfoStore. 
+func (s *ConnectInfoStore) Start(ctx context.Context) error {
+	ticker := time.NewTicker(s.collectionInterval)
+
+	s.logger.Info("Starting garbage collection job for agent store")
+
+	for {
+		select {
+		case <-ctx.Done():
+			s.logger.Info("Stopping garbage collection job for agent store")
+			return nil
+		case <-ticker.C:
+			s.logger.Info("Checking for expired entries")
+			s.mu.Lock()
+			for id, entry := range s.deleted {
+				if time.Since(entry.deletionTimestamp) > s.ttl {
+					s.logger.Info("Deleting entry", "id", id)
+					delete(s.deleted, id)
+				}
+			}
+			s.mu.Unlock()
+		}
+	}
+}
+
+func (s *ConnectInfoStore) Get(id string) (ConnectInfo, bool) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	entry, ok := s.entries[id]
+	if ok {
+		return entry.info, ok
+	}
+
+	entry, ok = s.deleted[id]
+	if ok {
+		delete(s.deleted, id)
+		s.entries[id] = entry
+		return entry.info, ok
+	}
+
+	return ConnectInfo{}, false
+}
+
+func (s *ConnectInfoStore) Add(info ConnectInfo) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.entries[info.ID] = &entry{info: info}
+}
+
+func (s *ConnectInfoStore) Delete(id string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	entry, ok := s.entries[id]
+	if !ok {
+		return
+	}
+
+	entry.deletionTimestamp = time.Now()
+
+	s.deleted[id] = entry
+	delete(s.entries, id)
+}
diff --git a/internal/async/monitor.go b/internal/async/monitor.go
new file mode 100644
index 0000000000..a0b117e1b4
--- /dev/null
+++ b/internal/async/monitor.go
@@ -0,0 +1,47 @@
+package async
+
+import (
+	"context"
+	"time"
+)
+
+type Matcher[V any] func(v V) bool
+
+// MonitorWithMatcher monitors the channel until it receives an item that satisfies the Matcher,
+// the context is canceled, or the timeout is reached. It immediately returns a Promise that will contain the result
+// once it is available.
+func MonitorWithMatcher[V any]( + ctx context.Context, + timeout time.Duration, + ch <-chan V, + matcher Matcher[V], +) *Promise[V] { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + + done := make(chan struct{}) + p := Promise[V]{ + done: done, + } + + go func() { + defer func() { + close(done) + cancel() + }() + for { + select { + case <-ctx.Done(): + var zero V + p.val, p.err = zero, ctx.Err() + return + case v := <-ch: + if matcher(v) { + p.val, p.err = v, nil + return + } + } + } + }() + return &p +} diff --git a/internal/async/promise.go b/internal/async/promise.go new file mode 100644 index 0000000000..4b159e18ef --- /dev/null +++ b/internal/async/promise.go @@ -0,0 +1,12 @@ +package async + +type Promise[V any] struct { + val V + err error + done <-chan struct{} +} + +func (p *Promise[V]) Get() (V, error) { + <-p.done + return p.val, p.err +} diff --git a/internal/events/handler.go b/internal/events/handler.go index 9747e34de8..7334348c4b 100644 --- a/internal/events/handler.go +++ b/internal/events/handler.go @@ -9,6 +9,7 @@ import ( discoveryV1 "k8s.io/api/discovery/v1" "sigs.k8s.io/gateway-api/apis/v1beta1" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/dataplane" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/secrets" @@ -24,9 +25,12 @@ type EventHandler interface { HandleEventBatch(ctx context.Context, batch EventBatch) } -// ConfigStorer stores dataplane configuration. -type ConfigStorer interface { - Store(conf dataplane.Configuration) error +type ConfigUpdater interface { + Update(cfg observer.VersionedConfig) +} + +type VersionedConfigAdapter interface { + VersionedConfig(cfg dataplane.Configuration) (observer.VersionedConfig, error) } // EventHandlerConfig holds configuration parameters for EventHandlerImpl. 
@@ -35,8 +39,10 @@ type EventHandlerConfig struct { Processor state.ChangeProcessor // SecretStore is the state SecretStore. SecretStore secrets.SecretStore - // ConfigStorer stores dataplane configuration. - ConfigStorer ConfigStorer + // ConfigAdapter adapts dataplane.Configuration to a dataplane-specific versioned configuration. + ConfigAdapter VersionedConfigAdapter + // ConfigUpdater updates configuration. + ConfigUpdater ConfigUpdater // StatusUpdater updates statuses on Kubernetes resources. StatusUpdater status.Updater // Logger is the logger to be used by the EventHandler. @@ -79,14 +85,17 @@ func (h *EventHandlerImpl) HandleEventBatch(ctx context.Context, batch EventBatc } h.changeCounter++ - conf.Generation = h.changeCounter + conf.Version = h.changeCounter - err := h.cfg.ConfigStorer.Store(conf) + // TODO should I pass in the version to the adapter? + vc, err := h.cfg.ConfigAdapter.VersionedConfig(conf) if err != nil { - h.cfg.Logger.Error(err, "error storing dataplane configuration") - // FIXME(kate-osborn): Update status to indicate that the gateway is not accepted or programmed. + h.cfg.Logger.Error(err, "error adapting dataplane configuration to a versioned configuration") + return } + h.cfg.ConfigUpdater.Update(vc) + h.cfg.StatusUpdater.Update(ctx, statuses) } diff --git a/internal/grpc/commander/channel.go b/internal/grpc/commander/channel.go deleted file mode 100644 index 492cf36f46..0000000000 --- a/internal/grpc/commander/channel.go +++ /dev/null @@ -1,134 +0,0 @@ -package commander - -import ( - "context" - "errors" - "fmt" - - "github.com/go-logr/logr" - "github.com/nginx/agent/sdk/v2/proto" - "golang.org/x/sync/errgroup" -) - -const channelLength = 25 - -// BidirectionalChannel encapsulates the CommandChannelServer which is a bidirectional streaming channel. -// The BidirectionalChannel is responsible for sending and receiving commands to and from the CommandChannelServer. 
-// -// All commands received from the CommandChannelServer are put on the fromClient channel and can be accessed through -// the Out() method. -// -// Commands can be sent to the CommandChannelServer by placing them on the toClient channel, -// which is accessible through the In() method. -// -// To use the BidirectionalChannel you must call the Run() method to kick off the receive and send loops. -type BidirectionalChannel struct { - channel proto.Commander_CommandChannelServer - fromClient chan *proto.Command - toClient chan *proto.Command - logger logr.Logger -} - -// NewBidirectionalChannel returns a new instance of the BidirectionalChannel. -func NewBidirectionalChannel( - channel proto.Commander_CommandChannelServer, - logger logr.Logger, -) *BidirectionalChannel { - return &BidirectionalChannel{ - channel: channel, - fromClient: make(chan *proto.Command, channelLength), - toClient: make(chan *proto.Command, channelLength), - logger: logger, - } -} - -// Run runs the receive and send loops on the BidirectionalChannel. -// Run is blocking and will return if an error occurs in either loop or the context is canceled. 
-func (bc *BidirectionalChannel) Run(parent context.Context) error { - defer func() { - close(bc.fromClient) - close(bc.toClient) - }() - - eg, ctx := errgroup.WithContext(parent) - - eg.Go(func() error { - return bc.receive(ctx) - }) - - eg.Go(func() error { - return bc.send(ctx) - }) - - return eg.Wait() -} - -func (bc *BidirectionalChannel) receive(ctx context.Context) error { - defer func() { - bc.logger.Info("Stopping receive command loop") - }() - - bc.logger.Info("Starting receive command loop") - - for { - cmd, err := bc.channel.Recv() - if err != nil { - return fmt.Errorf("error receiving command from CommandChannel: %w", err) - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - if cmd != nil { - bc.logger.Info("Received command", "command type", fmt.Sprintf("%T", cmd.Data)) - - select { - case <-ctx.Done(): - return ctx.Err() - case bc.fromClient <- cmd: - } - } else { - // The agent should never send us a nil command, but we catch this case out of an abundance of caution. - // We don't want to return an error in this case because that would break the CommandChannel - // connection with the agent. Instead, we log the abnormality and continue processing. - bc.logger.Error(errors.New("received nil command"), "expected non-nil command") - } - } - } -} - -func (bc *BidirectionalChannel) send(ctx context.Context) error { - defer func() { - bc.logger.Info("Stopping send command loop") - }() - - bc.logger.Info("Starting send command loop") - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case cmd := <-bc.toClient: - if cmd == nil { - panic("outgoing command is nil") - } - bc.logger.Info("Sending command", "command type", fmt.Sprintf("%T", cmd.Data)) - if err := bc.channel.Send(cmd); err != nil { - return fmt.Errorf("error sending command to CommandChannel: %w", err) - } - } - } -} - -// Out returns a write-only channel of commands. -// Commands written to this channel will be sent to the client over the CommandChannelServer. 
-func (bc *BidirectionalChannel) Out() chan<- *proto.Command { - return bc.toClient -} - -// In returns a read-only channel of commands. -// The BidirectionalChannel writes commands that it receives from the CommandChannelServer to this channel. -func (bc *BidirectionalChannel) In() <-chan *proto.Command { - return bc.fromClient -} diff --git a/internal/grpc/commander/channel_test.go b/internal/grpc/commander/channel_test.go deleted file mode 100644 index 07e483d7ce..0000000000 --- a/internal/grpc/commander/channel_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package commander_test - -import ( - "context" - "errors" - "fmt" - "testing" - - "github.com/nginx/agent/sdk/v2/proto" - . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/commanderfakes" -) - -func newTestCommand(msgID string) *proto.Command { - return &proto.Command{Meta: &proto.Metadata{MessageId: msgID}} -} - -func TestBidirectionalChannel(t *testing.T) { - g := NewGomegaWithT(t) - - recvCommands := make(chan *proto.Command) - sentCommands := make(chan *proto.Command) - - ctx, cancel := context.WithCancel(context.Background()) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - RecvStub: func() (*proto.Command, error) { - select { - case <-ctx.Done(): - return nil, nil - case cmd := <-recvCommands: - return cmd, nil - } - }, - SendStub: func(command *proto.Command) error { - sentCommands <- command - return nil - }, - } - - ch := commander.NewBidirectionalChannel(fakeServer, zap.New()) - - errCh := make(chan error) - - go func() { - errCh <- ch.Run(ctx) - }() - - testRecvCommand := func(msgID string) { - recvCommands <- newTestCommand(msgID) - cmd := <-ch.In() - g.Expect(cmd.GetMeta().GetMessageId()).To(Equal(msgID)) - } - - testSendCommand := func(msgID string) { - ch.Out() <- newTestCommand(msgID) - cmd := <-sentCommands 
- g.Expect(cmd.GetMeta().GetMessageId()).To(Equal(msgID)) - } - - for i := 0; i < 5; i++ { - msgID := fmt.Sprintf("msg-%d", i) - testRecvCommand(msgID) - testSendCommand(msgID) - } - - cancel() - - err := <-errCh - g.Expect(err).Should(MatchError(context.Canceled)) -} - -func TestBidirectionalChannel_SendError(t *testing.T) { - g := NewGomegaWithT(t) - - done := make(chan struct{}) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - RecvStub: func() (*proto.Command, error) { - return newTestCommand("msg-id"), nil - }, - SendStub: func(command *proto.Command) error { - <-done - return errors.New("send error") - }, - } - - ch := commander.NewBidirectionalChannel(fakeServer, zap.New()) - - errCh := make(chan error) - - go func() { - errCh <- ch.Run(context.Background()) - }() - - ch.Out() <- newTestCommand("msg-id") - close(done) - - err := <-errCh - g.Expect(err).Should(MatchError("error sending command to CommandChannel: send error")) -} - -func TestBidirectionalChannel_RecvError(t *testing.T) { - g := NewGomegaWithT(t) - - done := make(chan struct{}) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - RecvStub: func() (*proto.Command, error) { - <-done - return nil, errors.New("recv error") - }, - } - - ch := commander.NewBidirectionalChannel(fakeServer, zap.New()) - - errCh := make(chan error) - - go func() { - errCh <- ch.Run(context.Background()) - }() - - close(done) - - err := <-errCh - g.Expect(err).Should(MatchError("error receiving command from CommandChannel: recv error")) -} - -func TestBidirectionalChannel_NilCommand(t *testing.T) { - g := NewGomegaWithT(t) - - recvCommands := make(chan *proto.Command) - ctx, cancel := context.WithCancel(context.Background()) - - fakeServer := &commanderfakes.FakeCommander_CommandChannelServer{ - RecvStub: func() (*proto.Command, error) { - select { - case <-ctx.Done(): - return nil, nil - case cmd := <-recvCommands: - return cmd, nil - } - }, - } - - ch := 
commander.NewBidirectionalChannel(fakeServer, zap.New()) - - errCh := make(chan error) - - go func() { - errCh <- ch.Run(ctx) - }() - - testRecvCommand := func(msgID string) { - recvCommands <- newTestCommand(msgID) - cmd := <-ch.In() - g.Expect(cmd.GetMeta().GetMessageId()).To(Equal(msgID)) - } - - testRecvCommand("msg-1") - // add a nil command to the recv channel - recvCommands <- nil - // test that channel is still running and can receive non-nil commands - testRecvCommand("msg-2") - - cancel() - - err := <-errCh - g.Expect(err).Should(MatchError(context.Canceled)) -} diff --git a/internal/grpc/commander/commander.go b/internal/grpc/commander/commander.go index 0f616d14ab..7894fde97e 100644 --- a/internal/grpc/commander/commander.go +++ b/internal/grpc/commander/commander.go @@ -2,15 +2,23 @@ package commander import ( "context" + "encoding/json" "errors" "fmt" + "io" "sync" "github.com/go-logr/logr" + "github.com/gogo/protobuf/types" + "github.com/nginx/agent/sdk/v2/checksum" + "github.com/nginx/agent/sdk/v2/grpc" "github.com/nginx/agent/sdk/v2/proto" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent/config" "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" ) @@ -18,92 +26,146 @@ import ( //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 github.com/nginx/agent/sdk/v2/proto.Commander_CommandChannelServer //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 github.com/nginx/agent/sdk/v2/proto.Commander_UploadServer -const serverUUIDKey = "uuid" +const ( + serverUUIDKey = "uuid" + connectedMsg = "CONNECTED" +) + +type configUpdater interface { + Start(ctx context.Context) error + Requests() chan<- *config.Request +} -type observedNginxConfig interface { - observer.Subject - 
GetLatestConfig() *agent.NginxConfig +type agentStore interface { + Add(info agent.ConnectInfo) + Delete(id string) + Get(id string) (agent.ConnectInfo, bool) } // Commander implements the proto.CommanderServer interface. type Commander struct { - connections map[string]*connection - observedConfig observedNginxConfig - logger logr.Logger + subject observer.Subject[*config.NginxConfig] + store agentStore + logger logr.Logger - connLock sync.Mutex + updaters map[string]configUpdater + updatersLock sync.Mutex } // NewCommander returns a new instance of the Commander. -func NewCommander(logger logr.Logger, observedConfig observedNginxConfig) *Commander { +func NewCommander( + store agentStore, + subject observer.Subject[*config.NginxConfig], + logger logr.Logger, +) *Commander { return &Commander{ - logger: logger, - connections: make(map[string]*connection), - observedConfig: observedConfig, + logger: logger, + subject: subject, + updaters: make(map[string]configUpdater), + store: store, } } -// CommandChannel is a bidirectional streaming channel that is established by the agent and remains open for the -// agent's lifetime. -// -// On every invocation, the Commander will create a new connection with the UUID of the server, -// add the connection to the AgentManager, and invoke the connection's blocking Run method. -// If the UUID field is not present in the server's context metadata, no connection is created and an error is returned. -// Once the Run method returns, ths Commander will remove the connection from the AgentManager. -// This ensures that only active (connected) connections are tracked by the AgentManager. 
func (c *Commander) CommandChannel(server proto.Commander_CommandChannelServer) error { - c.logger.Info("Commander CommandChannel") - id, err := getUUIDFromContext(server.Context()) if err != nil { c.logger.Error(err, "cannot get the UUID of the agent") return err } - defer func() { - c.removeConnection(id) - }() + if err = c.startConfigUpdater(server, id); err != nil { + if isContextCanceled(err) { + return nil + } - idLogger := c.logger.WithValues("id", id) + c.logger.Error(err, "error starting config updater for agent", "agentID", id) + return err + } + + return nil +} - conn := newConnection( - id, - idLogger.WithName("connection"), - NewBidirectionalChannel(server, idLogger.WithName("channel")), - c.observedConfig, +func (c *Commander) startConfigUpdater(server proto.Commander_CommandChannelServer, agentID string) error { + info, ok := c.store.Get(agentID) + + if !ok { + var err error + info, err = c.waitForAgentConnect(server, agentID) + if err != nil { + return err + } + + c.store.Add(info) + } + + updater := config.NewUpdater( + server, + info, + c.subject, + c.logger.WithName("configUpdater").WithValues("agentID", agentID, "podName", info.PodName), ) - c.addConnection(conn) + c.addUpdater(agentID, updater) + + defer func() { + c.store.Delete(agentID) + c.deleteUpdater(agentID) + c.logger.Info("CommandChannel closed", "agentID", agentID) + }() + + c.logger.Info("CommandChannel established", "agentID", agentID) - return conn.run(server.Context()) + return updater.Start(server.Context()) } -// Download implements the Download method of the Commander gRPC service. An agent invokes this method to download the -// latest version of the NGINX configuration. +// Download implements the Download method of the Commander gRPC service. +// An agent uses this method to download the NGINX configuration. 
func (c *Commander) Download(request *proto.DownloadRequest, server proto.Commander_DownloadServer) error { - c.logger.Info("Download requested", "message ID", request.GetMeta().GetMessageId()) - id, err := getUUIDFromContext(server.Context()) if err != nil { - c.logger.Error(err, "failed download") + c.logger.Error(err, "failed download; cannot get the UUID of the agent") return err } - conn := c.getConnection(id) - if conn == nil { - err := fmt.Errorf("connection with id: %s not found", id) - c.logger.Error(err, "failed download") + c.logger.Info("Download Request", "agentID", id) + + if err = c.download(request, server, id); err != nil { + if isContextCanceled(err) { + c.logger.Info("Download not completed; context canceled", "agentID", id) + return nil + } + + c.logger.Error(err, "failed download", "agentID", id) return err } - // TODO: can there be a race condition here? - if conn.State() != StateRegistered { - err := fmt.Errorf("connection with id: %s is not registered", id) - c.logger.Error(err, "failed upload") + return nil +} + +func (c *Commander) download(request *proto.DownloadRequest, server proto.Commander_DownloadServer, id string) error { + updater := c.getUpdater(id) + + if updater == nil { + return fmt.Errorf("no config updater registered for agent with ID %q", id) + } + + msgID := getMessageIDFromMeta(request.GetMeta()) + req := config.NewRequest(msgID) + ctx := server.Context() + + select { + case <-ctx.Done(): + return ctx.Err() + case updater.Requests() <- req: + c.logger.Info("Sent config update request", "req ID", msgID) + } + + cfg, err := req.WaitForReply(ctx) + if err != nil { return err } - return conn.sendConfig(request, server) + return sendConfigToDownloadServer(cfg, server, msgID) } // Upload implements the Upload method of the Commander gRPC service. 
@@ -112,50 +174,168 @@ func (c *Commander) Download(request *proto.DownloadRequest, server proto.Comman
 func (c *Commander) Upload(server proto.Commander_UploadServer) error {
 	c.logger.Info("Commander Upload requested")
 
-	id, err := getUUIDFromContext(server.Context())
+	for {
+		// Recv blocks until it receives a message into or the stream is
+		// done. It returns io.EOF when the client has performed a CloseSend. On
+		// any non-EOF error, the stream is aborted and the error contains the
+		// RPC status.
+		_, err := server.Recv()
+
+		if err != nil && !errors.Is(err, io.EOF) {
+			c.logger.Error(err, "upload receive error")
+			return err
+		}
+
+		if errors.Is(err, io.EOF) {
+			c.logger.Info("Upload completed")
+			return server.SendAndClose(&proto.UploadStatus{Status: proto.UploadStatus_OK})
+		}
+	}
+}
+
+func (c *Commander) waitForAgentConnect(
+	server proto.Commander_CommandChannelServer,
+	id string,
+) (agent.ConnectInfo, error) {
+	c.logger.Info("Waiting for agent to send connect request", "agentID", id)
+
+	cmd, err := c.waitForConnectRequestCmd(server)
 	if err != nil {
-		c.logger.Error(err, "failed upload; cannot get the UUID of the conn")
-		return err
+		return agent.ConnectInfo{}, fmt.Errorf("error waiting for connect request: %w", err)
 	}
 
-	conn := c.getConnection(id)
-	if conn == nil {
-		err := fmt.Errorf("connection with id: %s not found", id)
-		c.logger.Error(err, "failed upload")
-		return err
+	connectInfo := agent.NewConnectInfo(id, cmd.GetAgentConnectRequest())
+
+	code := proto.AgentConnectStatus_CONNECT_OK
+	msg := connectedMsg
+
+	validateErr := connectInfo.Validate()
+	if validateErr != nil {
+		code = proto.AgentConnectStatus_CONNECT_REJECTED_OTHER
+		msg = validateErr.Error()
 	}
 
-	// TODO: can there be a race condition here?
- if conn.State() != StateRegistered { - err := fmt.Errorf("connection with id: %s is not registered", id) - c.logger.Error(err, "failed upload") - return err + err = c.sendConnectResponse(server, id, connectResponse(getMessageIDFromMeta(cmd.GetMeta()), code, msg)) + + return connectInfo, errors.Join(validateErr, err) +} + +func (c *Commander) sendConnectResponse( + server proto.Commander_CommandChannelServer, + id string, + cmd *proto.Command, +) error { + ctx := server.Context() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + // We don't return an error here, because it isn't necessary for the agent to receive this response. + // It's just for debugging purposes. + if err := server.Send(cmd); err != nil { + c.logger.Error(err, "failed to send connect response", "agentID", id) + } } - return conn.receiveFromUploadServer(server) + return nil } -func (c *Commander) removeConnection(id string) { - c.connLock.Lock() - defer c.connLock.Unlock() +func (c *Commander) waitForConnectRequestCmd(server proto.Commander_CommandChannelServer) (*proto.Command, error) { + ctx := server.Context() + + for { + cmd, err := server.Recv() + if err != nil { + return nil, err + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + if cmd != nil { + if req := cmd.GetAgentConnectRequest(); req != nil { + return cmd, nil + } + c.logger.Info("Ignoring command", "command data type", fmt.Sprintf("%T", cmd.Data)) + } else { + // The agent should never send us a nil command, but we catch this case out of an abundance of caution. + // We don't want to return an error in this case because that would break the CommandChannel + // connection with the agent. Instead, we log the abnormality and continue processing. 
+ c.logger.Error(errors.New("received nil command"), "expected non-nil command") + } + } + } +} - delete(c.connections, id) - c.logger.Info("removed connection", "id", id, "total connections", len(c.connections)) +func (c *Commander) deleteUpdater(agentID string) { + c.updatersLock.Lock() + defer c.updatersLock.Unlock() + + delete(c.updaters, agentID) } -func (c *Commander) addConnection(conn *connection) { - c.connLock.Lock() - defer c.connLock.Unlock() +func (c *Commander) addUpdater(agentID string, updater *config.Updater) { + c.updatersLock.Lock() + defer c.updatersLock.Unlock() - c.connections[conn.id] = conn - c.logger.Info("added connection", "id", conn.id, "total connections", len(c.connections)) + c.updaters[agentID] = updater } -func (c *Commander) getConnection(id string) *connection { - c.connLock.Lock() - defer c.connLock.Unlock() +func (c *Commander) getUpdater(id string) configUpdater { + c.updatersLock.Lock() + defer c.updatersLock.Unlock() + + return c.updaters[id] +} + +func sendConfigToDownloadServer(cfg *proto.NginxConfig, server proto.Commander_DownloadServer, msgID string) error { + payload, err := json.Marshal(cfg) + if err != nil { + return err + } + + payloadChecksum := checksum.Checksum(payload) + chunks := checksum.Chunk(payload, 4*1024) + + meta := &proto.Metadata{ + Timestamp: types.TimestampNow(), + MessageId: msgID, + } + + err = server.Send(&proto.DataChunk{ + Chunk: &proto.DataChunk_Header{ + Header: &proto.ChunkedResourceHeader{ + Meta: meta, + Chunks: int32(len(chunks)), + Checksum: payloadChecksum, + ChunkSize: 4 * 1024, + }, + }, + }) - return c.connections[id] + if err != nil { + return err + } + + for id, chunk := range chunks { + err = server.Send(&proto.DataChunk{ + Chunk: &proto.DataChunk_Data{ + Data: &proto.ChunkedResourceChunk{ + ChunkId: int32(id), + Data: chunk, + Meta: meta, + }, + }, + }) + + if err != nil { + return err + } + } + + return nil } func getUUIDFromContext(ctx context.Context) (string, error) { @@ -175,3 
+355,38 @@ func getUUIDFromContext(ctx context.Context) (string, error) { return vals[0], nil } + +func connectResponse(msgID string, statusCode proto.AgentConnectStatus_StatusCode, msg string) *proto.Command { + return &proto.Command{ + Data: &proto.Command_AgentConnectResponse{ + AgentConnectResponse: &proto.AgentConnectResponse{ + Status: &proto.AgentConnectStatus{ + StatusCode: statusCode, + Message: msg, + }, + }, + }, + Meta: grpc.NewMessageMeta(msgID), + Type: proto.Command_NORMAL, + } +} + +func getMessageIDFromMeta(meta *proto.Metadata) string { + if meta != nil { + return meta.GetMessageId() + } + + return "" +} + +func isContextCanceled(err error) bool { + if errors.Is(err, context.Canceled) { + return true + } + + if st, ok := status.FromError(err); ok { + return st.Code() == codes.Canceled + } + + return false +} diff --git a/internal/grpc/commander/commander_suite_test.go b/internal/grpc/commander/commander_suite_test.go deleted file mode 100644 index eafb3d813d..0000000000 --- a/internal/grpc/commander/commander_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package commander_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestCommander(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Commander Suite") -} diff --git a/internal/grpc/commander/commanderfakes/fake_commander_command_channel_server.go b/internal/grpc/commander/commanderfakes/fake_commander_command_channel_server.go deleted file mode 100644 index e392b4ad00..0000000000 --- a/internal/grpc/commander/commanderfakes/fake_commander_command_channel_server.go +++ /dev/null @@ -1,583 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package commanderfakes - -import ( - "context" - "sync" - - "github.com/nginx/agent/sdk/v2/proto" - "google.golang.org/grpc/metadata" -) - -type FakeCommander_CommandChannelServer struct { - ContextStub func() context.Context - contextMutex sync.RWMutex - contextArgsForCall []struct { - } - contextReturns struct { - result1 context.Context - } - contextReturnsOnCall map[int]struct { - result1 context.Context - } - RecvStub func() (*proto.Command, error) - recvMutex sync.RWMutex - recvArgsForCall []struct { - } - recvReturns struct { - result1 *proto.Command - result2 error - } - recvReturnsOnCall map[int]struct { - result1 *proto.Command - result2 error - } - RecvMsgStub func(interface{}) error - recvMsgMutex sync.RWMutex - recvMsgArgsForCall []struct { - arg1 interface{} - } - recvMsgReturns struct { - result1 error - } - recvMsgReturnsOnCall map[int]struct { - result1 error - } - SendStub func(*proto.Command) error - sendMutex sync.RWMutex - sendArgsForCall []struct { - arg1 *proto.Command - } - sendReturns struct { - result1 error - } - sendReturnsOnCall map[int]struct { - result1 error - } - SendHeaderStub func(metadata.MD) error - sendHeaderMutex sync.RWMutex - sendHeaderArgsForCall []struct { - arg1 metadata.MD - } - sendHeaderReturns struct { - result1 error - } - sendHeaderReturnsOnCall map[int]struct { - result1 error - } - SendMsgStub func(interface{}) error - sendMsgMutex sync.RWMutex - sendMsgArgsForCall []struct { - arg1 interface{} - } - sendMsgReturns struct { - result1 error - } - sendMsgReturnsOnCall map[int]struct { - result1 error - } - SetHeaderStub func(metadata.MD) error - setHeaderMutex sync.RWMutex - setHeaderArgsForCall []struct { - arg1 metadata.MD - } - setHeaderReturns struct { - result1 error - } - setHeaderReturnsOnCall map[int]struct { - result1 error - } - SetTrailerStub func(metadata.MD) - setTrailerMutex sync.RWMutex - setTrailerArgsForCall []struct { - arg1 metadata.MD - } - invocations map[string][][]interface{} - 
invocationsMutex sync.RWMutex -} - -func (fake *FakeCommander_CommandChannelServer) Context() context.Context { - fake.contextMutex.Lock() - ret, specificReturn := fake.contextReturnsOnCall[len(fake.contextArgsForCall)] - fake.contextArgsForCall = append(fake.contextArgsForCall, struct { - }{}) - stub := fake.ContextStub - fakeReturns := fake.contextReturns - fake.recordInvocation("Context", []interface{}{}) - fake.contextMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) ContextCallCount() int { - fake.contextMutex.RLock() - defer fake.contextMutex.RUnlock() - return len(fake.contextArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) ContextCalls(stub func() context.Context) { - fake.contextMutex.Lock() - defer fake.contextMutex.Unlock() - fake.ContextStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) ContextReturns(result1 context.Context) { - fake.contextMutex.Lock() - defer fake.contextMutex.Unlock() - fake.ContextStub = nil - fake.contextReturns = struct { - result1 context.Context - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) ContextReturnsOnCall(i int, result1 context.Context) { - fake.contextMutex.Lock() - defer fake.contextMutex.Unlock() - fake.ContextStub = nil - if fake.contextReturnsOnCall == nil { - fake.contextReturnsOnCall = make(map[int]struct { - result1 context.Context - }) - } - fake.contextReturnsOnCall[i] = struct { - result1 context.Context - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) Recv() (*proto.Command, error) { - fake.recvMutex.Lock() - ret, specificReturn := fake.recvReturnsOnCall[len(fake.recvArgsForCall)] - fake.recvArgsForCall = append(fake.recvArgsForCall, struct { - }{}) - stub := fake.RecvStub - fakeReturns := fake.recvReturns - fake.recordInvocation("Recv", []interface{}{}) - fake.recvMutex.Unlock() - if stub != nil { - return 
stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeCommander_CommandChannelServer) RecvCallCount() int { - fake.recvMutex.RLock() - defer fake.recvMutex.RUnlock() - return len(fake.recvArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) RecvCalls(stub func() (*proto.Command, error)) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) RecvReturns(result1 *proto.Command, result2 error) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = nil - fake.recvReturns = struct { - result1 *proto.Command - result2 error - }{result1, result2} -} - -func (fake *FakeCommander_CommandChannelServer) RecvReturnsOnCall(i int, result1 *proto.Command, result2 error) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = nil - if fake.recvReturnsOnCall == nil { - fake.recvReturnsOnCall = make(map[int]struct { - result1 *proto.Command - result2 error - }) - } - fake.recvReturnsOnCall[i] = struct { - result1 *proto.Command - result2 error - }{result1, result2} -} - -func (fake *FakeCommander_CommandChannelServer) RecvMsg(arg1 interface{}) error { - fake.recvMsgMutex.Lock() - ret, specificReturn := fake.recvMsgReturnsOnCall[len(fake.recvMsgArgsForCall)] - fake.recvMsgArgsForCall = append(fake.recvMsgArgsForCall, struct { - arg1 interface{} - }{arg1}) - stub := fake.RecvMsgStub - fakeReturns := fake.recvMsgReturns - fake.recordInvocation("RecvMsg", []interface{}{arg1}) - fake.recvMsgMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) RecvMsgCallCount() int { - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - return len(fake.recvMsgArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) 
RecvMsgCalls(stub func(interface{}) error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) RecvMsgArgsForCall(i int) interface{} { - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - argsForCall := fake.recvMsgArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) RecvMsgReturns(result1 error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = nil - fake.recvMsgReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) RecvMsgReturnsOnCall(i int, result1 error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = nil - if fake.recvMsgReturnsOnCall == nil { - fake.recvMsgReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.recvMsgReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) Send(arg1 *proto.Command) error { - fake.sendMutex.Lock() - ret, specificReturn := fake.sendReturnsOnCall[len(fake.sendArgsForCall)] - fake.sendArgsForCall = append(fake.sendArgsForCall, struct { - arg1 *proto.Command - }{arg1}) - stub := fake.SendStub - fakeReturns := fake.sendReturns - fake.recordInvocation("Send", []interface{}{arg1}) - fake.sendMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) SendCallCount() int { - fake.sendMutex.RLock() - defer fake.sendMutex.RUnlock() - return len(fake.sendArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) SendCalls(stub func(*proto.Command) error) { - fake.sendMutex.Lock() - defer fake.sendMutex.Unlock() - fake.SendStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) SendArgsForCall(i int) *proto.Command { - fake.sendMutex.RLock() - defer 
fake.sendMutex.RUnlock() - argsForCall := fake.sendArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) SendReturns(result1 error) { - fake.sendMutex.Lock() - defer fake.sendMutex.Unlock() - fake.SendStub = nil - fake.sendReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SendReturnsOnCall(i int, result1 error) { - fake.sendMutex.Lock() - defer fake.sendMutex.Unlock() - fake.SendStub = nil - if fake.sendReturnsOnCall == nil { - fake.sendReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SendHeader(arg1 metadata.MD) error { - fake.sendHeaderMutex.Lock() - ret, specificReturn := fake.sendHeaderReturnsOnCall[len(fake.sendHeaderArgsForCall)] - fake.sendHeaderArgsForCall = append(fake.sendHeaderArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SendHeaderStub - fakeReturns := fake.sendHeaderReturns - fake.recordInvocation("SendHeader", []interface{}{arg1}) - fake.sendHeaderMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) SendHeaderCallCount() int { - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - return len(fake.sendHeaderArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) SendHeaderCalls(stub func(metadata.MD) error) { - fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) SendHeaderArgsForCall(i int) metadata.MD { - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - argsForCall := fake.sendHeaderArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) SendHeaderReturns(result1 error) { - 
fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = nil - fake.sendHeaderReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SendHeaderReturnsOnCall(i int, result1 error) { - fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = nil - if fake.sendHeaderReturnsOnCall == nil { - fake.sendHeaderReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendHeaderReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SendMsg(arg1 interface{}) error { - fake.sendMsgMutex.Lock() - ret, specificReturn := fake.sendMsgReturnsOnCall[len(fake.sendMsgArgsForCall)] - fake.sendMsgArgsForCall = append(fake.sendMsgArgsForCall, struct { - arg1 interface{} - }{arg1}) - stub := fake.SendMsgStub - fakeReturns := fake.sendMsgReturns - fake.recordInvocation("SendMsg", []interface{}{arg1}) - fake.sendMsgMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) SendMsgCallCount() int { - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - return len(fake.sendMsgArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) SendMsgCalls(stub func(interface{}) error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) SendMsgArgsForCall(i int) interface{} { - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - argsForCall := fake.sendMsgArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) SendMsgReturns(result1 error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = nil - fake.sendMsgReturns = struct { - result1 error - }{result1} -} - -func (fake 
*FakeCommander_CommandChannelServer) SendMsgReturnsOnCall(i int, result1 error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = nil - if fake.sendMsgReturnsOnCall == nil { - fake.sendMsgReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendMsgReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SetHeader(arg1 metadata.MD) error { - fake.setHeaderMutex.Lock() - ret, specificReturn := fake.setHeaderReturnsOnCall[len(fake.setHeaderArgsForCall)] - fake.setHeaderArgsForCall = append(fake.setHeaderArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SetHeaderStub - fakeReturns := fake.setHeaderReturns - fake.recordInvocation("SetHeader", []interface{}{arg1}) - fake.setHeaderMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_CommandChannelServer) SetHeaderCallCount() int { - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - return len(fake.setHeaderArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) SetHeaderCalls(stub func(metadata.MD) error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) SetHeaderArgsForCall(i int) metadata.MD { - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - argsForCall := fake.setHeaderArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) SetHeaderReturns(result1 error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = nil - fake.setHeaderReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SetHeaderReturnsOnCall(i int, result1 error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = nil 
- if fake.setHeaderReturnsOnCall == nil { - fake.setHeaderReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.setHeaderReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_CommandChannelServer) SetTrailer(arg1 metadata.MD) { - fake.setTrailerMutex.Lock() - fake.setTrailerArgsForCall = append(fake.setTrailerArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SetTrailerStub - fake.recordInvocation("SetTrailer", []interface{}{arg1}) - fake.setTrailerMutex.Unlock() - if stub != nil { - fake.SetTrailerStub(arg1) - } -} - -func (fake *FakeCommander_CommandChannelServer) SetTrailerCallCount() int { - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - return len(fake.setTrailerArgsForCall) -} - -func (fake *FakeCommander_CommandChannelServer) SetTrailerCalls(stub func(metadata.MD)) { - fake.setTrailerMutex.Lock() - defer fake.setTrailerMutex.Unlock() - fake.SetTrailerStub = stub -} - -func (fake *FakeCommander_CommandChannelServer) SetTrailerArgsForCall(i int) metadata.MD { - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - argsForCall := fake.setTrailerArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_CommandChannelServer) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.contextMutex.RLock() - defer fake.contextMutex.RUnlock() - fake.recvMutex.RLock() - defer fake.recvMutex.RUnlock() - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - fake.sendMutex.RLock() - defer fake.sendMutex.RUnlock() - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - 
copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeCommander_CommandChannelServer) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ proto.Commander_CommandChannelServer = new(FakeCommander_CommandChannelServer) diff --git a/internal/grpc/commander/commanderfakes/fake_commander_upload_server.go b/internal/grpc/commander/commanderfakes/fake_commander_upload_server.go deleted file mode 100644 index e5688f2718..0000000000 --- a/internal/grpc/commander/commanderfakes/fake_commander_upload_server.go +++ /dev/null @@ -1,583 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package commanderfakes - -import ( - "context" - "sync" - - "github.com/nginx/agent/sdk/v2/proto" - "google.golang.org/grpc/metadata" -) - -type FakeCommander_UploadServer struct { - ContextStub func() context.Context - contextMutex sync.RWMutex - contextArgsForCall []struct { - } - contextReturns struct { - result1 context.Context - } - contextReturnsOnCall map[int]struct { - result1 context.Context - } - RecvStub func() (*proto.DataChunk, error) - recvMutex sync.RWMutex - recvArgsForCall []struct { - } - recvReturns struct { - result1 *proto.DataChunk - result2 error - } - recvReturnsOnCall map[int]struct { - result1 *proto.DataChunk - result2 error - } - RecvMsgStub func(interface{}) error - recvMsgMutex sync.RWMutex - recvMsgArgsForCall []struct { - arg1 interface{} - } - recvMsgReturns struct { - result1 error - } - recvMsgReturnsOnCall map[int]struct { - result1 error - } - SendAndCloseStub func(*proto.UploadStatus) error - sendAndCloseMutex sync.RWMutex - sendAndCloseArgsForCall []struct { - arg1 *proto.UploadStatus - } - sendAndCloseReturns 
struct { - result1 error - } - sendAndCloseReturnsOnCall map[int]struct { - result1 error - } - SendHeaderStub func(metadata.MD) error - sendHeaderMutex sync.RWMutex - sendHeaderArgsForCall []struct { - arg1 metadata.MD - } - sendHeaderReturns struct { - result1 error - } - sendHeaderReturnsOnCall map[int]struct { - result1 error - } - SendMsgStub func(interface{}) error - sendMsgMutex sync.RWMutex - sendMsgArgsForCall []struct { - arg1 interface{} - } - sendMsgReturns struct { - result1 error - } - sendMsgReturnsOnCall map[int]struct { - result1 error - } - SetHeaderStub func(metadata.MD) error - setHeaderMutex sync.RWMutex - setHeaderArgsForCall []struct { - arg1 metadata.MD - } - setHeaderReturns struct { - result1 error - } - setHeaderReturnsOnCall map[int]struct { - result1 error - } - SetTrailerStub func(metadata.MD) - setTrailerMutex sync.RWMutex - setTrailerArgsForCall []struct { - arg1 metadata.MD - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeCommander_UploadServer) Context() context.Context { - fake.contextMutex.Lock() - ret, specificReturn := fake.contextReturnsOnCall[len(fake.contextArgsForCall)] - fake.contextArgsForCall = append(fake.contextArgsForCall, struct { - }{}) - stub := fake.ContextStub - fakeReturns := fake.contextReturns - fake.recordInvocation("Context", []interface{}{}) - fake.contextMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) ContextCallCount() int { - fake.contextMutex.RLock() - defer fake.contextMutex.RUnlock() - return len(fake.contextArgsForCall) -} - -func (fake *FakeCommander_UploadServer) ContextCalls(stub func() context.Context) { - fake.contextMutex.Lock() - defer fake.contextMutex.Unlock() - fake.ContextStub = stub -} - -func (fake *FakeCommander_UploadServer) ContextReturns(result1 context.Context) { - fake.contextMutex.Lock() - defer 
fake.contextMutex.Unlock() - fake.ContextStub = nil - fake.contextReturns = struct { - result1 context.Context - }{result1} -} - -func (fake *FakeCommander_UploadServer) ContextReturnsOnCall(i int, result1 context.Context) { - fake.contextMutex.Lock() - defer fake.contextMutex.Unlock() - fake.ContextStub = nil - if fake.contextReturnsOnCall == nil { - fake.contextReturnsOnCall = make(map[int]struct { - result1 context.Context - }) - } - fake.contextReturnsOnCall[i] = struct { - result1 context.Context - }{result1} -} - -func (fake *FakeCommander_UploadServer) Recv() (*proto.DataChunk, error) { - fake.recvMutex.Lock() - ret, specificReturn := fake.recvReturnsOnCall[len(fake.recvArgsForCall)] - fake.recvArgsForCall = append(fake.recvArgsForCall, struct { - }{}) - stub := fake.RecvStub - fakeReturns := fake.recvReturns - fake.recordInvocation("Recv", []interface{}{}) - fake.recvMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeCommander_UploadServer) RecvCallCount() int { - fake.recvMutex.RLock() - defer fake.recvMutex.RUnlock() - return len(fake.recvArgsForCall) -} - -func (fake *FakeCommander_UploadServer) RecvCalls(stub func() (*proto.DataChunk, error)) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = stub -} - -func (fake *FakeCommander_UploadServer) RecvReturns(result1 *proto.DataChunk, result2 error) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = nil - fake.recvReturns = struct { - result1 *proto.DataChunk - result2 error - }{result1, result2} -} - -func (fake *FakeCommander_UploadServer) RecvReturnsOnCall(i int, result1 *proto.DataChunk, result2 error) { - fake.recvMutex.Lock() - defer fake.recvMutex.Unlock() - fake.RecvStub = nil - if fake.recvReturnsOnCall == nil { - fake.recvReturnsOnCall = make(map[int]struct { - result1 *proto.DataChunk - result2 error - }) - } - 
fake.recvReturnsOnCall[i] = struct { - result1 *proto.DataChunk - result2 error - }{result1, result2} -} - -func (fake *FakeCommander_UploadServer) RecvMsg(arg1 interface{}) error { - fake.recvMsgMutex.Lock() - ret, specificReturn := fake.recvMsgReturnsOnCall[len(fake.recvMsgArgsForCall)] - fake.recvMsgArgsForCall = append(fake.recvMsgArgsForCall, struct { - arg1 interface{} - }{arg1}) - stub := fake.RecvMsgStub - fakeReturns := fake.recvMsgReturns - fake.recordInvocation("RecvMsg", []interface{}{arg1}) - fake.recvMsgMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) RecvMsgCallCount() int { - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - return len(fake.recvMsgArgsForCall) -} - -func (fake *FakeCommander_UploadServer) RecvMsgCalls(stub func(interface{}) error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = stub -} - -func (fake *FakeCommander_UploadServer) RecvMsgArgsForCall(i int) interface{} { - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - argsForCall := fake.recvMsgArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) RecvMsgReturns(result1 error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = nil - fake.recvMsgReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) RecvMsgReturnsOnCall(i int, result1 error) { - fake.recvMsgMutex.Lock() - defer fake.recvMsgMutex.Unlock() - fake.RecvMsgStub = nil - if fake.recvMsgReturnsOnCall == nil { - fake.recvMsgReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.recvMsgReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendAndClose(arg1 *proto.UploadStatus) error { - fake.sendAndCloseMutex.Lock() - ret, specificReturn := 
fake.sendAndCloseReturnsOnCall[len(fake.sendAndCloseArgsForCall)] - fake.sendAndCloseArgsForCall = append(fake.sendAndCloseArgsForCall, struct { - arg1 *proto.UploadStatus - }{arg1}) - stub := fake.SendAndCloseStub - fakeReturns := fake.sendAndCloseReturns - fake.recordInvocation("SendAndClose", []interface{}{arg1}) - fake.sendAndCloseMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) SendAndCloseCallCount() int { - fake.sendAndCloseMutex.RLock() - defer fake.sendAndCloseMutex.RUnlock() - return len(fake.sendAndCloseArgsForCall) -} - -func (fake *FakeCommander_UploadServer) SendAndCloseCalls(stub func(*proto.UploadStatus) error) { - fake.sendAndCloseMutex.Lock() - defer fake.sendAndCloseMutex.Unlock() - fake.SendAndCloseStub = stub -} - -func (fake *FakeCommander_UploadServer) SendAndCloseArgsForCall(i int) *proto.UploadStatus { - fake.sendAndCloseMutex.RLock() - defer fake.sendAndCloseMutex.RUnlock() - argsForCall := fake.sendAndCloseArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) SendAndCloseReturns(result1 error) { - fake.sendAndCloseMutex.Lock() - defer fake.sendAndCloseMutex.Unlock() - fake.SendAndCloseStub = nil - fake.sendAndCloseReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendAndCloseReturnsOnCall(i int, result1 error) { - fake.sendAndCloseMutex.Lock() - defer fake.sendAndCloseMutex.Unlock() - fake.SendAndCloseStub = nil - if fake.sendAndCloseReturnsOnCall == nil { - fake.sendAndCloseReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendAndCloseReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendHeader(arg1 metadata.MD) error { - fake.sendHeaderMutex.Lock() - ret, specificReturn := fake.sendHeaderReturnsOnCall[len(fake.sendHeaderArgsForCall)] - 
fake.sendHeaderArgsForCall = append(fake.sendHeaderArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SendHeaderStub - fakeReturns := fake.sendHeaderReturns - fake.recordInvocation("SendHeader", []interface{}{arg1}) - fake.sendHeaderMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) SendHeaderCallCount() int { - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - return len(fake.sendHeaderArgsForCall) -} - -func (fake *FakeCommander_UploadServer) SendHeaderCalls(stub func(metadata.MD) error) { - fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = stub -} - -func (fake *FakeCommander_UploadServer) SendHeaderArgsForCall(i int) metadata.MD { - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - argsForCall := fake.sendHeaderArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) SendHeaderReturns(result1 error) { - fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = nil - fake.sendHeaderReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendHeaderReturnsOnCall(i int, result1 error) { - fake.sendHeaderMutex.Lock() - defer fake.sendHeaderMutex.Unlock() - fake.SendHeaderStub = nil - if fake.sendHeaderReturnsOnCall == nil { - fake.sendHeaderReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendHeaderReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendMsg(arg1 interface{}) error { - fake.sendMsgMutex.Lock() - ret, specificReturn := fake.sendMsgReturnsOnCall[len(fake.sendMsgArgsForCall)] - fake.sendMsgArgsForCall = append(fake.sendMsgArgsForCall, struct { - arg1 interface{} - }{arg1}) - stub := fake.SendMsgStub - fakeReturns := fake.sendMsgReturns - 
fake.recordInvocation("SendMsg", []interface{}{arg1}) - fake.sendMsgMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) SendMsgCallCount() int { - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - return len(fake.sendMsgArgsForCall) -} - -func (fake *FakeCommander_UploadServer) SendMsgCalls(stub func(interface{}) error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = stub -} - -func (fake *FakeCommander_UploadServer) SendMsgArgsForCall(i int) interface{} { - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - argsForCall := fake.sendMsgArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) SendMsgReturns(result1 error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = nil - fake.sendMsgReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SendMsgReturnsOnCall(i int, result1 error) { - fake.sendMsgMutex.Lock() - defer fake.sendMsgMutex.Unlock() - fake.SendMsgStub = nil - if fake.sendMsgReturnsOnCall == nil { - fake.sendMsgReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.sendMsgReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SetHeader(arg1 metadata.MD) error { - fake.setHeaderMutex.Lock() - ret, specificReturn := fake.setHeaderReturnsOnCall[len(fake.setHeaderArgsForCall)] - fake.setHeaderArgsForCall = append(fake.setHeaderArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SetHeaderStub - fakeReturns := fake.setHeaderReturns - fake.recordInvocation("SetHeader", []interface{}{arg1}) - fake.setHeaderMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommander_UploadServer) 
SetHeaderCallCount() int { - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - return len(fake.setHeaderArgsForCall) -} - -func (fake *FakeCommander_UploadServer) SetHeaderCalls(stub func(metadata.MD) error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = stub -} - -func (fake *FakeCommander_UploadServer) SetHeaderArgsForCall(i int) metadata.MD { - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - argsForCall := fake.setHeaderArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) SetHeaderReturns(result1 error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = nil - fake.setHeaderReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SetHeaderReturnsOnCall(i int, result1 error) { - fake.setHeaderMutex.Lock() - defer fake.setHeaderMutex.Unlock() - fake.SetHeaderStub = nil - if fake.setHeaderReturnsOnCall == nil { - fake.setHeaderReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.setHeaderReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommander_UploadServer) SetTrailer(arg1 metadata.MD) { - fake.setTrailerMutex.Lock() - fake.setTrailerArgsForCall = append(fake.setTrailerArgsForCall, struct { - arg1 metadata.MD - }{arg1}) - stub := fake.SetTrailerStub - fake.recordInvocation("SetTrailer", []interface{}{arg1}) - fake.setTrailerMutex.Unlock() - if stub != nil { - fake.SetTrailerStub(arg1) - } -} - -func (fake *FakeCommander_UploadServer) SetTrailerCallCount() int { - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - return len(fake.setTrailerArgsForCall) -} - -func (fake *FakeCommander_UploadServer) SetTrailerCalls(stub func(metadata.MD)) { - fake.setTrailerMutex.Lock() - defer fake.setTrailerMutex.Unlock() - fake.SetTrailerStub = stub -} - -func (fake *FakeCommander_UploadServer) SetTrailerArgsForCall(i int) 
metadata.MD { - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - argsForCall := fake.setTrailerArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommander_UploadServer) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.contextMutex.RLock() - defer fake.contextMutex.RUnlock() - fake.recvMutex.RLock() - defer fake.recvMutex.RUnlock() - fake.recvMsgMutex.RLock() - defer fake.recvMsgMutex.RUnlock() - fake.sendAndCloseMutex.RLock() - defer fake.sendAndCloseMutex.RUnlock() - fake.sendHeaderMutex.RLock() - defer fake.sendHeaderMutex.RUnlock() - fake.sendMsgMutex.RLock() - defer fake.sendMsgMutex.RUnlock() - fake.setHeaderMutex.RLock() - defer fake.setHeaderMutex.RUnlock() - fake.setTrailerMutex.RLock() - defer fake.setTrailerMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeCommander_UploadServer) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ proto.Commander_UploadServer = new(FakeCommander_UploadServer) diff --git a/internal/grpc/commander/connection.go b/internal/grpc/commander/connection.go deleted file mode 100644 index 47d2716ccf..0000000000 --- a/internal/grpc/commander/connection.go +++ /dev/null @@ -1,531 +0,0 @@ -package commander - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "time" - - "github.com/go-logr/logr" - "github.com/gogo/protobuf/types" - "github.com/nginx/agent/sdk/v2/checksum" - "github.com/nginx/agent/sdk/v2/grpc" - "github.com/nginx/agent/sdk/v2/proto" - 
"golang.org/x/sync/errgroup" - - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" -) - -// State is the state of the connection. -type State int - -const ( - // StateConnected means the connection is active (connected) but is not registered. - StateConnected State = iota - // StateRegistered means the connection is active and registered. - StateRegistered - // StateInvalid means the connection is active and attempted to register, but its registration info was invalid. - // We cannot push config to a connection in this state. - StateInvalid -) - -type configApplyStatus int - -const ( - configApplyStatusSuccess configApplyStatus = iota - configApplyStatusFailure -) - -func (s configApplyStatus) String() string { - switch s { - case configApplyStatusSuccess: - return "success" - case configApplyStatusFailure: - return "failure" - default: - return "unknown" - } -} - -type configApplyResponse struct { - correlationID string - message string - status configApplyStatus -} - -// configUpdatedChSize is the size of the channel that notifies the connection that the config has been updated. -// The length is 1 because we do not want to miss a notification while the connection is processing the last config. -const configUpdatedChSize = 1 - -// connection represents a connection to an agent. -type connection struct { - cmdExchanger exchanger.CommandExchanger - observedConfig observedNginxConfig - configUpdatedCh chan struct{} - configApplyResponseCh chan configApplyResponse - pendingConfig *agent.NginxConfig - logger logr.Logger - id string - nginxID string - systemID string - state State -} - -// newConnection creates a new instance of connection. -// -// id is the unique ID of the connection. -// cmdExchanger sends and receives commands to and from the CommandChannelServer. 
-// -// The creator of connection must call its run method in order for the connection send and receive commands. -func newConnection( - id string, - logger logr.Logger, - cmdExchanger exchanger.CommandExchanger, - configSubject observedNginxConfig, -) *connection { - return &connection{ - logger: logger, - cmdExchanger: cmdExchanger, - observedConfig: configSubject, - configUpdatedCh: make(chan struct{}, configUpdatedChSize), - configApplyResponseCh: make(chan configApplyResponse), - id: id, - } -} - -func (c *connection) ID() string { - return c.id -} - -func (c *connection) State() State { - return c.state -} - -func (c *connection) Update() { - select { - case c.configUpdatedCh <- struct{}{}: - c.logger.Info("Queued config update") - default: - } -} - -func createDownloadCommand(msgID, systemID, nginxID string) *proto.Command { - return &proto.Command{ - Meta: &proto.Metadata{ - MessageId: msgID, - }, - Type: proto.Command_DOWNLOAD, - Data: &proto.Command_NginxConfig{ - NginxConfig: &proto.NginxConfig{ - Action: proto.NginxConfigAction_APPLY, - ConfigData: &proto.ConfigDescriptor{ - SystemId: systemID, - NginxId: nginxID, - }, - }, - }, - } -} - -func (c *connection) sendConfig(request *proto.DownloadRequest, downloadServer proto.Commander_DownloadServer) error { - config := c.pendingConfig - - if config.ID != request.GetMeta().GetMessageId() { - err := fmt.Errorf( - "pending config ID %q does not match request %q", - config.ID, - request.GetMeta().GetMessageId(), - ) - c.logger.Error(err, "failed to send config") - return err - } - - cfg := &proto.NginxConfig{ - Action: proto.NginxConfigAction_APPLY, - ConfigData: &proto.ConfigDescriptor{ - SystemId: c.systemID, - NginxId: c.nginxID, - }, - Zconfig: config.Config, - Zaux: config.Aux, - DirectoryMap: &proto.DirectoryMap{ - Directories: config.Directories, - }, - } - - payload, err := json.Marshal(cfg) - if err != nil { - c.logger.Error(err, "failed to send config") - return err - } - - metadata := 
&proto.Metadata{ - Timestamp: types.TimestampNow(), - MessageId: request.GetMeta().GetMessageId(), - } - - payloadChecksum := checksum.Checksum(payload) - chunks := checksum.Chunk(payload, 4*1024) - - err = downloadServer.Send(&proto.DataChunk{ - Chunk: &proto.DataChunk_Header{ - Header: &proto.ChunkedResourceHeader{ - Meta: metadata, - Chunks: int32(len(chunks)), - Checksum: payloadChecksum, - ChunkSize: 4 * 1024, - }, - }, - }) - - if err != nil { - c.logger.Error(err, "failed to send config") - return err - } - - for id, chunk := range chunks { - c.logger.Info("Sending data chunk", "chunk ID", id) - err = downloadServer.Send(&proto.DataChunk{ - Chunk: &proto.DataChunk_Data{ - Data: &proto.ChunkedResourceChunk{ - ChunkId: int32(id), - Data: chunk, - Meta: metadata, - }, - }, - }) - - if err != nil { - c.logger.Error(err, "failed to send chunk") - return err - } - } - - c.logger.Info("Download finished") - - return nil -} - -// run is a blocking method that kicks off the connection's receive loop and the CommandExchanger's Run loop. -// run will return when the context is canceled or if either loop returns an error. 
-func (c *connection) run(parent context.Context) error { - defer func() { - c.observedConfig.Remove(c) - }() - - eg, ctx := errgroup.WithContext(parent) - - eg.Go(func() error { - return c.receive(ctx) - }) - - eg.Go(func() error { - return c.cmdExchanger.Run(ctx) - }) - - eg.Go(func() error { - return c.updateConfigLoop(ctx) - }) - - return eg.Wait() -} - -func (c *connection) receive(ctx context.Context) error { - defer func() { - c.logger.Info("Stopping receive loop") - }() - c.logger.Info("Starting receive loop") - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case cmd := <-c.cmdExchanger.In(): - c.handleCommand(ctx, cmd) - } - } -} - -func (c *connection) updateConfigLoop(ctx context.Context) error { - defer func() { - c.logger.Info("Stopping update config loop") - }() - c.logger.Info("Starting update config loop") - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-c.configUpdatedCh: - c.waitForConfigApply(ctx) - } - } -} - -func (c *connection) waitForConfigApply(ctx context.Context) { - config := c.observedConfig.GetLatestConfig() - if config == nil { - c.logger.Info("Latest config is nil, skipping update") - return - } - - c.pendingConfig = config - - c.logger.Info("Updating to latest config", "config generation", config.ID) - - status := c.statusForID(ctx, config.ID) - - select { - case <-ctx.Done(): - c.logger.Error(ctx.Err(), "failed to update config") - return - case c.cmdExchanger.Out() <- createDownloadCommand(config.ID, c.nginxID, c.systemID): - } - - now := time.Now() - c.logger.Info("Waiting for config status", "config generation", config.ID) - - select { - case <-ctx.Done(): - return - case s := <-status: - elapsedTime := time.Since(now) - c.logger.Info( - fmt.Sprintf("Config apply complete [%s]", s.status), - "message", - s.message, - "config generation", - config.ID, - "duration", - elapsedTime.String(), - ) - } -} - -// statusForID returns a channel that will receive a configApplyResponse when a final ( -// 
not pending) status is received for the given config ID. -func (c *connection) statusForID(ctx context.Context, id string) <-chan configApplyResponse { - statusForID := make(chan configApplyResponse) - - go func() { - defer close(statusForID) - - for { - select { - case <-ctx.Done(): - return - case status := <-c.configApplyResponseCh: - // Not every status contains a correlation ID, so we only need to check it if it's not empty. - // This is a workaround for some inconsistencies in the way the agent reports config apply statuses. - if status.correlationID != "" && status.correlationID != id { - c.logger.Info("Config status is for wrong generation", - "actual config generation", - status.correlationID, - "expected config generation", - id, - "status", - status.status, - "message", - status.message, - ) - continue - } - - select { - case <-ctx.Done(): - return - case statusForID <- status: - return - } - } - } - }() - - return statusForID -} - -func (c *connection) handleCommand(ctx context.Context, cmd *proto.Command) { - switch cmd.Data.(type) { - case *proto.Command_AgentConnectRequest: - c.handleAgentConnectRequestCmd(ctx, cmd) - case *proto.Command_DataplaneStatus: - c.handleDataplaneStatus(ctx, cmd.GetDataplaneStatus()) - case *proto.Command_NginxConfigResponse: - c.handleNginxConfigResponse(ctx, cmd.GetNginxConfigResponse()) - default: - c.logger.Info("Ignoring command", "data type", fmt.Sprintf("%T", cmd.Data)) - } -} - -func (c *connection) handleAgentConnectRequestCmd(ctx context.Context, cmd *proto.Command) { - req := cmd.GetAgentConnectRequest() - - c.logger.Info("Received agent connect request") - - c.logger = c.logger.WithValues("podName", req.GetMeta().DisplayName) - - requestStatusCode := proto.AgentConnectStatus_CONNECT_OK - msg := "Connected" - - if err := c.register(getFirstNginxID(req.GetDetails()), req.GetMeta().SystemUid); err != nil { - requestStatusCode = proto.AgentConnectStatus_CONNECT_REJECTED_OTHER - msg = err.Error() - - 
c.logger.Error(err, "failed to register agent") - } - - res := createAgentConnectResponseCmd(cmd.GetMeta().GetMessageId(), requestStatusCode, msg) - - select { - case <-ctx.Done(): - return - case c.cmdExchanger.Out() <- res: - } -} - -func (c *connection) register(nginxID, systemID string) error { - if nginxID == "" || systemID == "" { - c.state = StateInvalid - return fmt.Errorf("missing nginxID: '%s' and/or systemID: '%s'", nginxID, systemID) - } - - c.logger.Info("Registering agent", "nginxID", nginxID, "systemID", systemID) - - c.nginxID = nginxID - c.systemID = systemID - c.state = StateRegistered - - // trigger an update - c.Update() - // register for future config updates - c.observedConfig.Register(c) - - return nil -} - -// receiveFromUploadServer uploads data chunks from the UploadServer and logs them. -// FIXME(kate-osborn): NKG doesn't need this functionality and ideally we wouldn't have to implement and maintain this. -// Figure out how to remove this without causing errors in the agent. -func (c *connection) receiveFromUploadServer(server proto.Commander_UploadServer) error { - c.logger.Info("Upload request") - - for { - // Recv blocks until it receives a message into or the stream is - // done. It returns io.EOF when the client has performed a CloseSend. On - // any non-EOF error, the stream is aborted and the error contains the - // RPC status. - _, err := server.Recv() - - if err != nil && !errors.Is(err, io.EOF) { - c.logger.Error(err, "upload receive error") - return err - } - - c.logger.Info("Received chunk from upload channel") - - if errors.Is(err, io.EOF) { - c.logger.Info("Upload completed") - return server.SendAndClose(&proto.UploadStatus{Status: proto.UploadStatus_OK}) - } - } -} - -func (c *connection) handleDataplaneStatus(ctx context.Context, status *proto.DataplaneStatus) { - // Right now, we only care about AgentActivityStatuses that contain NginxConfigStatuses. 
- if status.GetAgentActivityStatus() != nil { - for _, activityStatus := range status.GetAgentActivityStatus() { - if cfgStatus := activityStatus.GetNginxConfigStatus(); cfgStatus != nil { - c.handleNginxConfigStatus(ctx, cfgStatus) - } - } - } -} - -func (c *connection) handleNginxConfigStatus(ctx context.Context, status *proto.NginxConfigStatus) { - c.logger.Info("Received nginx config status", "status", status.Status, "message", status.Message) - // If status is pending then we need to wait for the next status update - if status.Status == proto.NginxConfigStatus_PENDING { - return - } - - applyStatus := configApplyStatusSuccess - if status.Status == proto.NginxConfigStatus_ERROR { - applyStatus = configApplyStatusFailure - } - - res := configApplyResponse{ - correlationID: status.CorrelationId, - status: applyStatus, - message: status.Message, - } - - c.sendConfigApplyResponse(ctx, res) -} - -func (c *connection) handleNginxConfigResponse(ctx context.Context, res *proto.NginxConfigResponse) { - status := res.Status - - c.logger.Info("Received nginx config response", "status", status.Status, "message", status.Message) - - // We only care about ERROR status because it indicates that the config apply action is complete. - // An OK status can indicate that the config apply action is still in progress or that it is complete. However, - // the Agent will send a DataplaneStatus update on a successful config apply, so we don't need to handle it here. - // We handle the error case here, because in some cases, the Agent will not send a DataplaneStatus update on a - // failed config apply. 
- if status.Status != proto.CommandStatusResponse_CMD_ERROR { - return - } - - car := configApplyResponse{ - status: configApplyStatusFailure, - message: status.Error, - } - - c.sendConfigApplyResponse(ctx, car) -} - -func (c *connection) sendConfigApplyResponse(ctx context.Context, response configApplyResponse) { - select { - case <-ctx.Done(): - return - case c.configApplyResponseCh <- response: - default: - // If there's no listener on c.configApplyResponseCh, then there's no pending config apply - // and these status updates are extraneous. - c.logger.Info( - "Ignoring config apply response; no pending config apply", - "config generation", - response.correlationID, - ) - } -} - -func createAgentConnectResponseCmd( - msgID string, - statusCode proto.AgentConnectStatus_StatusCode, - statusMsg string, -) *proto.Command { - return &proto.Command{ - Data: &proto.Command_AgentConnectResponse{ - AgentConnectResponse: &proto.AgentConnectResponse{ - Status: &proto.AgentConnectStatus{ - StatusCode: statusCode, - Message: statusMsg, - }, - }, - }, - Meta: grpc.NewMessageMeta(msgID), - Type: proto.Command_NORMAL, - } -} - -func getFirstNginxID(details []*proto.NginxDetails) (id string) { - if len(details) > 0 { - id = details[0].GetNginxId() - } - - return -} diff --git a/internal/grpc/commander/connection_test.go b/internal/grpc/commander/connection_test.go deleted file mode 100644 index b81d153f19..0000000000 --- a/internal/grpc/commander/connection_test.go +++ /dev/null @@ -1,376 +0,0 @@ -package commander - -// -// import ( -// "context" -// "errors" -// "testing" -// -// "github.com/nginx/agent/sdk/v2/proto" -// . 
"github.com/onsi/gomega" -// "sigs.k8s.io/controller-runtime/pkg/log/zap" -// -// "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger/exchangerfakes" -// ) -// -// func TestConnection_Run_ExchangerErr(t *testing.T) { -// g := NewGomegaWithT(t) -// -// exchangerClose := make(chan struct{}) -// exchangerErr := errors.New("exchanger error") -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// RunStub: func(ctx context.Context) error { -// <-exchangerClose -// return errors.New("exchanger error") -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// errCh := make(chan error) -// go func() { -// errCh <- conn.run(context.Background()) -// }() -// -// close(exchangerClose) -// -// err := <-errCh -// g.Expect(err).Should(MatchError(exchangerErr)) -// } -// -// func TestConnection_Run_ConnectionError(t *testing.T) { -// g := NewGomegaWithT(t) -// ctx, cancel := context.WithCancel(context.Background()) -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// RunStub: func(ctx context.Context) error { -// <-ctx.Done() -// return nil -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// errCh := make(chan error) -// go func() { -// errCh <- conn.run(ctx) -// }() -// -// cancel() -// -// err := <-errCh -// g.Expect(err).Should(MatchError(context.Canceled)) -// } -// -// func TestConnection_Receive(t *testing.T) { -// g := NewGomegaWithT(t) -// -// out := make(chan *proto.Command) -// in := make(chan *proto.Command) -// -// ctx, cancel := context.WithCancel(context.Background()) -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// OutStub: func() chan<- *proto.Command { -// return out -// }, -// InStub: func() <-chan *proto.Command { -// return in -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// errCh := make(chan error) -// go func() { -// errCh <- conn.receive(ctx) -// }() -// -// sendCmdAndVerifyResponse := func(msgID 
string) { -// in <- CreateAgentConnectRequestCmd(msgID) -// -// res := <-out -// g.Expect(res).ToNot(BeNil()) -// meta := res.GetMeta() -// g.Expect(meta).ToNot(BeNil()) -// g.Expect(meta.MessageId).To(Equal(msgID)) -// } -// -// sendCmdAndVerifyResponse("msg-1") -// sendCmdAndVerifyResponse("msg-2") -// -// cancel() -// -// receiveErr := <-errCh -// g.Expect(receiveErr).Should(MatchError(context.Canceled)) -// } -// -// func TestConnection_State(t *testing.T) { -// g := NewGomegaWithT(t) -// -// conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) -// g.Expect(conn.State()).To(Equal(StateConnected)) -// -// // change state -// conn.state = StateRegistered -// g.Expect(conn.State()).To(Equal(StateRegistered)) -// } -// -// func TestConnection_ID(t *testing.T) { -// g := NewGomegaWithT(t) -// -// conn := newConnection("id", zap.New(), new(exchangerfakes.FakeCommandExchanger)) -// g.Expect(conn.ID()).To(Equal("id")) -// } -// -// func TestConnection_HandleCommand(t *testing.T) { -// tests := []struct { -// cmd *proto.Command -// expCmdType *proto.Command -// msg string -// expInboundCmd bool -// }{ -// { -// msg: "unsupported command", -// cmd: &proto.Command{Data: &proto.Command_EventReport{}}, -// expInboundCmd: false, -// }, -// { -// msg: "agent connect request command", -// cmd: CreateAgentConnectRequestCmd("msg-id"), -// expInboundCmd: true, -// expCmdType: &proto.Command{Data: &proto.Command_AgentConnectResponse{}}, -// }, -// } -// -// for _, test := range tests { -// t.Run(test.msg, func(t *testing.T) { -// g := NewGomegaWithT(t) -// -// out := make(chan *proto.Command, 1) -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// OutStub: func() chan<- *proto.Command { -// return out -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// conn.handleCommand(context.Background(), test.cmd) -// -// if test.expInboundCmd { -// cmd := <-out -// 
g.Expect(cmd.Data).To(BeAssignableToTypeOf(test.expCmdType.Data)) -// } else { -// g.Expect(out).To(BeEmpty()) -// } -// -// close(out) -// }) -// } -// } -// -// func TestConnection_HandleAgentConnectRequest(t *testing.T) { -// invalidConnectRequest := &proto.Command{ -// Meta: &proto.Metadata{ -// MessageId: "msg-id", -// }, -// Data: &proto.Command_AgentConnectRequest{ -// AgentConnectRequest: &proto.AgentConnectRequest{ -// Meta: &proto.AgentMeta{}, -// Details: []*proto.NginxDetails{}, -// }, -// }, -// } -// -// tests := []struct { -// request *proto.Command -// name string -// expStatusMsg string -// expStatusCode proto.AgentConnectStatus_StatusCode -// }{ -// { -// name: "normal", -// request: CreateAgentConnectRequestCmd("msg-id"), -// expStatusCode: proto.AgentConnectStatus_CONNECT_OK, -// expStatusMsg: "Connected", -// }, -// { -// name: "invalid", -// request: invalidConnectRequest, -// expStatusCode: proto.AgentConnectStatus_CONNECT_REJECTED_OTHER, -// expStatusMsg: "missing nginxID: '' and/or systemID: ''", -// }, -// } -// -// for _, test := range tests { -// t.Run(test.name, func(t *testing.T) { -// g := NewGomegaWithT(t) -// -// out := make(chan *proto.Command) -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// OutStub: func() chan<- *proto.Command { -// return out -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// go conn.handleAgentConnectRequestCmd(context.Background(), test.request) -// -// response := <-out -// -// meta := response.GetMeta() -// g.Expect(meta).ToNot(BeNil()) -// g.Expect(meta.MessageId).To(Equal("msg-id")) -// -// agentConnResponse := response.GetAgentConnectResponse() -// g.Expect(agentConnResponse).ToNot(BeNil()) -// g.Expect(agentConnResponse.Status.StatusCode).To(Equal(test.expStatusCode)) -// g.Expect(agentConnResponse.Status.Message).To(Equal(test.expStatusMsg)) -// -// if test.expStatusCode == proto.AgentConnectStatus_CONNECT_OK { -// 
g.Expect(conn.state).To(Equal(StateRegistered)) -// } else { -// g.Expect(conn.state).To(Equal(StateInvalid)) -// } -// }) -// } -// } -// -// func TestConnection_HandleAgentConnectRequest_CtxCanceled(t *testing.T) { -// g := NewGomegaWithT(t) -// -// out := make(chan *proto.Command) -// -// fakeExchanger := &exchangerfakes.FakeCommandExchanger{ -// OutStub: func() chan<- *proto.Command { -// return out -// }, -// } -// -// conn := newConnection("id", zap.New(), fakeExchanger) -// -// ctx, cancel := context.WithCancel(context.Background()) -// -// cmd := CreateAgentConnectRequestCmd("msg-id") -// -// done := make(chan struct{}) -// go func() { -// conn.handleAgentConnectRequestCmd(ctx, cmd) -// close(done) -// }() -// -// cancel() -// -// g.Eventually(done).Should(BeClosed()) -// } -// -// func TestConnection_Register(t *testing.T) { -// tests := []struct { -// msg string -// nginxID string -// systemID string -// expRegister bool -// }{ -// { -// msg: "valid nginxID and systemID", -// nginxID: "nginx", -// systemID: "system", -// expRegister: true, -// }, -// { -// msg: "invalid nginxID", -// nginxID: "", -// systemID: "system", -// expRegister: false, -// }, -// { -// msg: "invalid systemID", -// nginxID: "nginx", -// systemID: "", -// expRegister: false, -// }, -// { -// msg: "invalid nginxID and systemID", -// nginxID: "", -// systemID: "", -// expRegister: false, -// }, -// } -// -// for _, test := range tests { -// t.Run(test.msg, func(t *testing.T) { -// g := NewGomegaWithT(t) -// -// conn := newConnection( -// "conn-id", -// zap.New(), -// new(exchangerfakes.FakeCommandExchanger), -// ) -// -// g.Expect(conn.state).To(Equal(StateConnected)) -// g.Expect(conn.nginxID).To(BeEmpty()) -// g.Expect(conn.systemID).To(BeEmpty()) -// -// err := conn.register(test.nginxID, test.systemID) -// if test.expRegister { -// g.Expect(err).To(BeNil()) -// g.Expect(conn.state).To(Equal(StateRegistered)) -// g.Expect(conn.nginxID).To(Equal(test.nginxID)) -// 
g.Expect(conn.systemID).To(Equal(test.systemID)) -// } else { -// g.Expect(err).ToNot(BeNil()) -// g.Expect(conn.state).To(Equal(StateInvalid)) -// g.Expect(conn.nginxID).To(BeEmpty()) -// g.Expect(conn.systemID).To(BeEmpty()) -// } -// }) -// } -// } -// -// func TestGetFirstNginxID(t *testing.T) { -// tests := []struct { -// name string -// expID string -// details []*proto.NginxDetails -// }{ -// { -// name: "details with many nginxes", -// details: []*proto.NginxDetails{ -// { -// NginxId: "1", -// }, -// { -// NginxId: "2", -// }, -// { -// NginxId: "3", -// }, -// }, -// expID: "1", -// }, -// { -// name: "nil details", -// details: nil, -// expID: "", -// }, -// { -// name: "empty details", -// details: []*proto.NginxDetails{}, -// expID: "", -// }, -// } -// -// for _, test := range tests { -// t.Run(test.name, func(t *testing.T) { -// g := NewGomegaWithT(t) -// -// id := getFirstNginxID(test.details) -// g.Expect(id).To(Equal(test.expID)) -// }) -// } -// } diff --git a/internal/grpc/commander/doc.go b/internal/grpc/commander/doc.go deleted file mode 100644 index 88017abe63..0000000000 --- a/internal/grpc/commander/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package commander holds all the objects and methods for interacting with connections through the gRPC Commander Service. - -This package includes: -- Commander: object that implements the Commander interface. -- connection: object that encapsulates a connection to an agent. -- BidirectionalChannel: object that encapsulates the bidirectional streaming channel: CommandChannelServer. -*/package commander diff --git a/internal/grpc/commander/exchanger/doc.go b/internal/grpc/commander/exchanger/doc.go deleted file mode 100644 index 65055738fb..0000000000 --- a/internal/grpc/commander/exchanger/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package exchanger holds the CommandExchanger interface. - -FIXME(kate-osborn): this package only holds one interface that is only used by the commander package. 
-It should be defined client-side, but the counterfeiter mock generator prevents this because of a cyclical import -cycle. Figure out a way to move this to the commander package. -*/ -package exchanger diff --git a/internal/grpc/commander/exchanger/exchanger.go b/internal/grpc/commander/exchanger/exchanger.go deleted file mode 100644 index 5ff5c98887..0000000000 --- a/internal/grpc/commander/exchanger/exchanger.go +++ /dev/null @@ -1,24 +0,0 @@ -package exchanger - -import ( - "context" - - "github.com/nginx/agent/sdk/v2/proto" -) - -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . CommandExchanger - -// CommandExchanger exchanges commands between a client and server through the In() and Out() methods. -// Run runs the exchanger. -// To send a command to the client, place it on the Out() channel. -// To receive a command from the client, listen to the In() chanel. -type CommandExchanger interface { - // Run the CommandExchanger. - Run(ctx context.Context) error - // Out returns a write-only channel of commands. - // Commands placed on this channel are sent to the client. - Out() chan<- *proto.Command - // In returns a read-only channel of commands. - // Commands read from this channel are from the client. - In() <-chan *proto.Command -} diff --git a/internal/grpc/commander/exchanger/exchangerfakes/fake_command_exchanger.go b/internal/grpc/commander/exchanger/exchangerfakes/fake_command_exchanger.go deleted file mode 100644 index 7ea4872bf6..0000000000 --- a/internal/grpc/commander/exchanger/exchangerfakes/fake_command_exchanger.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package exchangerfakes - -import ( - "context" - "sync" - - "github.com/nginx/agent/sdk/v2/proto" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc/commander/exchanger" -) - -type FakeCommandExchanger struct { - InStub func() <-chan *proto.Command - inMutex sync.RWMutex - inArgsForCall []struct { - } - inReturns struct { - result1 <-chan *proto.Command - } - inReturnsOnCall map[int]struct { - result1 <-chan *proto.Command - } - OutStub func() chan<- *proto.Command - outMutex sync.RWMutex - outArgsForCall []struct { - } - outReturns struct { - result1 chan<- *proto.Command - } - outReturnsOnCall map[int]struct { - result1 chan<- *proto.Command - } - RunStub func(context.Context) error - runMutex sync.RWMutex - runArgsForCall []struct { - arg1 context.Context - } - runReturns struct { - result1 error - } - runReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeCommandExchanger) In() <-chan *proto.Command { - fake.inMutex.Lock() - ret, specificReturn := fake.inReturnsOnCall[len(fake.inArgsForCall)] - fake.inArgsForCall = append(fake.inArgsForCall, struct { - }{}) - stub := fake.InStub - fakeReturns := fake.inReturns - fake.recordInvocation("In", []interface{}{}) - fake.inMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommandExchanger) InCallCount() int { - fake.inMutex.RLock() - defer fake.inMutex.RUnlock() - return len(fake.inArgsForCall) -} - -func (fake *FakeCommandExchanger) InCalls(stub func() <-chan *proto.Command) { - fake.inMutex.Lock() - defer fake.inMutex.Unlock() - fake.InStub = stub -} - -func (fake *FakeCommandExchanger) InReturns(result1 <-chan *proto.Command) { - fake.inMutex.Lock() - defer fake.inMutex.Unlock() - fake.InStub = nil - fake.inReturns = struct { - result1 <-chan *proto.Command - }{result1} -} - -func (fake *FakeCommandExchanger) 
InReturnsOnCall(i int, result1 <-chan *proto.Command) { - fake.inMutex.Lock() - defer fake.inMutex.Unlock() - fake.InStub = nil - if fake.inReturnsOnCall == nil { - fake.inReturnsOnCall = make(map[int]struct { - result1 <-chan *proto.Command - }) - } - fake.inReturnsOnCall[i] = struct { - result1 <-chan *proto.Command - }{result1} -} - -func (fake *FakeCommandExchanger) Out() chan<- *proto.Command { - fake.outMutex.Lock() - ret, specificReturn := fake.outReturnsOnCall[len(fake.outArgsForCall)] - fake.outArgsForCall = append(fake.outArgsForCall, struct { - }{}) - stub := fake.OutStub - fakeReturns := fake.outReturns - fake.recordInvocation("Out", []interface{}{}) - fake.outMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommandExchanger) OutCallCount() int { - fake.outMutex.RLock() - defer fake.outMutex.RUnlock() - return len(fake.outArgsForCall) -} - -func (fake *FakeCommandExchanger) OutCalls(stub func() chan<- *proto.Command) { - fake.outMutex.Lock() - defer fake.outMutex.Unlock() - fake.OutStub = stub -} - -func (fake *FakeCommandExchanger) OutReturns(result1 chan<- *proto.Command) { - fake.outMutex.Lock() - defer fake.outMutex.Unlock() - fake.OutStub = nil - fake.outReturns = struct { - result1 chan<- *proto.Command - }{result1} -} - -func (fake *FakeCommandExchanger) OutReturnsOnCall(i int, result1 chan<- *proto.Command) { - fake.outMutex.Lock() - defer fake.outMutex.Unlock() - fake.OutStub = nil - if fake.outReturnsOnCall == nil { - fake.outReturnsOnCall = make(map[int]struct { - result1 chan<- *proto.Command - }) - } - fake.outReturnsOnCall[i] = struct { - result1 chan<- *proto.Command - }{result1} -} - -func (fake *FakeCommandExchanger) Run(arg1 context.Context) error { - fake.runMutex.Lock() - ret, specificReturn := fake.runReturnsOnCall[len(fake.runArgsForCall)] - fake.runArgsForCall = append(fake.runArgsForCall, struct { - arg1 context.Context - 
}{arg1}) - stub := fake.RunStub - fakeReturns := fake.runReturns - fake.recordInvocation("Run", []interface{}{arg1}) - fake.runMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeCommandExchanger) RunCallCount() int { - fake.runMutex.RLock() - defer fake.runMutex.RUnlock() - return len(fake.runArgsForCall) -} - -func (fake *FakeCommandExchanger) RunCalls(stub func(context.Context) error) { - fake.runMutex.Lock() - defer fake.runMutex.Unlock() - fake.RunStub = stub -} - -func (fake *FakeCommandExchanger) RunArgsForCall(i int) context.Context { - fake.runMutex.RLock() - defer fake.runMutex.RUnlock() - argsForCall := fake.runArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeCommandExchanger) RunReturns(result1 error) { - fake.runMutex.Lock() - defer fake.runMutex.Unlock() - fake.RunStub = nil - fake.runReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeCommandExchanger) RunReturnsOnCall(i int, result1 error) { - fake.runMutex.Lock() - defer fake.runMutex.Unlock() - fake.RunStub = nil - if fake.runReturnsOnCall == nil { - fake.runReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.runReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeCommandExchanger) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.inMutex.RLock() - defer fake.inMutex.RUnlock() - fake.outMutex.RLock() - defer fake.outMutex.RUnlock() - fake.runMutex.RLock() - defer fake.runMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeCommandExchanger) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - 
fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ exchanger.CommandExchanger = new(FakeCommandExchanger) diff --git a/internal/manager/manager.go b/internal/manager/manager.go index be9e4df510..c77b23706e 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -16,6 +16,8 @@ import ( gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" "sigs.k8s.io/gateway-api/apis/v1beta1/validation" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent" + agentConfig "github.com/nginxinc/nginx-kubernetes-gateway/internal/agent/config" "github.com/nginxinc/nginx-kubernetes-gateway/internal/config" "github.com/nginxinc/nginx-kubernetes-gateway/internal/events" "github.com/nginxinc/nginx-kubernetes-gateway/internal/grpc" @@ -23,8 +25,8 @@ import ( "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/filter" "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/index" "github.com/nginxinc/nginx-kubernetes-gateway/internal/manager/predicate" - "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/agent" ngxcfg "github.com/nginxinc/nginx-kubernetes-gateway/internal/nginx/config" + "github.com/nginxinc/nginx-kubernetes-gateway/internal/observer" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/relationship" "github.com/nginxinc/nginx-kubernetes-gateway/internal/state/resolver" @@ -40,6 +42,8 @@ const ( secretsFolder = "/etc/nginx/secrets" // grpcAddress is the address that the grpc server is listening on grpcAddress = ":54789" + // agentTTL is the TTL for the agent's connection info + agentTTL = 5 * time.Minute ) var scheme = runtime.NewScheme() @@ -145,14 +149,15 @@ func Start(cfg config.Config) error { Clock: status.NewRealClock(), }) - nginxAgentConfigBuilder := 
agent.NewNginxConfigBuilder(ngxcfg.NewGeneratorImpl(), secretRequestMgr) + agentNginxConfigAdapter := agentConfig.NewNginxConfigAdapter(ngxcfg.NewGeneratorImpl(), secretRequestMgr) - agentConfigStore := agent.NewConfigStore(nginxAgentConfigBuilder, cfg.Logger.WithName("agentConfigStore")) + agentConfigSubject := observer.NewConfigSubject[*agentConfig.NginxConfig](cfg.Logger.WithName("agentConfigSubject")) eventHandler := events.NewEventHandlerImpl(events.EventHandlerConfig{ Processor: processor, SecretStore: secretStore, - ConfigStorer: agentConfigStore, + ConfigAdapter: agentNginxConfigAdapter, + ConfigUpdater: agentConfigSubject, Logger: cfg.Logger.WithName("eventHandler"), StatusUpdater: statusUpdater, }) @@ -182,12 +187,22 @@ func Start(cfg config.Config) error { return fmt.Errorf("cannot register event loop with manager: %w", err) } + agentStore := agent.NewConnectInfoStore(cfg.Logger.WithName("agentConnectInfoStore"), agentTTL) + err = mgr.Add(agentStore) + if err != nil { + return fmt.Errorf("cannot register agent connect info store with manager: %w", err) + } + + cmdr := commander.NewCommander( + agentStore, + agentConfigSubject, + cfg.Logger.WithName("commanderService"), + ) + server, err := grpc.NewServer( cfg.Logger.WithName("grpcServer"), grpcAddress, - commander.NewCommander(cfg.Logger.WithName("commanderService"), - agentConfigStore, - ), + cmdr, ) if err != nil { return fmt.Errorf("cannot create gRPC server: %w", err) diff --git a/internal/nginx/agent/doc.go b/internal/nginx/agent/doc.go deleted file mode 100644 index 4f14317022..0000000000 --- a/internal/nginx/agent/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package agent contains objects and methods for configuring agents. - -The package includes: -- ConfigStore: a thread-safe store for latest agent nginx configuration. -- NginxConfigBuilder: builds agent nginx configuration from dataplane.Configuration. 
-- NginxConfig: an intermediate object that contains nginx configuration in a form that agent expects. -*/ -package agent diff --git a/internal/nginx/config/nginx_conf_template.go b/internal/nginx/config/nginx_conf_template.go index 61176d42ca..733ff1f780 100644 --- a/internal/nginx/config/nginx_conf_template.go +++ b/internal/nginx/config/nginx_conf_template.go @@ -1,6 +1,6 @@ package config -var nginxConfTemplateText = `# config generation: {{ . }} +var nginxConfTemplateText = `# config version: {{ . }} load_module /usr/lib/nginx/modules/ngx_http_js_module.so; events {} @@ -30,17 +30,17 @@ http { } } - server { - listen unix:/var/lib/nginx/nginx-502-server.sock; - access_log off; - - return 502; - } - - server { - listen unix:/var/lib/nginx/nginx-500-server.sock; - access_log off; - - return 500; - } + server { + listen unix:/var/lib/nginx/nginx-502-server.sock; + access_log off; + + return 502; + } + + server { + listen unix:/var/lib/nginx/nginx-500-server.sock; + access_log off; + + return 500; + } }` diff --git a/internal/observer/config_subject.go b/internal/observer/config_subject.go index 3616b1ae6f..8302644209 100644 --- a/internal/observer/config_subject.go +++ b/internal/observer/config_subject.go @@ -17,25 +17,29 @@ type VersionedConfig interface { // When a new VersionedConfig is stored, all registered Observers are notified. type ConfigSubject[T VersionedConfig] struct { latestConfig atomic.Value + observers map[string]Observer[T] logger logr.Logger - observers map[string]Observer observerLock sync.Mutex } // NewConfigSubject creates a new ConfigSubject. func NewConfigSubject[T VersionedConfig](logger logr.Logger) *ConfigSubject[T] { return &ConfigSubject[T]{ - observers: make(map[string]Observer), + observers: make(map[string]Observer[T]), logger: logger, } } // Register registers an observer. 
-func (a *ConfigSubject[T]) Register(observer Observer) { +func (a *ConfigSubject[T]) Register(observer Observer[T]) { a.observerLock.Lock() defer a.observerLock.Unlock() a.observers[observer.ID()] = observer + + config := a.latestConfig.Load().(VersionedConfig) + observer.Update(config) + a.logger.Info( fmt.Sprintf("Registering observer %s", observer.ID()), "number of registered observers", @@ -44,18 +48,29 @@ func (a *ConfigSubject[T]) Register(observer Observer) { } // Notify notifies all registered observers. -func (a *ConfigSubject[T]) notify() { +func (a *ConfigSubject[T]) notify(cfg VersionedConfig) { a.observerLock.Lock() defer a.observerLock.Unlock() a.logger.Info("Notifying observers", "number of registered observers", len(a.observers)) + + wg := &sync.WaitGroup{} + for _, o := range a.observers { - o.Update() + wg.Add(1) + + go func(observer Observer[T]) { + observer.Update(cfg) + wg.Done() + }(o) + } + + wg.Wait() } // Remove removes an observer. -func (a *ConfigSubject[T]) Remove(observer Observer) { +func (a *ConfigSubject[T]) Remove(observer Observer[T]) { a.observerLock.Lock() defer a.observerLock.Unlock() @@ -71,7 +86,7 @@ func (a *ConfigSubject[T]) Update(cfg VersionedConfig) { a.logger.Info("Storing configuration", "config version", cfg.GetVersion()) a.latestConfig.Store(cfg) - a.notify() + a.notify(cfg) } // GetLatestConfig returns the current stored config. diff --git a/internal/observer/observer.go b/internal/observer/observer.go index 42911354e2..effaf60e06 100644 --- a/internal/observer/observer.go +++ b/internal/observer/observer.go @@ -1,13 +1,13 @@ package observer // Subject is an interface for objects that can be observed. -type Subject interface { - Register(observer Observer) - Remove(observer Observer) +type Subject[T VersionedConfig] interface { + Register(observer Observer[T]) + Remove(observer Observer[T]) } // Observer is an interface for objects that can observe a Subject. 
-type Observer interface { +type Observer[T VersionedConfig] interface { ID() string - Update() + Update(VersionedConfig) } diff --git a/internal/state/dataplane/configuration.go b/internal/state/dataplane/configuration.go index 6cf922f030..6e1e3af0ac 100644 --- a/internal/state/dataplane/configuration.go +++ b/internal/state/dataplane/configuration.go @@ -15,11 +15,19 @@ const wildcardHostname = "~^" // Configuration is an intermediate representation of dataplane configuration. type Configuration struct { - HTTPServers []VirtualServer - SSLServers []VirtualServer - Upstreams []Upstream + // HTTPServers holds all HTTPServers. + // FIXME(pleshakov) We assume that all servers are HTTP and listen on port 8 + HTTPServers []VirtualServer + // SSLServers holds all SSLServers. + // FIXME(kate-osborn) We assume that all SSL servers listen on port 443. + SSLServers []VirtualServer + // Upstreams holds all unique Upstreams. + Upstreams []Upstream + // BackendGroups holds all unique BackendGroups. + // FIXME(pleshakov): Ensure Configuration doesn't include types from the graph package. BackendGroups []graph.BackendGroup - Generation int + // Version is the version of the configuration. + Version int } // VirtualServer is a virtual server. diff --git a/internal/state/secrets/secrets_test.go b/internal/state/secrets/secrets_test.go index 6283733857..18552ee97f 100644 --- a/internal/state/secrets/secrets_test.go +++ b/internal/state/secrets/secrets_test.go @@ -215,6 +215,7 @@ var _ = Describe("RequestManager", func() { }) }) }) + var _ = Describe("SecretStore", func() { var store secrets.SecretStore var invalidToValidSecret, validToInvalidSecret *apiv1.Secret